1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2016-2019 Intel Corporation
4 */
5
6 #include <linux/circ_buf.h>
7 #include <linux/ktime.h>
8 #include <linux/time64.h>
9 #include <linux/string_helpers.h>
10 #include <linux/timekeeping.h>
11
12 #include "i915_drv.h"
13 #include "intel_guc_ct.h"
14 #include "intel_guc_print.h"
15
16 static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
17 {
18 return container_of(ct, struct intel_guc, ct);
19 }
20
21 #define CT_ERROR(_ct, _fmt, ...) \
22 guc_err(ct_to_guc(_ct), "CT: " _fmt, ##__VA_ARGS__)
23 #ifdef CONFIG_DRM_I915_DEBUG_GUC
24 #define CT_DEBUG(_ct, _fmt, ...) \
25 guc_dbg(ct_to_guc(_ct), "CT: " _fmt, ##__VA_ARGS__)
26 #else
27 #define CT_DEBUG(...) do { } while (0)
28 #endif
29 #define CT_PROBE_ERROR(_ct, _fmt, ...) \
30 guc_probe_error(ct_to_guc(_ct), "CT: " _fmt, ##__VA_ARGS__)
31
32 /**
33 * DOC: CTB Blob
34 *
35 * We allocate a single blob to hold both CTB descriptors and buffers:
36 *
37 * +--------+-----------------------------------------------+------+
38 * | offset | contents | size |
39 * +========+===============================================+======+
40 * | 0x0000 | H2G `CTB Descriptor`_ (send) | |
41 * +--------+-----------------------------------------------+ 4K |
42 * | 0x0800 | G2H `CTB Descriptor`_ (recv) | |
43 * +--------+-----------------------------------------------+------+
44 * | 0x1000 | H2G `CT Buffer`_ (send) | n*4K |
45 * | | | |
46 * +--------+-----------------------------------------------+------+
47 * | 0x1000 | G2H `CT Buffer`_ (recv) | m*4K |
48 * | + n*4K | | |
49 * +--------+-----------------------------------------------+------+
50 *
51 * The size of each `CT Buffer`_ must be a multiple of 4K.
52 * We don't expect too many messages in flight at any time, unless we are
53 * using GuC submission. In that case each request requires a minimum of
54 * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
55 * is enough space to avoid backpressure on the driver. We increase the size
56 * of the receive buffer (relative to the send buffer) to ensure a G2H
57 * response CTB has a landing spot.
58 */
59 #define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
60 #define CTB_H2G_BUFFER_SIZE (SZ_4K)
61 #define CTB_G2H_BUFFER_SIZE (4 * CTB_H2G_BUFFER_SIZE)
62 #define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 4)
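
/*
 * Illustrative arithmetic only (not part of the driver): with the macros
 * above, and assuming sizeof(struct guc_ct_buffer_desc) fits within 2K,
 * the blob allocated by intel_guc_ct_init() works out to:
 *
 *	CTB_DESC_SIZE        = 2K   (descriptor rounded up to 2K)
 *	CTB_H2G_BUFFER_SIZE  = 4K   (1024 dwords of H2G commands)
 *	CTB_G2H_BUFFER_SIZE  = 16K  (4096 dwords of G2H commands)
 *	G2H_ROOM_BUFFER_SIZE = 4K   (kept back for unsolicited G2H)
 *
 *	blob_size = 2 * 2K + 4K + 16K = 24K
 *
 *	0x0000  send (H2G) descriptor
 *	0x0800  recv (G2H) descriptor
 *	0x1000  H2G command buffer (4K)
 *	0x2000  G2H command buffer (16K)
 */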
63
64 struct ct_request {
65 struct list_head link;
66 u32 fence;
67 u32 status;
68 u32 response_len;
69 u32 *response_buf;
70 };
71
72 struct ct_incoming_msg {
73 struct list_head link;
74 u32 size;
75 u32 msg[];
76 };
77
78 enum { CTB_SEND = 0, CTB_RECV = 1 };
79
80 enum { CTB_OWNER_HOST = 0 };
81
82 static void ct_receive_tasklet_func(struct tasklet_struct *t);
83 static void ct_incoming_request_worker_func(struct work_struct *w);
84
85 /**
86 * intel_guc_ct_init_early - Initialize CT state without requiring device access
87 * @ct: pointer to CT struct
88 */
89 void intel_guc_ct_init_early(struct intel_guc_ct *ct)
90 {
91 spin_lock_init(&ct->ctbs.send.lock);
92 spin_lock_init(&ct->ctbs.recv.lock);
93 spin_lock_init(&ct->requests.lock);
94 INIT_LIST_HEAD(&ct->requests.pending);
95 INIT_LIST_HEAD(&ct->requests.incoming);
96 INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
97 tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func);
98 init_waitqueue_head(&ct->wq);
99 }
100
101 static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc)
102 {
103 memset(desc, 0, sizeof(*desc));
104 }
105
106 static void guc_ct_buffer_reset(struct intel_guc_ct_buffer *ctb)
107 {
108 u32 space;
109
110 ctb->broken = false;
111 ctb->tail = 0;
112 ctb->head = 0;
113 space = CIRC_SPACE(ctb->tail, ctb->head, ctb->size) - ctb->resv_space;
114 atomic_set(&ctb->space, space);
115
116 guc_ct_buffer_desc_init(ctb->desc);
117 }
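
/*
 * A worked example of the space math above, for illustration only:
 * CIRC_SPACE() always keeps one slot unused so that a full buffer can be
 * told apart from an empty one, and the sizes here are powers of two (in
 * dwords), so after a reset
 *
 *	space = CIRC_SPACE(0, 0, size) - resv_space
 *	      = (size - 1) - resv_space
 *
 * e.g. for the recv CTB with the default macros:
 *
 *	size       = CTB_G2H_BUFFER_SIZE / 4  = 4096 dwords
 *	resv_space = G2H_ROOM_BUFFER_SIZE / 4 = 1024 dwords
 *	space      = 4095 - 1024              = 3071 dwords
 */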
118
119 static void guc_ct_buffer_init(struct intel_guc_ct_buffer *ctb,
120 struct guc_ct_buffer_desc *desc,
121 u32 *cmds, u32 size_in_bytes, u32 resv_space)
122 {
123 GEM_BUG_ON(size_in_bytes % 4);
124
125 ctb->desc = desc;
126 ctb->cmds = cmds;
127 ctb->size = size_in_bytes / 4;
128 ctb->resv_space = resv_space / 4;
129
130 guc_ct_buffer_reset(ctb);
131 }
132
133 static int guc_action_control_ctb(struct intel_guc *guc, u32 control)
134 {
135 u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
136 FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
137 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
138 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_CONTROL_CTB),
139 FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL, control),
140 };
141 int ret;
142
143 GEM_BUG_ON(control != GUC_CTB_CONTROL_DISABLE && control != GUC_CTB_CONTROL_ENABLE);
144
145 /* CT control must go over MMIO */
146 ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
147
148 return ret > 0 ? -EPROTO : ret;
149 }
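
/*
 * Illustrative sketch of how the request above is packed, assuming the
 * usual HXG request layout (the authoritative field definitions live in
 * the GuC ABI headers):
 *
 *	dw0: [31]    ORIGIN = GUC_HXG_ORIGIN_HOST
 *	     [30:28] TYPE   = GUC_HXG_TYPE_REQUEST
 *	     [27:16] DATA0  = 0
 *	     [15:0]  ACTION = GUC_ACTION_HOST2GUC_CONTROL_CTB
 *	dw1: CONTROL = GUC_CTB_CONTROL_ENABLE or GUC_CTB_CONTROL_DISABLE
 *
 * FIELD_PREP(mask, val) just shifts @val into the bit positions selected
 * by @mask, e.g. FIELD_PREP(GENMASK(30, 28), 2) == 2 << 28, so ORing the
 * prepared fields together builds a header dword without manual shifts.
 */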
150
151 static int ct_control_enable(struct intel_guc_ct *ct, bool enable)
152 {
153 int err;
154
155 err = guc_action_control_ctb(ct_to_guc(ct), enable ?
156 GUC_CTB_CONTROL_ENABLE : GUC_CTB_CONTROL_DISABLE);
157 if (unlikely(err))
158 CT_PROBE_ERROR(ct, "Failed to control/%s CTB (%pe)\n",
159 str_enable_disable(enable), ERR_PTR(err));
160
161 return err;
162 }
163
164 static int ct_register_buffer(struct intel_guc_ct *ct, bool send,
165 u32 desc_addr, u32 buff_addr, u32 size)
166 {
167 int err;
168
169 err = intel_guc_self_cfg64(ct_to_guc(ct), send ?
170 GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY :
171 GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
172 desc_addr);
173 if (unlikely(err))
174 goto failed;
175
176 err = intel_guc_self_cfg64(ct_to_guc(ct), send ?
177 GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY :
178 GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
179 buff_addr);
180 if (unlikely(err))
181 goto failed;
182
183 err = intel_guc_self_cfg32(ct_to_guc(ct), send ?
184 GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY :
185 GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
186 size);
187 if (unlikely(err))
188 failed:
189 CT_PROBE_ERROR(ct, "Failed to register %s buffer (%pe)\n",
190 send ? "SEND" : "RECV", ERR_PTR(err));
191
192 return err;
193 }
194
195 /**
196 * intel_guc_ct_init - Init buffer-based communication
197 * @ct: pointer to CT struct
198 *
199 * Allocate memory required for buffer-based communication.
200 *
201 * Return: 0 on success, a negative errno code on failure.
202 */
203 int intel_guc_ct_init(struct intel_guc_ct *ct)
204 {
205 struct intel_guc *guc = ct_to_guc(ct);
206 struct guc_ct_buffer_desc *desc;
207 u32 blob_size;
208 u32 cmds_size;
209 u32 resv_space;
210 void *blob;
211 u32 *cmds;
212 int err;
213
214 err = i915_inject_probe_error(guc_to_gt(guc)->i915, -ENXIO);
215 if (err)
216 return err;
217
218 GEM_BUG_ON(ct->vma);
219
220 blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE;
221 err = intel_guc_allocate_and_map_vma(guc, blob_size, &ct->vma, &blob);
222 if (unlikely(err)) {
223 CT_PROBE_ERROR(ct, "Failed to allocate %u for CTB data (%pe)\n",
224 blob_size, ERR_PTR(err));
225 return err;
226 }
227
228 CT_DEBUG(ct, "base=%#x size=%u\n", intel_guc_ggtt_offset(guc, ct->vma), blob_size);
229
230 /* store pointers to desc and cmds for send ctb */
231 desc = blob;
232 cmds = blob + 2 * CTB_DESC_SIZE;
233 cmds_size = CTB_H2G_BUFFER_SIZE;
234 resv_space = 0;
235 CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "send",
236 ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
237 resv_space);
238
239 guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size, resv_space);
240
241 /* store pointers to desc and cmds for recv ctb */
242 desc = blob + CTB_DESC_SIZE;
243 cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE;
244 cmds_size = CTB_G2H_BUFFER_SIZE;
245 resv_space = G2H_ROOM_BUFFER_SIZE;
246 CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "recv",
247 ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
248 resv_space);
249
250 guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size, resv_space);
251
252 return 0;
253 }
254
255 /**
256 * intel_guc_ct_fini - Fini buffer-based communication
257 * @ct: pointer to CT struct
258 *
259 * Deallocate memory required for buffer-based communication.
260 */
261 void intel_guc_ct_fini(struct intel_guc_ct *ct)
262 {
263 GEM_BUG_ON(ct->enabled);
264
265 tasklet_kill(&ct->receive_tasklet);
266 i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
267 memset(ct, 0, sizeof(*ct));
268 }
269
270 /**
271 * intel_guc_ct_enable - Enable buffer based command transport.
272 * @ct: pointer to CT struct
273 *
274 * Return: 0 on success, a negative errno code on failure.
275 */
276 int intel_guc_ct_enable(struct intel_guc_ct *ct)
277 {
278 struct intel_guc *guc = ct_to_guc(ct);
279 u32 base, desc, cmds, size;
280 void *blob;
281 int err;
282
283 GEM_BUG_ON(ct->enabled);
284
285 /* vma should be already allocated and mapped */
286 GEM_BUG_ON(!ct->vma);
287 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(ct->vma->obj));
288 base = intel_guc_ggtt_offset(guc, ct->vma);
289
290 /* blob should start with send descriptor */
291 blob = __px_vaddr(ct->vma->obj);
292 GEM_BUG_ON(blob != ct->ctbs.send.desc);
293
294 /* (re)initialize descriptors */
295 guc_ct_buffer_reset(&ct->ctbs.send);
296 guc_ct_buffer_reset(&ct->ctbs.recv);
297
298 /*
299 * Register both CT buffers starting with RECV buffer.
300 * Descriptors are in first half of the blob.
301 */
302 desc = base + ptrdiff(ct->ctbs.recv.desc, blob);
303 cmds = base + ptrdiff(ct->ctbs.recv.cmds, blob);
304 size = ct->ctbs.recv.size * 4;
305 err = ct_register_buffer(ct, false, desc, cmds, size);
306 if (unlikely(err))
307 goto err_out;
308
309 desc = base + ptrdiff(ct->ctbs.send.desc, blob);
310 cmds = base + ptrdiff(ct->ctbs.send.cmds, blob);
311 size = ct->ctbs.send.size * 4;
312 err = ct_register_buffer(ct, true, desc, cmds, size);
313 if (unlikely(err))
314 goto err_out;
315
316 err = ct_control_enable(ct, true);
317 if (unlikely(err))
318 goto err_out;
319
320 ct->enabled = true;
321 ct->stall_time = KTIME_MAX;
322
323 return 0;
324
325 err_out:
326 CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err));
327 return err;
328 }
329
330 /**
331 * intel_guc_ct_disable - Disable buffer based command transport.
332 * @ct: pointer to CT struct
333 */
334 void intel_guc_ct_disable(struct intel_guc_ct *ct)
335 {
336 struct intel_guc *guc = ct_to_guc(ct);
337
338 GEM_BUG_ON(!ct->enabled);
339
340 ct->enabled = false;
341
342 if (intel_guc_is_fw_running(guc)) {
343 ct_control_enable(ct, false);
344 }
345 }
346
347 static u32 ct_get_next_fence(struct intel_guc_ct *ct)
348 {
349 /* For now it's trivial */
350 return ++ct->requests.last_fence;
351 }
352
353 static int ct_write(struct intel_guc_ct *ct,
354 const u32 *action,
355 u32 len /* in dwords */,
356 u32 fence, u32 flags)
357 {
358 struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
359 struct guc_ct_buffer_desc *desc = ctb->desc;
360 u32 tail = ctb->tail;
361 u32 size = ctb->size;
362 u32 header;
363 u32 hxg;
364 u32 type;
365 u32 *cmds = ctb->cmds;
366 unsigned int i;
367
368 if (unlikely(desc->status))
369 goto corrupted;
370
371 GEM_BUG_ON(tail > size);
372
373 #ifdef CONFIG_DRM_I915_DEBUG_GUC
374 if (unlikely(tail != READ_ONCE(desc->tail))) {
375 CT_ERROR(ct, "Tail was modified %u != %u\n",
376 desc->tail, tail);
377 desc->status |= GUC_CTB_STATUS_MISMATCH;
378 goto corrupted;
379 }
380 if (unlikely(READ_ONCE(desc->head) >= size)) {
381 CT_ERROR(ct, "Invalid head offset %u >= %u)\n",
382 desc->head, size);
383 desc->status |= GUC_CTB_STATUS_OVERFLOW;
384 goto corrupted;
385 }
386 #endif
387
388 /*
389 * dw0: CT header (including fence)
390 * dw1: HXG header (including action code)
391 * dw2+: action data
392 */
393 header = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
394 FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
395 FIELD_PREP(GUC_CTB_MSG_0_FENCE, fence);
396
397 type = (flags & INTEL_GUC_CT_SEND_NB) ? GUC_HXG_TYPE_EVENT :
398 GUC_HXG_TYPE_REQUEST;
399 hxg = FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) |
400 FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
401 GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
402
403 CT_DEBUG(ct, "writing (tail %u) %*ph %*ph %*ph\n",
404 tail, 4, &header, 4, &hxg, 4 * (len - 1), &action[1]);
405
406 cmds[tail] = header;
407 tail = (tail + 1) % size;
408
409 cmds[tail] = hxg;
410 tail = (tail + 1) % size;
411
412 for (i = 1; i < len; i++) {
413 cmds[tail] = action[i];
414 tail = (tail + 1) % size;
415 }
416 GEM_BUG_ON(tail > size);
417
418 /*
419 * make sure H2G buffer update and LRC tail update (if this is triggering a
420 * submission) are visible before updating the descriptor tail
421 */
422 intel_guc_write_barrier(ct_to_guc(ct));
423
424 /* update local copies */
425 ctb->tail = tail;
426 GEM_BUG_ON(atomic_read(&ctb->space) < len + GUC_CTB_HDR_LEN);
427 atomic_sub(len + GUC_CTB_HDR_LEN, &ctb->space);
428
429 /* now update descriptor */
430 WRITE_ONCE(desc->tail, tail);
431
432 return 0;
433
434 corrupted:
435 CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
436 desc->head, desc->tail, desc->status);
437 ctb->broken = true;
438 return -EPIPE;
439 }
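
/*
 * Worked example of the wrap handling above, for illustration only: with a
 * 1024 dword send CTB, tail == 1022 and an action of len == 2 (the action
 * code in action[0] plus one parameter), the writes land as
 *
 *	cmds[1022] = header;       tail -> 1023
 *	cmds[1023] = hxg;          tail -> 0
 *	cmds[0]    = action[1];    tail -> 1
 *
 * i.e. len + GUC_CTB_HDR_LEN == 3 dwords are consumed, matching what is
 * subtracted from ctb->space, and only then is desc->tail published so the
 * GuC never sees a tail pointing at partially written data.
 */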
440
441 /**
442 * wait_for_ct_request_update - Wait for CT request state update.
443 * @ct: pointer to CT
444 * @req: pointer to pending request
445 * @status: placeholder for status
446 *
447 * For each sent request, the GuC shall send back a CT response message.
448 * Our message handler will update the status of the tracked request once
449 * a response message with the given fence is received. Wait here and
450 * check for a valid response status value.
451 *
452 * Return:
453 * * 0 response received (status is valid)
454 * * -ETIMEDOUT no response within hardcoded timeout
455 */
456 static int wait_for_ct_request_update(struct intel_guc_ct *ct, struct ct_request *req, u32 *status)
457 {
458 int err;
459 bool ct_enabled;
460
461 /*
462 * Fast commands should complete in less than 10us, so sample quickly
463 * up to that length of time, then switch to a slower sleep-wait loop.
464 * No GuC command should ever take longer than 10ms, but many GuC
465 * commands can be in flight at a time, so use a 1s timeout on the slower
466 * sleep-wait loop.
467 */
468 #define GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS 10
469 #define GUC_CTB_RESPONSE_TIMEOUT_LONG_MS 1000
470 #define done \
471 (!(ct_enabled = intel_guc_ct_enabled(ct)) || \
472 FIELD_GET(GUC_HXG_MSG_0_ORIGIN, READ_ONCE(req->status)) == \
473 GUC_HXG_ORIGIN_GUC)
474 err = wait_for_us(done, GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS);
475 if (err)
476 err = wait_for(done, GUC_CTB_RESPONSE_TIMEOUT_LONG_MS);
477 #undef done
478 if (!ct_enabled)
479 err = -ENODEV;
480
481 *status = req->status;
482 return err;
483 }
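
/*
 * The wait above is a two stage poll: a short busy-wait for the common
 * fast-completing commands, then a sleeping wait bounded by a long timeout.
 * A minimal standalone sketch of the same idea (using generic kernel time
 * helpers rather than the i915 wait_for*() macros) could look like:
 *
 *	deadline = ktime_add_us(ktime_get(), 10);        // ~10us busy poll
 *	while (!done() && ktime_before(ktime_get(), deadline))
 *		cpu_relax();
 *	deadline = ktime_add_ms(ktime_get(), 1000);      // then sleep up to 1s
 *	while (!done() && ktime_before(ktime_get(), deadline))
 *		usleep_range(100, 200);
 *	err = done() ? 0 : -ETIMEDOUT;
 *
 * where done() stands for the completion check defined above.
 */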
484
485 #define GUC_CTB_TIMEOUT_MS 1500
486 static inline bool ct_deadlocked(struct intel_guc_ct *ct)
487 {
488 long timeout = GUC_CTB_TIMEOUT_MS;
489 bool ret = ktime_ms_delta(ktime_get(), ct->stall_time) > timeout;
490
491 if (unlikely(ret)) {
492 struct guc_ct_buffer_desc *send = ct->ctbs.send.desc;
493 struct guc_ct_buffer_desc *recv = ct->ctbs.recv.desc;
494
495 CT_ERROR(ct, "Communication stalled for %lld ms, desc status=%#x,%#x\n",
496 ktime_ms_delta(ktime_get(), ct->stall_time),
497 send->status, recv->status);
498 CT_ERROR(ct, "H2G Space: %u (Bytes)\n",
499 atomic_read(&ct->ctbs.send.space) * 4);
500 CT_ERROR(ct, "Head: %u (Dwords)\n", ct->ctbs.send.desc->head);
501 CT_ERROR(ct, "Tail: %u (Dwords)\n", ct->ctbs.send.desc->tail);
502 CT_ERROR(ct, "G2H Space: %u (Bytes)\n",
503 atomic_read(&ct->ctbs.recv.space) * 4);
504 CT_ERROR(ct, "Head: %u\n (Dwords)", ct->ctbs.recv.desc->head);
505 CT_ERROR(ct, "Tail: %u\n (Dwords)", ct->ctbs.recv.desc->tail);
506
507 ct->ctbs.send.broken = true;
508 }
509
510 return ret;
511 }
512
513 static inline bool g2h_has_room(struct intel_guc_ct *ct, u32 g2h_len_dw)
514 {
515 struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
516
517 /*
518 * We leave a certain amount of space in the G2H CTB buffer for
519 * unexpected G2H CTBs (e.g. logging, engine hang, etc...)
520 */
521 return !g2h_len_dw || atomic_read(&ctb->space) >= g2h_len_dw;
522 }
523
524 static inline void g2h_reserve_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
525 {
526 lockdep_assert_held(&ct->ctbs.send.lock);
527
528 GEM_BUG_ON(!g2h_has_room(ct, g2h_len_dw));
529
530 if (g2h_len_dw)
531 atomic_sub(g2h_len_dw, &ct->ctbs.recv.space);
532 }
533
534 static inline void g2h_release_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
535 {
536 atomic_add(g2h_len_dw, &ct->ctbs.recv.space);
537 }
538
539 static inline bool h2g_has_room(struct intel_guc_ct *ct, u32 len_dw)
540 {
541 struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
542 struct guc_ct_buffer_desc *desc = ctb->desc;
543 u32 head;
544 u32 space;
545
546 if (atomic_read(&ctb->space) >= len_dw)
547 return true;
548
549 head = READ_ONCE(desc->head);
550 if (unlikely(head > ctb->size)) {
551 CT_ERROR(ct, "Invalid head offset %u >= %u)\n",
552 head, ctb->size);
553 desc->status |= GUC_CTB_STATUS_OVERFLOW;
554 ctb->broken = true;
555 return false;
556 }
557
558 space = CIRC_SPACE(ctb->tail, head, ctb->size);
559 atomic_set(&ctb->space, space);
560
561 return space >= len_dw;
562 }
563
564 static int has_room_nb(struct intel_guc_ct *ct, u32 h2g_dw, u32 g2h_dw)
565 {
566 bool h2g = h2g_has_room(ct, h2g_dw);
567 bool g2h = g2h_has_room(ct, g2h_dw);
568
569 lockdep_assert_held(&ct->ctbs.send.lock);
570
571 if (unlikely(!h2g || !g2h)) {
572 if (ct->stall_time == KTIME_MAX)
573 ct->stall_time = ktime_get();
574
575 /* Be paranoid and kick G2H tasklet to free credits */
576 if (!g2h)
577 tasklet_hi_schedule(&ct->receive_tasklet);
578
579 if (unlikely(ct_deadlocked(ct)))
580 return -EPIPE;
581 else
582 return -EBUSY;
583 }
584
585 ct->stall_time = KTIME_MAX;
586 return 0;
587 }
588
589 #define G2H_LEN_DW(f) ({ \
590 typeof(f) f_ = (f); \
591 FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) ? \
592 FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) + \
593 GUC_CTB_HXG_MSG_MIN_LEN : 0; \
594 })
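
/*
 * Illustration of the flag encoding above: non-blocking senders that expect
 * a later G2H completion stash the expected G2H dword length in the
 * INTEL_GUC_CT_SEND_G2H_DW_MASK field of @flags. If the field is non-zero,
 * G2H_LEN_DW() adds the CTB + HXG header overhead (GUC_CTB_HXG_MSG_MIN_LEN)
 * so the reserved credit covers the whole incoming message; a zero field
 * means no G2H is expected and nothing is reserved. E.g. a caller expecting
 * a 2 dword G2H payload gets G2H_LEN_DW(flags) == 2 + GUC_CTB_HXG_MSG_MIN_LEN.
 */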
595 static int ct_send_nb(struct intel_guc_ct *ct,
596 const u32 *action,
597 u32 len,
598 u32 flags)
599 {
600 struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
601 unsigned long spin_flags;
602 u32 g2h_len_dw = G2H_LEN_DW(flags);
603 u32 fence;
604 int ret;
605
606 spin_lock_irqsave(&ctb->lock, spin_flags);
607
608 ret = has_room_nb(ct, len + GUC_CTB_HDR_LEN, g2h_len_dw);
609 if (unlikely(ret))
610 goto out;
611
612 fence = ct_get_next_fence(ct);
613 ret = ct_write(ct, action, len, fence, flags);
614 if (unlikely(ret))
615 goto out;
616
617 g2h_reserve_space(ct, g2h_len_dw);
618 intel_guc_notify(ct_to_guc(ct));
619
620 out:
621 spin_unlock_irqrestore(&ctb->lock, spin_flags);
622
623 return ret;
624 }
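
/*
 * A hedged caller-side sketch, not actual submission code: a non-blocking
 * sender passes INTEL_GUC_CT_SEND_NB, encodes the expected G2H length into
 * the flags (g2h_flags below is a made-up name for that FIELD_PREP'd value)
 * and must be prepared to retry, since nothing sleeps on its behalf:
 *
 *	ret = intel_guc_ct_send(ct, action, len, NULL, 0,
 *				INTEL_GUC_CT_SEND_NB | g2h_flags);
 *	if (ret == -EBUSY)
 *		;	// back off and resubmit later, e.g. from a tasklet
 *	else if (ret == -EPIPE)
 *		;	// CTB is broken or deadlocked, escalate to a reset
 */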
625
626 static int ct_send(struct intel_guc_ct *ct,
627 const u32 *action,
628 u32 len,
629 u32 *response_buf,
630 u32 response_buf_size,
631 u32 *status)
632 {
633 struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
634 struct ct_request request;
635 unsigned long flags;
636 unsigned int sleep_period_ms = 1;
637 bool send_again;
638 u32 fence;
639 int err;
640
641 GEM_BUG_ON(!ct->enabled);
642 GEM_BUG_ON(!len);
643 GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
644 GEM_BUG_ON(!response_buf && response_buf_size);
645 might_sleep();
646
647 resend:
648 send_again = false;
649
650 /*
651 * We use a lazy spin wait loop here as we believe that if the CT
652 * buffers are sized correctly the flow control condition should be
653 * rare. We reserve the maximum size in G2H credits as we don't know
654 * how big the response is going to be.
655 */
656 retry:
657 spin_lock_irqsave(&ctb->lock, flags);
658 if (unlikely(!h2g_has_room(ct, len + GUC_CTB_HDR_LEN) ||
659 !g2h_has_room(ct, GUC_CTB_HXG_MSG_MAX_LEN))) {
660 if (ct->stall_time == KTIME_MAX)
661 ct->stall_time = ktime_get();
662 spin_unlock_irqrestore(&ctb->lock, flags);
663
664 if (unlikely(ct_deadlocked(ct)))
665 return -EPIPE;
666
667 if (msleep_interruptible(sleep_period_ms))
668 return -EINTR;
669 sleep_period_ms = sleep_period_ms << 1;
670
671 goto retry;
672 }
673
674 ct->stall_time = KTIME_MAX;
675
676 fence = ct_get_next_fence(ct);
677 request.fence = fence;
678 request.status = 0;
679 request.response_len = response_buf_size;
680 request.response_buf = response_buf;
681
682 spin_lock(&ct->requests.lock);
683 list_add_tail(&request.link, &ct->requests.pending);
684 spin_unlock(&ct->requests.lock);
685
686 err = ct_write(ct, action, len, fence, 0);
687 g2h_reserve_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
688
689 spin_unlock_irqrestore(&ctb->lock, flags);
690
691 if (unlikely(err))
692 goto unlink;
693
694 intel_guc_notify(ct_to_guc(ct));
695
696 err = wait_for_ct_request_update(ct, &request, status);
697 g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
698 if (unlikely(err)) {
699 if (err == -ENODEV)
700 /* wait_for_ct_request_update returns -ENODEV on reset/suspend in progress.
701 * In this case, output is debug rather than error info
702 */
703 CT_DEBUG(ct, "Request %#x (fence %u) cancelled as CTB is disabled\n",
704 action[0], request.fence);
705 else
706 CT_ERROR(ct, "No response for request %#x (fence %u)\n",
707 action[0], request.fence);
708 goto unlink;
709 }
710
711 if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
712 CT_DEBUG(ct, "retrying request %#x (%u)\n", *action,
713 FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, *status));
714 send_again = true;
715 goto unlink;
716 }
717
718 if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
719 err = -EIO;
720 goto unlink;
721 }
722
723 if (response_buf) {
724 /* There shall be no data in the status */
725 WARN_ON(FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, request.status));
726 /* Return actual response len */
727 err = request.response_len;
728 } else {
729 /* There shall be no response payload */
730 WARN_ON(request.response_len);
731 /* Return data decoded from the status dword */
732 err = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, *status);
733 }
734
735 unlink:
736 spin_lock_irqsave(&ct->requests.lock, flags);
737 list_del(&request.link);
738 spin_unlock_irqrestore(&ct->requests.lock, flags);
739
740 if (unlikely(send_again))
741 goto resend;
742
743 return err;
744 }
745
746 /*
747 * Command Transport (CT) buffer based GuC send function.
748 */
749 int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
750 u32 *response_buf, u32 response_buf_size, u32 flags)
751 {
752 u32 status = ~0; /* undefined */
753 int ret;
754
755 if (unlikely(!ct->enabled)) {
756 struct intel_guc *guc = ct_to_guc(ct);
757 struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
758
759 WARN(!uc->reset_in_progress, "Unexpected send: action=%#x\n", *action);
760 return -ENODEV;
761 }
762
763 if (unlikely(ct->ctbs.send.broken))
764 return -EPIPE;
765
766 if (flags & INTEL_GUC_CT_SEND_NB)
767 return ct_send_nb(ct, action, len, flags);
768
769 ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
770 if (unlikely(ret < 0)) {
771 if (ret != -ENODEV)
772 CT_ERROR(ct, "Sending action %#x failed (%pe) status=%#X\n",
773 action[0], ERR_PTR(ret), status);
774 } else if (unlikely(ret)) {
775 CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
776 action[0], ret, ret);
777 }
778
779 return ret;
780 }
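
/*
 * A hedged usage sketch for the blocking path (the action code and buffer
 * size below are made up for illustration): without INTEL_GUC_CT_SEND_NB
 * the call sleeps until the response with the matching fence arrives, and
 * the return value is a negative errno, the number of response payload
 * dwords copied into the response buffer, or data decoded from the status
 * dword when no buffer was supplied:
 *
 *	u32 action[] = { SOME_GUC_ACTION, param0, param1 };
 *	u32 response[8];
 *	int ret;
 *
 *	ret = intel_guc_ct_send(ct, action, ARRAY_SIZE(action),
 *				response, ARRAY_SIZE(response), 0);
 *	if (ret < 0)
 *		return ret;	// send failed or GuC reported an error
 *	// ret == number of response payload dwords written to response[]
 */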
781
782 static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords)
783 {
784 struct ct_incoming_msg *msg;
785
786 msg = kmalloc(struct_size(msg, msg, num_dwords), GFP_ATOMIC);
787 if (msg)
788 msg->size = num_dwords;
789 return msg;
790 }
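
/*
 * Note on the allocation above, for illustration: struct_size(msg, msg,
 * num_dwords) expands, with overflow checking, to
 *
 *	sizeof(struct ct_incoming_msg) + num_dwords * sizeof(u32)
 *
 * so e.g. a 4 dword G2H message needs sizeof(*msg) + 16 bytes, with the
 * message dwords stored in the trailing flexible array member msg->msg[].
 */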
791
792 static void ct_free_msg(struct ct_incoming_msg *msg)
793 {
794 kfree(msg);
795 }
796
797 /*
798 * Return: number of remaining dwords available to read (0 if empty)
799 * or a negative error code on failure
800 */
801 static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
802 {
803 struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
804 struct guc_ct_buffer_desc *desc = ctb->desc;
805 u32 head = ctb->head;
806 u32 tail = READ_ONCE(desc->tail);
807 u32 size = ctb->size;
808 u32 *cmds = ctb->cmds;
809 s32 available;
810 unsigned int len;
811 unsigned int i;
812 u32 header;
813
814 if (unlikely(ctb->broken))
815 return -EPIPE;
816
817 if (unlikely(desc->status)) {
818 u32 status = desc->status;
819
820 if (status & GUC_CTB_STATUS_UNUSED) {
821 /*
822 * Potentially valid if a CLIENT_RESET request resulted in
823 * contexts/engines being reset. But this should never happen, as
824 * no contexts should be active when CLIENT_RESET is sent.
825 */
826 CT_ERROR(ct, "Unexpected G2H after GuC has stopped!\n");
827 status &= ~GUC_CTB_STATUS_UNUSED;
828 }
829
830 if (status)
831 goto corrupted;
832 }
833
834 GEM_BUG_ON(head > size);
835
836 #ifdef CONFIG_DRM_I915_DEBUG_GUC
837 if (unlikely(head != READ_ONCE(desc->head))) {
838 CT_ERROR(ct, "Head was modified %u != %u\n",
839 desc->head, head);
840 desc->status |= GUC_CTB_STATUS_MISMATCH;
841 goto corrupted;
842 }
843 #endif
844 if (unlikely(tail >= size)) {
845 CT_ERROR(ct, "Invalid tail offset %u >= %u)\n",
846 tail, size);
847 desc->status |= GUC_CTB_STATUS_OVERFLOW;
848 goto corrupted;
849 }
850
851 /* tail == head condition indicates empty */
852 available = tail - head;
853 if (unlikely(available == 0)) {
854 *msg = NULL;
855 return 0;
856 }
857
858 /* beware of buffer wrap case */
859 if (unlikely(available < 0))
860 available += size;
861 CT_DEBUG(ct, "available %d (%u:%u:%u)\n", available, head, tail, size);
862 GEM_BUG_ON(available < 0);
863
864 header = cmds[head];
865 head = (head + 1) % size;
866
867 /* message len with header */
868 len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, header) + GUC_CTB_MSG_MIN_LEN;
869 if (unlikely(len > (u32)available)) {
870 CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
871 4, &header,
872 4 * (head + available - 1 > size ?
873 size - head : available - 1), &cmds[head],
874 4 * (head + available - 1 > size ?
875 available - 1 - size + head : 0), &cmds[0]);
876 desc->status |= GUC_CTB_STATUS_UNDERFLOW;
877 goto corrupted;
878 }
879
880 *msg = ct_alloc_msg(len);
881 if (!*msg) {
882 CT_ERROR(ct, "No memory for message %*ph %*ph %*ph\n",
883 4, &header,
884 4 * (head + available - 1 > size ?
885 size - head : available - 1), &cmds[head],
886 4 * (head + available - 1 > size ?
887 available - 1 - size + head : 0), &cmds[0]);
888 return available;
889 }
890
891 (*msg)->msg[0] = header;
892
893 for (i = 1; i < len; i++) {
894 (*msg)->msg[i] = cmds[head];
895 head = (head + 1) % size;
896 }
897 CT_DEBUG(ct, "received %*ph\n", 4 * len, (*msg)->msg);
898
899 /* update local copies */
900 ctb->head = head;
901
902 /* now update descriptor */
903 WRITE_ONCE(desc->head, head);
904
905 return available - len;
906
907 corrupted:
908 CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
909 desc->head, desc->tail, desc->status);
910 ctb->broken = true;
911 return -EPIPE;
912 }
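
/*
 * Worked example of the availability math above (illustration only): with
 * head == 4090 and tail == 6 in a 4096 dword recv CTB, tail - head is
 * negative, so available becomes -4084 + 4096 == 12 dwords. The message is
 * then copied out with the same "% size" wrap as the write side, and
 * desc->head is only advanced once the copy into the local allocation is
 * complete.
 */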
913
914 static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *response)
915 {
916 u32 len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, response->msg[0]);
917 u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, response->msg[0]);
918 const u32 *hxg = &response->msg[GUC_CTB_MSG_MIN_LEN];
919 const u32 *data = &hxg[GUC_HXG_MSG_MIN_LEN];
920 u32 datalen = len - GUC_HXG_MSG_MIN_LEN;
921 struct ct_request *req;
922 unsigned long flags;
923 bool found = false;
924 int err = 0;
925
926 GEM_BUG_ON(len < GUC_HXG_MSG_MIN_LEN);
927 GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]) != GUC_HXG_ORIGIN_GUC);
928 GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_SUCCESS &&
929 FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_NO_RESPONSE_RETRY &&
930 FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_FAILURE);
931
932 CT_DEBUG(ct, "response fence %u status %#x\n", fence, hxg[0]);
933
934 spin_lock_irqsave(&ct->requests.lock, flags);
935 list_for_each_entry(req, &ct->requests.pending, link) {
936 if (unlikely(fence != req->fence)) {
937 CT_DEBUG(ct, "request %u awaits response\n",
938 req->fence);
939 continue;
940 }
941 if (unlikely(datalen > req->response_len)) {
942 CT_ERROR(ct, "Response %u too long (datalen %u > %u)\n",
943 req->fence, datalen, req->response_len);
944 datalen = min(datalen, req->response_len);
945 err = -EMSGSIZE;
946 }
947 if (datalen)
948 memcpy(req->response_buf, data, 4 * datalen);
949 req->response_len = datalen;
950 WRITE_ONCE(req->status, hxg[0]);
951 found = true;
952 break;
953 }
954 if (!found) {
955 CT_ERROR(ct, "Unsolicited response (fence %u)\n", fence);
956 CT_ERROR(ct, "Could not find fence=%u, last_fence=%u\n", fence,
957 ct->requests.last_fence);
958 list_for_each_entry(req, &ct->requests.pending, link)
959 CT_ERROR(ct, "request %u awaits response\n",
960 req->fence);
961 err = -ENOKEY;
962 }
963 spin_unlock_irqrestore(&ct->requests.lock, flags);
964
965 if (unlikely(err))
966 return err;
967
968 ct_free_msg(response);
969 return 0;
970 }
971
972 static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
973 {
974 struct intel_guc *guc = ct_to_guc(ct);
975 const u32 *hxg;
976 const u32 *payload;
977 u32 hxg_len, action, len;
978 int ret;
979
980 hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
981 hxg_len = request->size - GUC_CTB_MSG_MIN_LEN;
982 payload = &hxg[GUC_HXG_MSG_MIN_LEN];
983 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
984 len = hxg_len - GUC_HXG_MSG_MIN_LEN;
985
986 CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);
987
988 switch (action) {
989 case INTEL_GUC_ACTION_DEFAULT:
990 ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
991 break;
992 case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
993 ret = intel_guc_deregister_done_process_msg(guc, payload,
994 len);
995 break;
996 case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
997 ret = intel_guc_sched_done_process_msg(guc, payload, len);
998 break;
999 case INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
1000 ret = intel_guc_context_reset_process_msg(guc, payload, len);
1001 break;
1002 case INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
1003 ret = intel_guc_error_capture_process_msg(guc, payload, len);
1004 if (unlikely(ret))
1005 CT_ERROR(ct, "error capture notification failed %x %*ph\n",
1006 action, 4 * len, payload);
1007 break;
1008 case INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
1009 ret = intel_guc_engine_failure_process_msg(guc, payload, len);
1010 break;
1011 case INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
1012 intel_guc_log_handle_flush_event(&guc->log);
1013 ret = 0;
1014 break;
1015 case INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
1016 CT_ERROR(ct, "Received GuC crash dump notification!\n");
1017 ret = 0;
1018 break;
1019 case INTEL_GUC_ACTION_NOTIFY_EXCEPTION:
1020 CT_ERROR(ct, "Received GuC exception notification!\n");
1021 ret = 0;
1022 break;
1023 default:
1024 ret = -EOPNOTSUPP;
1025 break;
1026 }
1027
1028 if (unlikely(ret)) {
1029 CT_ERROR(ct, "Failed to process request %04x (%pe)\n",
1030 action, ERR_PTR(ret));
1031 return ret;
1032 }
1033
1034 ct_free_msg(request);
1035 return 0;
1036 }
1037
1038 static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
1039 {
1040 unsigned long flags;
1041 struct ct_incoming_msg *request;
1042 bool done;
1043 int err;
1044
1045 spin_lock_irqsave(&ct->requests.lock, flags);
1046 request = list_first_entry_or_null(&ct->requests.incoming,
1047 struct ct_incoming_msg, link);
1048 if (request)
1049 list_del(&request->link);
1050 done = !!list_empty(&ct->requests.incoming);
1051 spin_unlock_irqrestore(&ct->requests.lock, flags);
1052
1053 if (!request)
1054 return true;
1055
1056 err = ct_process_request(ct, request);
1057 if (unlikely(err)) {
1058 CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
1059 ERR_PTR(err), 4 * request->size, request->msg);
1060 ct_free_msg(request);
1061 }
1062
1063 return done;
1064 }
1065
1066 static void ct_incoming_request_worker_func(struct work_struct *w)
1067 {
1068 struct intel_guc_ct *ct =
1069 container_of(w, struct intel_guc_ct, requests.worker);
1070 bool done;
1071
1072 do {
1073 done = ct_process_incoming_requests(ct);
1074 } while (!done);
1075 }
1076
1077 static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
1078 {
1079 const u32 *hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
1080 u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1081 unsigned long flags;
1082
1083 GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT);
1084
1085 /*
1086 * Adjusting the space must be done in IRQ context, or a deadlock can
1087 * occur, as the CTB processing in the workqueue below can itself send
1088 * CTBs, which creates a circular dependency if the space were returned there.
1089 */
1090 switch (action) {
1091 case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
1092 case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
1093 g2h_release_space(ct, request->size);
1094 }
1095
1096 spin_lock_irqsave(&ct->requests.lock, flags);
1097 list_add_tail(&request->link, &ct->requests.incoming);
1098 spin_unlock_irqrestore(&ct->requests.lock, flags);
1099
1100 queue_work(system_unbound_wq, &ct->requests.worker);
1101 return 0;
1102 }
1103
1104 static int ct_handle_hxg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
1105 {
1106 u32 origin, type;
1107 u32 *hxg;
1108 int err;
1109
1110 if (unlikely(msg->size < GUC_CTB_HXG_MSG_MIN_LEN))
1111 return -EBADMSG;
1112
1113 hxg = &msg->msg[GUC_CTB_MSG_MIN_LEN];
1114
1115 origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
1116 if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
1117 err = -EPROTO;
1118 goto failed;
1119 }
1120
1121 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
1122 switch (type) {
1123 case GUC_HXG_TYPE_EVENT:
1124 err = ct_handle_event(ct, msg);
1125 break;
1126 case GUC_HXG_TYPE_RESPONSE_SUCCESS:
1127 case GUC_HXG_TYPE_RESPONSE_FAILURE:
1128 case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
1129 err = ct_handle_response(ct, msg);
1130 break;
1131 default:
1132 err = -EOPNOTSUPP;
1133 }
1134
1135 if (unlikely(err)) {
1136 failed:
1137 CT_ERROR(ct, "Failed to handle HXG message (%pe) %*ph\n",
1138 ERR_PTR(err), 4 * GUC_HXG_MSG_MIN_LEN, hxg);
1139 }
1140 return err;
1141 }
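
/*
 * Decoding sketch, for illustration only: FIELD_GET() is the inverse of the
 * FIELD_PREP() used on the send side, pulling a field back out of a header
 * dword, e.g.
 *
 *	origin = FIELD_GET(GENMASK(31, 31), hxg[0]); // HOST or GUC
 *	type   = FIELD_GET(GENMASK(30, 28), hxg[0]); // EVENT, RESPONSE, ...
 *
 * The real masks come from the GuC ABI headers; the bit positions above
 * just mirror the layout sketched near guc_action_control_ctb().
 */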
1142
1143 static void ct_handle_msg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
1144 {
1145 u32 format = FIELD_GET(GUC_CTB_MSG_0_FORMAT, msg->msg[0]);
1146 int err;
1147
1148 if (format == GUC_CTB_FORMAT_HXG)
1149 err = ct_handle_hxg(ct, msg);
1150 else
1151 err = -EOPNOTSUPP;
1152
1153 if (unlikely(err)) {
1154 CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
1155 ERR_PTR(err), 4 * msg->size, msg->msg);
1156 ct_free_msg(msg);
1157 }
1158 }
1159
1160 /*
1161 * Return: number of remaining dwords available to read (0 if empty)
1162 * or a negative error code on failure
1163 */
1164 static int ct_receive(struct intel_guc_ct *ct)
1165 {
1166 struct ct_incoming_msg *msg = NULL;
1167 unsigned long flags;
1168 int ret;
1169
1170 spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
1171 ret = ct_read(ct, &msg);
1172 spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
1173 if (ret < 0)
1174 return ret;
1175
1176 if (msg)
1177 ct_handle_msg(ct, msg);
1178
1179 return ret;
1180 }
1181
1182 static void ct_try_receive_message(struct intel_guc_ct *ct)
1183 {
1184 int ret;
1185
1186 if (GEM_WARN_ON(!ct->enabled))
1187 return;
1188
1189 ret = ct_receive(ct);
1190 if (ret > 0)
1191 tasklet_hi_schedule(&ct->receive_tasklet);
1192 }
1193
1194 static void ct_receive_tasklet_func(struct tasklet_struct *t)
1195 {
1196 struct intel_guc_ct *ct = from_tasklet(ct, t, receive_tasklet);
1197
1198 ct_try_receive_message(ct);
1199 }
1200
1201 /*
1202 * When we're communicating with the GuC over CT, GuC uses events
1203 * to notify us about new messages being posted on the RECV buffer.
1204 */
1205 void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
1206 {
1207 if (unlikely(!ct->enabled)) {
1208 WARN(1, "Unexpected GuC event received while CT disabled!\n");
1209 return;
1210 }
1211
1212 ct_try_receive_message(ct);
1213 }
1214
1215 void intel_guc_ct_print_info(struct intel_guc_ct *ct,
1216 struct drm_printer *p)
1217 {
1218 drm_printf(p, "CT %s\n", str_enabled_disabled(ct->enabled));
1219
1220 if (!ct->enabled)
1221 return;
1222
1223 drm_printf(p, "H2G Space: %u\n",
1224 atomic_read(&ct->ctbs.send.space) * 4);
1225 drm_printf(p, "Head: %u\n",
1226 ct->ctbs.send.desc->head);
1227 drm_printf(p, "Tail: %u\n",
1228 ct->ctbs.send.desc->tail);
1229 drm_printf(p, "G2H Space: %u\n",
1230 atomic_read(&ct->ctbs.recv.space) * 4);
1231 drm_printf(p, "Head: %u\n",
1232 ct->ctbs.recv.desc->head);
1233 drm_printf(p, "Tail: %u\n",
1234 ct->ctbs.recv.desc->tail);
1235 }
1236