// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014-2018 Etnaviv Project
 */

#include <drm/drm_drv.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"

#include "common.xml.h"
#include "state.xml.h"
#include "state_blt.xml.h"
#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"

/*
 * Command Buffer helper:
 */

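/*
 * Emit one 32-bit word into the kernel ring buffer at the current write
 * position and advance the write pointer.
 */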
static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
	u32 *vaddr = (u32 *)buffer->vaddr;

	BUG_ON(buffer->user_size >= buffer->size);

	vaddr[buffer->user_size / 4] = data;
	buffer->user_size += 4;
}

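/*
 * Emit a two-word LOAD_STATE command that writes 'value' into the state
 * register at address 'reg'.
 */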
static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
	u32 reg, u32 value)
{
	u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;

	buffer->user_size = ALIGN(buffer->user_size, 8);

	/* write a register via cmd stream */
	OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
		    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
		    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
	OUT(buffer, value);
}

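/* Emit an END command, which stops the frontend (FE) once it is fetched. */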
static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_END_HEADER_OP_END);
}

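/* Emit a WAIT command: the FE idles for 200 clock cycles before refetching. */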
static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
}

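/*
 * Emit a LINK command, redirecting FE fetch to 'address' with a prefetch
 * window of 'prefetch' 64-bit words.
 */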
static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
	u16 prefetch, u32 address)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
		    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
	OUT(buffer, address);
}

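/* Emit a STALL command, blocking 'from' until 'to' signals its semaphore. */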
static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
	u32 from, u32 to)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
	OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}

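/*
 * Queue a semaphore token from 'from' to 'to'; paired with CMD_STALL this
 * synchronizes the FE with the other engine.
 */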
static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
{
	CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
		       VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
		       VIVS_GL_SEMAPHORE_TOKEN_TO(to));
}

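/*
 * Flush the caches of the pipe we are leaving, wait for the flush to
 * complete, then switch the FE over to the new pipe.
 */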
static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, u8 pipe)
{
	u32 flush = 0;

	lockdep_assert_held(&gpu->lock);

	/*
	 * This assumes that if we're switching to 2D, we're switching
	 * away from 3D, and vice versa.  Hence, if we're switching to
	 * the 2D core, we need to flush the 3D depth and color caches,
	 * otherwise we need to flush the 2D pixel engine cache.
	 */
	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;

	CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

	CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
		       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}

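/* Dump 'len' words of a command buffer, starting at 'off', to the kernel log. */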
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
	u32 size = buf->size;
	u32 *ptr = buf->vaddr + off;

	dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
			ptr, etnaviv_cmdbuf_get_va(buf,
			&gpu->mmu_context->cmdbuf_mapping) +
			off, size - len * 4 - off);

	print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			ptr, len * 4, 0);
}

/*
 * Safely replace the WAIT of a waitlink with a new command and argument.
 * The GPU may be executing this WAIT while we're modifying it, so we have
 * to write it in a specific order to avoid the GPU branching to somewhere
 * else.  'wl_offset' is the offset to the first byte of the WAIT command.
 */
static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
	unsigned int wl_offset, u32 cmd, u32 arg)
{
	u32 *lw = buffer->vaddr + wl_offset;

	lw[1] = arg;
	mb();
	lw[0] = cmd;
	mb();
}

/*
 * Ensure that there is space in the command buffer to contiguously write
 * 'cmd_dwords' 64-bit words into the buffer, wrapping around to the start
 * if necessary.  Returns the GPU virtual address of the write position.
 */
static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
{
	if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
		buffer->user_size = 0;

	return etnaviv_cmdbuf_get_va(buffer,
				     &gpu->mmu_context->cmdbuf_mapping) +
	       buffer->user_size;
}

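/*
 * Initialize the kernel ring buffer with a WAIT/LINK loop:
 *
 *   [ WAIT 200 ] <--+
 *   [ LINK      ] --+
 *
 * The FE spins on the WAIT until the WAIT is patched to branch elsewhere.
 * Returns the prefetch count, in 64-bit words, used to start the FE on
 * this buffer.
 */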
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;

	lockdep_assert_held(&gpu->lock);

	/* initialize buffer */
	buffer->user_size = 0;

	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
		 + buffer->user_size - 4);

	return buffer->user_size / 8;
}

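/*
 * Build a one-shot command sequence that programs the MMUv2 master TLB and
 * safe address on every available pipe.  Returns the prefetch count in
 * 64-bit words.
 */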
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;

	lockdep_assert_held(&gpu->lock);

	buffer->user_size = 0;

	if (gpu->identity.features & chipFeatures_PIPE_3D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
			       VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
			mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	}

	if (gpu->identity.features & chipFeatures_PIPE_2D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
			       VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
			mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	}

	CMD_END(buffer);

	buffer->user_size = ALIGN(buffer->user_size, 8);

	return buffer->user_size / 8;
}

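/*
 * Build a one-shot command sequence that selects entry 'id' of the page
 * table array (used with the kernel security mode).  Returns the prefetch
 * count in 64-bit words.
 */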
u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;

	lockdep_assert_held(&gpu->lock);

	buffer->user_size = 0;

	CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
		       VIVS_MMUv2_PTA_CONFIG_INDEX(id));

	CMD_END(buffer);

	buffer->user_size = ALIGN(buffer->user_size, 8);

	return buffer->user_size / 8;
}

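/*
 * Stop the FE once all queued work has completed: if the active pipe needs
 * a cache flush, append the flush sequence plus an END to the ring and
 * patch the active WAIT into a LINK to it; otherwise replace the WAIT with
 * an END directly.
 */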
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 link_target, flush = 0;
	bool has_blt = !!(gpu->identity.minor_features5 &
			  chipMinorFeatures5_BLT_ENGINE);

	lockdep_assert_held(&gpu->lock);

	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH |
			VIVS_GL_FLUSH_CACHE_COLOR |
			VIVS_GL_FLUSH_CACHE_TEXTURE |
			VIVS_GL_FLUSH_CACHE_TEXTUREVS |
			VIVS_GL_FLUSH_CACHE_SHADER_L2;

	if (flush) {
		unsigned int dwords = 7;

		if (has_blt)
			dwords += 10;

		link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);

		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		if (has_blt) {
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
			CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
			CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
		}
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
		if (gpu->exec_state == ETNA_PIPE_3D) {
			if (has_blt) {
				CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
				CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
				CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
			} else {
				CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
					       VIVS_TS_FLUSH_CACHE_FLUSH);
			}
		}
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		if (has_blt) {
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
			CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
			CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
		}
		CMD_END(buffer);

		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_LINK_HEADER_OP_LINK |
					    VIV_FE_LINK_HEADER_PREFETCH(dwords),
					    link_target);
	} else {
		/* Replace the last link-wait with an "END" command */
		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_END_HEADER_OP_END, 0);
	}
}

/* Append a 'sync point' to the ring buffer. */
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 dwords, target;

	lockdep_assert_held(&gpu->lock);

	/*
	 * We need at most 4 dwords in the return target:
	 * 1 event + 1 end + 1 wait + 1 link.
	 */
	dwords = 4;
	target = etnaviv_buffer_reserve(gpu, buffer, dwords);

	/* Signal sync point event */
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);

	/* Stop the FE to 'pause' the GPU */
	CMD_END(buffer);

	/* Append waitlink */
	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
		 + buffer->user_size - 4);

	/*
	 * Kick off the 'sync point' command by replacing the previous
	 * WAIT with a link to the address in the ring buffer.
	 */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
				    VIV_FE_LINK_HEADER_OP_LINK |
				    VIV_FE_LINK_HEADER_PREFETCH(dwords),
				    target);
}

/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
	struct etnaviv_iommu_context *mmu_context, unsigned int event,
	struct etnaviv_cmdbuf *cmdbuf)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 return_target, return_dwords;
	u32 link_target, link_dwords;
	bool switch_context = gpu->exec_state != exec_state;
	bool switch_mmu_context = gpu->mmu_context != mmu_context;
	unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
	bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
	bool has_blt = !!(gpu->identity.minor_features5 &
			  chipMinorFeatures5_BLT_ENGINE);

	lockdep_assert_held(&gpu->lock);

	if (drm_debug_enabled(DRM_UT_DRIVER))
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

	link_target = etnaviv_cmdbuf_get_va(cmdbuf,
					    &gpu->mmu_context->cmdbuf_mapping);
	link_dwords = cmdbuf->size / 8;

	/*
	 * If we need maintenance prior to submitting this buffer, we will
	 * need to append an MMU flush load state and possibly pipe switch
	 * and PTA load commands, followed by a new link to this buffer.
	 * The exact number of extra dwords is computed below.
	 */
	if (need_flush || switch_context) {
		u32 target, extra_dwords;

		/* link command */
		extra_dwords = 1;

		/* flush command */
		if (need_flush) {
			if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1)
				extra_dwords += 1;
			else
				extra_dwords += 3;
		}

		/* pipe switch commands */
		if (switch_context)
			extra_dwords += 4;

		/* PTA load command */
		if (switch_mmu_context && gpu->sec_mode == ETNA_SEC_KERNEL)
			extra_dwords += 1;

		target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
		/*
		 * Switch MMU context if necessary. Must be done after the
		 * link target has been calculated, as the jump forward in the
		 * kernel ring still uses the last active MMU context before
		 * the switch.
		 */
		if (switch_mmu_context) {
			struct etnaviv_iommu_context *old_context = gpu->mmu_context;

			gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
			etnaviv_iommu_context_put(old_context);
		}

		if (need_flush) {
			/* Add the MMU flush */
			if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1) {
				CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
					       VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
					       VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
			} else {
				u32 flush = VIVS_MMUv2_CONFIGURATION_MODE_MASK |
					    VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH;

				if (switch_mmu_context &&
				    gpu->sec_mode == ETNA_SEC_KERNEL) {
					unsigned short id =
						etnaviv_iommuv2_get_pta_id(gpu->mmu_context);
					CMD_LOAD_STATE(buffer,
						VIVS_MMUv2_PTA_CONFIG,
						VIVS_MMUv2_PTA_CONFIG_INDEX(id));
				}

				if (gpu->sec_mode == ETNA_SEC_NONE)
					flush |= etnaviv_iommuv2_get_mtlb_addr(gpu->mmu_context);

				CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
					       flush);
				CMD_SEM(buffer, SYNC_RECIPIENT_FE,
					SYNC_RECIPIENT_PE);
				CMD_STALL(buffer, SYNC_RECIPIENT_FE,
					SYNC_RECIPIENT_PE);
			}

			gpu->flush_seq = new_flush_seq;
		}

		if (switch_context) {
			etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
			gpu->exec_state = exec_state;
		}

		/* And the link to the submitted buffer */
		link_target = etnaviv_cmdbuf_get_va(cmdbuf,
					&gpu->mmu_context->cmdbuf_mapping);
		CMD_LINK(buffer, link_dwords, link_target);

		/* Update the link target to point to above instructions */
		link_target = target;
		link_dwords = extra_dwords;
	}

	/*
	 * Append a LINK to the submitted command buffer to return to
	 * the ring buffer.  return_target is the ring target address.
	 * We need at most 7 dwords in the return target: 2 cache flush +
	 * 2 semaphore stall + 1 event + 1 wait + 1 link.
	 */
	return_dwords = 7;

	/*
	 * When the BLT engine is present we need 6 more dwords in the return
	 * target: 3 enable/flush/disable + 4 enable/semaphore stall/disable,
	 * minus the normal TS flush state that is no longer needed.
	 */
	if (has_blt)
		return_dwords += 6;

	return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
	CMD_LINK(cmdbuf, return_dwords, return_target);

	/*
	 * Append a cache flush, stall, event, wait and link pointing back to
	 * the wait command to the ring buffer.
	 */
	if (gpu->exec_state == ETNA_PIPE_2D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
				       VIVS_GL_FLUSH_CACHE_PE2D);
	} else {
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
				       VIVS_GL_FLUSH_CACHE_DEPTH |
				       VIVS_GL_FLUSH_CACHE_COLOR);
		if (has_blt) {
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
			CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
		} else {
			CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
					       VIVS_TS_FLUSH_CACHE_FLUSH);
		}
	}
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

	if (has_blt) {
		CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
		CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
	}

	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);
	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
		 + buffer->user_size - 4);

	if (drm_debug_enabled(DRM_UT_DRIVER)) {
		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
			return_target,
			etnaviv_cmdbuf_get_va(cmdbuf, &gpu->mmu_context->cmdbuf_mapping),
			cmdbuf->vaddr);

		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			       cmdbuf->vaddr, cmdbuf->size, 0);

		pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
		pr_info("addr: 0x%08x\n", link_target);
		pr_info("back: 0x%08x\n", return_target);
		pr_info("event: %d\n", event);
	}

	/*
	 * Kick off the submitted command by replacing the previous
	 * WAIT with a link to the address in the ring buffer.
	 */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
				    VIV_FE_LINK_HEADER_OP_LINK |
				    VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
				    link_target);

	if (drm_debug_enabled(DRM_UT_DRIVER))
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}