1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Red Hat
4  * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
5  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
6  *
7  * Author: Rob Clark <robdclark@gmail.com>
8  */
9 
10 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
11 #include <linux/debugfs.h>
12 #include <linux/kthread.h>
13 #include <linux/seq_file.h>
14 
15 #include <drm/drm_crtc.h>
16 #include <drm/drm_file.h>
17 #include <drm/drm_probe_helper.h>
18 
19 #include "msm_drv.h"
20 #include "dpu_kms.h"
21 #include "dpu_hwio.h"
22 #include "dpu_hw_catalog.h"
23 #include "dpu_hw_intf.h"
24 #include "dpu_hw_ctl.h"
25 #include "dpu_hw_dspp.h"
26 #include "dpu_hw_dsc.h"
27 #include "dpu_hw_merge3d.h"
28 #include "dpu_formats.h"
29 #include "dpu_encoder_phys.h"
30 #include "dpu_crtc.h"
31 #include "dpu_trace.h"
32 #include "dpu_core_irq.h"
33 #include "disp/msm_disp_snapshot.h"
34 
35 #define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
36 		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
37 
38 #define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
39 		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
40 
41 /*
42  * Two to anticipate panels that can do cmd/vid dynamic switching;
43  * the plan is to create all possible physical encoder types and switch
44  * between them at runtime.
45  */
46 #define NUM_PHYS_ENCODER_TYPES 2
47 
48 #define MAX_PHYS_ENCODERS_PER_VIRTUAL \
49 	(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
50 
51 #define MAX_CHANNELS_PER_ENC 2
52 
53 #define IDLE_SHORT_TIMEOUT	1
54 
55 #define MAX_HDISPLAY_SPLIT 1080
56 
57 /* timeout in frames waiting for frame done */
58 #define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
59 
60 /**
61  * enum dpu_enc_rc_events - events for resource control state machine
62  * @DPU_ENC_RC_EVENT_KICKOFF:
63  *	This event happens at NORMAL priority.
64  *	Event that signals the start of the transfer. When this event is
65  *	received, enable MDP/DSI core clocks. Regardless of the previous
66  *	state, the resource should be in ON state at the end of this event.
67  * @DPU_ENC_RC_EVENT_FRAME_DONE:
68  *	This event happens at INTERRUPT level.
69  *	Event signals the end of the data transfer after the PP FRAME_DONE
70  *	event. At the end of this event, a delayed work is scheduled to go to
71  *	IDLE_PC state after IDLE_TIMEOUT time.
72  * @DPU_ENC_RC_EVENT_PRE_STOP:
73  *	This event happens at NORMAL priority.
74  *	This event, when received during the ON state, leaves the RC state
75  *	in the PRE_OFF state. It should be followed by the STOP event as
76  *	part of encoder disable.
77  *	If received during IDLE or OFF states, it will do nothing.
78  * @DPU_ENC_RC_EVENT_STOP:
79  *	This event happens at NORMAL priority.
80  *	When this event is received, disable all the MDP/DSI core clocks, and
81  *	disable IRQs. It should be called from the PRE_OFF or IDLE states.
82  *	IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
83  *	PRE_OFF is expected when PRE_STOP was executed during the ON state.
84  *	Resource state should be in OFF at the end of the event.
85  * @DPU_ENC_RC_EVENT_ENTER_IDLE:
86  *	This event happens at NORMAL priority from a work item.
87  *	Event signals that there were no frame updates for IDLE_TIMEOUT time.
88  *	This would disable MDP/DSI core clocks and change the resource state
89  *	to IDLE.
90  */
91 enum dpu_enc_rc_events {
92 	DPU_ENC_RC_EVENT_KICKOFF = 1,
93 	DPU_ENC_RC_EVENT_FRAME_DONE,
94 	DPU_ENC_RC_EVENT_PRE_STOP,
95 	DPU_ENC_RC_EVENT_STOP,
96 	DPU_ENC_RC_EVENT_ENTER_IDLE
97 };
98 
99 /*
100  * enum dpu_enc_rc_states - states that the resource control maintains
101  * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
102  * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
103  * @DPU_ENC_RC_STATE_ON: Resource is in ON state
105  * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
106  */
107 enum dpu_enc_rc_states {
108 	DPU_ENC_RC_STATE_OFF,
109 	DPU_ENC_RC_STATE_PRE_OFF,
110 	DPU_ENC_RC_STATE_ON,
111 	DPU_ENC_RC_STATE_IDLE
112 };
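
/*
 * Summary of the resource-control transitions implemented in
 * dpu_encoder_resource_control():
 *
 *   OFF / IDLE   --KICKOFF-----> ON
 *   ON           --FRAME_DONE--> ON      (schedules delayed ENTER_IDLE work)
 *   ON           --ENTER_IDLE--> IDLE
 *   ON           --PRE_STOP----> PRE_OFF (IDLE -> PRE_OFF in video mode)
 *   PRE_OFF/IDLE --STOP--------> OFF
 */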
113 
114 /**
115  * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
116  *	encoders. Virtual encoder manages one "logical" display. Physical
117  *	encoders manage one intf block, tied to a specific panel/sub-panel.
118  *	Virtual encoder defers as much as possible to the physical encoders.
119  *	Virtual encoder registers itself with the DRM Framework as the encoder.
120  * @base:		drm_encoder base class for registration with DRM
121  * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
122  * @enabled:		True if the encoder is active, protected by enc_lock
123  * @num_phys_encs:	Actual number of physical encoders contained.
124  * @phys_encs:		Container of physical encoders managed.
125  * @cur_master:		Pointer to the current master in this mode; an
126  *			optimization. Only valid after enable, cleared at disable.
127  * @cur_slave:		As above but for the slave encoder.
128  * @hw_pp:		Handles to the pingpong blocks used for the display. The
129  *			number of pingpong blocks can differ from num_phys_encs.
130  * @hw_dsc:		Handle to the DSC blocks used for the display.
131  * @dsc_mask:		Bitmask of used DSC blocks.
132  * @intfs_swapped:	Whether or not the phys_enc interfaces have been swapped
133  *			for partial update right-only cases, such as pingpong
134  *			split where virtual pingpong does not generate IRQs
135  * @crtc:		Pointer to the currently assigned crtc. Normally you
136  *			would use crtc->state->encoder_mask to determine the
137  *			link between encoder/crtc. However in this case we need
138  *			to track crtc in the disable() hook which is called
139  *			_after_ encoder_mask is cleared.
140  * @connector:		If a mode is set, cached pointer to the active connector
141  * @crtc_kickoff_cb:		Callback into CRTC that will flush & start
142  *				all CTL paths
143  * @crtc_kickoff_cb_data:	Opaque user data given to crtc_kickoff_cb
144  * @debugfs_root:		Debug file system root file node
145  * @enc_lock:			Lock around physical encoder
146  *				create/destroy/enable/disable
147  * @frame_busy_mask:		Bitmask tracking which phys_encs are still busy
148  *				processing the current command.
149  *				Bit0 = phys_encs[0] etc.
150  * @crtc_frame_event_cb:	callback handler for frame event
151  * @crtc_frame_event_cb_data:	callback handler private data
152  * @frame_done_timeout_ms:	frame done timeout in ms
153  * @frame_done_timer:		watchdog timer for frame done event
154  * @vsync_event_timer:		vsync timer
155  * @disp_info:			local copy of msm_display_info struct
156  * @idle_pc_supported:		indicates whether idle power collapse is supported
157  * @rc_lock:			resource control mutex lock to protect
158  *				virt encoder over various state changes
159  * @rc_state:			resource controller state
160  * @delayed_off_work:		delayed worker to schedule disabling of
161  *				clks and resources after IDLE_TIMEOUT time.
162  * @vsync_event_work:		worker to handle vsync event for autorefresh
163  * @topology:                   topology of the display
164  * @idle_timeout:		idle timeout duration in milliseconds
165  * @wide_bus_en:		wide bus is enabled on this interface
166  * @dsc:			drm_dsc_config pointer, for DSC-enabled encoders
167  */
168 struct dpu_encoder_virt {
169 	struct drm_encoder base;
170 	spinlock_t enc_spinlock;
171 
172 	bool enabled;
173 
174 	unsigned int num_phys_encs;
175 	struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
176 	struct dpu_encoder_phys *cur_master;
177 	struct dpu_encoder_phys *cur_slave;
178 	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
179 	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
180 
181 	unsigned int dsc_mask;
182 
183 	bool intfs_swapped;
184 
185 	struct drm_crtc *crtc;
186 	struct drm_connector *connector;
187 
188 	struct dentry *debugfs_root;
189 	struct mutex enc_lock;
190 	DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
191 	void (*crtc_frame_event_cb)(void *, u32 event);
192 	void *crtc_frame_event_cb_data;
193 
194 	atomic_t frame_done_timeout_ms;
195 	struct timer_list frame_done_timer;
196 	struct timer_list vsync_event_timer;
197 
198 	struct msm_display_info disp_info;
199 
200 	bool idle_pc_supported;
201 	struct mutex rc_lock;
202 	enum dpu_enc_rc_states rc_state;
203 	struct delayed_work delayed_off_work;
204 	struct kthread_work vsync_event_work;
205 	struct msm_display_topology topology;
206 
207 	u32 idle_timeout;
208 
209 	bool wide_bus_en;
210 
211 	/* DSC configuration */
212 	struct drm_dsc_config *dsc;
213 };
214 
215 #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
216 
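/* 4x4 dither matrix (values 0..15) copied into the pingpong dither config */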
217 static u32 dither_matrix[DITHER_MATRIX_SZ] = {
218 	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
219 };
220 
221 
222 bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
223 {
224 	const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
225 
226 	return dpu_enc->wide_bus_en;
227 }
228 
229 int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
230 {
231 	struct dpu_encoder_virt *dpu_enc;
232 	int i, num_intf = 0;
233 
234 	dpu_enc = to_dpu_encoder_virt(drm_enc);
235 
236 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
237 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
238 
239 		if (phys->hw_intf && phys->hw_intf->ops.setup_misr
240 				&& phys->hw_intf->ops.collect_misr)
241 			num_intf++;
242 	}
243 
244 	return num_intf;
245 }
246 
247 void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
248 {
249 	struct dpu_encoder_virt *dpu_enc;
250 
251 	int i;
252 
253 	dpu_enc = to_dpu_encoder_virt(drm_enc);
254 
255 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
256 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
257 
258 		if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr)
259 			continue;
260 
261 		phys->hw_intf->ops.setup_misr(phys->hw_intf, true, 1);
262 	}
263 }
264 
265 int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos)
266 {
267 	struct dpu_encoder_virt *dpu_enc;
268 
269 	int i, rc = 0, entries_added = 0;
270 
271 	if (!drm_enc->crtc) {
272 		DRM_ERROR("no crtc found for encoder %d\n", drm_enc->index);
273 		return -EINVAL;
274 	}
275 
276 	dpu_enc = to_dpu_encoder_virt(drm_enc);
277 
278 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
279 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
280 
281 		if (!phys->hw_intf || !phys->hw_intf->ops.collect_misr)
282 			continue;
283 
284 		rc = phys->hw_intf->ops.collect_misr(phys->hw_intf, &crcs[pos + entries_added]);
285 		if (rc)
286 			return rc;
287 		entries_added++;
288 	}
289 
290 	return entries_added;
291 }
292 
293 static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
294 {
295 	struct dpu_hw_dither_cfg dither_cfg = { 0 };
296 
297 	if (!hw_pp->ops.setup_dither)
298 		return;
299 
300 	switch (bpc) {
301 	case 6:
302 		dither_cfg.c0_bitdepth = 6;
303 		dither_cfg.c1_bitdepth = 6;
304 		dither_cfg.c2_bitdepth = 6;
305 		dither_cfg.c3_bitdepth = 6;
306 		dither_cfg.temporal_en = 0;
307 		break;
308 	default:
309 		hw_pp->ops.setup_dither(hw_pp, NULL);
310 		return;
311 	}
312 
313 	memcpy(&dither_cfg.matrix, dither_matrix,
314 			sizeof(u32) * DITHER_MATRIX_SZ);
315 
316 	hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
317 }
318 
319 static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
320 {
321 	switch (intf_mode) {
322 	case INTF_MODE_VIDEO:
323 		return "INTF_MODE_VIDEO";
324 	case INTF_MODE_CMD:
325 		return "INTF_MODE_CMD";
326 	case INTF_MODE_WB_BLOCK:
327 		return "INTF_MODE_WB_BLOCK";
328 	case INTF_MODE_WB_LINE:
329 		return "INTF_MODE_WB_LINE";
330 	default:
331 		return "INTF_MODE_UNKNOWN";
332 	}
333 }
334 
335 void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
336 		enum dpu_intr_idx intr_idx)
337 {
338 	DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
339 			DRMID(phys_enc->parent),
340 			dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
341 			phys_enc->intf_idx - INTF_0, phys_enc->wb_idx - WB_0,
342 			phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
343 
344 	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc,
345 				DPU_ENCODER_FRAME_EVENT_ERROR);
346 }
347 
348 static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
349 		u32 irq_idx, struct dpu_encoder_wait_info *info);
350 
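/*
 * Wait for the interrupt described by @wait_info to signal, or time out.
 * Returns 0 on success (including when the wait is skipped because irq < 0,
 * or when the IRQ status register shows the interrupt already fired),
 * -EWOULDBLOCK if the encoder is disabled, -ETIMEDOUT on a real timeout and
 * -EINVAL on bad parameters.
 */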
351 int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
352 		int irq,
353 		void (*func)(void *arg, int irq_idx),
354 		struct dpu_encoder_wait_info *wait_info)
355 {
356 	u32 irq_status;
357 	int ret;
358 
359 	if (!wait_info) {
360 		DPU_ERROR("invalid params\n");
361 		return -EINVAL;
362 	}
363 	/* note: do master / slave checking outside */
364 
365 	/* return EWOULDBLOCK since we know the wait isn't necessary */
366 	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
367 		DRM_ERROR("encoder is disabled id=%u, callback=%ps, irq=%d\n",
368 			  DRMID(phys_enc->parent), func,
369 			  irq);
370 		return -EWOULDBLOCK;
371 	}
372 
373 	if (irq < 0) {
374 		DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
375 			      DRMID(phys_enc->parent), func);
376 		return 0;
377 	}
378 
379 	DRM_DEBUG_KMS("id=%u, callback=%ps, irq=%d, pp=%d, pending_cnt=%d\n",
380 		      DRMID(phys_enc->parent), func,
381 		      irq, phys_enc->hw_pp->idx - PINGPONG_0,
382 		      atomic_read(wait_info->atomic_cnt));
383 
384 	ret = dpu_encoder_helper_wait_event_timeout(
385 			DRMID(phys_enc->parent),
386 			irq,
387 			wait_info);
388 
389 	if (ret <= 0) {
390 		irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq);
391 		if (irq_status) {
392 			unsigned long flags;
393 
394 			DRM_DEBUG_KMS("irq not triggered id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
395 				      DRMID(phys_enc->parent), func,
396 				      irq,
397 				      phys_enc->hw_pp->idx - PINGPONG_0,
398 				      atomic_read(wait_info->atomic_cnt));
399 			local_irq_save(flags);
400 			func(phys_enc, irq);
401 			local_irq_restore(flags);
402 			ret = 0;
403 		} else {
404 			ret = -ETIMEDOUT;
405 			DRM_DEBUG_KMS("irq timeout id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
406 				      DRMID(phys_enc->parent), func,
407 				      irq,
408 				      phys_enc->hw_pp->idx - PINGPONG_0,
409 				      atomic_read(wait_info->atomic_cnt));
410 		}
411 	} else {
412 		ret = 0;
413 		trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
414 			func, irq,
415 			phys_enc->hw_pp->idx - PINGPONG_0,
416 			atomic_read(wait_info->atomic_cnt));
417 	}
418 
419 	return ret;
420 }
421 
422 int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
423 {
424 	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
425 	struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;
426 	return phys ? atomic_read(&phys->vsync_cnt) : 0;
427 }
428 
429 int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
430 {
431 	struct dpu_encoder_virt *dpu_enc;
432 	struct dpu_encoder_phys *phys;
433 	int linecount = 0;
434 
435 	dpu_enc = to_dpu_encoder_virt(drm_enc);
436 	phys = dpu_enc ? dpu_enc->cur_master : NULL;
437 
438 	if (phys && phys->ops.get_line_count)
439 		linecount = phys->ops.get_line_count(phys);
440 
441 	return linecount;
442 }
443 
444 static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
445 {
446 	struct dpu_encoder_virt *dpu_enc = NULL;
447 	int i = 0;
448 
449 	if (!drm_enc) {
450 		DPU_ERROR("invalid encoder\n");
451 		return;
452 	}
453 
454 	dpu_enc = to_dpu_encoder_virt(drm_enc);
455 	DPU_DEBUG_ENC(dpu_enc, "\n");
456 
457 	mutex_lock(&dpu_enc->enc_lock);
458 
459 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
460 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
461 
462 		if (phys->ops.destroy) {
463 			phys->ops.destroy(phys);
464 			--dpu_enc->num_phys_encs;
465 			dpu_enc->phys_encs[i] = NULL;
466 		}
467 	}
468 
469 	if (dpu_enc->num_phys_encs)
470 		DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
471 				dpu_enc->num_phys_encs);
472 	dpu_enc->num_phys_encs = 0;
473 	mutex_unlock(&dpu_enc->enc_lock);
474 
475 	drm_encoder_cleanup(drm_enc);
476 	mutex_destroy(&dpu_enc->enc_lock);
477 }
478 
479 void dpu_encoder_helper_split_config(
480 		struct dpu_encoder_phys *phys_enc,
481 		enum dpu_intf interface)
482 {
483 	struct dpu_encoder_virt *dpu_enc;
484 	struct split_pipe_cfg cfg = { 0 };
485 	struct dpu_hw_mdp *hw_mdptop;
486 	struct msm_display_info *disp_info;
487 
488 	if (!phys_enc->hw_mdptop || !phys_enc->parent) {
489 		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
490 		return;
491 	}
492 
493 	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
494 	hw_mdptop = phys_enc->hw_mdptop;
495 	disp_info = &dpu_enc->disp_info;
496 
497 	if (disp_info->intf_type != DRM_MODE_ENCODER_DSI)
498 		return;
499 
500 	/*
501 	 * disable split modes since the encoder will be operating as the only
502 	 * encoder, either for the entire use case (for example, single DSI),
503 	 * or for this frame in the case of left/right only partial
504 	 * update.
505 	 */
506 	if (phys_enc->split_role == ENC_ROLE_SOLO) {
507 		if (hw_mdptop->ops.setup_split_pipe)
508 			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
509 		return;
510 	}
511 
512 	cfg.en = true;
513 	cfg.mode = phys_enc->intf_mode;
514 	cfg.intf = interface;
515 
516 	if (cfg.en && phys_enc->ops.needs_single_flush &&
517 			phys_enc->ops.needs_single_flush(phys_enc))
518 		cfg.split_flush_en = true;
519 
520 	if (phys_enc->split_role == ENC_ROLE_MASTER) {
521 		DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
522 
523 		if (hw_mdptop->ops.setup_split_pipe)
524 			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
525 	}
526 }
527 
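/*
 * DSC merge is required when more DSC blocks than interfaces are in use,
 * i.e. two DSC encoders feeding a single interface; see
 * dpu_encoder_get_topology() for the supported 2:2:1 topology.
 */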
528 bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
529 {
530 	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
531 	int i, intf_count = 0, num_dsc = 0;
532 
533 	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
534 		if (dpu_enc->phys_encs[i])
535 			intf_count++;
536 
537 	/* See dpu_encoder_get_topology, we only support 2:2:1 topology */
538 	if (dpu_enc->dsc)
539 		num_dsc = 2;
540 
541 	return (num_dsc > 0) && (num_dsc > intf_count);
542 }
543 
544 static struct msm_display_topology dpu_encoder_get_topology(
545 			struct dpu_encoder_virt *dpu_enc,
546 			struct dpu_kms *dpu_kms,
547 			struct drm_display_mode *mode)
548 {
549 	struct msm_display_topology topology = {0};
550 	int i, intf_count = 0;
551 
552 	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
553 		if (dpu_enc->phys_encs[i])
554 			intf_count++;
555 
556 	/* Datapath topology selection
557 	 *
558 	 * Dual display
559 	 * 2 LM, 2 INTF ( Split display using 2 interfaces)
560 	 *
561 	 * Single display
562 	 * 1 LM, 1 INTF
563 	 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
564 	 *
565 	 * Add color (DSPP) blocks only to the primary interface, and only if
566 	 * they are available in sufficient number
567 	 */
568 	if (intf_count == 2)
569 		topology.num_lm = 2;
570 	else if (!dpu_kms->catalog->caps->has_3d_merge)
571 		topology.num_lm = 1;
572 	else
573 		topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
574 
575 	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) {
576 		if (dpu_kms->catalog->dspp &&
577 			(dpu_kms->catalog->dspp_count >= topology.num_lm))
578 			topology.num_dspp = topology.num_lm;
579 	}
580 
581 	topology.num_intf = intf_count;
582 
583 	if (dpu_enc->dsc) {
584 		/*
585 		 * In case of Display Stream Compression (DSC), we would use
586 		 * 2 DSC encoders, 2 layer mixers and 1 interface;
587 		 * this is power-optimal and can drive screens up to and
588 		 * including 4k
589 		 */
590 		topology.num_dsc = 2;
591 		topology.num_lm = 2;
592 		topology.num_intf = 1;
593 	}
594 
595 	return topology;
596 }
597 
598 static int dpu_encoder_virt_atomic_check(
599 		struct drm_encoder *drm_enc,
600 		struct drm_crtc_state *crtc_state,
601 		struct drm_connector_state *conn_state)
602 {
603 	struct dpu_encoder_virt *dpu_enc;
604 	struct msm_drm_private *priv;
605 	struct dpu_kms *dpu_kms;
606 	struct drm_display_mode *adj_mode;
607 	struct msm_display_topology topology;
608 	struct dpu_global_state *global_state;
609 	int i = 0;
610 	int ret = 0;
611 
612 	if (!drm_enc || !crtc_state || !conn_state) {
613 		DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
614 				drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
615 		return -EINVAL;
616 	}
617 
618 	dpu_enc = to_dpu_encoder_virt(drm_enc);
619 	DPU_DEBUG_ENC(dpu_enc, "\n");
620 
621 	priv = drm_enc->dev->dev_private;
622 	dpu_kms = to_dpu_kms(priv->kms);
623 	adj_mode = &crtc_state->adjusted_mode;
624 	global_state = dpu_kms_get_global_state(crtc_state->state);
625 	if (IS_ERR(global_state))
626 		return PTR_ERR(global_state);
627 
628 	trace_dpu_enc_atomic_check(DRMID(drm_enc));
629 
630 	/* perform atomic check on the first physical encoder (master) */
631 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
632 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
633 
634 		if (phys->ops.atomic_check)
635 			ret = phys->ops.atomic_check(phys, crtc_state,
636 					conn_state);
637 		if (ret) {
638 			DPU_ERROR_ENC(dpu_enc,
639 					"mode unsupported, phys idx %d\n", i);
640 			break;
641 		}
642 	}
643 
644 	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
645 
646 	/* Reserve dynamic resources now. */
647 	if (!ret) {
648 		/*
649 		 * Release and allocate resources on every modeset.
650 		 * Don't allocate when active is false.
651 		 */
652 		if (drm_atomic_crtc_needs_modeset(crtc_state)) {
653 			dpu_rm_release(global_state, drm_enc);
654 
655 			if (!crtc_state->active_changed || crtc_state->active)
656 				ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
657 						drm_enc, crtc_state, topology);
658 		}
659 	}
660 
661 	trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
662 
663 	return ret;
664 }
665 
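/*
 * Program the vsync (TE) source for command-mode panels: either a watchdog
 * timer or the panel TE GPIO, depending on the display info, for every
 * pingpong block owned by this encoder.
 */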
666 static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
667 			struct msm_display_info *disp_info)
668 {
669 	struct dpu_vsync_source_cfg vsync_cfg = { 0 };
670 	struct msm_drm_private *priv;
671 	struct dpu_kms *dpu_kms;
672 	struct dpu_hw_mdp *hw_mdptop;
673 	struct drm_encoder *drm_enc;
674 	int i;
675 
676 	if (!dpu_enc || !disp_info) {
677 		DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
678 					dpu_enc != NULL, disp_info != NULL);
679 		return;
680 	} else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
681 		DPU_ERROR("invalid num phys enc %d/%d\n",
682 				dpu_enc->num_phys_encs,
683 				(int) ARRAY_SIZE(dpu_enc->hw_pp));
684 		return;
685 	}
686 
687 	drm_enc = &dpu_enc->base;
688 	/* these pointers are checked in virt_enable_helper */
689 	priv = drm_enc->dev->dev_private;
690 
691 	dpu_kms = to_dpu_kms(priv->kms);
692 	hw_mdptop = dpu_kms->hw_mdp;
693 	if (!hw_mdptop) {
694 		DPU_ERROR("invalid mdptop\n");
695 		return;
696 	}
697 
698 	if (hw_mdptop->ops.setup_vsync_source &&
699 			disp_info->is_cmd_mode) {
700 		for (i = 0; i < dpu_enc->num_phys_encs; i++)
701 			vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
702 
703 		vsync_cfg.pp_count = dpu_enc->num_phys_encs;
704 		if (disp_info->is_te_using_watchdog_timer)
705 			vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
706 		else
707 			vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
708 
709 		hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
710 	}
711 }
712 
713 static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
714 {
715 	struct dpu_encoder_virt *dpu_enc;
716 	int i;
717 
718 	if (!drm_enc) {
719 		DPU_ERROR("invalid encoder\n");
720 		return;
721 	}
722 
723 	dpu_enc = to_dpu_encoder_virt(drm_enc);
724 
725 	DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
726 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
727 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
728 
729 		if (phys->ops.irq_control)
730 			phys->ops.irq_control(phys, enable);
731 	}
732 
733 }
734 
735 static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
736 		bool enable)
737 {
738 	struct msm_drm_private *priv;
739 	struct dpu_kms *dpu_kms;
740 	struct dpu_encoder_virt *dpu_enc;
741 
742 	dpu_enc = to_dpu_encoder_virt(drm_enc);
743 	priv = drm_enc->dev->dev_private;
744 	dpu_kms = to_dpu_kms(priv->kms);
745 
746 	trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
747 
748 	if (!dpu_enc->cur_master) {
749 		DPU_ERROR("encoder master not set\n");
750 		return;
751 	}
752 
753 	if (enable) {
754 		/* enable DPU core clks */
755 		pm_runtime_get_sync(&dpu_kms->pdev->dev);
756 
757 		/* enable all the irq */
758 		_dpu_encoder_irq_control(drm_enc, true);
759 
760 	} else {
761 		/* disable all the irq */
762 		_dpu_encoder_irq_control(drm_enc, false);
763 
764 		/* disable DPU core clks */
765 		pm_runtime_put_sync(&dpu_kms->pdev->dev);
766 	}
767 
768 }
769 
770 static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
771 		u32 sw_event)
772 {
773 	struct dpu_encoder_virt *dpu_enc;
774 	struct msm_drm_private *priv;
775 	bool is_vid_mode = false;
776 
777 	if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
778 		DPU_ERROR("invalid parameters\n");
779 		return -EINVAL;
780 	}
781 	dpu_enc = to_dpu_encoder_virt(drm_enc);
782 	priv = drm_enc->dev->dev_private;
783 	is_vid_mode = !dpu_enc->disp_info.is_cmd_mode;
784 
785 	/*
786 	 * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
787 	 * STOP events and return early for other events (i.e. wb display).
788 	 */
789 	if (!dpu_enc->idle_pc_supported &&
790 			(sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
791 			sw_event != DPU_ENC_RC_EVENT_STOP &&
792 			sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
793 		return 0;
794 
795 	trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
796 			 dpu_enc->rc_state, "begin");
797 
798 	switch (sw_event) {
799 	case DPU_ENC_RC_EVENT_KICKOFF:
800 		/* cancel delayed off work, if any */
801 		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
802 			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
803 					sw_event);
804 
805 		mutex_lock(&dpu_enc->rc_lock);
806 
807 		/* return if the resource control is already in ON state */
808 		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
809 			DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in ON state\n",
810 				      DRMID(drm_enc), sw_event);
811 			mutex_unlock(&dpu_enc->rc_lock);
812 			return 0;
813 		} else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
814 				dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
815 			DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in state %d\n",
816 				      DRMID(drm_enc), sw_event,
817 				      dpu_enc->rc_state);
818 			mutex_unlock(&dpu_enc->rc_lock);
819 			return -EINVAL;
820 		}
821 
822 		if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
823 			_dpu_encoder_irq_control(drm_enc, true);
824 		else
825 			_dpu_encoder_resource_control_helper(drm_enc, true);
826 
827 		dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
828 
829 		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
830 				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
831 				 "kickoff");
832 
833 		mutex_unlock(&dpu_enc->rc_lock);
834 		break;
835 
836 	case DPU_ENC_RC_EVENT_FRAME_DONE:
837 		/*
838 		 * the mutex lock is not taken as this event happens in interrupt
839 		 * context, and locking is not required because the other events
840 		 * like KICKOFF and STOP do a wait-for-idle before executing
841 		 * the resource_control
842 		 */
843 		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
844 			DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
845 				      DRMID(drm_enc), sw_event,
846 				      dpu_enc->rc_state);
847 			return -EINVAL;
848 		}
849 
850 		/*
851 		 * schedule off work item only when there are no
852 		 * frames pending
853 		 */
854 		if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
855 			DRM_DEBUG_KMS("id:%d skip schedule work\n",
856 				      DRMID(drm_enc));
857 			return 0;
858 		}
859 
860 		queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
861 				   msecs_to_jiffies(dpu_enc->idle_timeout));
862 
863 		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
864 				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
865 				 "frame done");
866 		break;
867 
868 	case DPU_ENC_RC_EVENT_PRE_STOP:
869 		/* cancel delayed off work, if any */
870 		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
871 			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
872 					sw_event);
873 
874 		mutex_lock(&dpu_enc->rc_lock);
875 
876 		if (is_vid_mode &&
877 			  dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
878 			_dpu_encoder_irq_control(drm_enc, true);
879 		}
880 		/* skip if already OFF or IDLE; resources are already off */
881 		else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
882 				dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
883 			DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
884 				      DRMID(drm_enc), sw_event,
885 				      dpu_enc->rc_state);
886 			mutex_unlock(&dpu_enc->rc_lock);
887 			return 0;
888 		}
889 
890 		dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
891 
892 		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
893 				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
894 				 "pre stop");
895 
896 		mutex_unlock(&dpu_enc->rc_lock);
897 		break;
898 
899 	case DPU_ENC_RC_EVENT_STOP:
900 		mutex_lock(&dpu_enc->rc_lock);
901 
902 		/* return if the resource control is already in OFF state */
903 		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
904 			DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
905 				      DRMID(drm_enc), sw_event);
906 			mutex_unlock(&dpu_enc->rc_lock);
907 			return 0;
908 		} else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
909 			DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
910 				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
911 			mutex_unlock(&dpu_enc->rc_lock);
912 			return -EINVAL;
913 		}
914 
915 		/*
916 		 * expect to arrive here only from the PRE_OFF or IDLE state;
917 		 * in the IDLE state the resources are already disabled
918 		 */
919 		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
920 			_dpu_encoder_resource_control_helper(drm_enc, false);
921 
922 		dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
923 
924 		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
925 				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
926 				 "stop");
927 
928 		mutex_unlock(&dpu_enc->rc_lock);
929 		break;
930 
931 	case DPU_ENC_RC_EVENT_ENTER_IDLE:
932 		mutex_lock(&dpu_enc->rc_lock);
933 
934 		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
935 			DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
936 				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
937 			mutex_unlock(&dpu_enc->rc_lock);
938 			return 0;
939 		}
940 
941 		/*
942 		 * if we are in ON but a frame was just kicked off,
943 		 * ignore the IDLE event, it's probably a stale timer event
944 		 */
945 		if (dpu_enc->frame_busy_mask[0]) {
946 			DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
947 				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
948 			mutex_unlock(&dpu_enc->rc_lock);
949 			return 0;
950 		}
951 
952 		if (is_vid_mode)
953 			_dpu_encoder_irq_control(drm_enc, false);
954 		else
955 			_dpu_encoder_resource_control_helper(drm_enc, false);
956 
957 		dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
958 
959 		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
960 				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
961 				 "idle");
962 
963 		mutex_unlock(&dpu_enc->rc_lock);
964 		break;
965 
966 	default:
967 		DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
968 			  sw_event);
969 		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
970 				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
971 				 "error");
972 		break;
973 	}
974 
975 	trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
976 			 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
977 			 "end");
978 	return 0;
979 }
980 
981 void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
982 		struct drm_writeback_job *job)
983 {
984 	struct dpu_encoder_virt *dpu_enc;
985 	int i;
986 
987 	dpu_enc = to_dpu_encoder_virt(drm_enc);
988 
989 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
990 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
991 
992 		if (phys->ops.prepare_wb_job)
993 			phys->ops.prepare_wb_job(phys, job);
994 
995 	}
996 }
997 
998 void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
999 		struct drm_writeback_job *job)
1000 {
1001 	struct dpu_encoder_virt *dpu_enc;
1002 	int i;
1003 
1004 	dpu_enc = to_dpu_encoder_virt(drm_enc);
1005 
1006 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1007 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1008 
1009 		if (phys->ops.cleanup_wb_job)
1010 			phys->ops.cleanup_wb_job(phys, job);
1011 
1012 	}
1013 }
1014 
1015 static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
1016 					     struct drm_crtc_state *crtc_state,
1017 					     struct drm_connector_state *conn_state)
1018 {
1019 	struct dpu_encoder_virt *dpu_enc;
1020 	struct msm_drm_private *priv;
1021 	struct dpu_kms *dpu_kms;
1022 	struct dpu_crtc_state *cstate;
1023 	struct dpu_global_state *global_state;
1024 	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
1025 	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
1026 	struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
1027 	struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
1028 	struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
1029 	int num_lm, num_ctl, num_pp, num_dsc;
1030 	unsigned int dsc_mask = 0;
1031 	int i;
1032 
1033 	if (!drm_enc) {
1034 		DPU_ERROR("invalid encoder\n");
1035 		return;
1036 	}
1037 
1038 	dpu_enc = to_dpu_encoder_virt(drm_enc);
1039 	DPU_DEBUG_ENC(dpu_enc, "\n");
1040 
1041 	priv = drm_enc->dev->dev_private;
1042 	dpu_kms = to_dpu_kms(priv->kms);
1043 
1044 	global_state = dpu_kms_get_existing_global_state(dpu_kms);
1045 	if (IS_ERR_OR_NULL(global_state)) {
1046 		DPU_ERROR("Failed to get global state");
1047 		return;
1048 	}
1049 
1050 	trace_dpu_enc_mode_set(DRMID(drm_enc));
1051 
1052 	/* Query resources that were reserved in the atomic check step. */
1053 	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1054 		drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
1055 		ARRAY_SIZE(hw_pp));
1056 	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1057 		drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
1058 	num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1059 		drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
1060 	dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1061 		drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
1062 		ARRAY_SIZE(hw_dspp));
1063 
1064 	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
1065 		dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
1066 						: NULL;
1067 
1068 	if (dpu_enc->dsc) {
1069 		num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1070 							drm_enc->base.id, DPU_HW_BLK_DSC,
1071 							hw_dsc, ARRAY_SIZE(hw_dsc));
1072 		for (i = 0; i < num_dsc; i++) {
1073 			dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
1074 			dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
1075 		}
1076 	}
1077 
1078 	dpu_enc->dsc_mask = dsc_mask;
1079 
1080 	cstate = to_dpu_crtc_state(crtc_state);
1081 
1082 	for (i = 0; i < num_lm; i++) {
1083 		int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
1084 
1085 		cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
1086 		cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
1087 		cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
1088 	}
1089 
1090 	cstate->num_mixers = num_lm;
1091 
1092 	dpu_enc->connector = conn_state->connector;
1093 
1094 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1095 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1096 
1097 		if (!dpu_enc->hw_pp[i]) {
1098 			DPU_ERROR_ENC(dpu_enc,
1099 				"no pp block assigned at idx: %d\n", i);
1100 			return;
1101 		}
1102 
1103 		if (!hw_ctl[i]) {
1104 			DPU_ERROR_ENC(dpu_enc,
1105 				"no ctl block assigned at idx: %d\n", i);
1106 			return;
1107 		}
1108 
1109 		phys->hw_pp = dpu_enc->hw_pp[i];
1110 		phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
1111 
1112 		phys->cached_mode = crtc_state->adjusted_mode;
1113 		if (phys->ops.atomic_mode_set)
1114 			phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
1115 	}
1116 }
1117 
1118 static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
1119 {
1120 	struct dpu_encoder_virt *dpu_enc = NULL;
1121 	int i;
1122 
1123 	if (!drm_enc || !drm_enc->dev) {
1124 		DPU_ERROR("invalid parameters\n");
1125 		return;
1126 	}
1127 
1128 	dpu_enc = to_dpu_encoder_virt(drm_enc);
1129 	if (!dpu_enc || !dpu_enc->cur_master) {
1130 		DPU_ERROR("invalid dpu encoder/master\n");
1131 		return;
1132 	}
1133 
1134 
1135 	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_TMDS &&
1136 		dpu_enc->cur_master->hw_mdptop &&
1137 		dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
1138 		dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
1139 			dpu_enc->cur_master->hw_mdptop);
1140 
1141 	_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
1142 
1143 	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
1144 			!WARN_ON(dpu_enc->num_phys_encs == 0)) {
1145 		unsigned bpc = dpu_enc->connector->display_info.bpc;
1146 		for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
1147 			if (!dpu_enc->hw_pp[i])
1148 				continue;
1149 			_dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
1150 		}
1151 	}
1152 }
1153 
1154 void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
1155 {
1156 	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1157 
1158 	mutex_lock(&dpu_enc->enc_lock);
1159 
1160 	if (!dpu_enc->enabled)
1161 		goto out;
1162 
1163 	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
1164 		dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
1165 	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
1166 		dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
1167 
1168 	_dpu_encoder_virt_enable_helper(drm_enc);
1169 
1170 out:
1171 	mutex_unlock(&dpu_enc->enc_lock);
1172 }
1173 
1174 static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
1175 {
1176 	struct dpu_encoder_virt *dpu_enc = NULL;
1177 	int ret = 0;
1178 	struct drm_display_mode *cur_mode = NULL;
1179 
1180 	dpu_enc = to_dpu_encoder_virt(drm_enc);
1181 
1182 	mutex_lock(&dpu_enc->enc_lock);
1183 	cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
1184 
1185 	trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
1186 			     cur_mode->vdisplay);
1187 
1188 	/* always enable slave encoder before master */
1189 	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
1190 		dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);
1191 
1192 	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
1193 		dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
1194 
1195 	ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
1196 	if (ret) {
1197 		DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
1198 				ret);
1199 		goto out;
1200 	}
1201 
1202 	_dpu_encoder_virt_enable_helper(drm_enc);
1203 
1204 	dpu_enc->enabled = true;
1205 
1206 out:
1207 	mutex_unlock(&dpu_enc->enc_lock);
1208 }
1209 
1210 static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
1211 {
1212 	struct dpu_encoder_virt *dpu_enc = NULL;
1213 	int i = 0;
1214 
1215 	dpu_enc = to_dpu_encoder_virt(drm_enc);
1216 	DPU_DEBUG_ENC(dpu_enc, "\n");
1217 
1218 	mutex_lock(&dpu_enc->enc_lock);
1219 	dpu_enc->enabled = false;
1220 
1221 	trace_dpu_enc_disable(DRMID(drm_enc));
1222 
1223 	/* wait for idle */
1224 	dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
1225 
1226 	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
1227 
1228 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1229 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1230 
1231 		if (phys->ops.disable)
1232 			phys->ops.disable(phys);
1233 	}
1234 
1235 
1236 	/* after phys waits for frame-done, should be no more frames pending */
1237 	if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
1238 		DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
1239 		del_timer_sync(&dpu_enc->frame_done_timer);
1240 	}
1241 
1242 	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
1243 
1244 	dpu_enc->connector = NULL;
1245 
1246 	DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
1247 
1248 	mutex_unlock(&dpu_enc->enc_lock);
1249 }
1250 
1251 static enum dpu_intf dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog,
1252 		enum dpu_intf_type type, u32 controller_id)
1253 {
1254 	int i = 0;
1255 
1256 	if (type == INTF_WB)
1257 		return INTF_MAX;
1258 
1259 	for (i = 0; i < catalog->intf_count; i++) {
1260 		if (catalog->intf[i].type == type
1261 		    && catalog->intf[i].controller_id == controller_id) {
1262 			return catalog->intf[i].id;
1263 		}
1264 	}
1265 
1266 	return INTF_MAX;
1267 }
1268 
1269 static enum dpu_wb dpu_encoder_get_wb(const struct dpu_mdss_cfg *catalog,
1270 		enum dpu_intf_type type, u32 controller_id)
1271 {
1272 	int i = 0;
1273 
1274 	if (type != INTF_WB)
1275 		return WB_MAX;
1276 
1277 	for (i = 0; i < catalog->wb_count; i++) {
1278 		if (catalog->wb[i].id == controller_id)
1279 			return catalog->wb[i].id;
1280 	}
1281 
1282 	return WB_MAX;
1283 }
1284 
1285 void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
1286 		struct dpu_encoder_phys *phy_enc)
1287 {
1288 	struct dpu_encoder_virt *dpu_enc = NULL;
1289 	unsigned long lock_flags;
1290 
1291 	if (!drm_enc || !phy_enc)
1292 		return;
1293 
1294 	DPU_ATRACE_BEGIN("encoder_vblank_callback");
1295 	dpu_enc = to_dpu_encoder_virt(drm_enc);
1296 
1297 	atomic_inc(&phy_enc->vsync_cnt);
1298 
1299 	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1300 	if (dpu_enc->crtc)
1301 		dpu_crtc_vblank_callback(dpu_enc->crtc);
1302 	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1303 
1304 	DPU_ATRACE_END("encoder_vblank_callback");
1305 }
1306 
1307 void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
1308 		struct dpu_encoder_phys *phy_enc)
1309 {
1310 	if (!phy_enc)
1311 		return;
1312 
1313 	DPU_ATRACE_BEGIN("encoder_underrun_callback");
1314 	atomic_inc(&phy_enc->underrun_cnt);
1315 
1316 	/* trigger dump only on the first underrun */
1317 	if (atomic_read(&phy_enc->underrun_cnt) == 1)
1318 		msm_disp_snapshot_state(drm_enc->dev);
1319 
1320 	trace_dpu_enc_underrun_cb(DRMID(drm_enc),
1321 				  atomic_read(&phy_enc->underrun_cnt));
1322 	DPU_ATRACE_END("encoder_underrun_callback");
1323 }
1324 
1325 void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
1326 {
1327 	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1328 	unsigned long lock_flags;
1329 
1330 	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1331 	/* crtc should always be cleared before re-assigning */
1332 	WARN_ON(crtc && dpu_enc->crtc);
1333 	dpu_enc->crtc = crtc;
1334 	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1335 }
1336 
1337 void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
1338 					struct drm_crtc *crtc, bool enable)
1339 {
1340 	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1341 	unsigned long lock_flags;
1342 	int i;
1343 
1344 	trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
1345 
1346 	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1347 	if (dpu_enc->crtc != crtc) {
1348 		spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1349 		return;
1350 	}
1351 	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1352 
1353 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1354 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1355 
1356 		if (phys->ops.control_vblank_irq)
1357 			phys->ops.control_vblank_irq(phys, enable);
1358 	}
1359 }
1360 
1361 void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
1362 		void (*frame_event_cb)(void *, u32 event),
1363 		void *frame_event_cb_data)
1364 {
1365 	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1366 	unsigned long lock_flags;
1367 	bool enable;
1368 
1369 	enable = frame_event_cb ? true : false;
1370 
1371 	if (!drm_enc) {
1372 		DPU_ERROR("invalid encoder\n");
1373 		return;
1374 	}
1375 	trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
1376 
1377 	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1378 	dpu_enc->crtc_frame_event_cb = frame_event_cb;
1379 	dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
1380 	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1381 }
1382 
1383 void dpu_encoder_frame_done_callback(
1384 		struct drm_encoder *drm_enc,
1385 		struct dpu_encoder_phys *ready_phys, u32 event)
1386 {
1387 	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1388 	unsigned int i;
1389 
1390 	if (event & (DPU_ENCODER_FRAME_EVENT_DONE
1391 			| DPU_ENCODER_FRAME_EVENT_ERROR
1392 			| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
1393 
1394 		if (!dpu_enc->frame_busy_mask[0]) {
1395 			/*
1396 			 * suppress frame_done without waiter,
1397 			 * likely autorefresh
1398 			 */
1399 			trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
1400 					dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
1401 					ready_phys->intf_idx, ready_phys->wb_idx);
1402 			return;
1403 		}
1404 
1405 		/* One of the physical encoders has become idle */
1406 		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1407 			if (dpu_enc->phys_encs[i] == ready_phys) {
1408 				trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
1409 						dpu_enc->frame_busy_mask[0]);
1410 				clear_bit(i, dpu_enc->frame_busy_mask);
1411 			}
1412 		}
1413 
1414 		if (!dpu_enc->frame_busy_mask[0]) {
1415 			atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
1416 			del_timer(&dpu_enc->frame_done_timer);
1417 
1418 			dpu_encoder_resource_control(drm_enc,
1419 					DPU_ENC_RC_EVENT_FRAME_DONE);
1420 
1421 			if (dpu_enc->crtc_frame_event_cb)
1422 				dpu_enc->crtc_frame_event_cb(
1423 					dpu_enc->crtc_frame_event_cb_data,
1424 					event);
1425 		}
1426 	} else {
1427 		if (dpu_enc->crtc_frame_event_cb)
1428 			dpu_enc->crtc_frame_event_cb(
1429 				dpu_enc->crtc_frame_event_cb_data, event);
1430 	}
1431 }
1432 
1433 static void dpu_encoder_off_work(struct work_struct *work)
1434 {
1435 	struct dpu_encoder_virt *dpu_enc = container_of(work,
1436 			struct dpu_encoder_virt, delayed_off_work.work);
1437 
1438 	dpu_encoder_resource_control(&dpu_enc->base,
1439 						DPU_ENC_RC_EVENT_ENTER_IDLE);
1440 
1441 	dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
1442 				DPU_ENCODER_FRAME_EVENT_IDLE);
1443 }
1444 
1445 /**
1446  * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
1447  * @drm_enc: Pointer to drm encoder structure
1448  * @phys: Pointer to physical encoder structure
1449  * @extra_flush_bits: Additional bit mask to include in flush trigger
1450  */
1451 static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
1452 		struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
1453 {
1454 	struct dpu_hw_ctl *ctl;
1455 	int pending_kickoff_cnt;
1456 	u32 ret = UINT_MAX;
1457 
1458 	if (!phys->hw_pp) {
1459 		DPU_ERROR("invalid pingpong hw\n");
1460 		return;
1461 	}
1462 
1463 	ctl = phys->hw_ctl;
1464 	if (!ctl->ops.trigger_flush) {
1465 		DPU_ERROR("missing trigger cb\n");
1466 		return;
1467 	}
1468 
1469 	pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
1470 
1471 	if (extra_flush_bits && ctl->ops.update_pending_flush)
1472 		ctl->ops.update_pending_flush(ctl, extra_flush_bits);
1473 
1474 	ctl->ops.trigger_flush(ctl);
1475 
1476 	if (ctl->ops.get_pending_flush)
1477 		ret = ctl->ops.get_pending_flush(ctl);
1478 
1479 	trace_dpu_enc_trigger_flush(DRMID(drm_enc),
1480 			dpu_encoder_helper_get_intf_type(phys->intf_mode),
1481 			phys->intf_idx, phys->wb_idx,
1482 			pending_kickoff_cnt, ctl->idx,
1483 			extra_flush_bits, ret);
1484 }
1485 
1486 /**
1487  * _dpu_encoder_trigger_start - trigger start for a physical encoder
1488  * @phys: Pointer to physical encoder structure
1489  */
1490 static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
1491 {
1492 	if (!phys) {
1493 		DPU_ERROR("invalid argument(s)\n");
1494 		return;
1495 	}
1496 
1497 	if (!phys->hw_pp) {
1498 		DPU_ERROR("invalid pingpong hw\n");
1499 		return;
1500 	}
1501 
1502 	if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
1503 		phys->ops.trigger_start(phys);
1504 }
1505 
1506 void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
1507 {
1508 	struct dpu_hw_ctl *ctl;
1509 
1510 	ctl = phys_enc->hw_ctl;
1511 	if (ctl->ops.trigger_start) {
1512 		ctl->ops.trigger_start(ctl);
1513 		trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
1514 	}
1515 }
1516 
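/*
 * Wait on info->wq until info->atomic_cnt drops to zero or the timeout
 * expires, retrying as long as time remains.  Returns the remaining jiffies
 * when the condition was met, or 0 on timeout (wait_event_timeout semantics).
 */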
1517 static int dpu_encoder_helper_wait_event_timeout(
1518 		int32_t drm_id,
1519 		u32 irq_idx,
1520 		struct dpu_encoder_wait_info *info)
1521 {
1522 	int rc = 0;
1523 	s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
1524 	s64 jiffies = msecs_to_jiffies(info->timeout_ms);
1525 	s64 time;
1526 
1527 	do {
1528 		rc = wait_event_timeout(*(info->wq),
1529 				atomic_read(info->atomic_cnt) == 0, jiffies);
1530 		time = ktime_to_ms(ktime_get());
1531 
1532 		trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time,
1533 						 expected_time,
1534 						 atomic_read(info->atomic_cnt));
1535 	/* If we timed out, the count is still pending and time remains, wait again */
1536 	} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
1537 			(time < expected_time));
1538 
1539 	return rc;
1540 }
1541 
1542 static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
1543 {
1544 	struct dpu_encoder_virt *dpu_enc;
1545 	struct dpu_hw_ctl *ctl;
1546 	int rc;
1547 	struct drm_encoder *drm_enc;
1548 
1549 	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
1550 	ctl = phys_enc->hw_ctl;
1551 	drm_enc = phys_enc->parent;
1552 
1553 	if (!ctl->ops.reset)
1554 		return;
1555 
1556 	DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
1557 		      ctl->idx);
1558 
1559 	rc = ctl->ops.reset(ctl);
1560 	if (rc) {
1561 		DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n",  ctl->idx);
1562 		msm_disp_snapshot_state(drm_enc->dev);
1563 	}
1564 
1565 	phys_enc->enable_state = DPU_ENC_ENABLED;
1566 }
1567 
1568 /**
1569  * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
1570  *	Iterate through the physical encoders and perform consolidated flush
1571  *	and/or control start triggering as needed. This is done in the virtual
1572  *	encoder rather than the individual physical ones in order to handle
1573  *	use cases that require visibility into multiple physical encoders at
1574  *	a time.
1575  * @dpu_enc: Pointer to virtual encoder structure
1576  */
1577 static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
1578 {
1579 	struct dpu_hw_ctl *ctl;
1580 	uint32_t i, pending_flush;
1581 	unsigned long lock_flags;
1582 
1583 	pending_flush = 0x0;
1584 
1585 	/* update pending counts and trigger kickoff ctl flush atomically */
1586 	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1587 
1588 	/* don't perform flush/start operations for slave encoders */
1589 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1590 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1591 
1592 		if (phys->enable_state == DPU_ENC_DISABLED)
1593 			continue;
1594 
1595 		ctl = phys->hw_ctl;
1596 
1597 		/*
1598 		 * This is cleared in frame_done worker, which isn't invoked
1599 		 * for async commits. So don't set this for async, since it'll
1600 		 * roll over to the next commit.
1601 		 */
1602 		if (phys->split_role != ENC_ROLE_SLAVE)
1603 			set_bit(i, dpu_enc->frame_busy_mask);
1604 
1605 		if (!phys->ops.needs_single_flush ||
1606 				!phys->ops.needs_single_flush(phys))
1607 			_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
1608 		else if (ctl->ops.get_pending_flush)
1609 			pending_flush |= ctl->ops.get_pending_flush(ctl);
1610 	}
1611 
1612 	/* for split flush, combine pending flush masks and send to master */
1613 	if (pending_flush && dpu_enc->cur_master) {
1614 		_dpu_encoder_trigger_flush(
1615 				&dpu_enc->base,
1616 				dpu_enc->cur_master,
1617 				pending_flush);
1618 	}
1619 
1620 	_dpu_encoder_trigger_start(dpu_enc->cur_master);
1621 
1622 	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1623 }
1624 
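/*
 * Clear any stale pending-flush state on every CTL owned by this encoder;
 * for the command-mode master, also call trigger_pending() on its CTL.
 */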
1625 void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
1626 {
1627 	struct dpu_encoder_virt *dpu_enc;
1628 	struct dpu_encoder_phys *phys;
1629 	unsigned int i;
1630 	struct dpu_hw_ctl *ctl;
1631 	struct msm_display_info *disp_info;
1632 
1633 	if (!drm_enc) {
1634 		DPU_ERROR("invalid encoder\n");
1635 		return;
1636 	}
1637 	dpu_enc = to_dpu_encoder_virt(drm_enc);
1638 	disp_info = &dpu_enc->disp_info;
1639 
1640 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1641 		phys = dpu_enc->phys_encs[i];
1642 
1643 		ctl = phys->hw_ctl;
1644 		if (ctl->ops.clear_pending_flush)
1645 			ctl->ops.clear_pending_flush(ctl);
1646 
1647 		/* update only for command mode primary ctl */
1648 		if ((phys == dpu_enc->cur_master) &&
1649 		    disp_info->is_cmd_mode
1650 		    && ctl->ops.trigger_pending)
1651 			ctl->ops.trigger_pending(ctl);
1652 	}
1653 }
1654 
1655 static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
1656 		struct drm_display_mode *mode)
1657 {
1658 	u64 pclk_rate;
1659 	u32 pclk_period;
1660 	u32 line_time;
1661 
1662 	/*
1663 	 * For linetime calculation, only operate on master encoder.
1664 	 */
1665 	if (!dpu_enc->cur_master)
1666 		return 0;
1667 
1668 	if (!dpu_enc->cur_master->ops.get_line_count) {
1669 		DPU_ERROR("get_line_count function not defined\n");
1670 		return 0;
1671 	}
1672 
1673 	pclk_rate = mode->clock; /* pixel clock in kHz */
1674 	if (pclk_rate == 0) {
1675 		DPU_ERROR("pclk is 0, cannot calculate line time\n");
1676 		return 0;
1677 	}
1678 
1679 	pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
1680 	if (pclk_period == 0) {
1681 		DPU_ERROR("pclk period is 0\n");
1682 		return 0;
1683 	}
1684 
1685 	/*
1686 	 * Line time calculation based on Pixel clock and HTOTAL.
1687 	 * Final unit is in ns.
1688 	 */
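	/*
	 * For example, with a hypothetical 148500 kHz pixel clock and an
	 * htotal of 2200: pclk_period = DIV_ROUND_UP(10^9, 148500) = 6735
	 * (the pixel period in picoseconds), so
	 * line_time = 6735 * 2200 / 1000 = 14817 ns (~14.8 us per line).
	 */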
1689 	line_time = (pclk_period * mode->htotal) / 1000;
1690 	if (line_time == 0) {
1691 		DPU_ERROR("line time calculation is 0\n");
1692 		return 0;
1693 	}
1694 
1695 	DPU_DEBUG_ENC(dpu_enc,
1696 			"clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
1697 			pclk_rate, pclk_period, line_time);
1698 
1699 	return line_time;
1700 }
1701 
1702 int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
1703 {
1704 	struct drm_display_mode *mode;
1705 	struct dpu_encoder_virt *dpu_enc;
1706 	u32 cur_line;
1707 	u32 line_time;
1708 	u32 vtotal, time_to_vsync;
1709 	ktime_t cur_time;
1710 
1711 	dpu_enc = to_dpu_encoder_virt(drm_enc);
1712 
1713 	if (!drm_enc->crtc || !drm_enc->crtc->state) {
1714 		DPU_ERROR("crtc/crtc state object is NULL\n");
1715 		return -EINVAL;
1716 	}
1717 	mode = &drm_enc->crtc->state->adjusted_mode;
1718 
1719 	line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
1720 	if (!line_time)
1721 		return -EINVAL;
1722 
1723 	cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
1724 
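	/*
	 * Lines remaining until the next vsync; if the line counter already
	 * reads at or beyond vtotal, assume a full frame remains.
	 */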
1725 	vtotal = mode->vtotal;
1726 	if (cur_line >= vtotal)
1727 		time_to_vsync = line_time * vtotal;
1728 	else
1729 		time_to_vsync = line_time * (vtotal - cur_line);
1730 
1731 	if (time_to_vsync == 0) {
1732 		DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
1733 				vtotal);
1734 		return -EINVAL;
1735 	}
1736 
1737 	cur_time = ktime_get();
1738 	*wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
1739 
1740 	DPU_DEBUG_ENC(dpu_enc,
1741 			"cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
1742 			cur_line, vtotal, time_to_vsync,
1743 			ktime_to_ms(cur_time),
1744 			ktime_to_ms(*wakeup_time));
1745 	return 0;
1746 }
1747 
1748 static void dpu_encoder_vsync_event_handler(struct timer_list *t)
1749 {
1750 	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
1751 			vsync_event_timer);
1752 	struct drm_encoder *drm_enc = &dpu_enc->base;
1753 	struct msm_drm_private *priv;
1754 	struct msm_drm_thread *event_thread;
1755 
1756 	if (!drm_enc->dev || !drm_enc->crtc) {
1757 		DPU_ERROR("invalid parameters\n");
1758 		return;
1759 	}
1760 
1761 	priv = drm_enc->dev->dev_private;
1762 
1763 	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
1764 		DPU_ERROR("invalid crtc index\n");
1765 		return;
1766 	}
1767 	event_thread = &priv->event_thread[drm_enc->crtc->index];
1768 	if (!event_thread) {
1769 		DPU_ERROR("event_thread not found for crtc:%d\n",
1770 				drm_enc->crtc->index);
1771 		return;
1772 	}
1773 
1774 	del_timer(&dpu_enc->vsync_event_timer);
1775 }
1776 
1777 static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
1778 {
1779 	struct dpu_encoder_virt *dpu_enc = container_of(work,
1780 			struct dpu_encoder_virt, vsync_event_work);
1781 	ktime_t wakeup_time;
1782 
1783 	if (dpu_encoder_vsync_time(&dpu_enc->base, &wakeup_time))
1784 		return;
1785 
1786 	trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
1787 	mod_timer(&dpu_enc->vsync_event_timer,
1788 			nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
1789 }
1790 
1791 static u32
1792 dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
1793 				  u32 enc_ip_width)
1794 {
1795 	int ssm_delay, total_pixels, soft_slice_per_enc;
1796 
1797 	soft_slice_per_enc = enc_ip_width / dsc->slice_width;
1798 
1799 	/*
1800 	 * minimum number of initial line pixels is a sum of:
1801 	 * 1. sub-stream multiplexer delay (83 groups for 8bpc,
1802 	 *    91 for 10 bpc) * 3
1803 	 * 2. for two soft slice cases, add extra sub-stream multiplexer * 3
1804 	 * 3. the initial xmit delay
1805 	 * 4. total pipeline delay through the "lock step" of encoder (47)
1806 	 * 5. 6 additional pixels as the output of the rate buffer is
1807 	 *    48 bits wide
1808 	 */
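	/*
	 * Worked example with hypothetical values: for 8 bpc (ssm_delay = 84),
	 * initial_xmit_delay = 512 and slice_width = 540, a single soft slice
	 * gives total_pixels = 84 * 3 + 512 + 47 = 811, so
	 * DIV_ROUND_UP(811, 540) = 2 initial lines.
	 */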
1809 	ssm_delay = ((dsc->bits_per_component < 10) ? 84 : 92);
1810 	total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
1811 	if (soft_slice_per_enc > 1)
1812 		total_pixels += (ssm_delay * 3);
1813 	return DIV_ROUND_UP(total_pixels, dsc->slice_width);
1814 }
1815 
1816 static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
1817 				     struct dpu_hw_pingpong *hw_pp,
1818 				     struct drm_dsc_config *dsc,
1819 				     u32 common_mode,
1820 				     u32 initial_lines)
1821 {
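	/* all hardware ops below are optional, so guard each call */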
1822 	if (hw_dsc->ops.dsc_config)
1823 		hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);
1824 
1825 	if (hw_dsc->ops.dsc_config_thresh)
1826 		hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);
1827 
1828 	if (hw_pp->ops.setup_dsc)
1829 		hw_pp->ops.setup_dsc(hw_pp);
1830 
1831 	if (hw_dsc->ops.dsc_bind_pingpong_blk)
1832 		hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, true, hw_pp->idx);
1833 
1834 	if (hw_pp->ops.enable_dsc)
1835 		hw_pp->ops.enable_dsc(hw_pp);
1836 }
1837 
1838 static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
1839 				 struct drm_dsc_config *dsc)
1840 {
1841 	/* coded only for the 2 LM, 2 encoder, 1 DSC configuration */
1842 	struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
1843 	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
1844 	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
1845 	int this_frame_slices;
1846 	int intf_ip_w, enc_ip_w;
1847 	int dsc_common_mode;
1848 	int pic_width;
1849 	u32 initial_lines;
1850 	int i;
1851 
1852 	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
1853 		hw_pp[i] = dpu_enc->hw_pp[i];
1854 		hw_dsc[i] = dpu_enc->hw_dsc[i];
1855 
1856 		if (!hw_pp[i] || !hw_dsc[i]) {
1857 			DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
1858 			return;
1859 		}
1860 	}
1861 
1862 	dsc_common_mode = 0;
1863 	pic_width = dsc->pic_width;
1864 
1865 	dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
1866 	if (enc_master->intf_mode == INTF_MODE_VIDEO)
1867 		dsc_common_mode |= DSC_MODE_VIDEO;
1868 
1869 	this_frame_slices = pic_width / dsc->slice_width;
1870 	intf_ip_w = this_frame_slices * dsc->slice_width;
1871 
1872 	/*
1873 	 * DSC merge case: when using 2 encoders for the same stream,
1874 	 * the number of slices must be the same on both encoders.
1875 	 */
1876 	enc_ip_w = intf_ip_w / 2;
1877 	initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
1878 
1879 	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
1880 		dpu_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], dsc, dsc_common_mode, initial_lines);
1881 }
1882 
1883 void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
1884 {
1885 	struct dpu_encoder_virt *dpu_enc;
1886 	struct dpu_encoder_phys *phys;
1887 	bool needs_hw_reset = false;
1888 	unsigned int i;
1889 
1890 	dpu_enc = to_dpu_encoder_virt(drm_enc);
1891 
1892 	trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
1893 
1894 	/* prepare for next kickoff, may include waiting on previous kickoff */
1895 	DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
1896 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1897 		phys = dpu_enc->phys_encs[i];
1898 		if (phys->ops.prepare_for_kickoff)
1899 			phys->ops.prepare_for_kickoff(phys);
1900 		if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
1901 			needs_hw_reset = true;
1902 	}
1903 	DPU_ATRACE_END("enc_prepare_for_kickoff");
1904 
1905 	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
1906 
1907 	/* if any phys needs reset, reset all phys, in-order */
1908 	if (needs_hw_reset) {
1909 		trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
1910 		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1911 			dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
1912 		}
1913 	}
1914 
1915 	if (dpu_enc->dsc)
1916 		dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
1917 }
1918 
1919 bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
1920 {
1921 	struct dpu_encoder_virt *dpu_enc;
1922 	unsigned int i;
1923 	struct dpu_encoder_phys *phys;
1924 
1925 	dpu_enc = to_dpu_encoder_virt(drm_enc);
1926 
1927 	if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
1928 		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1929 			phys = dpu_enc->phys_encs[i];
1930 			if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) {
1931 				DPU_DEBUG("invalid FB, not kicking off\n");
1932 				return false;
1933 			}
1934 		}
1935 	}
1936 
1937 	return true;
1938 }
1939 
1940 void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
1941 {
1942 	struct dpu_encoder_virt *dpu_enc;
1943 	struct dpu_encoder_phys *phys;
1944 	ktime_t wakeup_time;
1945 	unsigned long timeout_ms;
1946 	unsigned int i;
1947 
1948 	DPU_ATRACE_BEGIN("encoder_kickoff");
1949 	dpu_enc = to_dpu_encoder_virt(drm_enc);
1950 
1951 	trace_dpu_enc_kickoff(DRMID(drm_enc));
1952 
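	/*
	 * Convert the frame-count based timeout into milliseconds using the
	 * refresh rate of the current adjusted mode.
	 */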
1953 	timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
1954 			drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
1955 
1956 	atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
1957 	mod_timer(&dpu_enc->frame_done_timer,
1958 			jiffies + msecs_to_jiffies(timeout_ms));
1959 
1960 	/* All phys encs are ready to go, trigger the kickoff */
1961 	_dpu_encoder_kickoff_phys(dpu_enc);
1962 
1963 	/* allow phys encs to handle any post-kickoff business */
1964 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1965 		phys = dpu_enc->phys_encs[i];
1966 		if (phys->ops.handle_post_kickoff)
1967 			phys->ops.handle_post_kickoff(phys);
1968 	}
1969 
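	/*
	 * For DSI displays, re-arm the vsync event timer to expire around the
	 * next vsync as estimated by dpu_encoder_vsync_time().
	 */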
1970 	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
1971 			!dpu_encoder_vsync_time(drm_enc, &wakeup_time)) {
1972 		trace_dpu_enc_early_kickoff(DRMID(drm_enc),
1973 					    ktime_to_ms(wakeup_time));
1974 		mod_timer(&dpu_enc->vsync_event_timer,
1975 				nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
1976 	}
1977 
1978 	DPU_ATRACE_END("encoder_kickoff");
1979 }
1980 
1981 static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
1982 {
1983 	struct dpu_hw_mixer_cfg mixer;
1984 	int i, num_lm;
1985 	struct dpu_global_state *global_state;
1986 	struct dpu_hw_blk *hw_lm[2];
1987 	struct dpu_hw_mixer *hw_mixer[2];
1988 	struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
1989 
1990 	memset(&mixer, 0, sizeof(mixer));
1991 
1992 	/* reset all mixers for this encoder */
1993 	if (phys_enc->hw_ctl->ops.clear_all_blendstages)
1994 		phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
1995 
1996 	global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);
1997 
1998 	num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
1999 		phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
2000 
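	/*
	 * Mark each mixer reserved for this encoder for flush and clear its
	 * blend stages.
	 */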
2001 	for (i = 0; i < num_lm; i++) {
2002 		hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
2003 		if (phys_enc->hw_ctl->ops.update_pending_flush_mixer)
2004 			phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
2005 
2006 		/* clear all blendstages */
2007 		if (phys_enc->hw_ctl->ops.setup_blendstage)
2008 			phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
2009 	}
2010 }
2011 
2012 void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
2013 {
2014 	struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
2015 	struct dpu_hw_intf_cfg intf_cfg = { 0 };
2016 	int i;
2017 	struct dpu_encoder_virt *dpu_enc;
2018 
2019 	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
2020 
2021 	phys_enc->hw_ctl->ops.reset(ctl);
2022 
2023 	dpu_encoder_helper_reset_mixers(phys_enc);
2024 
2025 	/*
2026 	 * TODO: move the once-only operation like CTL flush/trigger
2027 	 * into dpu_encoder_virt_disable() and all operations which need
2028 	 * to be done per phys encoder into the phys_disable() op.
2029 	 */
2030 	if (phys_enc->hw_wb) {
2031 		/* disable the PP block */
2032 		if (phys_enc->hw_wb->ops.bind_pingpong_blk)
2033 			phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, false,
2034 					phys_enc->hw_pp->idx);
2035 
2036 		/* mark WB flush as pending */
2037 		if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
2038 			phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
2039 	} else {
2040 		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2041 			if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
2042 				phys_enc->hw_intf->ops.bind_pingpong_blk(
2043 						dpu_enc->phys_encs[i]->hw_intf, false,
2044 						dpu_enc->phys_encs[i]->hw_pp->idx);
2045 
2046 			/* mark INTF flush as pending */
2047 			if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
2048 				phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
2049 						dpu_enc->phys_encs[i]->hw_intf->idx);
2050 		}
2051 	}
2052 
2053 	/* reset the merge 3D HW block */
2054 	if (phys_enc->hw_pp->merge_3d) {
2055 		phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
2056 				BLEND_3D_NONE);
2057 		if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
2058 			phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
2059 					phys_enc->hw_pp->merge_3d->idx);
2060 	}
2061 
2062 	intf_cfg.stream_sel = 0; /* Don't care value for video mode */
2063 	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
2064 
2065 	if (phys_enc->hw_intf)
2066 		intf_cfg.intf = phys_enc->hw_intf->idx;
2067 	if (phys_enc->hw_wb)
2068 		intf_cfg.wb = phys_enc->hw_wb->idx;
2069 
2070 	if (phys_enc->hw_pp->merge_3d)
2071 		intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
2072 
2073 	if (ctl->ops.reset_intf_cfg)
2074 		ctl->ops.reset_intf_cfg(ctl, &intf_cfg);
2075 
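	/*
	 * Flush and start the CTL so the disable programming above takes
	 * effect, then clear the pending flush bookkeeping.
	 */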
2076 	ctl->ops.trigger_flush(ctl);
2077 	ctl->ops.trigger_start(ctl);
2078 	ctl->ops.clear_pending_flush(ctl);
2079 }
2080 
2081 void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
2082 {
2083 	struct dpu_encoder_virt *dpu_enc;
2084 	struct dpu_encoder_phys *phys;
2085 	int i;
2086 
2087 	if (!drm_enc) {
2088 		DPU_ERROR("invalid encoder\n");
2089 		return;
2090 	}
2091 	dpu_enc = to_dpu_encoder_virt(drm_enc);
2092 
2093 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2094 		phys = dpu_enc->phys_encs[i];
2095 		if (phys->ops.prepare_commit)
2096 			phys->ops.prepare_commit(phys);
2097 	}
2098 }
2099 
2100 #ifdef CONFIG_DEBUG_FS
2101 static int _dpu_encoder_status_show(struct seq_file *s, void *data)
2102 {
2103 	struct dpu_encoder_virt *dpu_enc = s->private;
2104 	int i;
2105 
2106 	mutex_lock(&dpu_enc->enc_lock);
2107 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2108 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
2109 
2110 		seq_printf(s, "intf:%d  wb:%d  vsync:%8d     underrun:%8d    ",
2111 				phys->intf_idx - INTF_0, phys->wb_idx - WB_0,
2112 				atomic_read(&phys->vsync_cnt),
2113 				atomic_read(&phys->underrun_cnt));
2114 
2115 		seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode));
2116 	}
2117 	mutex_unlock(&dpu_enc->enc_lock);
2118 
2119 	return 0;
2120 }
2121 
2122 DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);
2123 
2124 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
2125 {
2126 	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
2127 	int i;
2128 
2129 	char name[DPU_NAME_SIZE];
2130 
2131 	if (!drm_enc->dev) {
2132 		DPU_ERROR("invalid encoder or kms\n");
2133 		return -EINVAL;
2134 	}
2135 
2136 	snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);
2137 
2138 	/* create overall sub-directory for the encoder */
2139 	dpu_enc->debugfs_root = debugfs_create_dir(name,
2140 			drm_enc->dev->primary->debugfs_root);
2141 
2142 	/* don't error check these */
2143 	debugfs_create_file("status", 0600,
2144 		dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);
2145 
2146 	for (i = 0; i < dpu_enc->num_phys_encs; i++)
2147 		if (dpu_enc->phys_encs[i]->ops.late_register)
2148 			dpu_enc->phys_encs[i]->ops.late_register(
2149 					dpu_enc->phys_encs[i],
2150 					dpu_enc->debugfs_root);
2151 
2152 	return 0;
2153 }
2154 #else
2155 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
2156 {
2157 	return 0;
2158 }
2159 #endif
2160 
2161 static int dpu_encoder_late_register(struct drm_encoder *encoder)
2162 {
2163 	return _dpu_encoder_init_debugfs(encoder);
2164 }
2165 
2166 static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
2167 {
2168 	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
2169 
2170 	debugfs_remove_recursive(dpu_enc->debugfs_root);
2171 }
2172 
2173 static int dpu_encoder_virt_add_phys_encs(
2174 		struct msm_display_info *disp_info,
2175 		struct dpu_encoder_virt *dpu_enc,
2176 		struct dpu_enc_phys_init_params *params)
2177 {
2178 	struct dpu_encoder_phys *enc = NULL;
2179 
2180 	DPU_DEBUG_ENC(dpu_enc, "\n");
2181 
2182 	/*
2183 	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
2184 	 * in this function, check up-front.
2185 	 */
2186 	if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
2187 			ARRAY_SIZE(dpu_enc->phys_encs)) {
2188 		DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
2189 			  dpu_enc->num_phys_encs);
2190 		return -EINVAL;
2191 	}
2192 
2193 
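	/*
	 * Pick the physical encoder backend from the display type: writeback
	 * for virtual encoders, otherwise command or video mode.
	 */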
2194 	if (disp_info->intf_type == DRM_MODE_ENCODER_VIRTUAL) {
2195 		enc = dpu_encoder_phys_wb_init(params);
2196 
2197 		if (IS_ERR(enc)) {
2198 			DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
2199 				PTR_ERR(enc));
2200 			return PTR_ERR(enc);
2201 		}
2202 
2203 		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
2204 		++dpu_enc->num_phys_encs;
2205 	} else if (disp_info->is_cmd_mode) {
2206 		enc = dpu_encoder_phys_cmd_init(params);
2207 
2208 		if (IS_ERR(enc)) {
2209 			DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
2210 				PTR_ERR(enc));
2211 			return PTR_ERR(enc);
2212 		}
2213 
2214 		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
2215 		++dpu_enc->num_phys_encs;
2216 	} else {
2217 		enc = dpu_encoder_phys_vid_init(params);
2218 
2219 		if (IS_ERR(enc)) {
2220 			DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
2221 				PTR_ERR(enc));
2222 			return PTR_ERR(enc);
2223 		}
2224 
2225 		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
2226 		++dpu_enc->num_phys_encs;
2227 	}
2228 
2229 	if (params->split_role == ENC_ROLE_SLAVE)
2230 		dpu_enc->cur_slave = enc;
2231 	else
2232 		dpu_enc->cur_master = enc;
2233 
2234 	return 0;
2235 }
2236 
2237 static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
2238 				 struct dpu_kms *dpu_kms,
2239 				 struct msm_display_info *disp_info)
2240 {
2241 	int ret = 0;
2242 	int i = 0;
2243 	enum dpu_intf_type intf_type = INTF_NONE;
2244 	struct dpu_enc_phys_init_params phys_params;
2245 
2246 	if (!dpu_enc) {
2247 		DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
2248 		return -EINVAL;
2249 	}
2250 
2251 	dpu_enc->cur_master = NULL;
2252 
2253 	memset(&phys_params, 0, sizeof(phys_params));
2254 	phys_params.dpu_kms = dpu_kms;
2255 	phys_params.parent = &dpu_enc->base;
2256 	phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
2257 
2258 	switch (disp_info->intf_type) {
2259 	case DRM_MODE_ENCODER_DSI:
2260 		intf_type = INTF_DSI;
2261 		break;
2262 	case DRM_MODE_ENCODER_TMDS:
2263 		intf_type = INTF_DP;
2264 		break;
2265 	case DRM_MODE_ENCODER_VIRTUAL:
2266 		intf_type = INTF_WB;
2267 		break;
2268 	}
2269 
2270 	WARN_ON(disp_info->num_of_h_tiles < 1);
2271 
2272 	DPU_DEBUG("disp_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
2273 
2274 	if (disp_info->intf_type != DRM_MODE_ENCODER_VIRTUAL)
2275 		dpu_enc->idle_pc_supported =
2276 				dpu_kms->catalog->caps->has_idle_pc;
2277 
2278 	dpu_enc->dsc = disp_info->dsc;
2279 
2280 	mutex_lock(&dpu_enc->enc_lock);
2281 	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
2282 		/*
2283 		 * Left-most tile is at index 0, content is controller id
2284 		 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
2285 		 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
2286 		 */
2287 		u32 controller_id = disp_info->h_tile_instance[i];
2288 
2289 		if (disp_info->num_of_h_tiles > 1) {
2290 			if (i == 0)
2291 				phys_params.split_role = ENC_ROLE_MASTER;
2292 			else
2293 				phys_params.split_role = ENC_ROLE_SLAVE;
2294 		} else {
2295 			phys_params.split_role = ENC_ROLE_SOLO;
2296 		}
2297 
2298 		DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
2299 				i, controller_id, phys_params.split_role);
2300 
2301 		phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
2302 													intf_type,
2303 													controller_id);
2304 
2305 		phys_params.wb_idx = dpu_encoder_get_wb(dpu_kms->catalog,
2306 				intf_type, controller_id);
2307 		/*
2308 		 * The phys_params might represent either an INTF or a WB unit, but not
2309 		 * both of them at the same time.
2310 		 */
2311 		if ((phys_params.intf_idx == INTF_MAX) &&
2312 				(phys_params.wb_idx == WB_MAX)) {
2313 			DPU_ERROR_ENC(dpu_enc, "could not get intf or wb: type %d, id %d\n",
2314 						  intf_type, controller_id);
2315 			ret = -EINVAL;
2316 		}
2317 
2318 		if ((phys_params.intf_idx != INTF_MAX) &&
2319 				(phys_params.wb_idx != WB_MAX)) {
2320 			DPU_ERROR_ENC(dpu_enc, "both intf and wb present: type %d, id %d\n",
2321 						  intf_type, controller_id);
2322 			ret = -EINVAL;
2323 		}
2324 
2325 		if (!ret) {
2326 			ret = dpu_encoder_virt_add_phys_encs(disp_info,
2327 					dpu_enc, &phys_params);
2328 			if (ret)
2329 				DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
2330 		}
2331 	}
2332 
2333 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2334 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
2335 		atomic_set(&phys->vsync_cnt, 0);
2336 		atomic_set(&phys->underrun_cnt, 0);
2337 
2338 		if (phys->intf_idx >= INTF_0 && phys->intf_idx < INTF_MAX)
2339 			phys->hw_intf = dpu_rm_get_intf(&dpu_kms->rm, phys->intf_idx);
2340 
2341 		if (phys->wb_idx >= WB_0 && phys->wb_idx < WB_MAX)
2342 			phys->hw_wb = dpu_rm_get_wb(&dpu_kms->rm, phys->wb_idx);
2343 
2344 		if (!phys->hw_intf && !phys->hw_wb) {
2345 			DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i);
2346 			ret = -EINVAL;
2347 		}
2348 
2349 		if (phys->hw_intf && phys->hw_wb) {
2350 			DPU_ERROR_ENC(dpu_enc,
2351 					"invalid phys both intf and wb block at idx: %d\n", i);
2352 			ret = -EINVAL;
2353 		}
2354 	}
2355 
2356 	mutex_unlock(&dpu_enc->enc_lock);
2357 
2358 	return ret;
2359 }
2360 
2361 static void dpu_encoder_frame_done_timeout(struct timer_list *t)
2362 {
2363 	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
2364 			frame_done_timer);
2365 	struct drm_encoder *drm_enc = &dpu_enc->base;
2366 	u32 event;
2367 
2368 	if (!drm_enc->dev) {
2369 		DPU_ERROR("invalid parameters\n");
2370 		return;
2371 	}
2372 
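	/*
	 * Only report an error if a frame is still marked busy, a frame event
	 * callback is registered and the timeout value has not already been
	 * cleared; otherwise the timer fired spuriously.
	 */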
2373 	if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
2374 		DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
2375 			      DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
2376 		return;
2377 	} else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
2378 		DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
2379 		return;
2380 	}
2381 
2382 	DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
2383 
2384 	event = DPU_ENCODER_FRAME_EVENT_ERROR;
2385 	trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
2386 	dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
2387 }
2388 
2389 static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
2390 	.atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
2391 	.disable = dpu_encoder_virt_disable,
2392 	.enable = dpu_encoder_virt_enable,
2393 	.atomic_check = dpu_encoder_virt_atomic_check,
2394 };
2395 
2396 static const struct drm_encoder_funcs dpu_encoder_funcs = {
2397 		.destroy = dpu_encoder_destroy,
2398 		.late_register = dpu_encoder_late_register,
2399 		.early_unregister = dpu_encoder_early_unregister,
2400 };
2401 
2402 int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
2403 		struct msm_display_info *disp_info)
2404 {
2405 	struct msm_drm_private *priv = dev->dev_private;
2406 	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
2407 	struct drm_encoder *drm_enc = NULL;
2408 	struct dpu_encoder_virt *dpu_enc = NULL;
2409 	int ret = 0;
2410 
2411 	dpu_enc = to_dpu_encoder_virt(enc);
2412 
2413 	ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
2414 	if (ret)
2415 		goto fail;
2416 
2417 	atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
2418 	timer_setup(&dpu_enc->frame_done_timer,
2419 			dpu_encoder_frame_done_timeout, 0);
2420 
2421 	if (disp_info->intf_type == DRM_MODE_ENCODER_DSI)
2422 		timer_setup(&dpu_enc->vsync_event_timer,
2423 				dpu_encoder_vsync_event_handler,
2424 				0);
2425 	else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS)
2426 		dpu_enc->wide_bus_en = msm_dp_wide_bus_available(
2427 				priv->dp[disp_info->h_tile_instance[0]]);
2428 
2429 	INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
2430 			dpu_encoder_off_work);
2431 	dpu_enc->idle_timeout = IDLE_TIMEOUT;
2432 
2433 	kthread_init_work(&dpu_enc->vsync_event_work,
2434 			dpu_encoder_vsync_event_work_handler);
2435 
2436 	memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));
2437 
2438 	DPU_DEBUG_ENC(dpu_enc, "created\n");
2439 
2440 	return ret;
2441 
2442 fail:
2443 	DPU_ERROR("failed to create encoder\n");
2444 	if (drm_enc)
2445 		dpu_encoder_destroy(drm_enc);
2446 
2447 	return ret;
2448 
2450 }
2451 
2452 struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
2453 		int drm_enc_mode)
2454 {
2455 	struct dpu_encoder_virt *dpu_enc = NULL;
2456 	int rc = 0;
2457 
2458 	dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
2459 	if (!dpu_enc)
2460 		return ERR_PTR(-ENOMEM);
2461 
2462 
2463 	rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
2464 							  drm_enc_mode, NULL);
2465 	if (rc) {
2466 		devm_kfree(dev->dev, dpu_enc);
2467 		return ERR_PTR(rc);
2468 	}
2469 
2470 	drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
2471 
2472 	spin_lock_init(&dpu_enc->enc_spinlock);
2473 	dpu_enc->enabled = false;
2474 	mutex_init(&dpu_enc->enc_lock);
2475 	mutex_init(&dpu_enc->rc_lock);
2476 
2477 	return &dpu_enc->base;
2478 }
2479 
2480 int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
2481 	enum msm_event_wait event)
2482 {
2483 	int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
2484 	struct dpu_encoder_virt *dpu_enc = NULL;
2485 	int i, ret = 0;
2486 
2487 	if (!drm_enc) {
2488 		DPU_ERROR("invalid encoder\n");
2489 		return -EINVAL;
2490 	}
2491 	dpu_enc = to_dpu_encoder_virt(drm_enc);
2492 	DPU_DEBUG_ENC(dpu_enc, "\n");
2493 
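	/* wait on each physical encoder in turn and return the first error */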
2494 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2495 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
2496 
2497 		switch (event) {
2498 		case MSM_ENC_COMMIT_DONE:
2499 			fn_wait = phys->ops.wait_for_commit_done;
2500 			break;
2501 		case MSM_ENC_TX_COMPLETE:
2502 			fn_wait = phys->ops.wait_for_tx_complete;
2503 			break;
2504 		case MSM_ENC_VBLANK:
2505 			fn_wait = phys->ops.wait_for_vblank;
2506 			break;
2507 		default:
2508 			DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
2509 					event);
2510 			return -EINVAL;
2511 		}
2512 
2513 		if (fn_wait) {
2514 			DPU_ATRACE_BEGIN("wait_for_completion_event");
2515 			ret = fn_wait(phys);
2516 			DPU_ATRACE_END("wait_for_completion_event");
2517 			if (ret)
2518 				return ret;
2519 		}
2520 	}
2521 
2522 	return ret;
2523 }
2524 
2525 enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
2526 {
2527 	struct dpu_encoder_virt *dpu_enc = NULL;
2528 
2529 	if (!encoder) {
2530 		DPU_ERROR("invalid encoder\n");
2531 		return INTF_MODE_NONE;
2532 	}
2533 	dpu_enc = to_dpu_encoder_virt(encoder);
2534 
2535 	if (dpu_enc->cur_master)
2536 		return dpu_enc->cur_master->intf_mode;
2537 
2538 	if (dpu_enc->num_phys_encs)
2539 		return dpu_enc->phys_encs[0]->intf_mode;
2540 
2541 	return INTF_MODE_NONE;
2542 }
2543 
2544 unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
2545 {
2546 	struct drm_encoder *encoder = phys_enc->parent;
2547 	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
2548 
2549 	return dpu_enc->dsc_mask;
2550 }
2551