1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2020 Intel Corporation
4 */
5
6 #include "g4x_dp.h"
7 #include "i915_drv.h"
8 #include "i915_reg.h"
9 #include "intel_de.h"
10 #include "intel_display_power_well.h"
11 #include "intel_display_types.h"
12 #include "intel_dp.h"
13 #include "intel_dpio_phy.h"
14 #include "intel_dpll.h"
15 #include "intel_lvds.h"
16 #include "intel_pps.h"
17 #include "intel_quirks.h"
18
19 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
20 enum pipe pipe);
21
22 static void pps_init_delays(struct intel_dp *intel_dp);
23 static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
24
25 static const char *pps_name(struct drm_i915_private *i915,
26 struct intel_pps *pps)
27 {
28 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
29 switch (pps->pps_pipe) {
30 case INVALID_PIPE:
31 /*
32 * FIXME would be nice if we could guarantee
33 * to always have a valid PPS when calling this.
34 */
35 return "PPS <none>";
36 case PIPE_A:
37 return "PPS A";
38 case PIPE_B:
39 return "PPS B";
40 default:
41 MISSING_CASE(pps->pps_pipe);
42 break;
43 }
44 } else {
45 switch (pps->pps_idx) {
46 case 0:
47 return "PPS 0";
48 case 1:
49 return "PPS 1";
50 default:
51 MISSING_CASE(pps->pps_idx);
52 break;
53 }
54 }
55
56 return "PPS <invalid>";
57 }
58
59 intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
60 {
61 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
62 intel_wakeref_t wakeref;
63
64 /*
65 * See intel_pps_reset_all() for why we need a power domain reference here.
66 */
67 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
68 mutex_lock(&dev_priv->display.pps.mutex);
69
70 return wakeref;
71 }
72
73 intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
74 intel_wakeref_t wakeref)
75 {
76 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
77
78 mutex_unlock(&dev_priv->display.pps.mutex);
79 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
80
81 return 0;
82 }
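
/*
 * Usage sketch (illustrative only, not part of the driver): every PPS
 * register access in this file is expected to be bracketed by the two
 * helpers above, typically via the with_intel_pps_lock() wrapper used
 * throughout this file:
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_intel_pps_lock(intel_dp, wakeref)
 *		wait_panel_power_cycle(intel_dp);
 *
 * so that the POWER_DOMAIN_DISPLAY_CORE reference is held for exactly
 * as long as pps_mutex is.
 */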
83
84 static void
85 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
86 {
87 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
88 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
89 enum pipe pipe = intel_dp->pps.pps_pipe;
90 bool pll_enabled, release_cl_override = false;
91 enum dpio_phy phy = DPIO_PHY(pipe);
92 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
93 u32 DP;
94
95 if (drm_WARN(&dev_priv->drm,
96 intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
97 "skipping %s kick due to [ENCODER:%d:%s] being active\n",
98 pps_name(dev_priv, &intel_dp->pps),
99 dig_port->base.base.base.id, dig_port->base.base.name))
100 return;
101
102 drm_dbg_kms(&dev_priv->drm,
103 "kicking %s for [ENCODER:%d:%s]\n",
104 pps_name(dev_priv, &intel_dp->pps),
105 dig_port->base.base.base.id, dig_port->base.base.name);
106
107 /* Preserve the BIOS-computed detected bit. This is
108 * supposed to be read-only.
109 */
110 DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
111 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
112 DP |= DP_PORT_WIDTH(1);
113 DP |= DP_LINK_TRAIN_PAT_1;
114
115 if (IS_CHERRYVIEW(dev_priv))
116 DP |= DP_PIPE_SEL_CHV(pipe);
117 else
118 DP |= DP_PIPE_SEL(pipe);
119
120 pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
121
122 /*
123 * The DPLL for the pipe must be enabled for this to work.
124 * So enable it temporarily if it's not already enabled.
125 */
126 if (!pll_enabled) {
127 release_cl_override = IS_CHERRYVIEW(dev_priv) &&
128 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
129
130 if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
131 drm_err(&dev_priv->drm,
132 "Failed to force on PLL for pipe %c!\n",
133 pipe_name(pipe));
134 return;
135 }
136 }
137
138 /*
139 * Similar magic as in intel_dp_enable_port().
140 * We _must_ do this port enable + disable trick
141 * to make this power sequencer lock onto the port.
142 * Otherwise even VDD force bit won't work.
143 */
144 intel_de_write(dev_priv, intel_dp->output_reg, DP);
145 intel_de_posting_read(dev_priv, intel_dp->output_reg);
146
147 intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
148 intel_de_posting_read(dev_priv, intel_dp->output_reg);
149
150 intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
151 intel_de_posting_read(dev_priv, intel_dp->output_reg);
152
153 if (!pll_enabled) {
154 vlv_force_pll_off(dev_priv, pipe);
155
156 if (release_cl_override)
157 chv_phy_powergate_ch(dev_priv, phy, ch, false);
158 }
159 }
160
161 static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
162 {
163 struct intel_encoder *encoder;
164 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
165
166 /*
167 * We don't have a power sequencer currently.
168 * Pick one that's not used by other ports.
169 */
170 for_each_intel_dp(&dev_priv->drm, encoder) {
171 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
172
173 if (encoder->type == INTEL_OUTPUT_EDP) {
174 drm_WARN_ON(&dev_priv->drm,
175 intel_dp->pps.active_pipe != INVALID_PIPE &&
176 intel_dp->pps.active_pipe !=
177 intel_dp->pps.pps_pipe);
178
179 if (intel_dp->pps.pps_pipe != INVALID_PIPE)
180 pipes &= ~(1 << intel_dp->pps.pps_pipe);
181 } else {
182 drm_WARN_ON(&dev_priv->drm,
183 intel_dp->pps.pps_pipe != INVALID_PIPE);
184
185 if (intel_dp->pps.active_pipe != INVALID_PIPE)
186 pipes &= ~(1 << intel_dp->pps.active_pipe);
187 }
188 }
189
190 if (pipes == 0)
191 return INVALID_PIPE;
192
193 return ffs(pipes) - 1;
194 }
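
/*
 * Worked example (illustrative): if the other eDP port already owns
 * PPS A, the loop above clears the PIPE_A bit, leaving
 * pipes == (1 << PIPE_B), and ffs(pipes) - 1 == PIPE_B. If both
 * sequencers are claimed, pipes ends up 0 and INVALID_PIPE is
 * returned, letting the caller WARN and fall back.
 */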
195
196 static enum pipe
197 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
198 {
199 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
200 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
201 enum pipe pipe;
202
203 lockdep_assert_held(&dev_priv->display.pps.mutex);
204
205 /* We should never land here with regular DP ports */
206 drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
207
208 drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
209 intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);
210
211 if (intel_dp->pps.pps_pipe != INVALID_PIPE)
212 return intel_dp->pps.pps_pipe;
213
214 pipe = vlv_find_free_pps(dev_priv);
215
216 /*
217 * Didn't find one. This should not happen since there
218 * are two power sequencers and up to two eDP ports.
219 */
220 if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
221 pipe = PIPE_A;
222
223 vlv_steal_power_sequencer(dev_priv, pipe);
224 intel_dp->pps.pps_pipe = pipe;
225
226 drm_dbg_kms(&dev_priv->drm,
227 "picked %s for [ENCODER:%d:%s]\n",
228 pps_name(dev_priv, &intel_dp->pps),
229 dig_port->base.base.base.id, dig_port->base.base.name);
230
231 /* init power sequencer on this pipe and port */
232 pps_init_delays(intel_dp);
233 pps_init_registers(intel_dp, true);
234
235 /*
236 * Even vdd force doesn't work until we've made
237 * the power sequencer lock in on the port.
238 */
239 vlv_power_sequencer_kick(intel_dp);
240
241 return intel_dp->pps.pps_pipe;
242 }
243
244 static int
245 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
246 {
247 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
248 int pps_idx = intel_dp->pps.pps_idx;
249
250 lockdep_assert_held(&dev_priv->display.pps.mutex);
251
252 /* We should never land here with regular DP ports */
253 drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
254
255 if (!intel_dp->pps.pps_reset)
256 return pps_idx;
257
258 intel_dp->pps.pps_reset = false;
259
260 /*
261 * Only the HW needs to be reprogrammed, the SW state is fixed and
262 * has been set up during connector init.
263 */
264 pps_init_registers(intel_dp, false);
265
266 return pps_idx;
267 }
268
269 typedef bool (*pps_check)(struct drm_i915_private *dev_priv, int pps_idx);
270
271 static bool pps_has_pp_on(struct drm_i915_private *dev_priv, int pps_idx)
272 {
273 return intel_de_read(dev_priv, PP_STATUS(pps_idx)) & PP_ON;
274 }
275
276 static bool pps_has_vdd_on(struct drm_i915_private *dev_priv, int pps_idx)
277 {
278 return intel_de_read(dev_priv, PP_CONTROL(pps_idx)) & EDP_FORCE_VDD;
279 }
280
281 static bool pps_any(struct drm_i915_private *dev_priv, int pps_idx)
282 {
283 return true;
284 }
285
286 static enum pipe
287 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
288 enum port port, pps_check check)
289 {
290 enum pipe pipe;
291
292 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
293 u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
294 PANEL_PORT_SELECT_MASK;
295
296 if (port_sel != PANEL_PORT_SELECT_VLV(port))
297 continue;
298
299 if (!check(dev_priv, pipe))
300 continue;
301
302 return pipe;
303 }
304
305 return INVALID_PIPE;
306 }
307
308 static void
309 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
310 {
311 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
312 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
313 enum port port = dig_port->base.port;
314
315 lockdep_assert_held(&dev_priv->display.pps.mutex);
316
317 /* try to find a pipe with this port selected */
318 /* first pick one where the panel is on */
319 intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
320 pps_has_pp_on);
321 /* didn't find one? pick one where vdd is on */
322 if (intel_dp->pps.pps_pipe == INVALID_PIPE)
323 intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
324 pps_has_vdd_on);
325 /* didn't find one? pick one with just the correct port */
326 if (intel_dp->pps.pps_pipe == INVALID_PIPE)
327 intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
328 pps_any);
329
330 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
331 if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
332 drm_dbg_kms(&dev_priv->drm,
333 "[ENCODER:%d:%s] no initial power sequencer\n",
334 dig_port->base.base.base.id, dig_port->base.base.name);
335 return;
336 }
337
338 drm_dbg_kms(&dev_priv->drm,
339 "[ENCODER:%d:%s] initial power sequencer: %s\n",
340 dig_port->base.base.base.id, dig_port->base.base.name,
341 pps_name(dev_priv, &intel_dp->pps));
342 }
343
344 static int intel_num_pps(struct drm_i915_private *i915)
345 {
346 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
347 return 2;
348
349 if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
350 return 2;
351
352 if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
353 return 1;
354
355 if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
356 return 2;
357
358 return 1;
359 }
360
361 static bool intel_pps_is_valid(struct intel_dp *intel_dp)
362 {
363 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
364
365 if (intel_dp->pps.pps_idx == 1 &&
366 INTEL_PCH_TYPE(i915) >= PCH_ICP &&
367 INTEL_PCH_TYPE(i915) < PCH_MTP)
368 return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
369
370 return true;
371 }
372
373 static int
374 bxt_initial_pps_idx(struct drm_i915_private *i915, pps_check check)
375 {
376 int pps_idx, pps_num = intel_num_pps(i915);
377
378 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
379 if (check(i915, pps_idx))
380 return pps_idx;
381 }
382
383 return -1;
384 }
385
386 static bool
387 pps_initial_setup(struct intel_dp *intel_dp)
388 {
389 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
390 struct intel_connector *connector = intel_dp->attached_connector;
391 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
392
393 lockdep_assert_held(&i915->display.pps.mutex);
394
395 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
396 vlv_initial_power_sequencer_setup(intel_dp);
397 return true;
398 }
399
400 /* first ask the VBT */
401 if (intel_num_pps(i915) > 1)
402 intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
403 else
404 intel_dp->pps.pps_idx = 0;
405
406 if (drm_WARN_ON(&i915->drm, intel_dp->pps.pps_idx >= intel_num_pps(i915)))
407 intel_dp->pps.pps_idx = -1;
408
409 /* VBT wasn't parsed yet? pick one where the panel is on */
410 if (intel_dp->pps.pps_idx < 0)
411 intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_has_pp_on);
412 /* didn't find one? pick one where vdd is on */
413 if (intel_dp->pps.pps_idx < 0)
414 intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_has_vdd_on);
415 /* didn't find one? pick any */
416 if (intel_dp->pps.pps_idx < 0) {
417 intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_any);
418
419 drm_dbg_kms(&i915->drm,
420 "[ENCODER:%d:%s] no initial power sequencer, assuming %s\n",
421 encoder->base.base.id, encoder->base.name,
422 pps_name(i915, &intel_dp->pps));
423 } else {
424 drm_dbg_kms(&i915->drm,
425 "[ENCODER:%d:%s] initial power sequencer: %s\n",
426 encoder->base.base.id, encoder->base.name,
427 pps_name(i915, &intel_dp->pps));
428 }
429
430 return intel_pps_is_valid(intel_dp);
431 }
432
433 void intel_pps_reset_all(struct drm_i915_private *dev_priv)
434 {
435 struct intel_encoder *encoder;
436
437 if (drm_WARN_ON(&dev_priv->drm, !IS_LP(dev_priv)))
438 return;
439
440 if (!HAS_DISPLAY(dev_priv))
441 return;
442
443 /*
444 * We can't grab pps_mutex here due to deadlock with power_domain
445 * mutex when power_domain functions are called while holding pps_mutex.
446 * That also means that in order to use pps_pipe the code needs to
447 * hold both a power domain reference and pps_mutex, and the power domain
448 * reference get/put must be done while _not_ holding pps_mutex.
449 * pps_{lock,unlock}() do these steps in the correct order, so one
450 * should use them always.
451 */
452
453 for_each_intel_dp(&dev_priv->drm, encoder) {
454 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
455
456 drm_WARN_ON(&dev_priv->drm,
457 intel_dp->pps.active_pipe != INVALID_PIPE);
458
459 if (encoder->type != INTEL_OUTPUT_EDP)
460 continue;
461
462 if (DISPLAY_VER(dev_priv) >= 9)
463 intel_dp->pps.pps_reset = true;
464 else
465 intel_dp->pps.pps_pipe = INVALID_PIPE;
466 }
467 }
468
469 struct pps_registers {
470 i915_reg_t pp_ctrl;
471 i915_reg_t pp_stat;
472 i915_reg_t pp_on;
473 i915_reg_t pp_off;
474 i915_reg_t pp_div;
475 };
476
477 static void intel_pps_get_registers(struct intel_dp *intel_dp,
478 struct pps_registers *regs)
479 {
480 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
481 int pps_idx;
482
483 memset(regs, 0, sizeof(*regs));
484
485 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
486 pps_idx = vlv_power_sequencer_pipe(intel_dp);
487 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
488 pps_idx = bxt_power_sequencer_idx(intel_dp);
489 else
490 pps_idx = intel_dp->pps.pps_idx;
491
492 regs->pp_ctrl = PP_CONTROL(pps_idx);
493 regs->pp_stat = PP_STATUS(pps_idx);
494 regs->pp_on = PP_ON_DELAYS(pps_idx);
495 regs->pp_off = PP_OFF_DELAYS(pps_idx);
496
497 /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
498 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
499 INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
500 regs->pp_div = INVALID_MMIO_REG;
501 else
502 regs->pp_div = PP_DIVISOR(pps_idx);
503 }
504
505 static i915_reg_t
506 _pp_ctrl_reg(struct intel_dp *intel_dp)
507 {
508 struct pps_registers regs;
509
510 intel_pps_get_registers(intel_dp, &regs);
511
512 return regs.pp_ctrl;
513 }
514
515 static i915_reg_t
516 _pp_stat_reg(struct intel_dp *intel_dp)
517 {
518 struct pps_registers regs;
519
520 intel_pps_get_registers(intel_dp, &regs);
521
522 return regs.pp_stat;
523 }
524
525 static bool edp_have_panel_power(struct intel_dp *intel_dp)
526 {
527 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
528
529 lockdep_assert_held(&dev_priv->display.pps.mutex);
530
531 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
532 intel_dp->pps.pps_pipe == INVALID_PIPE)
533 return false;
534
535 return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
536 }
537
538 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
539 {
540 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
541
542 lockdep_assert_held(&dev_priv->display.pps.mutex);
543
544 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
545 intel_dp->pps.pps_pipe == INVALID_PIPE)
546 return false;
547
548 return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
549 }
550
551 void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
552 {
553 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
554 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
555
556 if (!intel_dp_is_edp(intel_dp))
557 return;
558
559 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
560 drm_WARN(&dev_priv->drm, 1,
561 "[ENCODER:%d:%s] %s powered off while attempting AUX CH communication.\n",
562 dig_port->base.base.base.id, dig_port->base.base.name,
563 pps_name(dev_priv, &intel_dp->pps));
564 drm_dbg_kms(&dev_priv->drm,
565 "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
566 dig_port->base.base.base.id, dig_port->base.base.name,
567 pps_name(dev_priv, &intel_dp->pps),
568 intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
569 intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
570 }
571 }
572
573 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
574 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
575
576 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
577 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
578
579 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
580 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
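
/*
 * Reading of the mask/value pairs above (based purely on the field
 * names): "on" means PP_ON set, no power sequence in progress
 * (PP_SEQUENCE_NONE) and the state machine idling in the on state;
 * "off" only requires PP_ON clear with no sequence in progress; a
 * finished power cycle additionally requires PP_CYCLE_DELAY_ACTIVE to
 * have cleared and the state machine to report the off/idle state.
 * wait_panel_status() below polls PP_STATUS until
 * (status & mask) == value.
 */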
581
582 static void intel_pps_verify_state(struct intel_dp *intel_dp);
583
584 static void wait_panel_status(struct intel_dp *intel_dp,
585 u32 mask, u32 value)
586 {
587 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
588 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
589 i915_reg_t pp_stat_reg, pp_ctrl_reg;
590
591 lockdep_assert_held(&dev_priv->display.pps.mutex);
592
593 intel_pps_verify_state(intel_dp);
594
595 pp_stat_reg = _pp_stat_reg(intel_dp);
596 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
597
598 drm_dbg_kms(&dev_priv->drm,
599 "[ENCODER:%d:%s] %s mask: 0x%08x value: 0x%08x PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
600 dig_port->base.base.base.id, dig_port->base.base.name,
601 pps_name(dev_priv, &intel_dp->pps),
602 mask, value,
603 intel_de_read(dev_priv, pp_stat_reg),
604 intel_de_read(dev_priv, pp_ctrl_reg));
605
606 if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
607 mask, value, 5000))
608 drm_err(&dev_priv->drm,
609 "[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
610 dig_port->base.base.base.id, dig_port->base.base.name,
611 pps_name(dev_priv, &intel_dp->pps),
612 intel_de_read(dev_priv, pp_stat_reg),
613 intel_de_read(dev_priv, pp_ctrl_reg));
614
615 drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
616 }
617
618 static void wait_panel_on(struct intel_dp *intel_dp)
619 {
620 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
621 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
622
623 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power on\n",
624 dig_port->base.base.base.id, dig_port->base.base.name,
625 pps_name(i915, &intel_dp->pps));
626 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
627 }
628
629 static void wait_panel_off(struct intel_dp *intel_dp)
630 {
631 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
632 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
633
634 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power off time\n",
635 dig_port->base.base.base.id, dig_port->base.base.name,
636 pps_name(i915, &intel_dp->pps));
637 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
638 }
639
640 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
641 {
642 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
643 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
644 ktime_t panel_power_on_time;
645 s64 panel_power_off_duration;
646
647 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power cycle\n",
648 dig_port->base.base.base.id, dig_port->base.base.name,
649 pps_name(i915, &intel_dp->pps));
650
651 /* take the difference of the current time and the panel power off time
652 * and then make the panel wait for t11_t12 if needed. */
653 panel_power_on_time = ktime_get_boottime();
654 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
655
656 /* When we disable the VDD override bit last we have to do the manual
657 * wait. */
658 if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
659 wait_remaining_ms_from_jiffies(jiffies,
660 intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
661
662 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
663 }
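
/*
 * Worked example (illustrative): with panel_power_cycle_delay == 500 ms
 * and a panel that was powered off 200 ms ago, panel_power_off_duration
 * comes out as 200 and the code above sleeps for the remaining ~300 ms
 * before polling for the IDLE_CYCLE state.
 */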
664
665 void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
666 {
667 intel_wakeref_t wakeref;
668
669 if (!intel_dp_is_edp(intel_dp))
670 return;
671
672 with_intel_pps_lock(intel_dp, wakeref)
673 wait_panel_power_cycle(intel_dp);
674 }
675
676 static void wait_backlight_on(struct intel_dp *intel_dp)
677 {
678 wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
679 intel_dp->pps.backlight_on_delay);
680 }
681
682 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
683 {
684 wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
685 intel_dp->pps.backlight_off_delay);
686 }
687
688 /* Read the current pp_control value, unlocking the register if it
689 * is locked
690 */
691
692 static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
693 {
694 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
695 u32 control;
696
697 lockdep_assert_held(&dev_priv->display.pps.mutex);
698
699 control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
700 if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
701 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
702 control &= ~PANEL_UNLOCK_MASK;
703 control |= PANEL_UNLOCK_REGS;
704 }
705 return control;
706 }
707
708 /*
709 * Must be paired with intel_pps_vdd_off_unlocked().
710 * Must hold pps_mutex around the whole on/off sequence.
711 * Can be nested with intel_pps_vdd_{on,off}() calls.
712 */
713 bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
714 {
715 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
716 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
717 u32 pp;
718 i915_reg_t pp_stat_reg, pp_ctrl_reg;
719 bool need_to_disable = !intel_dp->pps.want_panel_vdd;
720
721 lockdep_assert_held(&dev_priv->display.pps.mutex);
722
723 if (!intel_dp_is_edp(intel_dp))
724 return false;
725
726 cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
727 intel_dp->pps.want_panel_vdd = true;
728
729 if (edp_have_panel_vdd(intel_dp))
730 return need_to_disable;
731
732 drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
733 intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
734 intel_aux_power_domain(dig_port));
735
736 pp_stat_reg = _pp_stat_reg(intel_dp);
737 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
738
739 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turning VDD on\n",
740 dig_port->base.base.base.id, dig_port->base.base.name,
741 pps_name(dev_priv, &intel_dp->pps));
742
743 if (!edp_have_panel_power(intel_dp))
744 wait_panel_power_cycle(intel_dp);
745
746 pp = ilk_get_pp_control(intel_dp);
747 pp |= EDP_FORCE_VDD;
748
749 intel_de_write(dev_priv, pp_ctrl_reg, pp);
750 intel_de_posting_read(dev_priv, pp_ctrl_reg);
751 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
752 dig_port->base.base.base.id, dig_port->base.base.name,
753 pps_name(dev_priv, &intel_dp->pps),
754 intel_de_read(dev_priv, pp_stat_reg),
755 intel_de_read(dev_priv, pp_ctrl_reg));
756 /*
757 * If the panel wasn't on, delay before accessing aux channel
758 */
759 if (!edp_have_panel_power(intel_dp)) {
760 drm_dbg_kms(&dev_priv->drm,
761 "[ENCODER:%d:%s] %s panel power wasn't enabled\n",
762 dig_port->base.base.base.id, dig_port->base.base.name,
763 pps_name(dev_priv, &intel_dp->pps));
764 msleep(intel_dp->pps.panel_power_up_delay);
765 }
766
767 return need_to_disable;
768 }
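
/*
 * Usage sketch (illustrative, not lifted from any particular caller):
 * an AUX-style user would typically do something like
 *
 *	with_intel_pps_lock(intel_dp, wakeref) {
 *		bool vdd = intel_pps_vdd_on_unlocked(intel_dp);
 *
 *		... access the panel ...
 *
 *		if (vdd)
 *			intel_pps_vdd_off_unlocked(intel_dp, false);
 *	}
 *
 * The return value records whether this caller was the one that
 * actually forced VDD on; passing sync=false defers the turn-off to
 * edp_panel_vdd_work() so back-to-back accesses don't toggle VDD for
 * every transfer.
 */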
769
770 /*
771 * Must be paired with intel_pps_off().
772 * Nested calls to these functions are not allowed since
773 * we drop the lock. Caller must use some higher level
774 * locking to prevent nested calls from other threads.
775 */
776 void intel_pps_vdd_on(struct intel_dp *intel_dp)
777 {
778 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
779 intel_wakeref_t wakeref;
780 bool vdd;
781
782 if (!intel_dp_is_edp(intel_dp))
783 return;
784
785 vdd = false;
786 with_intel_pps_lock(intel_dp, wakeref)
787 vdd = intel_pps_vdd_on_unlocked(intel_dp);
788 I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] %s VDD already requested on\n",
789 dp_to_dig_port(intel_dp)->base.base.base.id,
790 dp_to_dig_port(intel_dp)->base.base.name,
791 pps_name(i915, &intel_dp->pps));
792 }
793
794 static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
795 {
796 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
797 struct intel_digital_port *dig_port =
798 dp_to_dig_port(intel_dp);
799 u32 pp;
800 i915_reg_t pp_stat_reg, pp_ctrl_reg;
801
802 lockdep_assert_held(&dev_priv->display.pps.mutex);
803
804 drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);
805
806 if (!edp_have_panel_vdd(intel_dp))
807 return;
808
809 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turning VDD off\n",
810 dig_port->base.base.base.id, dig_port->base.base.name,
811 pps_name(dev_priv, &intel_dp->pps));
812
813 pp = ilk_get_pp_control(intel_dp);
814 pp &= ~EDP_FORCE_VDD;
815
816 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
817 pp_stat_reg = _pp_stat_reg(intel_dp);
818
819 intel_de_write(dev_priv, pp_ctrl_reg, pp);
820 intel_de_posting_read(dev_priv, pp_ctrl_reg);
821
822 /* Make sure sequencer is idle before allowing subsequent activity */
823 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
824 dig_port->base.base.base.id, dig_port->base.base.name,
825 pps_name(dev_priv, &intel_dp->pps),
826 intel_de_read(dev_priv, pp_stat_reg),
827 intel_de_read(dev_priv, pp_ctrl_reg));
828
829 if ((pp & PANEL_POWER_ON) == 0)
830 intel_dp->pps.panel_power_off_time = ktime_get_boottime();
831
832 intel_display_power_put(dev_priv,
833 intel_aux_power_domain(dig_port),
834 fetch_and_zero(&intel_dp->pps.vdd_wakeref));
835 }
836
837 void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
838 {
839 intel_wakeref_t wakeref;
840
841 if (!intel_dp_is_edp(intel_dp))
842 return;
843
844 cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
845 /*
846 * vdd might still be enabled due to the delayed vdd off.
847 * Make sure vdd is actually turned off here.
848 */
849 with_intel_pps_lock(intel_dp, wakeref)
850 intel_pps_vdd_off_sync_unlocked(intel_dp);
851 }
852
853 static void edp_panel_vdd_work(struct work_struct *__work)
854 {
855 struct intel_pps *pps = container_of(to_delayed_work(__work),
856 struct intel_pps, panel_vdd_work);
857 struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
858 intel_wakeref_t wakeref;
859
860 with_intel_pps_lock(intel_dp, wakeref) {
861 if (!intel_dp->pps.want_panel_vdd)
862 intel_pps_vdd_off_sync_unlocked(intel_dp);
863 }
864 }
865
866 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
867 {
868 unsigned long delay;
869
870 /*
871 * We may not yet know the real power sequencing delays,
872 * so keep VDD enabled until we're done with init.
873 */
874 if (intel_dp->pps.initializing)
875 return;
876
877 /*
878 * Queue the timer to fire a long time from now (relative to the power
879 * down delay) to keep the panel power up across a sequence of
880 * operations.
881 */
882 delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
883 schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay);
884 }
885
886 /*
887 * Must be paired with edp_panel_vdd_on().
888 * Must hold pps_mutex around the whole on/off sequence.
889 * Can be nested with intel_pps_vdd_{on,off}() calls.
890 */
891 void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
892 {
893 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
894
895 lockdep_assert_held(&dev_priv->display.pps.mutex);
896
897 if (!intel_dp_is_edp(intel_dp))
898 return;
899
900 I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] %s VDD not forced on",
901 dp_to_dig_port(intel_dp)->base.base.base.id,
902 dp_to_dig_port(intel_dp)->base.base.name,
903 pps_name(dev_priv, &intel_dp->pps));
904
905 intel_dp->pps.want_panel_vdd = false;
906
907 if (sync)
908 intel_pps_vdd_off_sync_unlocked(intel_dp);
909 else
910 edp_panel_vdd_schedule_off(intel_dp);
911 }
912
913 void intel_pps_on_unlocked(struct intel_dp *intel_dp)
914 {
915 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
916 u32 pp;
917 i915_reg_t pp_ctrl_reg;
918
919 lockdep_assert_held(&dev_priv->display.pps.mutex);
920
921 if (!intel_dp_is_edp(intel_dp))
922 return;
923
924 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turn panel power on\n",
925 dp_to_dig_port(intel_dp)->base.base.base.id,
926 dp_to_dig_port(intel_dp)->base.base.name,
927 pps_name(dev_priv, &intel_dp->pps));
928
929 if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
930 "[ENCODER:%d:%s] %s panel power already on\n",
931 dp_to_dig_port(intel_dp)->base.base.base.id,
932 dp_to_dig_port(intel_dp)->base.base.name,
933 pps_name(dev_priv, &intel_dp->pps)))
934 return;
935
936 wait_panel_power_cycle(intel_dp);
937
938 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
939 pp = ilk_get_pp_control(intel_dp);
940 if (IS_IRONLAKE(dev_priv)) {
941 /* ILK workaround: disable reset around power sequence */
942 pp &= ~PANEL_POWER_RESET;
943 intel_de_write(dev_priv, pp_ctrl_reg, pp);
944 intel_de_posting_read(dev_priv, pp_ctrl_reg);
945 }
946
947 pp |= PANEL_POWER_ON;
948 if (!IS_IRONLAKE(dev_priv))
949 pp |= PANEL_POWER_RESET;
950
951 intel_de_write(dev_priv, pp_ctrl_reg, pp);
952 intel_de_posting_read(dev_priv, pp_ctrl_reg);
953
954 wait_panel_on(intel_dp);
955 intel_dp->pps.last_power_on = jiffies;
956
957 if (IS_IRONLAKE(dev_priv)) {
958 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
959 intel_de_write(dev_priv, pp_ctrl_reg, pp);
960 intel_de_posting_read(dev_priv, pp_ctrl_reg);
961 }
962 }
963
964 void intel_pps_on(struct intel_dp *intel_dp)
965 {
966 intel_wakeref_t wakeref;
967
968 if (!intel_dp_is_edp(intel_dp))
969 return;
970
971 with_intel_pps_lock(intel_dp, wakeref)
972 intel_pps_on_unlocked(intel_dp);
973 }
974
975 void intel_pps_off_unlocked(struct intel_dp *intel_dp)
976 {
977 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
978 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
979 u32 pp;
980 i915_reg_t pp_ctrl_reg;
981
982 lockdep_assert_held(&dev_priv->display.pps.mutex);
983
984 if (!intel_dp_is_edp(intel_dp))
985 return;
986
987 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turn panel power off\n",
988 dig_port->base.base.base.id, dig_port->base.base.name,
989 pps_name(dev_priv, &intel_dp->pps));
990
991 drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
992 "[ENCODER:%d:%s] %s need VDD to turn off panel\n",
993 dig_port->base.base.base.id, dig_port->base.base.name,
994 pps_name(dev_priv, &intel_dp->pps));
995
996 pp = ilk_get_pp_control(intel_dp);
997 /* We need to switch off panel power _and_ force vdd, for otherwise some
998 * panels get very unhappy and cease to work. */
999 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1000 EDP_BLC_ENABLE);
1001
1002 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1003
1004 intel_dp->pps.want_panel_vdd = false;
1005
1006 intel_de_write(dev_priv, pp_ctrl_reg, pp);
1007 intel_de_posting_read(dev_priv, pp_ctrl_reg);
1008
1009 wait_panel_off(intel_dp);
1010 intel_dp->pps.panel_power_off_time = ktime_get_boottime();
1011
1012 /* We got a reference when we enabled the VDD. */
1013 intel_display_power_put(dev_priv,
1014 intel_aux_power_domain(dig_port),
1015 fetch_and_zero(&intel_dp->pps.vdd_wakeref));
1016 }
1017
1018 void intel_pps_off(struct intel_dp *intel_dp)
1019 {
1020 intel_wakeref_t wakeref;
1021
1022 if (!intel_dp_is_edp(intel_dp))
1023 return;
1024
1025 with_intel_pps_lock(intel_dp, wakeref)
1026 intel_pps_off_unlocked(intel_dp);
1027 }
1028
1029 /* Enable backlight in the panel power control. */
1030 void intel_pps_backlight_on(struct intel_dp *intel_dp)
1031 {
1032 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1033 intel_wakeref_t wakeref;
1034
1035 /*
1036 * If we enable the backlight right away following a panel power
1037 * on, we may see slight flicker as the panel syncs with the eDP
1038 * link. So delay a bit to make sure the image is solid before
1039 * allowing it to appear.
1040 */
1041 wait_backlight_on(intel_dp);
1042
1043 with_intel_pps_lock(intel_dp, wakeref) {
1044 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1045 u32 pp;
1046
1047 pp = ilk_get_pp_control(intel_dp);
1048 pp |= EDP_BLC_ENABLE;
1049
1050 intel_de_write(dev_priv, pp_ctrl_reg, pp);
1051 intel_de_posting_read(dev_priv, pp_ctrl_reg);
1052 }
1053 }
1054
1055 /* Disable backlight in the panel power control. */
1056 void intel_pps_backlight_off(struct intel_dp *intel_dp)
1057 {
1058 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1059 intel_wakeref_t wakeref;
1060
1061 if (!intel_dp_is_edp(intel_dp))
1062 return;
1063
1064 with_intel_pps_lock(intel_dp, wakeref) {
1065 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1066 u32 pp;
1067
1068 pp = ilk_get_pp_control(intel_dp);
1069 pp &= ~EDP_BLC_ENABLE;
1070
1071 intel_de_write(dev_priv, pp_ctrl_reg, pp);
1072 intel_de_posting_read(dev_priv, pp_ctrl_reg);
1073 }
1074
1075 intel_dp->pps.last_backlight_off = jiffies;
1076 edp_wait_backlight_off(intel_dp);
1077 }
1078
1079 /*
1080 * Hook for controlling the panel power control backlight through the bl_power
1081 * sysfs attribute. Take care to handle multiple calls.
1082 */
1083 void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
1084 {
1085 struct drm_i915_private *i915 = to_i915(connector->base.dev);
1086 struct intel_dp *intel_dp = intel_attached_dp(connector);
1087 intel_wakeref_t wakeref;
1088 bool is_enabled;
1089
1090 is_enabled = false;
1091 with_intel_pps_lock(intel_dp, wakeref)
1092 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
1093 if (is_enabled == enable)
1094 return;
1095
1096 drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
1097 enable ? "enable" : "disable");
1098
1099 if (enable)
1100 intel_pps_backlight_on(intel_dp);
1101 else
1102 intel_pps_backlight_off(intel_dp);
1103 }
1104
1105 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
1106 {
1107 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1108 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
1109 enum pipe pipe = intel_dp->pps.pps_pipe;
1110 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
1111
1112 drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
1113
1114 if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
1115 return;
1116
1117 intel_pps_vdd_off_sync_unlocked(intel_dp);
1118
1119 /*
1120 * VLV seems to get confused when multiple power sequencers
1121 * have the same port selected (even if only one has power/vdd
1122 * enabled). The failure manifests as vlv_wait_port_ready() failing.
1123 * CHV, on the other hand, doesn't seem to mind having the same port
1124 * selected in multiple power sequencers, but let's always clear the
1125 * port select when logically disconnecting a power sequencer
1126 * from a port.
1127 */
1128 drm_dbg_kms(&dev_priv->drm,
1129 "detaching %s from [ENCODER:%d:%s]\n",
1130 pps_name(dev_priv, &intel_dp->pps),
1131 dig_port->base.base.base.id, dig_port->base.base.name);
1132 intel_de_write(dev_priv, pp_on_reg, 0);
1133 intel_de_posting_read(dev_priv, pp_on_reg);
1134
1135 intel_dp->pps.pps_pipe = INVALID_PIPE;
1136 }
1137
1138 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
1139 enum pipe pipe)
1140 {
1141 struct intel_encoder *encoder;
1142
1143 lockdep_assert_held(&dev_priv->display.pps.mutex);
1144
1145 for_each_intel_dp(&dev_priv->drm, encoder) {
1146 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1147
1148 drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
1149 "stealing PPS %c from active [ENCODER:%d:%s]\n",
1150 pipe_name(pipe), encoder->base.base.id,
1151 encoder->base.name);
1152
1153 if (intel_dp->pps.pps_pipe != pipe)
1154 continue;
1155
1156 drm_dbg_kms(&dev_priv->drm,
1157 "stealing PPS %c from [ENCODER:%d:%s]\n",
1158 pipe_name(pipe), encoder->base.base.id,
1159 encoder->base.name);
1160
1161 /* make sure vdd is off before we steal it */
1162 vlv_detach_power_sequencer(intel_dp);
1163 }
1164 }
1165
1166 void vlv_pps_init(struct intel_encoder *encoder,
1167 const struct intel_crtc_state *crtc_state)
1168 {
1169 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1170 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1171 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1172
1173 lockdep_assert_held(&dev_priv->display.pps.mutex);
1174
1175 drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
1176
1177 if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
1178 intel_dp->pps.pps_pipe != crtc->pipe) {
1179 /*
1180 * If another power sequencer was being used on this
1181 * port previously make sure to turn off vdd there while
1182 * we still have control of it.
1183 */
1184 vlv_detach_power_sequencer(intel_dp);
1185 }
1186
1187 /*
1188 * We may be stealing the power
1189 * sequencer from another port.
1190 */
1191 vlv_steal_power_sequencer(dev_priv, crtc->pipe);
1192
1193 intel_dp->pps.active_pipe = crtc->pipe;
1194
1195 if (!intel_dp_is_edp(intel_dp))
1196 return;
1197
1198 /* now it's all ours */
1199 intel_dp->pps.pps_pipe = crtc->pipe;
1200
1201 drm_dbg_kms(&dev_priv->drm,
1202 "initializing %s for [ENCODER:%d:%s]\n",
1203 pps_name(dev_priv, &intel_dp->pps),
1204 encoder->base.base.id, encoder->base.name);
1205
1206 /* init power sequencer on this pipe and port */
1207 pps_init_delays(intel_dp);
1208 pps_init_registers(intel_dp, true);
1209 }
1210
1211 static void pps_vdd_init(struct intel_dp *intel_dp)
1212 {
1213 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1214 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1215
1216 lockdep_assert_held(&dev_priv->display.pps.mutex);
1217
1218 if (!edp_have_panel_vdd(intel_dp))
1219 return;
1220
1221 /*
1222 * The VDD bit needs a power domain reference, so if the bit is
1223 * already enabled when we boot or resume, grab this reference and
1224 * schedule a vdd off, so we don't hold on to the reference
1225 * indefinitely.
1226 */
1227 drm_dbg_kms(&dev_priv->drm,
1228 "[ENCODER:%d:%s] %s VDD left on by BIOS, adjusting state tracking\n",
1229 dig_port->base.base.base.id, dig_port->base.base.name,
1230 pps_name(dev_priv, &intel_dp->pps));
1231 drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
1232 intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
1233 intel_aux_power_domain(dig_port));
1234 }
1235
1236 bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp)
1237 {
1238 intel_wakeref_t wakeref;
1239 bool have_power = false;
1240
1241 with_intel_pps_lock(intel_dp, wakeref) {
1242 have_power = edp_have_panel_power(intel_dp) ||
1243 edp_have_panel_vdd(intel_dp);
1244 }
1245
1246 return have_power;
1247 }
1248
1249 static void pps_init_timestamps(struct intel_dp *intel_dp)
1250 {
1251 /*
1252 * Initialize panel power off time to 0, assuming panel power could have
1253 * been toggled between kernel boot and now only by a previously loaded
1254 * and removed i915, which has already ensured sufficient power off
1255 * delay at module remove.
1256 */
1257 intel_dp->pps.panel_power_off_time = 0;
1258 intel_dp->pps.last_power_on = jiffies;
1259 intel_dp->pps.last_backlight_off = jiffies;
1260 }
1261
1262 static void
1263 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
1264 {
1265 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1266 u32 pp_on, pp_off, pp_ctl;
1267 struct pps_registers regs;
1268
1269 intel_pps_get_registers(intel_dp, &regs);
1270
1271 pp_ctl = ilk_get_pp_control(intel_dp);
1272
1273 /* Ensure PPS is unlocked */
1274 if (!HAS_DDI(dev_priv))
1275 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
1276
1277 pp_on = intel_de_read(dev_priv, regs.pp_on);
1278 pp_off = intel_de_read(dev_priv, regs.pp_off);
1279
1280 /* Pull timing values out of registers */
1281 seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
1282 seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
1283 seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
1284 seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
1285
1286 if (i915_mmio_reg_valid(regs.pp_div)) {
1287 u32 pp_div;
1288
1289 pp_div = intel_de_read(dev_priv, regs.pp_div);
1290
1291 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
1292 } else {
1293 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
1294 }
1295 }
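
/*
 * Unit note (illustrative): t11_t12 is kept in the same 100 usec
 * software units as the other delays, so a hardware power cycle field
 * of, say, 5 (5 x 100 ms) is stored as 5 * 1000 = 5000 here, which
 * pps_init_delays() later converts back to a 500 ms
 * panel_power_cycle_delay.
 */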
1296
1297 static void
1298 intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
1299 const struct edp_power_seq *seq)
1300 {
1301 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1302
1303 drm_dbg_kms(&i915->drm, "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
1304 state_name,
1305 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
1306 }
1307
1308 static void
1309 intel_pps_verify_state(struct intel_dp *intel_dp)
1310 {
1311 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1312 struct edp_power_seq hw;
1313 struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
1314
1315 intel_pps_readout_hw_state(intel_dp, &hw);
1316
1317 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
1318 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
1319 drm_err(&i915->drm, "PPS state mismatch\n");
1320 intel_pps_dump_state(intel_dp, "sw", sw);
1321 intel_pps_dump_state(intel_dp, "hw", &hw);
1322 }
1323 }
1324
1325 static bool pps_delays_valid(struct edp_power_seq *delays)
1326 {
1327 return delays->t1_t3 || delays->t8 || delays->t9 ||
1328 delays->t10 || delays->t11_t12;
1329 }
1330
1331 static void pps_init_delays_bios(struct intel_dp *intel_dp,
1332 struct edp_power_seq *bios)
1333 {
1334 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1335
1336 lockdep_assert_held(&dev_priv->display.pps.mutex);
1337
1338 if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays))
1339 intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays);
1340
1341 *bios = intel_dp->pps.bios_pps_delays;
1342
1343 intel_pps_dump_state(intel_dp, "bios", bios);
1344 }
1345
1346 static void pps_init_delays_vbt(struct intel_dp *intel_dp,
1347 struct edp_power_seq *vbt)
1348 {
1349 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1350 struct intel_connector *connector = intel_dp->attached_connector;
1351
1352 *vbt = connector->panel.vbt.edp.pps;
1353
1354 if (!pps_delays_valid(vbt))
1355 return;
1356
1357 /* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
1358 * of 500ms appears to be too short. Occasionally the panel
1359 * just fails to power back on. Increasing the delay to 800ms
1360 * seems sufficient to avoid this problem.
1361 */
1362 if (intel_has_quirk(dev_priv, QUIRK_INCREASE_T12_DELAY)) {
1363 vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
1364 drm_dbg_kms(&dev_priv->drm,
1365 "Increasing T12 panel delay as per the quirk to %d\n",
1366 vbt->t11_t12);
1367 }
1368
1369 /* T11_T12 delay is special and actually in units of 100ms, but zero
1370 * based in the hw (so we need to add 100 ms). But the sw vbt
1371 * table multiplies it by 1000 to make it in units of 100usec,
1372 * too. */
1373 vbt->t11_t12 += 100 * 10;
1374
1375 intel_pps_dump_state(intel_dp, "vbt", vbt);
1376 }
1377
1378 static void pps_init_delays_spec(struct intel_dp *intel_dp,
1379 struct edp_power_seq *spec)
1380 {
1381 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1382
1383 lockdep_assert_held(&dev_priv->display.pps.mutex);
1384
1385 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
1386 * our hw here, which are all in 100usec. */
1387 spec->t1_t3 = 210 * 10;
1388 spec->t8 = 50 * 10; /* no limit for t8, use t7 instead */
1389 spec->t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
1390 spec->t10 = 500 * 10;
1391 /* This one is special and actually in units of 100ms, but zero
1392 * based in the hw (so we need to add 100 ms). But the sw vbt
1393 * table multiplies it by 1000 to make it in units of 100usec,
1394 * too. */
1395 spec->t11_t12 = (510 + 100) * 10;
1396
1397 intel_pps_dump_state(intel_dp, "spec", spec);
1398 }
1399
1400 static void pps_init_delays(struct intel_dp *intel_dp)
1401 {
1402 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1403 struct edp_power_seq cur, vbt, spec,
1404 *final = &intel_dp->pps.pps_delays;
1405
1406 lockdep_assert_held(&dev_priv->display.pps.mutex);
1407
1408 /* already initialized? */
1409 if (pps_delays_valid(final))
1410 return;
1411
1412 pps_init_delays_bios(intel_dp, &cur);
1413 pps_init_delays_vbt(intel_dp, &vbt);
1414 pps_init_delays_spec(intel_dp, &spec);
1415
1416 /* Use the max of the register settings and vbt. If both are
1417 * unset, fall back to the spec limits. */
1418 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
1419 spec.field : \
1420 max(cur.field, vbt.field))
1421 assign_final(t1_t3);
1422 assign_final(t8);
1423 assign_final(t9);
1424 assign_final(t10);
1425 assign_final(t11_t12);
1426 #undef assign_final
1427
1428 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
1429 intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
1430 intel_dp->pps.backlight_on_delay = get_delay(t8);
1431 intel_dp->pps.backlight_off_delay = get_delay(t9);
1432 intel_dp->pps.panel_power_down_delay = get_delay(t10);
1433 intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
1434 #undef get_delay
1435
1436 drm_dbg_kms(&dev_priv->drm,
1437 "panel power up delay %d, power down delay %d, power cycle delay %d\n",
1438 intel_dp->pps.panel_power_up_delay,
1439 intel_dp->pps.panel_power_down_delay,
1440 intel_dp->pps.panel_power_cycle_delay);
1441
1442 drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
1443 intel_dp->pps.backlight_on_delay,
1444 intel_dp->pps.backlight_off_delay);
1445
1446 /*
1447 * We override the HW backlight delays to 1 because we do manual waits
1448 * on them. For T8, even BSpec recommends doing it. For T9, if we
1449 * don't do this, we'll end up waiting for the backlight off delay
1450 * twice: once when we do the manual sleep, and once when we disable
1451 * the panel and wait for the PP_STATUS bit to become zero.
1452 */
1453 final->t8 = 1;
1454 final->t9 = 1;
1455
1456 /*
1457 * HW has only a 100msec granularity for t11_t12 so round it up
1458 * accordingly.
1459 */
1460 final->t11_t12 = roundup(final->t11_t12, 100 * 10);
1461 }
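
/*
 * Worked example (illustrative): using only the eDP spec defaults from
 * pps_init_delays_spec(), t1_t3 = 2100 in 100 usec units becomes a
 * 210 ms panel_power_up_delay via DIV_ROUND_UP(2100, 10), and
 * t11_t12 = 6100 becomes a 610 ms panel_power_cycle_delay.
 */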
1462
1463 static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
1464 {
1465 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1466 u32 pp_on, pp_off, port_sel = 0;
1467 int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
1468 struct pps_registers regs;
1469 enum port port = dp_to_dig_port(intel_dp)->base.port;
1470 const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
1471
1472 lockdep_assert_held(&dev_priv->display.pps.mutex);
1473
1474 intel_pps_get_registers(intel_dp, &regs);
1475
1476 /*
1477 * On some VLV machines the BIOS can leave the VDD
1478 * enabled even on power sequencers which aren't
1479 * hooked up to any port. This would mess up the
1480 * power domain tracking the first time we pick
1481 * one of these power sequencers for use since
1482 * intel_pps_vdd_on_unlocked() would notice that the VDD was
1483 * already on and therefore wouldn't grab the power
1484 * domain reference. Disable VDD first to avoid this.
1485 * This also avoids spuriously turning the VDD on as
1486 * soon as the new power sequencer gets initialized.
1487 */
1488 if (force_disable_vdd) {
1489 u32 pp = ilk_get_pp_control(intel_dp);
1490
1491 drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
1492 "Panel power already on\n");
1493
1494 if (pp & EDP_FORCE_VDD)
1495 drm_dbg_kms(&dev_priv->drm,
1496 "VDD already on, disabling first\n");
1497
1498 pp &= ~EDP_FORCE_VDD;
1499
1500 intel_de_write(dev_priv, regs.pp_ctrl, pp);
1501 }
1502
1503 pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
1504 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
1505 pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
1506 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
1507
1508 /* Haswell doesn't have any port selection bits for the panel
1509 * power sequencer any more. */
1510 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1511 port_sel = PANEL_PORT_SELECT_VLV(port);
1512 } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
1513 switch (port) {
1514 case PORT_A:
1515 port_sel = PANEL_PORT_SELECT_DPA;
1516 break;
1517 case PORT_C:
1518 port_sel = PANEL_PORT_SELECT_DPC;
1519 break;
1520 case PORT_D:
1521 port_sel = PANEL_PORT_SELECT_DPD;
1522 break;
1523 default:
1524 MISSING_CASE(port);
1525 break;
1526 }
1527 }
1528
1529 pp_on |= port_sel;
1530
1531 intel_de_write(dev_priv, regs.pp_on, pp_on);
1532 intel_de_write(dev_priv, regs.pp_off, pp_off);
1533
1534 /*
1535 * Compute the divisor for the pp clock, simply match the Bspec formula.
1536 */
1537 if (i915_mmio_reg_valid(regs.pp_div)) {
1538 intel_de_write(dev_priv, regs.pp_div,
1539 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
1540 } else {
1541 u32 pp_ctl;
1542
1543 pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
1544 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
1545 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
1546 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
1547 }
1548
1549 drm_dbg_kms(&dev_priv->drm,
1550 "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
1551 intel_de_read(dev_priv, regs.pp_on),
1552 intel_de_read(dev_priv, regs.pp_off),
1553 i915_mmio_reg_valid(regs.pp_div) ?
1554 intel_de_read(dev_priv, regs.pp_div) :
1555 (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
1556 }
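
/*
 * Arithmetic example (illustrative, not tied to any specific platform):
 * a hypothetical 24 MHz raw clock gives div = 24000 / 1000 = 24, so the
 * reference divider field above would be programmed to
 * (100 * 24) / 2 - 1 = 1199, and a t11_t12 of 6100 (610 ms in 100 usec
 * units) is programmed as DIV_ROUND_UP(6100, 1000) = 7, i.e. 700 ms in
 * the hardware's 100 ms steps.
 */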
1557
1558 void intel_pps_encoder_reset(struct intel_dp *intel_dp)
1559 {
1560 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1561 intel_wakeref_t wakeref;
1562
1563 if (!intel_dp_is_edp(intel_dp))
1564 return;
1565
1566 with_intel_pps_lock(intel_dp, wakeref) {
1567 /*
1568 * Reinit the power sequencer also on the resume path, in case
1569 * BIOS did something nasty with it.
1570 */
1571 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1572 vlv_initial_power_sequencer_setup(intel_dp);
1573
1574 pps_init_delays(intel_dp);
1575 pps_init_registers(intel_dp, false);
1576 pps_vdd_init(intel_dp);
1577
1578 if (edp_have_panel_vdd(intel_dp))
1579 edp_panel_vdd_schedule_off(intel_dp);
1580 }
1581 }
1582
1583 bool intel_pps_init(struct intel_dp *intel_dp)
1584 {
1585 intel_wakeref_t wakeref;
1586 bool ret;
1587
1588 intel_dp->pps.initializing = true;
1589 INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
1590
1591 pps_init_timestamps(intel_dp);
1592
1593 with_intel_pps_lock(intel_dp, wakeref) {
1594 ret = pps_initial_setup(intel_dp);
1595
1596 pps_init_delays(intel_dp);
1597 pps_init_registers(intel_dp, false);
1598 pps_vdd_init(intel_dp);
1599 }
1600
1601 return ret;
1602 }
1603
1604 static void pps_init_late(struct intel_dp *intel_dp)
1605 {
1606 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1607 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1608 struct intel_connector *connector = intel_dp->attached_connector;
1609
1610 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1611 return;
1612
1613 if (intel_num_pps(i915) < 2)
1614 return;
1615
1616 drm_WARN(&i915->drm, connector->panel.vbt.backlight.controller >= 0 &&
1617 intel_dp->pps.pps_idx != connector->panel.vbt.backlight.controller,
1618 "[ENCODER:%d:%s] power sequencer mismatch: %d (initial) vs. %d (VBT)\n",
1619 encoder->base.base.id, encoder->base.name,
1620 intel_dp->pps.pps_idx, connector->panel.vbt.backlight.controller);
1621
1622 if (connector->panel.vbt.backlight.controller >= 0)
1623 intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
1624 }
1625
1626 void intel_pps_init_late(struct intel_dp *intel_dp)
1627 {
1628 intel_wakeref_t wakeref;
1629
1630 with_intel_pps_lock(intel_dp, wakeref) {
1631 /* Reinit delays after per-panel info has been parsed from VBT */
1632 pps_init_late(intel_dp);
1633
1634 memset(&intel_dp->pps.pps_delays, 0, sizeof(intel_dp->pps.pps_delays));
1635 pps_init_delays(intel_dp);
1636 pps_init_registers(intel_dp, false);
1637
1638 intel_dp->pps.initializing = false;
1639
1640 if (edp_have_panel_vdd(intel_dp))
1641 edp_panel_vdd_schedule_off(intel_dp);
1642 }
1643 }
1644
1645 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
1646 {
1647 int pps_num;
1648 int pps_idx;
1649
1650 if (!HAS_DISPLAY(dev_priv) || HAS_DDI(dev_priv))
1651 return;
1652 /*
1653 * This w/a is needed at least on CPT/PPT, but to be sure apply it
1654 * everywhere where registers can be write protected.
1655 */
1656 pps_num = intel_num_pps(dev_priv);
1657
1658 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
1659 u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
1660
1661 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
1662 intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
1663 }
1664 }
1665
1666 void intel_pps_setup(struct drm_i915_private *i915)
1667 {
1668 if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915))
1669 i915->display.pps.mmio_base = PCH_PPS_BASE;
1670 else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1671 i915->display.pps.mmio_base = VLV_PPS_BASE;
1672 else
1673 i915->display.pps.mmio_base = PPS_BASE;
1674 }
1675
1676 void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
1677 {
1678 i915_reg_t pp_reg;
1679 u32 val;
1680 enum pipe panel_pipe = INVALID_PIPE;
1681 bool locked = true;
1682
1683 if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
1684 return;
1685
1686 if (HAS_PCH_SPLIT(dev_priv)) {
1687 u32 port_sel;
1688
1689 pp_reg = PP_CONTROL(0);
1690 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1691
1692 switch (port_sel) {
1693 case PANEL_PORT_SELECT_LVDS:
1694 intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
1695 break;
1696 case PANEL_PORT_SELECT_DPA:
1697 g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
1698 break;
1699 case PANEL_PORT_SELECT_DPC:
1700 g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
1701 break;
1702 case PANEL_PORT_SELECT_DPD:
1703 g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
1704 break;
1705 default:
1706 MISSING_CASE(port_sel);
1707 break;
1708 }
1709 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1710 /* presumably write lock depends on pipe, not port select */
1711 pp_reg = PP_CONTROL(pipe);
1712 panel_pipe = pipe;
1713 } else {
1714 u32 port_sel;
1715
1716 pp_reg = PP_CONTROL(0);
1717 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1718
1719 drm_WARN_ON(&dev_priv->drm,
1720 port_sel != PANEL_PORT_SELECT_LVDS);
1721 intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
1722 }
1723
1724 val = intel_de_read(dev_priv, pp_reg);
1725 if (!(val & PANEL_POWER_ON) ||
1726 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1727 locked = false;
1728
1729 I915_STATE_WARN(panel_pipe == pipe && locked,
1730 "panel assertion failure, pipe %c regs locked\n",
1731 pipe_name(pipe));
1732 }
1733