1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2020 Intel Corporation
4 */
5
6 #include <linux/string_helpers.h>
7
8 #include "i915_reg.h"
9 #include "intel_atomic.h"
10 #include "intel_crtc.h"
11 #include "intel_ddi.h"
12 #include "intel_de.h"
13 #include "intel_display_types.h"
14 #include "intel_fdi.h"
15
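/*
 * Per-platform FDI link training vtable; the appropriate implementation
 * is selected in intel_fdi_init_hook() at the bottom of this file.
 */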
16 struct intel_fdi_funcs {
17 void (*fdi_link_train)(struct intel_crtc *crtc,
18 const struct intel_crtc_state *crtc_state);
19 };
20
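/*
 * State checker helpers: compare the FDI TX/RX hardware enable bits against
 * what the software state expects and complain via I915_STATE_WARN() on
 * mismatch.
 */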
21 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
22 enum pipe pipe, bool state)
23 {
24 bool cur_state;
25
26 if (HAS_DDI(dev_priv)) {
27 /*
28 * DDI does not have a specific FDI_TX register.
29 *
30 * FDI is never fed from EDP transcoder
31 * so pipe->transcoder cast is fine here.
32 */
33 enum transcoder cpu_transcoder = (enum transcoder)pipe;
34 cur_state = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
35 } else {
36 cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
37 }
38 I915_STATE_WARN(cur_state != state,
39 "FDI TX state assertion failure (expected %s, current %s)\n",
40 str_on_off(state), str_on_off(cur_state));
41 }
42
43 void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
44 {
45 assert_fdi_tx(i915, pipe, true);
46 }
47
48 void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
49 {
50 assert_fdi_tx(i915, pipe, false);
51 }
52
53 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
54 enum pipe pipe, bool state)
55 {
56 bool cur_state;
57
58 cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
59 I915_STATE_WARN(cur_state != state,
60 "FDI RX state assertion failure (expected %s, current %s)\n",
61 str_on_off(state), str_on_off(cur_state));
62 }
63
64 void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
65 {
66 assert_fdi_rx(i915, pipe, true);
67 }
68
69 void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
70 {
71 assert_fdi_rx(i915, pipe, false);
72 }
73
74 void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
75 enum pipe pipe)
76 {
77 bool cur_state;
78
79 /* ILK FDI PLL is always enabled */
80 if (IS_IRONLAKE(i915))
81 return;
82
83 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
84 if (HAS_DDI(i915))
85 return;
86
87 cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
88 I915_STATE_WARN(!cur_state, "FDI TX PLL assertion failure, should be active but is disabled\n");
89 }
90
91 static void assert_fdi_rx_pll(struct drm_i915_private *i915,
92 enum pipe pipe, bool state)
93 {
94 bool cur_state;
95
96 cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
97 I915_STATE_WARN(cur_state != state,
98 "FDI RX PLL assertion failure (expected %s, current %s)\n",
99 str_on_off(state), str_on_off(cur_state));
100 }
101
102 void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
103 {
104 assert_fdi_rx_pll(i915, pipe, true);
105 }
106
107 void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
108 {
109 assert_fdi_rx_pll(i915, pipe, false);
110 }
111
112 void intel_fdi_link_train(struct intel_crtc *crtc,
113 const struct intel_crtc_state *crtc_state)
114 {
115 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
116
117 dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
118 }
119
120 /* units of 100MHz */
121 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
122 {
123 if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
124 return crtc_state->fdi_lanes;
125
126 return 0;
127 }
128
129 static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
130 struct intel_crtc_state *pipe_config)
131 {
132 struct drm_i915_private *dev_priv = to_i915(dev);
133 struct drm_atomic_state *state = pipe_config->uapi.state;
134 struct intel_crtc *other_crtc;
135 struct intel_crtc_state *other_crtc_state;
136
137 drm_dbg_kms(&dev_priv->drm,
138 "checking fdi config on pipe %c, lanes %i\n",
139 pipe_name(pipe), pipe_config->fdi_lanes);
140 if (pipe_config->fdi_lanes > 4) {
141 drm_dbg_kms(&dev_priv->drm,
142 "invalid fdi lane config on pipe %c: %i lanes\n",
143 pipe_name(pipe), pipe_config->fdi_lanes);
144 return -EINVAL;
145 }
146
147 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
148 if (pipe_config->fdi_lanes > 2) {
149 drm_dbg_kms(&dev_priv->drm,
150 "only 2 lanes on haswell, required: %i lanes\n",
151 pipe_config->fdi_lanes);
152 return -EINVAL;
153 } else {
154 return 0;
155 }
156 }
157
158 if (INTEL_NUM_PIPES(dev_priv) == 2)
159 return 0;
160
161 /* Ivybridge 3 pipe is really complicated */
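/*
 * Pipes B and C share the FDI B/C lanes: pipe B may use more than 2 lanes
 * only while pipe C needs none, and pipe C itself is limited to 2 lanes
 * (see ivb_update_fdi_bc_bifurcation() for the matching hardware setup).
 */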
162 switch (pipe) {
163 case PIPE_A:
164 return 0;
165 case PIPE_B:
166 if (pipe_config->fdi_lanes <= 2)
167 return 0;
168
169 other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C);
170 other_crtc_state =
171 intel_atomic_get_crtc_state(state, other_crtc);
172 if (IS_ERR(other_crtc_state))
173 return PTR_ERR(other_crtc_state);
174
175 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
176 drm_dbg_kms(&dev_priv->drm,
177 "invalid shared fdi lane config on pipe %c: %i lanes\n",
178 pipe_name(pipe), pipe_config->fdi_lanes);
179 return -EINVAL;
180 }
181 return 0;
182 case PIPE_C:
183 if (pipe_config->fdi_lanes > 2) {
184 drm_dbg_kms(&dev_priv->drm,
185 "only 2 lanes on pipe %c: required %i lanes\n",
186 pipe_name(pipe), pipe_config->fdi_lanes);
187 return -EINVAL;
188 }
189
190 other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B);
191 other_crtc_state =
192 intel_atomic_get_crtc_state(state, other_crtc);
193 if (IS_ERR(other_crtc_state))
194 return PTR_ERR(other_crtc_state);
195
196 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
197 drm_dbg_kms(&dev_priv->drm,
198 "fdi link B uses too many lanes to enable link C\n");
199 return -EINVAL;
200 }
201 return 0;
202 default:
203 MISSING_CASE(pipe);
204 return 0;
205 }
206 }
207
208 void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
209 {
210 if (IS_IRONLAKE(i915)) {
211 u32 fdi_pll_clk =
212 intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
213
214 i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
215 } else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
216 i915->display.fdi.pll_freq = 270000;
217 } else {
218 return;
219 }
220
221 drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
222 }
223
224 int intel_fdi_link_freq(struct drm_i915_private *i915,
225 const struct intel_crtc_state *pipe_config)
226 {
227 if (HAS_DDI(i915))
228 return pipe_config->port_clock; /* SPLL */
229 else
230 return i915->display.fdi.pll_freq;
231 }
232
233 int ilk_fdi_compute_config(struct intel_crtc *crtc,
234 struct intel_crtc_state *pipe_config)
235 {
236 struct drm_device *dev = crtc->base.dev;
237 struct drm_i915_private *i915 = to_i915(dev);
238 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
239 int lane, link_bw, fdi_dotclock, ret;
240 bool needs_recompute = false;
241
242 retry:
243 /* FDI is a binary signal running at ~2.7GHz, encoding
244 * each output octet as 10 bits. The actual frequency
245 * is stored as a divider into a 100MHz clock, and the
246 * mode pixel clock is stored in units of 1KHz.
247 * Hence the bw of each lane in terms of the mode signal
248 * is:
249 */
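/*
 * For example, a 148500 kHz (1080p60) mode at 24 bpp needs
 * 148500 * 24 = 3564000 kbit/s, while each FDI lane on SNB/IVB carries
 * link_bw * 8 = 2160000 kbit/s of payload, so two lanes are required.
 */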
250 link_bw = intel_fdi_link_freq(i915, pipe_config);
251
252 fdi_dotclock = adjusted_mode->crtc_clock;
253
254 lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
255 pipe_config->pipe_bpp);
256
257 pipe_config->fdi_lanes = lane;
258
259 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
260 link_bw, &pipe_config->fdi_m_n, false);
261
262 ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config);
263 if (ret == -EDEADLK)
264 return ret;
265
266 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
267 pipe_config->pipe_bpp -= 2*3;
268 drm_dbg_kms(&i915->drm,
269 "fdi link bw constraint, reducing pipe bpp to %i\n",
270 pipe_config->pipe_bpp);
271 needs_recompute = true;
272 pipe_config->bw_constrained = true;
273
274 goto retry;
275 }
276
277 if (needs_recompute)
278 return -EAGAIN;
279
280 return ret;
281 }
282
283 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
284 {
285 u32 temp;
286
287 temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
288 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
289 return;
290
291 drm_WARN_ON(&dev_priv->drm,
292 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
293 FDI_RX_ENABLE);
294 drm_WARN_ON(&dev_priv->drm,
295 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
296 FDI_RX_ENABLE);
297
298 temp &= ~FDI_BC_BIFURCATION_SELECT;
299 if (enable)
300 temp |= FDI_BC_BIFURCATION_SELECT;
301
302 drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
303 enable ? "en" : "dis");
304 intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
305 intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
306 }
307
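/*
 * Pipe B can only use all four FDI B/C lanes with the bifurcation disabled.
 * Whenever pipe B needs at most 2 lanes, or pipe C drives FDI at all, the
 * bifurcation is enabled so the remaining lanes can feed FDI C.
 */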
308 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
309 {
310 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
311 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
312
313 switch (crtc->pipe) {
314 case PIPE_A:
315 break;
316 case PIPE_B:
317 if (crtc_state->fdi_lanes > 2)
318 cpt_set_fdi_bc_bifurcation(dev_priv, false);
319 else
320 cpt_set_fdi_bc_bifurcation(dev_priv, true);
321
322 break;
323 case PIPE_C:
324 cpt_set_fdi_bc_bifurcation(dev_priv, true);
325
326 break;
327 default:
328 MISSING_CASE(crtc->pipe);
329 }
330 }
331
332 void intel_fdi_normal_train(struct intel_crtc *crtc)
333 {
334 struct drm_device *dev = crtc->base.dev;
335 struct drm_i915_private *dev_priv = to_i915(dev);
336 enum pipe pipe = crtc->pipe;
337 i915_reg_t reg;
338 u32 temp;
339
340 /* enable normal train */
341 reg = FDI_TX_CTL(pipe);
342 temp = intel_de_read(dev_priv, reg);
343 if (IS_IVYBRIDGE(dev_priv)) {
344 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
345 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
346 } else {
347 temp &= ~FDI_LINK_TRAIN_NONE;
348 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
349 }
350 intel_de_write(dev_priv, reg, temp);
351
352 reg = FDI_RX_CTL(pipe);
353 temp = intel_de_read(dev_priv, reg);
354 if (HAS_PCH_CPT(dev_priv)) {
355 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
356 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
357 } else {
358 temp &= ~FDI_LINK_TRAIN_NONE;
359 temp |= FDI_LINK_TRAIN_NONE;
360 }
361 intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
362
363 /* wait one idle pattern time */
364 intel_de_posting_read(dev_priv, reg);
365 udelay(1000);
366
367 /* IVB wants error correction enabled */
368 if (IS_IVYBRIDGE(dev_priv))
369 intel_de_write(dev_priv, reg,
370 intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
371 }
372
373 /* The FDI link training functions for ILK/Ibexpeak. */
374 static void ilk_fdi_link_train(struct intel_crtc *crtc,
375 const struct intel_crtc_state *crtc_state)
376 {
377 struct drm_device *dev = crtc->base.dev;
378 struct drm_i915_private *dev_priv = to_i915(dev);
379 enum pipe pipe = crtc->pipe;
380 i915_reg_t reg;
381 u32 temp, tries;
382
383 /*
384 * Write the TU size bits before fdi link training, so that error
385 * detection works.
386 */
387 intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
388 intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
389
390 /* FDI needs bits from pipe first */
391 assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);
392
393 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
394 for train result */
395 reg = FDI_RX_IMR(pipe);
396 temp = intel_de_read(dev_priv, reg);
397 temp &= ~FDI_RX_SYMBOL_LOCK;
398 temp &= ~FDI_RX_BIT_LOCK;
399 intel_de_write(dev_priv, reg, temp);
400 intel_de_read(dev_priv, reg);
401 udelay(150);
402
403 /* enable CPU FDI TX and PCH FDI RX */
404 reg = FDI_TX_CTL(pipe);
405 temp = intel_de_read(dev_priv, reg);
406 temp &= ~FDI_DP_PORT_WIDTH_MASK;
407 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
408 temp &= ~FDI_LINK_TRAIN_NONE;
409 temp |= FDI_LINK_TRAIN_PATTERN_1;
410 intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
411
412 reg = FDI_RX_CTL(pipe);
413 temp = intel_de_read(dev_priv, reg);
414 temp &= ~FDI_LINK_TRAIN_NONE;
415 temp |= FDI_LINK_TRAIN_PATTERN_1;
416 intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
417
418 intel_de_posting_read(dev_priv, reg);
419 udelay(150);
420
421 /* Ironlake workaround, enable clock pointer after FDI enable */
422 intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
423 FDI_RX_PHASE_SYNC_POINTER_OVR);
424 intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
425 FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
426
427 reg = FDI_RX_IIR(pipe);
428 for (tries = 0; tries < 5; tries++) {
429 temp = intel_de_read(dev_priv, reg);
430 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
431
432 if ((temp & FDI_RX_BIT_LOCK)) {
433 drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
434 intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
435 break;
436 }
437 }
438 if (tries == 5)
439 drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
440
441 /* Train 2 */
442 reg = FDI_TX_CTL(pipe);
443 temp = intel_de_read(dev_priv, reg);
444 temp &= ~FDI_LINK_TRAIN_NONE;
445 temp |= FDI_LINK_TRAIN_PATTERN_2;
446 intel_de_write(dev_priv, reg, temp);
447
448 reg = FDI_RX_CTL(pipe);
449 temp = intel_de_read(dev_priv, reg);
450 temp &= ~FDI_LINK_TRAIN_NONE;
451 temp |= FDI_LINK_TRAIN_PATTERN_2;
452 intel_de_write(dev_priv, reg, temp);
453
454 intel_de_posting_read(dev_priv, reg);
455 udelay(150);
456
457 reg = FDI_RX_IIR(pipe);
458 for (tries = 0; tries < 5; tries++) {
459 temp = intel_de_read(dev_priv, reg);
460 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
461
462 if (temp & FDI_RX_SYMBOL_LOCK) {
463 intel_de_write(dev_priv, reg,
464 temp | FDI_RX_SYMBOL_LOCK);
465 drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
466 break;
467 }
468 }
469 if (tries == 5)
470 drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
471
472 drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
473
474 }
475
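/* Voltage swing / pre-emphasis levels tried in turn during SNB/IVB FDI link training. */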
476 static const int snb_b_fdi_train_param[] = {
477 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
478 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
479 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
480 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
481 };
482
483 /* The FDI link training functions for SNB/Cougarpoint. */
484 static void gen6_fdi_link_train(struct intel_crtc *crtc,
485 const struct intel_crtc_state *crtc_state)
486 {
487 struct drm_device *dev = crtc->base.dev;
488 struct drm_i915_private *dev_priv = to_i915(dev);
489 enum pipe pipe = crtc->pipe;
490 i915_reg_t reg;
491 u32 temp, i, retry;
492
493 /*
494 * Write the TU size bits before fdi link training, so that error
495 * detection works.
496 */
497 intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
498 intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
499
500 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
501 for train result */
502 reg = FDI_RX_IMR(pipe);
503 temp = intel_de_read(dev_priv, reg);
504 temp &= ~FDI_RX_SYMBOL_LOCK;
505 temp &= ~FDI_RX_BIT_LOCK;
506 intel_de_write(dev_priv, reg, temp);
507
508 intel_de_posting_read(dev_priv, reg);
509 udelay(150);
510
511 /* enable CPU FDI TX and PCH FDI RX */
512 reg = FDI_TX_CTL(pipe);
513 temp = intel_de_read(dev_priv, reg);
514 temp &= ~FDI_DP_PORT_WIDTH_MASK;
515 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
516 temp &= ~FDI_LINK_TRAIN_NONE;
517 temp |= FDI_LINK_TRAIN_PATTERN_1;
518 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
519 /* SNB-B */
520 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
521 intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
522
523 intel_de_write(dev_priv, FDI_RX_MISC(pipe),
524 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
525
526 reg = FDI_RX_CTL(pipe);
527 temp = intel_de_read(dev_priv, reg);
528 if (HAS_PCH_CPT(dev_priv)) {
529 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
530 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
531 } else {
532 temp &= ~FDI_LINK_TRAIN_NONE;
533 temp |= FDI_LINK_TRAIN_PATTERN_1;
534 }
535 intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
536
537 intel_de_posting_read(dev_priv, reg);
538 udelay(150);
539
540 for (i = 0; i < 4; i++) {
541 reg = FDI_TX_CTL(pipe);
542 temp = intel_de_read(dev_priv, reg);
543 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
544 temp |= snb_b_fdi_train_param[i];
545 intel_de_write(dev_priv, reg, temp);
546
547 intel_de_posting_read(dev_priv, reg);
548 udelay(500);
549
550 for (retry = 0; retry < 5; retry++) {
551 reg = FDI_RX_IIR(pipe);
552 temp = intel_de_read(dev_priv, reg);
553 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
554 if (temp & FDI_RX_BIT_LOCK) {
555 intel_de_write(dev_priv, reg,
556 temp | FDI_RX_BIT_LOCK);
557 drm_dbg_kms(&dev_priv->drm,
558 "FDI train 1 done.\n");
559 break;
560 }
561 udelay(50);
562 }
563 if (retry < 5)
564 break;
565 }
566 if (i == 4)
567 drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
568
569 /* Train 2 */
570 reg = FDI_TX_CTL(pipe);
571 temp = intel_de_read(dev_priv, reg);
572 temp &= ~FDI_LINK_TRAIN_NONE;
573 temp |= FDI_LINK_TRAIN_PATTERN_2;
574 if (IS_SANDYBRIDGE(dev_priv)) {
575 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
576 /* SNB-B */
577 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
578 }
579 intel_de_write(dev_priv, reg, temp);
580
581 reg = FDI_RX_CTL(pipe);
582 temp = intel_de_read(dev_priv, reg);
583 if (HAS_PCH_CPT(dev_priv)) {
584 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
585 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
586 } else {
587 temp &= ~FDI_LINK_TRAIN_NONE;
588 temp |= FDI_LINK_TRAIN_PATTERN_2;
589 }
590 intel_de_write(dev_priv, reg, temp);
591
592 intel_de_posting_read(dev_priv, reg);
593 udelay(150);
594
595 for (i = 0; i < 4; i++) {
596 reg = FDI_TX_CTL(pipe);
597 temp = intel_de_read(dev_priv, reg);
598 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
599 temp |= snb_b_fdi_train_param[i];
600 intel_de_write(dev_priv, reg, temp);
601
602 intel_de_posting_read(dev_priv, reg);
603 udelay(500);
604
605 for (retry = 0; retry < 5; retry++) {
606 reg = FDI_RX_IIR(pipe);
607 temp = intel_de_read(dev_priv, reg);
608 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
609 if (temp & FDI_RX_SYMBOL_LOCK) {
610 intel_de_write(dev_priv, reg,
611 temp | FDI_RX_SYMBOL_LOCK);
612 drm_dbg_kms(&dev_priv->drm,
613 "FDI train 2 done.\n");
614 break;
615 }
616 udelay(50);
617 }
618 if (retry < 5)
619 break;
620 }
621 if (i == 4)
622 drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
623
624 drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
625 }
626
627 /* Manual link training for Ivy Bridge A0 parts */
628 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
629 const struct intel_crtc_state *crtc_state)
630 {
631 struct drm_device *dev = crtc->base.dev;
632 struct drm_i915_private *dev_priv = to_i915(dev);
633 enum pipe pipe = crtc->pipe;
634 i915_reg_t reg;
635 u32 temp, i, j;
636
637 ivb_update_fdi_bc_bifurcation(crtc_state);
638
639 /*
640 * Write the TU size bits before fdi link training, so that error
641 * detection works.
642 */
643 intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
644 intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
645
646 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
647 for train result */
648 reg = FDI_RX_IMR(pipe);
649 temp = intel_de_read(dev_priv, reg);
650 temp &= ~FDI_RX_SYMBOL_LOCK;
651 temp &= ~FDI_RX_BIT_LOCK;
652 intel_de_write(dev_priv, reg, temp);
653
654 intel_de_posting_read(dev_priv, reg);
655 udelay(150);
656
657 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
658 intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
659
660 /* Try each vswing and preemphasis setting twice before moving on */
661 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
662 /* disable first in case we need to retry */
663 reg = FDI_TX_CTL(pipe);
664 temp = intel_de_read(dev_priv, reg);
665 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
666 temp &= ~FDI_TX_ENABLE;
667 intel_de_write(dev_priv, reg, temp);
668
669 reg = FDI_RX_CTL(pipe);
670 temp = intel_de_read(dev_priv, reg);
671 temp &= ~FDI_LINK_TRAIN_AUTO;
672 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
673 temp &= ~FDI_RX_ENABLE;
674 intel_de_write(dev_priv, reg, temp);
675
676 /* enable CPU FDI TX and PCH FDI RX */
677 reg = FDI_TX_CTL(pipe);
678 temp = intel_de_read(dev_priv, reg);
679 temp &= ~FDI_DP_PORT_WIDTH_MASK;
680 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
681 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
682 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
683 temp |= snb_b_fdi_train_param[j/2];
684 temp |= FDI_COMPOSITE_SYNC;
685 intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
686
687 intel_de_write(dev_priv, FDI_RX_MISC(pipe),
688 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
689
690 reg = FDI_RX_CTL(pipe);
691 temp = intel_de_read(dev_priv, reg);
692 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
693 temp |= FDI_COMPOSITE_SYNC;
694 intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
695
696 intel_de_posting_read(dev_priv, reg);
697 udelay(1); /* should be 0.5us */
698
699 for (i = 0; i < 4; i++) {
700 reg = FDI_RX_IIR(pipe);
701 temp = intel_de_read(dev_priv, reg);
702 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
703
704 if (temp & FDI_RX_BIT_LOCK ||
705 (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
706 intel_de_write(dev_priv, reg,
707 temp | FDI_RX_BIT_LOCK);
708 drm_dbg_kms(&dev_priv->drm,
709 "FDI train 1 done, level %i.\n",
710 i);
711 break;
712 }
713 udelay(1); /* should be 0.5us */
714 }
715 if (i == 4) {
716 drm_dbg_kms(&dev_priv->drm,
717 "FDI train 1 fail on vswing %d\n", j / 2);
718 continue;
719 }
720
721 /* Train 2 */
722 reg = FDI_TX_CTL(pipe);
723 temp = intel_de_read(dev_priv, reg);
724 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
725 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
726 intel_de_write(dev_priv, reg, temp);
727
728 reg = FDI_RX_CTL(pipe);
729 temp = intel_de_read(dev_priv, reg);
730 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
731 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
732 intel_de_write(dev_priv, reg, temp);
733
734 intel_de_posting_read(dev_priv, reg);
735 udelay(2); /* should be 1.5us */
736
737 for (i = 0; i < 4; i++) {
738 reg = FDI_RX_IIR(pipe);
739 temp = intel_de_read(dev_priv, reg);
740 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
741
742 if (temp & FDI_RX_SYMBOL_LOCK ||
743 (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
744 intel_de_write(dev_priv, reg,
745 temp | FDI_RX_SYMBOL_LOCK);
746 drm_dbg_kms(&dev_priv->drm,
747 "FDI train 2 done, level %i.\n",
748 i);
749 goto train_done;
750 }
751 udelay(2); /* should be 1.5us */
752 }
753 if (i == 4)
754 drm_dbg_kms(&dev_priv->drm,
755 "FDI train 2 fail on vswing %d\n", j / 2);
756 }
757
758 train_done:
759 drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
760 }
761
762 /* Starting with Haswell, different DDI ports can work in FDI mode for
763 * connection to the PCH-located connectors. For this, it is necessary to train
764 * both the DDI port and PCH receiver for the desired DDI buffer settings.
765 *
766 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
767 * please note that when FDI mode is active on DDI E, it shares 2 lanes with
768 * DDI A (which is used for eDP)
769 */
770 void hsw_fdi_link_train(struct intel_encoder *encoder,
771 const struct intel_crtc_state *crtc_state)
772 {
773 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
774 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
775 u32 temp, i, rx_ctl_val;
776 int n_entries;
777
778 encoder->get_buf_trans(encoder, crtc_state, &n_entries);
779
780 hsw_prepare_dp_ddi_buffers(encoder, crtc_state);
781
782 /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
783 * mode set "sequence for CRT port" document:
784 * - TP1 to TP2 time with the default value
785 * - FDI delay to 90h
786 *
787 * WaFDIAutoLinkSetTimingOverrride:hsw
788 */
789 intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
790 FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
791
792 /* Enable the PCH Receiver FDI PLL */
793 rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
794 FDI_RX_PLL_ENABLE |
795 FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
796 intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
797 intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
798 udelay(220);
799
800 /* Switch from Rawclk to PCDclk */
801 rx_ctl_val |= FDI_PCDCLK;
802 intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
803
804 /* Configure Port Clock Select */
805 drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
806 intel_ddi_enable_clock(encoder, crtc_state);
807
808 /* Start training, iterating through the available voltage and emphasis
809 * settings, testing each value twice. */
810 for (i = 0; i < n_entries * 2; i++) {
811 /* Configure DP_TP_CTL with auto-training */
812 intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
813 DP_TP_CTL_FDI_AUTOTRAIN |
814 DP_TP_CTL_ENHANCED_FRAME_ENABLE |
815 DP_TP_CTL_LINK_TRAIN_PAT1 |
816 DP_TP_CTL_ENABLE);
817
818 /* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
819 * DDI E does not support port reversal, the functionality is
820 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
821 * port reversal bit */
822 intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
823 DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2));
824 intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
825
826 udelay(600);
827
828 /* Program PCH FDI Receiver TU */
829 intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
830
831 /* Enable PCH FDI Receiver with auto-training */
832 rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
833 intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
834 intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
835
836 /* Wait for FDI receiver lane calibration */
837 udelay(30);
838
839 /* Unset FDI_RX_MISC pwrdn lanes */
840 temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
841 temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
842 intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
843 intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
844
845 /* Wait for FDI auto training time */
846 udelay(5);
847
848 temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
849 if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
850 drm_dbg_kms(&dev_priv->drm,
851 "FDI link training done on step %d\n", i);
852 break;
853 }
854
855 /*
856 * Leave things enabled even if we failed to train FDI.
857 * Results in less fireworks from the state checker.
858 */
859 if (i == n_entries * 2 - 1) {
860 drm_err(&dev_priv->drm, "FDI link training failed!\n");
861 break;
862 }
863
864 rx_ctl_val &= ~FDI_RX_ENABLE;
865 intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
866 intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
867
868 temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
869 temp &= ~DDI_BUF_CTL_ENABLE;
870 intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
871 intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
872
873 /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
874 temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
875 temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
876 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
877 intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
878 intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
879
880 intel_wait_ddi_buf_idle(dev_priv, PORT_E);
881
882 /* Reset FDI_RX_MISC pwrdn lanes */
883 temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
884 temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
885 temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
886 intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
887 intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
888 }
889
890 /* Enable normal pixel sending for FDI */
891 intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
892 DP_TP_CTL_FDI_AUTOTRAIN |
893 DP_TP_CTL_LINK_TRAIN_NORMAL |
894 DP_TP_CTL_ENHANCED_FRAME_ENABLE |
895 DP_TP_CTL_ENABLE);
896 }
897
898 void hsw_fdi_disable(struct intel_encoder *encoder)
899 {
900 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
901 u32 val;
902
903 /*
904 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
905 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
906 * step 13 is the correct place for it. Step 18 is where it was
907 * originally before the BUN.
908 */
909 val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
910 val &= ~FDI_RX_ENABLE;
911 intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
912
913 val = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
914 val &= ~DDI_BUF_CTL_ENABLE;
915 intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), val);
916
917 intel_wait_ddi_buf_idle(dev_priv, PORT_E);
918
919 intel_ddi_disable_clock(encoder);
920
921 val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
922 val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
923 val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
924 intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val);
925
926 val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
927 val &= ~FDI_PCDCLK;
928 intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
929
930 val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
931 val &= ~FDI_RX_PLL_ENABLE;
932 intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
933 }
934
935 void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
936 {
937 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
938 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
939 enum pipe pipe = crtc->pipe;
940 i915_reg_t reg;
941 u32 temp;
942
943 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
944 reg = FDI_RX_CTL(pipe);
945 temp = intel_de_read(dev_priv, reg);
946 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
947 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
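/* BPC in FDI rx is consistent with that in PIPECONF */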
948 temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
949 intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
950
951 intel_de_posting_read(dev_priv, reg);
952 udelay(200);
953
954 /* Switch from Rawclk to PCDclk */
955 temp = intel_de_read(dev_priv, reg);
956 intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
957
958 intel_de_posting_read(dev_priv, reg);
959 udelay(200);
960
961 /* Enable CPU FDI TX PLL, always on for Ironlake */
962 reg = FDI_TX_CTL(pipe);
963 temp = intel_de_read(dev_priv, reg);
964 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
965 intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
966
967 intel_de_posting_read(dev_priv, reg);
968 udelay(100);
969 }
970 }
971
972 void ilk_fdi_pll_disable(struct intel_crtc *crtc)
973 {
974 struct drm_device *dev = crtc->base.dev;
975 struct drm_i915_private *dev_priv = to_i915(dev);
976 enum pipe pipe = crtc->pipe;
977 i915_reg_t reg;
978 u32 temp;
979
980 /* Switch from PCDclk to Rawclk */
981 reg = FDI_RX_CTL(pipe);
982 temp = intel_de_read(dev_priv, reg);
983 intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
984
985 /* Disable CPU FDI TX PLL */
986 reg = FDI_TX_CTL(pipe);
987 temp = intel_de_read(dev_priv, reg);
988 intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
989
990 intel_de_posting_read(dev_priv, reg);
991 udelay(100);
992
993 reg = FDI_RX_CTL(pipe);
994 temp = intel_de_read(dev_priv, reg);
995 intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
996
997 /* Wait for the clocks to turn off. */
998 intel_de_posting_read(dev_priv, reg);
999 udelay(100);
1000 }
1001
1002 void ilk_fdi_disable(struct intel_crtc *crtc)
1003 {
1004 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1005 enum pipe pipe = crtc->pipe;
1006 i915_reg_t reg;
1007 u32 temp;
1008
1009 /* disable CPU FDI tx and PCH FDI rx */
1010 reg = FDI_TX_CTL(pipe);
1011 temp = intel_de_read(dev_priv, reg);
1012 intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
1013 intel_de_posting_read(dev_priv, reg);
1014
1015 reg = FDI_RX_CTL(pipe);
1016 temp = intel_de_read(dev_priv, reg);
1017 temp &= ~(0x7 << 16);
1018 temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
1019 intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
1020
1021 intel_de_posting_read(dev_priv, reg);
1022 udelay(100);
1023
1024 /* Ironlake workaround, disable clock pointer after downing FDI */
1025 if (HAS_PCH_IBX(dev_priv))
1026 intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
1027 FDI_RX_PHASE_SYNC_POINTER_OVR);
1028
1029 /* still set train pattern 1 */
1030 reg = FDI_TX_CTL(pipe);
1031 temp = intel_de_read(dev_priv, reg);
1032 temp &= ~FDI_LINK_TRAIN_NONE;
1033 temp |= FDI_LINK_TRAIN_PATTERN_1;
1034 intel_de_write(dev_priv, reg, temp);
1035
1036 reg = FDI_RX_CTL(pipe);
1037 temp = intel_de_read(dev_priv, reg);
1038 if (HAS_PCH_CPT(dev_priv)) {
1039 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1040 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
1041 } else {
1042 temp &= ~FDI_LINK_TRAIN_NONE;
1043 temp |= FDI_LINK_TRAIN_PATTERN_1;
1044 }
1045 /* BPC in FDI rx is consistent with that in PIPECONF */
1046 temp &= ~(0x07 << 16);
1047 temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
1048 intel_de_write(dev_priv, reg, temp);
1049
1050 intel_de_posting_read(dev_priv, reg);
1051 udelay(100);
1052 }
1053
1054 static const struct intel_fdi_funcs ilk_funcs = {
1055 .fdi_link_train = ilk_fdi_link_train,
1056 };
1057
1058 static const struct intel_fdi_funcs gen6_funcs = {
1059 .fdi_link_train = gen6_fdi_link_train,
1060 };
1061
1062 static const struct intel_fdi_funcs ivb_funcs = {
1063 .fdi_link_train = ivb_manual_fdi_link_train,
1064 };
1065
1066 void
1067 intel_fdi_init_hook(struct drm_i915_private *dev_priv)
1068 {
1069 if (IS_IRONLAKE(dev_priv)) {
1070 dev_priv->display.funcs.fdi = &ilk_funcs;
1071 } else if (IS_SANDYBRIDGE(dev_priv)) {
1072 dev_priv->display.funcs.fdi = &gen6_funcs;
1073 } else if (IS_IVYBRIDGE(dev_priv)) {
1074 /* FIXME: detect B0+ stepping and use auto training */
1075 dev_priv->display.funcs.fdi = &ivb_funcs;
1076 }
1077 }
1078