/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "chan.h"
#include "head.h"
#include "ior.h"

#include <subdev/timer.h>

#include <nvif/class.h>

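/* Program the DP watermark value into the low six bits of the per-link
 * configuration register at 0x61c128.
 */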
void
g94_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 loff = nv50_sor_link(sor);

	nvkm_mask(device, 0x61c128 + loff, 0x0000003f, watermark);
}

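/* Program the DP active-symbol parameters: the TU size into 0x61c10c, and
 * the VTUa/VTUf/VTUi fields into 0x61c128.
 */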
void
g94_sor_dp_activesym(struct nvkm_ior *sor, int head,
		     u8 TU, u8 VTUa, u8 VTUf, u8 VTUi)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 loff = nv50_sor_link(sor);

	nvkm_mask(device, 0x61c10c + loff, 0x000001fc, TU << 2);
	nvkm_mask(device, 0x61c128 + loff, 0x010f7f00, VTUa << 24 | VTUf << 16 | VTUi << 8);
}

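/* Program the horizontal and vertical audio symbol counts for this SOR. */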
void
g94_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);

	nvkm_mask(device, 0x61c1e8 + soff, 0x0000ffff, h);
	nvkm_mask(device, 0x61c1ec + soff, 0x00ffffff, v);
}

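/* Set drive current (dc) and pre-emphasis (pe) for a single lane, and raise
 * the shared power/voltage level (pu) if the requested value exceeds what is
 * already programmed (or unconditionally for lane 0).
 */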
void
g94_sor_dp_drive(struct nvkm_ior *sor, int ln, int pc, int dc, int pe, int pu)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 loff = nv50_sor_link(sor);
	const u32 shift = sor->func->dp->lanes[ln] * 8;
	u32 data[3];

	data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
	data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
	data[2] = nvkm_rd32(device, 0x61c130 + loff);
	if ((data[2] & 0x0000ff00) < (pu << 8) || ln == 0)
		data[2] = (data[2] & ~0x0000ff00) | (pu << 8);

	nvkm_wr32(device, 0x61c118 + loff, data[0] | (dc << shift));
	nvkm_wr32(device, 0x61c120 + loff, data[1] | (pe << shift));
	nvkm_wr32(device, 0x61c130 + loff, data[2]);
}

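/* Select the requested link-training pattern in the DP control register;
 * unrecognised values trigger a WARN and are ignored.
 */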
void
g94_sor_dp_pattern(struct nvkm_ior *sor, int pattern)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 loff = nv50_sor_link(sor);
	u32 data;

	switch (pattern) {
	case 0: data = 0x00001000; break;
	case 1: data = 0x01000000; break;
	case 2: data = 0x02000000; break;
	default:
		WARN_ON(1);
		return;
	}

	nvkm_mask(device, 0x61c10c + loff, 0x0f001000, data);
}

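/* Power up the first 'nr' lanes (mapped through the implementation's lane
 * ordering table), then trigger the update and wait up to two seconds for
 * the hardware to acknowledge it.
 */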
void
g94_sor_dp_power(struct nvkm_ior *sor, int nr)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);
	const u32 loff = nv50_sor_link(sor);
	u32 mask = 0, i;

	for (i = 0; i < nr; i++)
		mask |= 1 << sor->func->dp->lanes[i];

	nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask);
	nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000))
			break;
	);
}

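/* Configure the main link: lane-count mask and enhanced-framing enable in
 * the DP control register, and the higher link rate in the clock control
 * register when the bandwidth code exceeds 0x06 (1.62Gb/s).
 */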
int
g94_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);
	const u32 loff = nv50_sor_link(sor);
	u32 dpctrl = 0x00000000;
	u32 clksor = 0x00000000;

	dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
	if (sor->dp.ef)
		dpctrl |= 0x00004000;
	if (sor->dp.bw > 0x06)
		clksor |= 0x00040000;

	nvkm_mask(device, 0x614300 + soff, 0x000c0000, clksor);
	nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
	return 0;
}

const struct nvkm_ior_func_dp
g94_sor_dp = {
	.lanes = { 2, 1, 0, 3},
	.links = g94_sor_dp_links,
	.power = g94_sor_dp_power,
	.pattern = g94_sor_dp_pattern,
	.drive = g94_sor_dp_drive,
	.audio_sym = g94_sor_dp_audio_sym,
	.activesym = g94_sor_dp_activesym,
	.watermark = g94_sor_dp_watermark,
};

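/* The war_2/war_3 workarounds below only need to run when the SOR is about
 * to drive TMDS and 0x614300 reports one of the affected clock
 * configurations.
 */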
static bool
g94_sor_war_needed(struct nvkm_ior *sor)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);

	if (sor->asy.proto == TMDS) {
		switch (nvkm_rd32(device, 0x614300 + soff) & 0x00030000) {
		case 0x00000000:
		case 0x00030000:
			return true;
		default:
			break;
		}
	}

	return false;
}

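/* Power SPPLL1 (0x00e840) back down once no SOR appears to be sourcing its
 * clock from it.
 */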
static void
g94_sor_war_update_sppll1(struct nvkm_disp *disp)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	struct nvkm_ior *ior;
	bool used = false;
	u32 clksor;

	list_for_each_entry(ior, &disp->iors, head) {
		if (ior->type != SOR)
			continue;

		clksor = nvkm_rd32(device, 0x614300 + nv50_ior_base(ior));
		switch (clksor & 0x03000000) {
		case 0x02000000:
		case 0x03000000:
			used = true;
			break;
		default:
			break;
		}
	}

	if (used)
		return;

	nvkm_mask(device, 0x00e840, 0x80000000, 0x00000000);
}

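/* Second half of the workaround: if the SOR is powered, run the power-down
 * sequencer script, power it off and restore the sequencer entries, then
 * undo the clock override applied by g94_sor_war_2(), power the SOR back up,
 * and release SPPLL1 if nothing else is using it.
 */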
static void
g94_sor_war_3(struct nvkm_ior *sor)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);
	u32 sorpwr;

	if (!g94_sor_war_needed(sor))
		return;

	sorpwr = nvkm_rd32(device, 0x61c004 + soff);
	if (sorpwr & 0x00000001) {
		u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
		u32 pd_pc = (seqctl & 0x00000f00) >> 8;
		u32 pu_pc = seqctl & 0x0000000f;

		nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x1f008000);

		nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
				break;
		);
		nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000);
		nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
				break;
		);

		nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x00002000);
		nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f000000);
	}

	nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000000);
	nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x00000000);

	if (sorpwr & 0x00000001)
		nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000001);

	g94_sor_war_update_sppll1(sor->disp);
}

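/* First half of the workaround: enable SPPLL1, temporarily clock the SOR
 * from it, pulse the link PLL control registers, and patch the power-up
 * sequencer entry if the SOR is currently powered.
 */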
static void
g94_sor_war_2(struct nvkm_ior *sor)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);

	if (!g94_sor_war_needed(sor))
		return;

	nvkm_mask(device, 0x00e840, 0x80000000, 0x80000000);
	nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x03000000);
	nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000001);

	nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x00000000);
	nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x14000000);
	nvkm_usec(device, 400, NVKM_DELAY);
	nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x00000000);
	nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x01000000);

	if (nvkm_rd32(device, 0x61c004 + soff) & 0x00000001) {
		u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
		u32 pu_pc = seqctl & 0x0000000f;
		nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f008000);
	}
}

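/* Decode the SOR control method state (armed or to-be-assembled) into the
 * common protocol/link/head representation.
 */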
void
g94_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 coff = sor->id * 8 + (state == &sor->arm) * 4;
	u32 ctrl = nvkm_rd32(device, 0x610794 + coff);

	state->proto_evo = (ctrl & 0x00000f00) >> 8;
	switch (state->proto_evo) {
	case 0: state->proto = LVDS; state->link = 1; break;
	case 1: state->proto = TMDS; state->link = 1; break;
	case 2: state->proto = TMDS; state->link = 2; break;
	case 5: state->proto = TMDS; state->link = 3; break;
	case 8: state->proto = DP; state->link = 1; break;
	case 9: state->proto = DP; state->link = 2; break;
	default:
		state->proto = UNKNOWN;
		break;
	}

	state->head = ctrl & 0x00000003;
	nv50_pior_depth(sor, state, ctrl);
}

static const struct nvkm_ior_func
g94_sor = {
	.state = g94_sor_state,
	.power = nv50_sor_power,
	.clock = nv50_sor_clock,
	.war_2 = g94_sor_war_2,
	.war_3 = g94_sor_war_3,
	.dp = &g94_sor_dp,
};

static int
g94_sor_new(struct nvkm_disp *disp, int id)
{
	return nvkm_ior_new_(&g94_sor, disp, SOR, id, false);
}

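/* G94 supports up to four SORs; the mask of present outputs comes from bits
 * 24-27 of 0x610184.
 */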
int
g94_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
	struct nvkm_device *device = disp->engine.subdev.device;

	*pmask = (nvkm_rd32(device, 0x610184) & 0x0f000000) >> 24;
	return 4;
}

static const struct nvkm_disp_mthd_list
g94_disp_core_mthd_sor = {
	.mthd = 0x0040,
	.addr = 0x000008,
	.data = {
		{ 0x0600, 0x610794 },
		{}
	}
};

const struct nvkm_disp_chan_mthd
g94_disp_core_mthd = {
	.name = "Core",
	.addr = 0x000000,
	.prev = 0x000004,
	.data = {
		{ "Global", 1, &nv50_disp_core_mthd_base },
		{ "DAC", 3, &g84_disp_core_mthd_dac },
		{ "SOR", 4, &g94_disp_core_mthd_sor },
		{ "PIOR", 3, &nv50_disp_core_mthd_pior },
		{ "HEAD", 2, &g84_disp_core_mthd_head },
		{}
	}
};

const struct nvkm_disp_chan_user
g94_disp_core = {
	.func = &nv50_disp_core_func,
	.ctrl = 0,
	.user = 0,
	.mthd = &g94_disp_core_mthd,
};

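/* G94 reuses the NV50 display implementation, exposing the GT206-class core
 * channel and GT200-class base/overlay channels alongside the G82-class
 * cursor and overlay-immediate channels.
 */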
static const struct nvkm_disp_func
g94_disp = {
	.oneinit = nv50_disp_oneinit,
	.init = nv50_disp_init,
	.fini = nv50_disp_fini,
	.intr = nv50_disp_intr,
	.super = nv50_disp_super,
	.uevent = &nv50_disp_chan_uevent,
	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
	.sor = { .cnt = g94_sor_cnt, .new = g94_sor_new },
	.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
	.root = { 0,0,GT206_DISP },
	.user = {
		{{0,0,G82_DISP_CURSOR             }, nvkm_disp_chan_new, &nv50_disp_curs },
		{{0,0,G82_DISP_OVERLAY            }, nvkm_disp_chan_new, &nv50_disp_oimm },
		{{0,0,GT200_DISP_BASE_CHANNEL_DMA }, nvkm_disp_chan_new, &g84_disp_base },
		{{0,0,GT206_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &g94_disp_core },
		{{0,0,GT200_DISP_OVERLAY_CHANNEL_DMA}, nvkm_disp_chan_new, &gt200_disp_ovly },
		{}
	},
};

int
g94_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	     struct nvkm_disp **pdisp)
{
	return nvkm_disp_new_(&g94_disp, device, type, inst, pdisp);
}