/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"

/**
 * DOC: DMC Firmware Support
 *
 * From gen9 onwards, the display engine includes a DMC (Display
 * Microcontroller), which saves and restores display engine state when it
 * enters a low-power state and comes back to normal.
 */

#define DMC_VERSION(major, minor)	((major) << 16 | (minor))
#define DMC_VERSION_MAJOR(version)	((version) >> 16)
#define DMC_VERSION_MINOR(version)	((version) & 0xffff)
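
/*
 * For example, DMC_VERSION(2, 16) packs to 0x00020010, from which
 * DMC_VERSION_MAJOR() recovers 2 and DMC_VERSION_MINOR() recovers 16.
 */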

#define DMC_PATH(platform) \
	"i915/" __stringify(platform) "_dmc.bin"

/*
 * New DMC additions should not use this. This is used solely to remain
 * compatible with systems that have not yet updated DMC blobs to use
 * unversioned file names.
 */
#define DMC_LEGACY_PATH(platform, major, minor) \
	"i915/"					\
	__stringify(platform) "_dmc_ver"	\
	__stringify(major) "_"			\
	__stringify(minor) ".bin"
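
/* e.g. DMC_LEGACY_PATH(tgl, 2, 12) expands to "i915/tgl_dmc_ver2_12.bin". */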

#define DISPLAY_VER13_DMC_MAX_FW_SIZE	0x20000

#define DISPLAY_VER12_DMC_MAX_FW_SIZE	ICL_DMC_MAX_FW_SIZE

#define DG2_DMC_PATH			DMC_LEGACY_PATH(dg2, 2, 08)
MODULE_FIRMWARE(DG2_DMC_PATH);

#define ADLP_DMC_PATH			DMC_PATH(adlp)
#define ADLP_DMC_FALLBACK_PATH		DMC_LEGACY_PATH(adlp, 2, 16)
MODULE_FIRMWARE(ADLP_DMC_PATH);
MODULE_FIRMWARE(ADLP_DMC_FALLBACK_PATH);

#define ADLS_DMC_PATH			DMC_LEGACY_PATH(adls, 2, 01)
MODULE_FIRMWARE(ADLS_DMC_PATH);

#define DG1_DMC_PATH			DMC_LEGACY_PATH(dg1, 2, 02)
MODULE_FIRMWARE(DG1_DMC_PATH);

#define RKL_DMC_PATH			DMC_LEGACY_PATH(rkl, 2, 03)
MODULE_FIRMWARE(RKL_DMC_PATH);

#define TGL_DMC_PATH			DMC_LEGACY_PATH(tgl, 2, 12)
MODULE_FIRMWARE(TGL_DMC_PATH);

#define ICL_DMC_PATH			DMC_LEGACY_PATH(icl, 1, 09)
#define ICL_DMC_MAX_FW_SIZE		0x6000
MODULE_FIRMWARE(ICL_DMC_PATH);

#define GLK_DMC_PATH			DMC_LEGACY_PATH(glk, 1, 04)
#define GLK_DMC_MAX_FW_SIZE		0x4000
MODULE_FIRMWARE(GLK_DMC_PATH);

#define KBL_DMC_PATH			DMC_LEGACY_PATH(kbl, 1, 04)
#define KBL_DMC_MAX_FW_SIZE		BXT_DMC_MAX_FW_SIZE
MODULE_FIRMWARE(KBL_DMC_PATH);

#define SKL_DMC_PATH			DMC_LEGACY_PATH(skl, 1, 27)
#define SKL_DMC_MAX_FW_SIZE		BXT_DMC_MAX_FW_SIZE
MODULE_FIRMWARE(SKL_DMC_PATH);

#define BXT_DMC_PATH			DMC_LEGACY_PATH(bxt, 1, 07)
#define BXT_DMC_MAX_FW_SIZE		0x3000
MODULE_FIRMWARE(BXT_DMC_PATH);

#define DMC_DEFAULT_FW_OFFSET		0xFFFFFFFF
#define PACKAGE_MAX_FW_INFO_ENTRIES	20
#define PACKAGE_V2_MAX_FW_INFO_ENTRIES	32
#define DMC_V1_MAX_MMIO_COUNT		8
#define DMC_V3_MAX_MMIO_COUNT		20
#define DMC_V1_MMIO_START_RANGE		0x80000

#define PIPE_TO_DMC_ID(pipe)		(DMC_FW_PIPEA + ((pipe) - PIPE_A))
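
/* Maps a hardware pipe to its per-pipe firmware id, e.g. PIPE_B -> DMC_FW_PIPEB. */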

struct intel_css_header {
	/* 0x09 for DMC */
	u32 module_type;

	/* Includes the DMC specific header in dwords */
	u32 header_len;

	/* Always 0x10000 */
	u32 header_ver;

	/* Not used */
	u32 module_id;

	/* Not used */
	u32 module_vendor;

	/* in YYYYMMDD format */
	u32 date;

	/* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */
	u32 size;

	/* Not used */
	u32 key_size;

	/* Not used */
	u32 modulus_size;

	/* Not used */
	u32 exponent_size;

	/* Not used */
	u32 reserved1[12];

	/* Major Minor */
	u32 version;

	/* Not used */
	u32 reserved2[8];

	/* Not used */
	u32 kernel_header_info;
} __packed;

struct intel_fw_info {
	u8 reserved1;

	/* reserved on package_header version 1, must be 0 on version 2 */
	u8 dmc_id;

	/* Stepping (A, B, C, ..., *). * is a wildcard */
	char stepping;

	/* Sub-stepping (0, 1, ..., *). * is a wildcard */
	char substepping;

	u32 offset;
	u32 reserved2;
} __packed;

struct intel_package_header {
	/* DMC container header length in dwords */
	u8 header_len;

	/* 0x01, 0x02 */
	u8 header_ver;

	u8 reserved[10];

	/* Number of valid entries in the FWInfo array below */
	u32 num_entries;
} __packed;

struct intel_dmc_header_base {
	/* Always 0x40403E3E */
	u32 signature;

	/* DMC binary header length */
	u8 header_len;

	/* 0x01 */
	u8 header_ver;

	/* Reserved */
	u16 dmcc_ver;

	/* Major, Minor */
	u32 project;

	/* Firmware program size (excluding header) in dwords */
	u32 fw_size;

	/* Major Minor version */
	u32 fw_version;
} __packed;

struct intel_dmc_header_v1 {
	struct intel_dmc_header_base base;

	/* Number of valid MMIO cycles present. */
	u32 mmio_count;

	/* MMIO address */
	u32 mmioaddr[DMC_V1_MAX_MMIO_COUNT];

	/* MMIO data */
	u32 mmiodata[DMC_V1_MAX_MMIO_COUNT];

	/* FW filename */
	char dfile[32];

	u32 reserved1[2];
} __packed;

struct intel_dmc_header_v3 {
	struct intel_dmc_header_base base;

	/* DMC RAM start MMIO address */
	u32 start_mmioaddr;

	u32 reserved[9];

	/* FW filename */
	char dfile[32];

	/* Number of valid MMIO cycles present. */
	u32 mmio_count;

	/* MMIO address */
	u32 mmioaddr[DMC_V3_MAX_MMIO_COUNT];

	/* MMIO data */
	u32 mmiodata[DMC_V3_MAX_MMIO_COUNT];
} __packed;
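
/*
 * Note the v1 vs v3 differences handled in parse_dmc_fw_header() below: v3
 * adds start_mmioaddr and moves the MMIO arrays after dfile, and its
 * header_len is in dwords where v1's is in bytes.
 */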

struct stepping_info {
	char stepping;
	char substepping;
};

static bool has_dmc_id_fw(struct drm_i915_private *i915, int dmc_id)
{
	return i915->display.dmc.dmc_info[dmc_id].payload;
}

bool intel_dmc_has_payload(struct drm_i915_private *i915)
{
	return has_dmc_id_fw(i915, DMC_FW_MAIN);
}

static const struct stepping_info *
intel_get_stepping_info(struct drm_i915_private *i915,
			struct stepping_info *si)
{
	const char *step_name = intel_step_name(RUNTIME_INFO(i915)->step.display_step);

	si->stepping = step_name[0];
	si->substepping = step_name[1];
	return si;
}

static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
{
	/* The below bit doesn't need to be cleared ever afterwards */
	intel_de_rmw(dev_priv, DC_STATE_DEBUG, 0,
		     DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP);
	intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
}

static void disable_event_handler(struct drm_i915_private *i915,
				  i915_reg_t ctl_reg, i915_reg_t htp_reg)
{
	intel_de_write(i915, ctl_reg,
		       REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
				      DMC_EVT_CTL_TYPE_EDGE_0_1) |
		       REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
				      DMC_EVT_CTL_EVENT_ID_FALSE));
	intel_de_write(i915, htp_reg, 0);
}

static void
disable_flip_queue_event(struct drm_i915_private *i915,
			 i915_reg_t ctl_reg, i915_reg_t htp_reg)
{
	u32 event_ctl;
	u32 event_htp;

	event_ctl = intel_de_read(i915, ctl_reg);
	event_htp = intel_de_read(i915, htp_reg);
	if (event_ctl != (DMC_EVT_CTL_ENABLE |
			  DMC_EVT_CTL_RECURRING |
			  REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
					 DMC_EVT_CTL_TYPE_EDGE_0_1) |
			  REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
					 DMC_EVT_CTL_EVENT_ID_CLK_MSEC)) ||
	    !event_htp) {
		drm_dbg_kms(&i915->drm,
			    "Unexpected DMC event configuration (control %08x htp %08x)\n",
			    event_ctl, event_htp);
		return;
	}

	disable_event_handler(i915, ctl_reg, htp_reg);
}

static bool
get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id,
			  i915_reg_t *ctl_reg, i915_reg_t *htp_reg)
{
	switch (dmc_id) {
	case DMC_FW_MAIN:
		if (DISPLAY_VER(i915) == 12) {
			*ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3);
			*htp_reg = DMC_EVT_HTP(i915, dmc_id, 3);

			return true;
		}
		break;
	case DMC_FW_PIPEA ... DMC_FW_PIPED:
		if (IS_DG2(i915)) {
			*ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2);
			*htp_reg = DMC_EVT_HTP(i915, dmc_id, 2);

			return true;
		}
		break;
	}

	return false;
}

static void
disable_all_flip_queue_events(struct drm_i915_private *i915)
{
	int dmc_id;

	/* TODO: check if the following applies to all D13+ platforms. */
	if (!IS_DG2(i915) && !IS_TIGERLAKE(i915))
		return;

	for (dmc_id = 0; dmc_id < DMC_FW_MAX; dmc_id++) {
		i915_reg_t ctl_reg;
		i915_reg_t htp_reg;

		if (!has_dmc_id_fw(i915, dmc_id))
			continue;

		if (!get_flip_queue_event_regs(i915, dmc_id, &ctl_reg, &htp_reg))
			continue;

		disable_flip_queue_event(i915, ctl_reg, htp_reg);
	}
}

static void disable_all_event_handlers(struct drm_i915_private *i915)
{
	int id;

	/* TODO: disable the event handlers on pre-GEN12 platforms as well */
	if (DISPLAY_VER(i915) < 12)
		return;

	for (id = DMC_FW_MAIN; id < DMC_FW_MAX; id++) {
		int handler;

		if (!has_dmc_id_fw(i915, id))
			continue;

		for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
			disable_event_handler(i915,
					      DMC_EVT_CTL(i915, id, handler),
					      DMC_EVT_HTP(i915, id, handler));
	}
}

static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
{
	enum pipe pipe;

	if (DISPLAY_VER(i915) < 13)
		return;

	/*
	 * Wa_16015201720:adl-p,dg2,mtl
	 * The WA requires clock gating to be disabled all the time
	 * for pipe A and B.
	 * For pipe C and D clock gating needs to be disabled only
	 * while initializing the firmware.
	 */
	if (enable)
		for (pipe = PIPE_A; pipe <= PIPE_D; pipe++)
			intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
				     0, PIPEDMC_GATING_DIS);
	else
		for (pipe = PIPE_C; pipe <= PIPE_D; pipe++)
			intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
				     PIPEDMC_GATING_DIS, 0);
}

void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe)
{
	if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe)))
		return;

	if (DISPLAY_VER(i915) >= 14)
		intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe));
	else
		intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE);
}

void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe)
{
	if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe)))
		return;

	if (DISPLAY_VER(i915) >= 14)
		intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0);
	else
		intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
}

/**
 * intel_dmc_load_program() - write the firmware from memory to registers.
 * @dev_priv: i915 drm device.
 *
 * DMC firmware is read from a .bin file and kept in internal memory one time.
 * Every time the display comes back from a low power state this function is
 * called to copy the firmware from internal memory to registers.
 */
void intel_dmc_load_program(struct drm_i915_private *dev_priv)
{
	struct intel_dmc *dmc = &dev_priv->display.dmc;
	u32 id, i;

	if (!intel_dmc_has_payload(dev_priv))
		return;

	pipedmc_clock_gating_wa(dev_priv, true);

	disable_all_event_handlers(dev_priv);

	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	preempt_disable();

	for (id = 0; id < DMC_FW_MAX; id++) {
		for (i = 0; i < dmc->dmc_info[id].dmc_fw_size; i++) {
			intel_de_write_fw(dev_priv,
					  DMC_PROGRAM(dmc->dmc_info[id].start_mmioaddr, i),
					  dmc->dmc_info[id].payload[i]);
		}
	}

	preempt_enable();

	for (id = 0; id < DMC_FW_MAX; id++) {
		for (i = 0; i < dmc->dmc_info[id].mmio_count; i++) {
			intel_de_write(dev_priv, dmc->dmc_info[id].mmioaddr[i],
				       dmc->dmc_info[id].mmiodata[i]);
		}
	}

	dev_priv->display.dmc.dc_state = 0;

	gen9_set_dc_state_debugmask(dev_priv);

	/*
	 * Flip queue events need to be disabled before enabling DC5/6.
	 * i915 doesn't use the flip queue feature, so disable it already
	 * here.
	 */
	disable_all_flip_queue_events(dev_priv);

	pipedmc_clock_gating_wa(dev_priv, false);
}

/**
 * intel_dmc_disable_program() - disable the firmware
 * @i915: i915 drm device
 *
 * Disable all event handlers in the firmware, making sure the firmware is
 * inactive after the display is uninitialized.
 */
void intel_dmc_disable_program(struct drm_i915_private *i915)
{
	if (!intel_dmc_has_payload(i915))
		return;

	pipedmc_clock_gating_wa(i915, true);
	disable_all_event_handlers(i915);
	pipedmc_clock_gating_wa(i915, false);
}

void assert_dmc_loaded(struct drm_i915_private *i915)
{
	drm_WARN_ONCE(&i915->drm,
		      !intel_de_read(i915, DMC_PROGRAM(i915->display.dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
		      "DMC program storage start is NULL\n");
	drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE),
		      "DMC SSP Base Not fine\n");
	drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_HTP_SKL),
		      "DMC HTP Not fine\n");
}

static bool fw_info_matches_stepping(const struct intel_fw_info *fw_info,
				     const struct stepping_info *si)
{
	if ((fw_info->substepping == '*' && si->stepping == fw_info->stepping) ||
	    (si->stepping == fw_info->stepping && si->substepping == fw_info->substepping) ||
	    /*
	     * If we don't find a more specific match from the two checks
	     * above, we then check for the generic one, to be sure it works
	     * even with "broken firmware".
	     */
	    (si->stepping == '*' && si->substepping == fw_info->substepping) ||
	    (fw_info->stepping == '*' && fw_info->substepping == '*'))
		return true;

	return false;
}
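
/*
 * For example, a device at stepping ('B', '0') matches a firmware entry
 * marked ('B', '*') via the first check above, and any device matches a
 * fully wildcarded ('*', '*') entry via the last one.
 */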

/*
 * Search fw_info table for dmc_offset to find firmware binary: num_entries is
 * already sanitized.
 */
static void dmc_set_fw_offset(struct intel_dmc *dmc,
			      const struct intel_fw_info *fw_info,
			      unsigned int num_entries,
			      const struct stepping_info *si,
			      u8 package_ver)
{
	unsigned int i, id;

	struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);

	for (i = 0; i < num_entries; i++) {
		id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;

		if (id >= DMC_FW_MAX) {
			drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", id);
			continue;
		}

		/*
		 * More specific versions come first, so we don't even have to
		 * check for the stepping since we already found a previous FW
		 * for this id.
		 */
		if (dmc->dmc_info[id].present)
			continue;

		if (fw_info_matches_stepping(&fw_info[i], si)) {
			dmc->dmc_info[id].present = true;
			dmc->dmc_info[id].dmc_offset = fw_info[i].offset;
		}
	}
}

static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
				       const u32 *mmioaddr, u32 mmio_count,
				       int header_ver, u8 dmc_id)
{
	struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
	u32 start_range, end_range;
	int i;

	if (dmc_id >= DMC_FW_MAX) {
		drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id);
		return false;
	}

	if (header_ver == 1) {
		start_range = DMC_MMIO_START_RANGE;
		end_range = DMC_MMIO_END_RANGE;
	} else if (dmc_id == DMC_FW_MAIN) {
		start_range = TGL_MAIN_MMIO_START;
		end_range = TGL_MAIN_MMIO_END;
	} else if (DISPLAY_VER(i915) >= 13) {
		start_range = ADLP_PIPE_MMIO_START;
		end_range = ADLP_PIPE_MMIO_END;
	} else if (DISPLAY_VER(i915) >= 12) {
		start_range = TGL_PIPE_MMIO_START(dmc_id);
		end_range = TGL_PIPE_MMIO_END(dmc_id);
	} else {
		drm_warn(&i915->drm, "Unknown mmio range for sanity check");
		return false;
	}

	for (i = 0; i < mmio_count; i++) {
		if (mmioaddr[i] < start_range || mmioaddr[i] > end_range)
			return false;
	}

	return true;
}

static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
			       const struct intel_dmc_header_base *dmc_header,
			       size_t rem_size, u8 dmc_id)
{
	struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
	struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id];
	unsigned int header_len_bytes, dmc_header_size, payload_size, i;
	const u32 *mmioaddr, *mmiodata;
	u32 mmio_count, mmio_count_max, start_mmioaddr;
	u8 *payload;

	BUILD_BUG_ON(ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
		     ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);

	/*
	 * Check if we can access common fields; we will check again below
	 * after we have read the version.
	 */
	if (rem_size < sizeof(struct intel_dmc_header_base))
		goto error_truncated;

	/* Cope with small differences between v1 and v3 */
	if (dmc_header->header_ver == 3) {
		const struct intel_dmc_header_v3 *v3 =
			(const struct intel_dmc_header_v3 *)dmc_header;

		if (rem_size < sizeof(struct intel_dmc_header_v3))
			goto error_truncated;

		mmioaddr = v3->mmioaddr;
		mmiodata = v3->mmiodata;
		mmio_count = v3->mmio_count;
		mmio_count_max = DMC_V3_MAX_MMIO_COUNT;
		/* header_len is in dwords */
		header_len_bytes = dmc_header->header_len * 4;
		start_mmioaddr = v3->start_mmioaddr;
		dmc_header_size = sizeof(*v3);
	} else if (dmc_header->header_ver == 1) {
		const struct intel_dmc_header_v1 *v1 =
			(const struct intel_dmc_header_v1 *)dmc_header;

		if (rem_size < sizeof(struct intel_dmc_header_v1))
			goto error_truncated;

		mmioaddr = v1->mmioaddr;
		mmiodata = v1->mmiodata;
		mmio_count = v1->mmio_count;
		mmio_count_max = DMC_V1_MAX_MMIO_COUNT;
		header_len_bytes = dmc_header->header_len;
		start_mmioaddr = DMC_V1_MMIO_START_RANGE;
		dmc_header_size = sizeof(*v1);
	} else {
		drm_err(&i915->drm, "Unknown DMC fw header version: %u\n",
			dmc_header->header_ver);
		return 0;
	}

	if (header_len_bytes != dmc_header_size) {
		drm_err(&i915->drm, "DMC firmware has wrong dmc header length "
			"(%u bytes)\n", header_len_bytes);
		return 0;
	}

	/* Cache the dmc header info. */
	if (mmio_count > mmio_count_max) {
		drm_err(&i915->drm, "DMC firmware has wrong mmio count %u\n", mmio_count);
		return 0;
	}

	if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
					dmc_header->header_ver, dmc_id)) {
		drm_err(&i915->drm, "DMC firmware has wrong MMIO addresses\n");
		return 0;
	}

	for (i = 0; i < mmio_count; i++) {
		dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
		dmc_info->mmiodata[i] = mmiodata[i];
	}
	dmc_info->mmio_count = mmio_count;
	dmc_info->start_mmioaddr = start_mmioaddr;

	rem_size -= header_len_bytes;

	/* fw_size is in dwords, so multiply by 4 to convert to bytes. */
	payload_size = dmc_header->fw_size * 4;
	if (rem_size < payload_size)
		goto error_truncated;

	if (payload_size > dmc->max_fw_size) {
		drm_err(&i915->drm, "DMC FW too big (%u bytes)\n", payload_size);
		return 0;
	}
	dmc_info->dmc_fw_size = dmc_header->fw_size;

	dmc_info->payload = kmalloc(payload_size, GFP_KERNEL);
	if (!dmc_info->payload)
		return 0;

	payload = (u8 *)(dmc_header) + header_len_bytes;
	memcpy(dmc_info->payload, payload, payload_size);

	return header_len_bytes + payload_size;

error_truncated:
	drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
	return 0;
}

static u32
parse_dmc_fw_package(struct intel_dmc *dmc,
		     const struct intel_package_header *package_header,
		     const struct stepping_info *si,
		     size_t rem_size)
{
	struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
	u32 package_size = sizeof(struct intel_package_header);
	u32 num_entries, max_entries;
	const struct intel_fw_info *fw_info;

	if (rem_size < package_size)
		goto error_truncated;

	if (package_header->header_ver == 1) {
		max_entries = PACKAGE_MAX_FW_INFO_ENTRIES;
	} else if (package_header->header_ver == 2) {
		max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES;
	} else {
		drm_err(&i915->drm, "DMC firmware has unknown header version %u\n",
			package_header->header_ver);
		return 0;
	}

	/*
	 * We should always have space for max_entries,
	 * even if not all are used
	 */
	package_size += max_entries * sizeof(struct intel_fw_info);
	if (rem_size < package_size)
		goto error_truncated;

	if (package_header->header_len * 4 != package_size) {
		drm_err(&i915->drm, "DMC firmware has wrong package header length "
			"(%u bytes)\n", package_size);
		return 0;
	}

	num_entries = package_header->num_entries;
	if (WARN_ON(package_header->num_entries > max_entries))
		num_entries = max_entries;

	fw_info = (const struct intel_fw_info *)
		((u8 *)package_header + sizeof(*package_header));
	dmc_set_fw_offset(dmc, fw_info, num_entries, si,
			  package_header->header_ver);

	/* dmc_offset is in dwords */
	return package_size;

error_truncated:
	drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
	return 0;
}

/* Return number of bytes parsed or 0 on error */
static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
			    struct intel_css_header *css_header,
			    size_t rem_size)
{
	struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);

	if (rem_size < sizeof(struct intel_css_header)) {
		drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
		return 0;
	}

	if (sizeof(struct intel_css_header) !=
	    (css_header->header_len * 4)) {
		drm_err(&i915->drm, "DMC firmware has wrong CSS header length "
			"(%u bytes)\n",
			(css_header->header_len * 4));
		return 0;
	}

	dmc->version = css_header->version;

	return sizeof(struct intel_css_header);
}
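
/*
 * A DMC firmware blob, as parsed below, is laid out as: the CSS header,
 * followed by the package header and its fw_info[] table, followed by the
 * per-id DMC header (v1 or v3) and payload at the dword offset the matching
 * fw_info entry points to.
 */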

static void parse_dmc_fw(struct drm_i915_private *dev_priv,
			 const struct firmware *fw)
{
	struct intel_css_header *css_header;
	struct intel_package_header *package_header;
	struct intel_dmc_header_base *dmc_header;
	struct intel_dmc *dmc = &dev_priv->display.dmc;
	struct stepping_info display_info = { '*', '*'};
	const struct stepping_info *si = intel_get_stepping_info(dev_priv, &display_info);
	u32 readcount = 0;
	u32 r, offset;
	int id;

	if (!fw)
		return;

	/* Extract CSS Header information */
	css_header = (struct intel_css_header *)fw->data;
	r = parse_dmc_fw_css(dmc, css_header, fw->size);
	if (!r)
		return;

	readcount += r;

	/* Extract Package Header information */
	package_header = (struct intel_package_header *)&fw->data[readcount];
	r = parse_dmc_fw_package(dmc, package_header, si, fw->size - readcount);
	if (!r)
		return;

	readcount += r;

	for (id = 0; id < DMC_FW_MAX; id++) {
		if (!dev_priv->display.dmc.dmc_info[id].present)
			continue;

		offset = readcount + dmc->dmc_info[id].dmc_offset * 4;
		if (offset > fw->size) {
			drm_err(&dev_priv->drm, "Reading beyond the fw_size\n");
			continue;
		}

		dmc_header = (struct intel_dmc_header_base *)&fw->data[offset];
		parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, id);
	}
}

static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);
	dev_priv->display.dmc.wakeref =
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
}

static void intel_dmc_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&dev_priv->display.dmc.wakeref);

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}

static const char *dmc_fallback_path(struct drm_i915_private *i915)
{
	if (IS_ALDERLAKE_P(i915))
		return ADLP_DMC_FALLBACK_PATH;

	return NULL;
}

static void dmc_load_work_fn(struct work_struct *work)
{
	struct drm_i915_private *dev_priv;
	struct intel_dmc *dmc;
	const struct firmware *fw = NULL;
	const char *fallback_path;
	int err;

	dev_priv = container_of(work, typeof(*dev_priv), display.dmc.work);
	dmc = &dev_priv->display.dmc;

	err = request_firmware(&fw, dev_priv->display.dmc.fw_path, dev_priv->drm.dev);

	if (err == -ENOENT && !dev_priv->params.dmc_firmware_path) {
		fallback_path = dmc_fallback_path(dev_priv);
		if (fallback_path) {
			drm_dbg_kms(&dev_priv->drm,
				    "%s not found, falling back to %s\n",
				    dmc->fw_path,
				    fallback_path);
			err = request_firmware(&fw, fallback_path, dev_priv->drm.dev);
			if (err == 0)
				dev_priv->display.dmc.fw_path = fallback_path;
		}
	}

	parse_dmc_fw(dev_priv, fw);

	if (intel_dmc_has_payload(dev_priv)) {
		intel_dmc_load_program(dev_priv);
		intel_dmc_runtime_pm_put(dev_priv);

		drm_info(&dev_priv->drm,
			 "Finished loading DMC firmware %s (v%u.%u)\n",
			 dev_priv->display.dmc.fw_path, DMC_VERSION_MAJOR(dmc->version),
			 DMC_VERSION_MINOR(dmc->version));
	} else {
		drm_notice(&dev_priv->drm,
			   "Failed to load DMC firmware %s."
			   " Disabling runtime power management.\n",
			   dmc->fw_path);
		drm_notice(&dev_priv->drm, "DMC firmware homepage: %s",
			   INTEL_UC_FIRMWARE_URL);
	}

	release_firmware(fw);
}

/**
 * intel_dmc_ucode_init() - initialize the firmware loading.
 * @dev_priv: i915 drm device.
 *
 * This function is called at the time of loading the display driver to read
 * firmware from a .bin file and copy it into internal memory.
 */
void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
{
	struct intel_dmc *dmc = &dev_priv->display.dmc;

	INIT_WORK(&dev_priv->display.dmc.work, dmc_load_work_fn);

	if (!HAS_DMC(dev_priv))
		return;

	/*
	 * Obtain a runtime pm reference, until DMC is loaded, to avoid entering
	 * runtime-suspend.
	 *
	 * On error, we return with the rpm wakeref held to prevent runtime
	 * suspend as runtime suspend *requires* a working DMC for whatever
	 * reason.
	 */
	intel_dmc_runtime_pm_get(dev_priv);

	if (IS_DG2(dev_priv)) {
		dmc->fw_path = DG2_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
	} else if (IS_ALDERLAKE_P(dev_priv)) {
		dmc->fw_path = ADLP_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
	} else if (IS_ALDERLAKE_S(dev_priv)) {
		dmc->fw_path = ADLS_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (IS_DG1(dev_priv)) {
		dmc->fw_path = DG1_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (IS_ROCKETLAKE(dev_priv)) {
		dmc->fw_path = RKL_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (IS_TIGERLAKE(dev_priv)) {
		dmc->fw_path = TGL_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (DISPLAY_VER(dev_priv) == 11) {
		dmc->fw_path = ICL_DMC_PATH;
		dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE;
	} else if (IS_GEMINILAKE(dev_priv)) {
		dmc->fw_path = GLK_DMC_PATH;
		dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE;
	} else if (IS_KABYLAKE(dev_priv) ||
		   IS_COFFEELAKE(dev_priv) ||
		   IS_COMETLAKE(dev_priv)) {
		dmc->fw_path = KBL_DMC_PATH;
		dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE;
	} else if (IS_SKYLAKE(dev_priv)) {
		dmc->fw_path = SKL_DMC_PATH;
		dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE;
	} else if (IS_BROXTON(dev_priv)) {
		dmc->fw_path = BXT_DMC_PATH;
		dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE;
	}

	if (dev_priv->params.dmc_firmware_path) {
		if (strlen(dev_priv->params.dmc_firmware_path) == 0) {
			dmc->fw_path = NULL;
			drm_info(&dev_priv->drm,
				 "Disabling DMC firmware and runtime PM\n");
			return;
		}

		dmc->fw_path = dev_priv->params.dmc_firmware_path;
	}

	if (!dmc->fw_path) {
		drm_dbg_kms(&dev_priv->drm,
			    "No known DMC firmware for platform, disabling runtime PM\n");
		return;
	}

	drm_dbg_kms(&dev_priv->drm, "Loading %s\n", dmc->fw_path);
	schedule_work(&dev_priv->display.dmc.work);
}

/**
 * intel_dmc_ucode_suspend() - prepare DMC firmware before system suspend
 * @dev_priv: i915 drm device
 *
 * Prepare the DMC firmware before entering system suspend. This includes
 * flushing pending work items and releasing any resources acquired during
 * init.
 */
void intel_dmc_ucode_suspend(struct drm_i915_private *dev_priv)
{
	if (!HAS_DMC(dev_priv))
		return;

	flush_work(&dev_priv->display.dmc.work);

	/* Drop the reference held in case DMC isn't loaded. */
	if (!intel_dmc_has_payload(dev_priv))
		intel_dmc_runtime_pm_put(dev_priv);
}

/**
 * intel_dmc_ucode_resume() - init DMC firmware during system resume
 * @dev_priv: i915 drm device
 *
 * Reinitialize the DMC firmware during system resume, reacquiring any
 * resources released in intel_dmc_ucode_suspend().
 */
void intel_dmc_ucode_resume(struct drm_i915_private *dev_priv)
{
	if (!HAS_DMC(dev_priv))
		return;

	/*
	 * Reacquire the reference to keep RPM disabled in case DMC isn't
	 * loaded.
	 */
	if (!intel_dmc_has_payload(dev_priv))
		intel_dmc_runtime_pm_get(dev_priv);
}

/**
 * intel_dmc_ucode_fini() - unload the DMC firmware.
 * @dev_priv: i915 drm device.
 *
 * Firmware unloading includes freeing the internal memory and resetting the
 * firmware loading status.
 */
void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
{
	int id;

	if (!HAS_DMC(dev_priv))
		return;

	intel_dmc_ucode_suspend(dev_priv);
	drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);

	for (id = 0; id < DMC_FW_MAX; id++)
		kfree(dev_priv->display.dmc.dmc_info[id].payload);
}

void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
				 struct drm_i915_private *i915)
{
	struct intel_dmc *dmc = &i915->display.dmc;

	if (!HAS_DMC(i915))
		return;

	i915_error_printf(m, "DMC loaded: %s\n",
			  str_yes_no(intel_dmc_has_payload(i915)));
	i915_error_printf(m, "DMC fw version: %d.%d\n",
			  DMC_VERSION_MAJOR(dmc->version),
			  DMC_VERSION_MINOR(dmc->version));
}

static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = m->private;
	intel_wakeref_t wakeref;
	struct intel_dmc *dmc;
	i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG;

	if (!HAS_DMC(i915))
		return -ENODEV;

	dmc = &i915->display.dmc;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	seq_printf(m, "fw loaded: %s\n",
		   str_yes_no(intel_dmc_has_payload(i915)));
	seq_printf(m, "path: %s\n", dmc->fw_path);
	seq_printf(m, "Pipe A fw needed: %s\n",
		   str_yes_no(GRAPHICS_VER(i915) >= 12));
	seq_printf(m, "Pipe A fw loaded: %s\n",
		   str_yes_no(dmc->dmc_info[DMC_FW_PIPEA].payload));
	seq_printf(m, "Pipe B fw needed: %s\n",
		   str_yes_no(IS_ALDERLAKE_P(i915) ||
			      DISPLAY_VER(i915) >= 14));
	seq_printf(m, "Pipe B fw loaded: %s\n",
		   str_yes_no(dmc->dmc_info[DMC_FW_PIPEB].payload));

	if (!intel_dmc_has_payload(i915))
		goto out;

	seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
		   DMC_VERSION_MINOR(dmc->version));

	if (DISPLAY_VER(i915) >= 12) {
		i915_reg_t dc3co_reg;

		if (IS_DGFX(i915) || DISPLAY_VER(i915) >= 14) {
			dc3co_reg = DG1_DMC_DEBUG3;
			dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
		} else {
			dc3co_reg = TGL_DMC_DEBUG3;
			dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
			dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
		}

		seq_printf(m, "DC3CO count: %d\n",
			   intel_de_read(i915, dc3co_reg));
	} else {
		dc5_reg = IS_BROXTON(i915) ? BXT_DMC_DC3_DC5_COUNT :
			SKL_DMC_DC3_DC5_COUNT;
		if (!IS_GEMINILAKE(i915) && !IS_BROXTON(i915))
			dc6_reg = SKL_DMC_DC5_DC6_COUNT;
	}

	seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(i915, dc5_reg));
	if (i915_mmio_reg_valid(dc6_reg))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   intel_de_read(i915, dc6_reg));

out:
	seq_printf(m, "program base: 0x%08x\n",
		   intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
	seq_printf(m, "ssp base: 0x%08x\n",
		   intel_de_read(i915, DMC_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", intel_de_read(i915, DMC_HTP_SKL));

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(intel_dmc_debugfs_status);

void intel_dmc_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root,
			    i915, &intel_dmc_debugfs_status_fops);
}