/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <drm/drm_managed.h>
#include <linux/pm_runtime.h>

#include "gt/intel_engine_regs.h"
#include "gt/intel_gt_regs.h"

#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "i915_reg.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_pm.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS	 10

#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))

static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	uncore->fw_get_funcs->force_wake_get(uncore, fw_domains);
}

void
intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915)
{
	spin_lock_init(&i915->mmio_debug.lock);
	i915->mmio_debug.unclaimed_mmio_check = 1;

	i915->uncore.debug = &i915->mmio_debug;
}

static void mmio_debug_suspend(struct intel_uncore *uncore)
{
	if (!uncore->debug)
		return;

	spin_lock(&uncore->debug->lock);

	/* Save and disable mmio debugging for the user bypass */
	if (!uncore->debug->suspend_count++) {
		uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check;
		uncore->debug->unclaimed_mmio_check = 0;
	}

	spin_unlock(&uncore->debug->lock);
}

static bool check_for_unclaimed_mmio(struct intel_uncore *uncore);

static void mmio_debug_resume(struct intel_uncore *uncore)
{
	if (!uncore->debug)
		return;

	spin_lock(&uncore->debug->lock);

	if (!--uncore->debug->suspend_count)
		uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check;

	if (check_for_unclaimed_mmio(uncore))
		drm_info(&uncore->i915->drm,
			 "Invalid mmio detected during user access\n");

	spin_unlock(&uncore->debug->lock);
}
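
/*
 * mmio_debug_suspend()/mmio_debug_resume() nest via ->suspend_count, so
 * only the outermost pair actually toggles the unclaimed-mmio checking.
 * An illustrative sequence (a sketch, not a real caller):
 *
 *	mmio_debug_suspend(uncore);	// saves and disables the check
 *	mmio_debug_suspend(uncore);	// nested: bumps the counter only
 *	mmio_debug_resume(uncore);	// nested: drops the counter only
 *	mmio_debug_resume(uncore);	// restores the check and re-checks
 */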

static const char * const forcewake_domain_names[] = {
	"render",
	"gt",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vdbox4",
	"vdbox5",
	"vdbox6",
	"vdbox7",
	"vebox0",
	"vebox1",
	"vebox2",
	"vebox3",
	"gsc",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	/*
	 * We don't really know if the powerwell for the forcewake domain we
	 * are trying to reset here actually exists at this point (engines
	 * could be fused off in ICL+), so no waiting for acks.
	 */
	/* WaRsClearFWBitsAtReset */
	if (GRAPHICS_VER(d->uncore->i915) >= 12)
		fw_clear(d, 0xefff);
	else
		fw_clear(d, 0xffff);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
	d->uncore->fw_domains_timer |= d->mask;
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((fw_ack(d) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(d, ack, 0);
}

static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
		drm_err(&d->uncore->i915->drm,
			"%s: timed out waiting for forcewake ack to clear.\n",
			intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
	}
}

enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * There is a possibility of driver's wake request colliding
	 * with hardware's own wake requests and that can cause
	 * hardware to not deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524 and it's known as:
	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
	 * although the name is a bit misleading.
	 */

	pass = 1;
	do {
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (fw_ack(d) & ack_bit) == value;

		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	drm_dbg(&d->uncore->i915->drm,
		"%s had to use fallback to %s ack, 0x%x (passes %u)\n",
		intel_uncore_forcewake_domain_to_str(d->id),
		type == ACK_SET ? "set" : "clear",
		fw_ack(d),
		pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}

static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
		fw_domain_wait_ack_clear(d);
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	fw_set(d, FORCEWAKE_KERNEL);
}

static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
		drm_err(&d->uncore->i915->drm,
			"%s: timed out waiting for forcewake ack request.\n",
			intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
	}
}

static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
		fw_domain_wait_ack_set(d);
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	fw_clear(d, FORCEWAKE_KERNEL);
}

static void
fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear_fallback(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set_fallback(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_put(d);

	uncore->fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_reset(d);
}

static inline u32 gt_thread_status(struct intel_uncore *uncore)
{
	u32 val;

	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;

	return val;
}

static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
	/*
	 * w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	drm_WARN_ONCE(&uncore->i915->drm,
		      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
		      "GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get_normal(uncore, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(uncore);
}

static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
	u32 n;

	/*
	 * On VLV, the FIFO is shared by both SW and HW, so we need to read
	 * the free entries every time.
	 */
	if (IS_VALLEYVIEW(uncore->i915))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			drm_dbg(&uncore->i915->drm,
				"GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	uncore->fifo_count = n - 1;
}
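
/*
 * A sketch of how the FIFO accounting above is meant to be used (the gen6
 * MMIO write path is the expected caller): each posted write consumes one
 * FIFO entry, so the wait both polls for headroom above
 * GT_FIFO_NUM_RESERVED_ENTRIES and pre-charges uncore->fifo_count for the
 * write that follows:
 *
 *	__gen6_gt_wait_for_fifo(uncore);	// waits, then fifo_count = n - 1
 *	__raw_uncore_write32(uncore, reg, val);	// the accounted-for write
 */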

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = domain->uncore;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);

	uncore->fw_domains_timer &= ~domain->mask;

	GEM_BUG_ON(!domain->wake_count);
	if (--domain->wake_count == 0)
		fw_domains_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
}
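
/*
 * Rough lifecycle of the auto-release timer (a sketch based on the code in
 * this file): fw_domain_arm_timer() takes an extra wake_count reference and
 * arms a ~1ms hrtimer; any reuse of the domain in the meantime marks it
 * active (see __intel_uncore_forcewake_get/put), which makes the xchg()
 * above restart the timer instead of dropping forcewake:
 *
 *	fw_domain_arm_timer(d);			// wake_count++, timer armed
 *	...MMIO traffic sets d->active...	// busy domain stays awake
 *	intel_uncore_fw_release_timer();	// active ? restart : put
 */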

/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/*
	 * Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&uncore->lock, irqflags);

		for_each_fw_domain(domain, uncore, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&uncore->lock, irqflags);
		cond_resched();
	}

	drm_WARN_ON(&uncore->i915->drm, active_domains);

	fw = uncore->fw_domains_active;
	if (fw)
		fw_domains_put(uncore, fw);

	fw_domains_reset(uncore, uncore->fw_domains);
	assert_forcewakes_inactive(uncore);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return fw; /* track the lost user forcewake domains */
}

static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 dbg;

	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	/*
	 * Bugs in PCI programming (or failing hardware) can occasionally cause
	 * us to lose access to the MMIO BAR. When this happens, register
	 * reads will come back with 0xFFFFFFFF for every register and things
	 * go bad very quickly. Let's try to detect that special case and at
	 * least try to print a more informative message about what has
	 * happened.
	 *
	 * During normal operation the FPGA_DBG register has several unused
	 * bits that will always read back as 0's so we can use them as canaries
	 * to recognize when MMIO accesses are just busted.
	 */
	if (unlikely(dbg == ~0))
		drm_err(&uncore->i915->drm,
			"Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");

	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 cer;

	cer = __raw_uncore_read32(uncore, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
	u32 fifodbg;

	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);

	if (unlikely(fifodbg)) {
		drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret = false;

	lockdep_assert_held(&uncore->debug->lock);

	if (uncore->debug->suspend_count)
		return false;

	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
		ret |= fpga_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_dbg_unclaimed(uncore))
		ret |= vlv_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_fifo(uncore))
		ret |= gen6_check_for_fifo_debug(uncore);

	return ret;
}

static void forcewake_early_sanitize(struct intel_uncore *uncore,
				     unsigned int restore_forcewake)
{
	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(uncore->i915)) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		fw_domains_get(uncore, restore_forcewake);

		if (intel_uncore_has_fifo(uncore))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);
	}
	iosf_mbi_punit_release();
}

void intel_uncore_suspend(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
}

void intel_uncore_resume_early(struct intel_uncore *uncore)
{
	unsigned int restore_forcewake;

	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");

	if (!intel_uncore_has_forcewake(uncore))
		return;

	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
	forcewake_early_sanitize(uncore, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		fw_domains_get(uncore, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of
 * the sequence, and the reference should subsequently be dropped by a
 * symmetric call to intel_uncore_forcewake_put(). Usually the caller wants
 * all the domains to be kept awake, in which case @fw_domains would be
 * FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	assert_rpm_wakelock_held(uncore->rpm);

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_get(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}
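
/*
 * An illustrative pairing (a sketch; intel_uncore_read_fw()/write_fw() are
 * the lockless raw accessors from intel_uncore.h, assumed here only for
 * the example):
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *	...a burst of intel_uncore_read_fw()/intel_uncore_write_fw() calls...
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 */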

/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!uncore->user_forcewake_count++) {
		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
		mmio_debug_suspend(uncore);
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!--uncore->user_forcewake_count) {
		mmio_debug_resume(uncore);
		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&uncore->lock);
}
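
/*
 * These two are expected to be called in matched pairs bracketing the
 * userspace bypass, e.g. (a sketch):
 *
 *	intel_uncore_forcewake_user_get(uncore);
 *	...userspace pokes registers directly, mmio debug suppressed...
 *	intel_uncore_forcewake_user_put(uncore);
 */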

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->fw_get_funcs)
		return;

	__intel_uncore_forcewake_get(uncore, fw_domains);
}

static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains,
					 bool delayed)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		GEM_BUG_ON(!domain->wake_count);

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		if (delayed &&
		    !(domain->uncore->fw_domains_timer & domain->mask))
			fw_domain_arm_timer(domain);
		else
			fw_domains_put(uncore, domain->mask);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains, false);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains, true);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

/**
 * intel_uncore_forcewake_flush - flush the delayed release
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to flush
 */
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
				  enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!uncore->fw_get_funcs)
		return;

	fw_domains &= uncore->fw_domains;
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		WRITE_ONCE(domain->active, false);
		if (hrtimer_cancel(&domain->timer))
			intel_uncore_fw_release_timer(&domain->timer);
	}
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->fw_get_funcs)
		return;

	__intel_uncore_forcewake_put(uncore, fw_domains, false);
}

void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
	if (!uncore->fw_get_funcs)
		return;

	drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
		 "Expected all fw_domains to be inactive, but %08x are still on\n",
		 uncore->fw_domains_active);
}

void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		return;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irq(&uncore->lock);

	assert_rpm_wakelock_held(uncore->rpm);

	fw_domains &= uncore->fw_domains;
	drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
		 "Expected %08x fw_domains to be active, but %08x are off\n",
		 fw_domains, fw_domains & ~uncore->fw_domains_active);

	/*
	 * Check that the caller has an explicit wakeref and we don't mistake
	 * it for the auto wakeref.
	 */
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		unsigned int actual = READ_ONCE(domain->wake_count);
		unsigned int expect = 1;

		if (uncore->fw_domains_timer & domain->mask)
			expect++; /* pending automatic release */

		if (drm_WARN(&uncore->i915->drm, actual < expect,
			     "Expected domain %d to be held awake by caller, count=%d\n",
			     domain->id, actual))
			break;
	}

	spin_unlock_irq(&uncore->lock);
}

/*
 * We give fast paths for the really cool registers. The second range includes
 * media domains (and the GSC starting from Xe_LPM+).
 */
#define NEEDS_FORCE_WAKE(reg) ({ \
	u32 __reg = (reg); \
	__reg < 0x40000 || __reg >= 0x116000; \
})
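
/*
 * Worked examples for the check above:
 *	0x02030  -> true  (below 0x40000)
 *	0x45000  -> false (inside 0x40000 - 0x115fff, no forcewake needed)
 *	0x1c0030 -> true  (at or above 0x116000, the media/GSC ranges)
 */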

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({                                 \
	unsigned int start__ = 0, end__ = (num);                        \
	typeof(base) result__ = NULL;                                   \
	while (start__ < end__) {                                       \
		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
		int ret__ = (cmp)((key), (base) + mid__);               \
		if (ret__ < 0) {                                        \
			end__ = mid__;                                  \
		} else if (ret__ > 0) {                                 \
			start__ = mid__ + 1;                            \
		} else {                                                \
			result__ = (base) + mid__;                      \
			break;                                          \
		}                                                       \
	}                                                               \
	result__;                                                       \
})
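
/*
 * The cmp callback follows the bsearch(3) contract: negative if the key
 * sorts before the element, positive if after, zero on a match. E.g.
 * BSEARCH(offset, table, num, fw_range_cmp) below evaluates to the
 * intel_forcewake_range whose [start, end] contains offset, or NULL.
 */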

static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
	const struct intel_forcewake_range *entry;

	if (IS_GSI_REG(offset))
		offset += uncore->gsi_offset;

	entry = BSEARCH(offset,
			uncore->fw_domains_table,
			uncore->fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	/*
	 * The list of FW domains depends on the SKU in gen11+ so we
	 * can't determine it statically. We use FORCEWAKE_ALL and
	 * translate it here to the list of available domains.
	 */
	if (entry->domains == FORCEWAKE_ALL)
		return uncore->fw_domains;

	drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
		 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
		 entry->domains & ~uncore->fw_domains, offset);

	return entry->domains;
}

/*
 * Shadowed register tables describe special register ranges that i915 is
 * allowed to write to without acquiring forcewake. If these registers' power
 * wells are down, the hardware will save values written by i915 to a shadow
 * copy and automatically transfer them into the real register the next time
 * the power well is woken up. Shadowing only applies to writes; forcewake
 * must still be acquired when reading from registers in these ranges.
 *
 * The documentation for shadowed registers is somewhat spotty on older
 * platforms. However, a register missing from these lists is non-fatal; it
 * just means we'll wake up the hardware for some register accesses where we
 * didn't really need to.
 *
 * The ranges listed in these tables must be sorted by offset.
 *
 * When adding new tables here, please also add them to
 * intel_shadow_table_check() in selftests/intel_uncore.c so that they will be
 * scanned for obvious mistakes or typos by the selftests.
 */
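
/*
 * As a concrete example: 0x2030 (the render ring's RING_TAIL) appears in
 * every primary-GT table below, so a write to it can skip forcewake
 * entirely, while a read of the same offset still wakes its domain first.
 */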

static const struct i915_range gen8_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0x12030, .end = 0x12030 },
	{ .start = 0x1a030, .end = 0x1a030 },
	{ .start = 0x22030, .end = 0x22030 },
};

static const struct i915_range gen11_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2550, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22230, .end = 0x22230 },
	{ .start = 0x22510, .end = 0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0230, .end = 0x1C0230 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4230, .end = 0x1C4230 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8230, .end = 0x1C8230 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0230, .end = 0x1D0230 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4230, .end = 0x1D4230 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8230, .end = 0x1D8230 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
};

static const struct i915_range gen12_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2510, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0xA188, .end = 0xA188 },
	{ .start = 0xA278, .end = 0xA278 },
	{ .start = 0xA540, .end = 0xA56C },
	{ .start = 0xC4C8, .end = 0xC4C8 },
	{ .start = 0xC4D4, .end = 0xC4D4 },
	{ .start = 0xC600, .end = 0xC600 },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22510, .end = 0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8510, .end = 0x1D8550 },

	/*
	 * The rest of these ranges are specific to Xe_HP and beyond, but
	 * are reserved/unused ranges on earlier gen12 platforms, so they can
	 * be safely added to the gen12 table.
	 */
	{ .start = 0x1E0030, .end = 0x1E0030 },
	{ .start = 0x1E0510, .end = 0x1E0550 },
	{ .start = 0x1E4030, .end = 0x1E4030 },
	{ .start = 0x1E4510, .end = 0x1E4550 },
	{ .start = 0x1E8030, .end = 0x1E8030 },
	{ .start = 0x1E8510, .end = 0x1E8550 },
	{ .start = 0x1F0030, .end = 0x1F0030 },
	{ .start = 0x1F0510, .end = 0x1F0550 },
	{ .start = 0x1F4030, .end = 0x1F4030 },
	{ .start = 0x1F4510, .end = 0x1F4550 },
	{ .start = 0x1F8030, .end = 0x1F8030 },
	{ .start = 0x1F8510, .end = 0x1F8550 },
};

static const struct i915_range dg2_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2510, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0xA188, .end = 0xA188 },
	{ .start = 0xA278, .end = 0xA278 },
	{ .start = 0xA540, .end = 0xA56C },
	{ .start = 0xC4C8, .end = 0xC4C8 },
	{ .start = 0xC4E0, .end = 0xC4E0 },
	{ .start = 0xC600, .end = 0xC600 },
	{ .start = 0xC658, .end = 0xC658 },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22510, .end = 0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
	{ .start = 0x1E0030, .end = 0x1E0030 },
	{ .start = 0x1E0510, .end = 0x1E0550 },
	{ .start = 0x1E4030, .end = 0x1E4030 },
	{ .start = 0x1E4510, .end = 0x1E4550 },
	{ .start = 0x1E8030, .end = 0x1E8030 },
	{ .start = 0x1E8510, .end = 0x1E8550 },
	{ .start = 0x1F0030, .end = 0x1F0030 },
	{ .start = 0x1F0510, .end = 0x1F0550 },
	{ .start = 0x1F4030, .end = 0x1F4030 },
	{ .start = 0x1F4510, .end = 0x1F4550 },
	{ .start = 0x1F8030, .end = 0x1F8030 },
	{ .start = 0x1F8510, .end = 0x1F8550 },
};

static const struct i915_range pvc_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2510, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0xA188, .end = 0xA188 },
	{ .start = 0xA278, .end = 0xA278 },
	{ .start = 0xA540, .end = 0xA56C },
	{ .start = 0xC4C8, .end = 0xC4C8 },
	{ .start = 0xC4E0, .end = 0xC4E0 },
	{ .start = 0xC600, .end = 0xC600 },
	{ .start = 0xC658, .end = 0xC658 },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22510, .end = 0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
	{ .start = 0x1E0030, .end = 0x1E0030 },
	{ .start = 0x1E0510, .end = 0x1E0550 },
	{ .start = 0x1E4030, .end = 0x1E4030 },
	{ .start = 0x1E4510, .end = 0x1E4550 },
	{ .start = 0x1E8030, .end = 0x1E8030 },
	{ .start = 0x1E8510, .end = 0x1E8550 },
	{ .start = 0x1F0030, .end = 0x1F0030 },
	{ .start = 0x1F0510, .end = 0x1F0550 },
	{ .start = 0x1F4030, .end = 0x1F4030 },
	{ .start = 0x1F4510, .end = 0x1F4550 },
	{ .start = 0x1F8030, .end = 0x1F8030 },
	{ .start = 0x1F8510, .end = 0x1F8550 },
};

static const struct i915_range mtl_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2510, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0xA188, .end = 0xA188 },
	{ .start = 0xA278, .end = 0xA278 },
	{ .start = 0xA540, .end = 0xA56C },
	{ .start = 0xC050, .end = 0xC050 },
	{ .start = 0xC340, .end = 0xC340 },
	{ .start = 0xC4C8, .end = 0xC4C8 },
	{ .start = 0xC4E0, .end = 0xC4E0 },
	{ .start = 0xC600, .end = 0xC600 },
	{ .start = 0xC658, .end = 0xC658 },
	{ .start = 0xCFD4, .end = 0xCFDC },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22510, .end = 0x22550 },
};

static const struct i915_range xelpmp_shadowed_regs[] = {
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x38A008, .end = 0x38A00C },
	{ .start = 0x38A188, .end = 0x38A188 },
	{ .start = 0x38A278, .end = 0x38A278 },
	{ .start = 0x38A540, .end = 0x38A56C },
	{ .start = 0x38A618, .end = 0x38A618 },
	{ .start = 0x38C050, .end = 0x38C050 },
	{ .start = 0x38C340, .end = 0x38C340 },
	{ .start = 0x38C4C8, .end = 0x38C4C8 },
	{ .start = 0x38C4E0, .end = 0x38C4E4 },
	{ .start = 0x38C600, .end = 0x38C600 },
	{ .start = 0x38C658, .end = 0x38C658 },
	{ .start = 0x38CFD4, .end = 0x38CFDC },
};

static int mmio_range_cmp(u32 key, const struct i915_range *range)
{
	if (key < range->start)
		return -1;
	else if (key > range->end)
		return 1;
	else
		return 0;
}

static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
{
	if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
		return false;

	if (IS_GSI_REG(offset))
		offset += uncore->gsi_offset;

	return BSEARCH(offset,
		       uncore->shadowed_reg_table,
		       uncore->shadowed_reg_table_entries,
		       mmio_range_cmp);
}

static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return FORCEWAKE_RENDER;
}

#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	const u32 __offset = (offset); \
	if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
		__fwd = find_fw_domain(uncore, __offset); \
	__fwd; \
})
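
/*
 * Putting the pieces together for an illustrative write to 0x2030 on a
 * platform whose shadow table lists it: NEEDS_FORCE_WAKE() is true
 * (0x2030 < 0x40000) but is_shadowed() also returns true, so the write
 * variant yields 0 and the write goes out without waking anything; the
 * read variant still returns whatever find_fw_domain() looks up.
 */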

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

/*
 * All platforms' forcewake tables below must be sorted by offset ranges.
 * Furthermore, new forcewake tables added should be "watertight" and have
 * no gaps between ranges.
 *
 * When there are multiple consecutive ranges listed in the bspec with
 * the same forcewake domain, it is customary to combine them into a single
 * row in the tables below to keep the tables small and lookups fast.
 * Likewise, reserved/unused ranges may be combined with the preceding and/or
 * following ranges since the driver will never be making MMIO accesses in
 * those ranges.
 *
 * For example, if the bspec were to list:
 *
 *	...
 *	0x1000 - 0x1fff: GT
 *	0x2000 - 0x2cff: GT
 *	0x2d00 - 0x2fff: unused/reserved
 *	0x3000 - 0xffff: GT
 *	...
 *
 * these could all be represented by a single line in the code:
 *
 *	GEN_FW_RANGE(0x1000, 0xffff, FORCEWAKE_GT)
 *
 * When adding new forcewake tables here, please also add them to
 * intel_uncore_mock_selftests in selftests/intel_uncore.c so that they will be
 * scanned for obvious mistakes or typos by the selftests.
 */

static const struct intel_forcewake_range __gen6_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
};

static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static const struct intel_forcewake_range __gen11_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8800, 0x8bff, 0),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x95ff, 0),
	GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2407f, 0),
	GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
	GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
};

static const struct intel_forcewake_range __gen12_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x1fff, 0), /*
		0x0 - 0xaff: reserved
		0xb00 - 0x1fff: always on */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
		0x4000 - 0x48ff: gt
		0x4900 - 0x51ff: reserved */
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
		0x5200 - 0x53ff: render
		0x5400 - 0x54ff: reserved
		0x5500 - 0x7fff: render */
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
		0x8160 - 0x817f: reserved
		0x8180 - 0x81ff: always on */
	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
		0x8500 - 0x87ff: gt
		0x8800 - 0x8fff: reserved
		0x9000 - 0x947f: gt
		0x9480 - 0x94cf: reserved */
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
		0x9560 - 0x95ff: always on
		0x9600 - 0x97ff: reserved */
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /*
		0xb400 - 0xbf7f: gt
		0xb480 - 0xbfff: reserved
		0xc000 - 0xcfff: gt */
	GEN_FW_RANGE(0xd000, 0xd7ff, 0),
	GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /*
		0xdc00 - 0xddff: render
		0xde00 - 0xde7f: reserved
		0xde80 - 0xe8ff: render
		0xe900 - 0xefff: reserved */
	GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /*
		0xf000 - 0xffff: gt
		0x10000 - 0x147ff: reserved */
	GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
		0x14800 - 0x14fff: render
		0x15000 - 0x16dff: reserved
		0x16e00 - 0x1bfff: render
		0x1c000 - 0x1ffff: reserved */
	GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
		0x24000 - 0x2407f: always on
		0x24080 - 0x2417f: reserved */
	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
		0x24180 - 0x241ff: gt
		0x24200 - 0x249ff: reserved */
	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
		0x24a00 - 0x24a7f: render
		0x24a80 - 0x251ff: reserved */
	GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /*
		0x25200 - 0x252ff: gt
		0x25300 - 0x255ff: reserved */
	GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x25680 - 0x256ff: VD2
		0x25700 - 0x259ff: reserved */
	GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x25a80 - 0x25aff: VD2
		0x25b00 - 0x2ffff: reserved */
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1c0000 - 0x1c2bff: VD0
		0x1c2c00 - 0x1c2cff: reserved
		0x1c2d00 - 0x1c2dff: VD0
		0x1c2e00 - 0x1c3eff: reserved
		0x1c3f00 - 0x1c3fff: VD0 */
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
		0x1c8000 - 0x1ca0ff: VE0
		0x1ca100 - 0x1cbeff: reserved
		0x1cbf00 - 0x1cbfff: VE0 */
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1cc000 - 0x1ccfff: VD0
		0x1cd000 - 0x1cffff: reserved */
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x1d0000 - 0x1d2bff: VD2
		0x1d2c00 - 0x1d2cff: reserved
		0x1d2d00 - 0x1d2dff: VD2
		0x1d2e00 - 0x1d3eff: reserved
		0x1d3f00 - 0x1d3fff: VD2 */
};
1467
1468 /*
1469 * Graphics IP version 12.55 brings a slight change to the 0xd800 range,
1470 * switching it from the GT domain to the render domain.
1471 */
1472 #define XEHP_FWRANGES(FW_RANGE_D800) \
1473 GEN_FW_RANGE(0x0, 0x1fff, 0), /* \
1474 0x0 - 0xaff: reserved \
1475 0xb00 - 0x1fff: always on */ \
1476 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), \
1477 GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT), \
1478 GEN_FW_RANGE(0x4b00, 0x51ff, 0), /* \
1479 0x4b00 - 0x4fff: reserved \
1480 0x5000 - 0x51ff: always on */ \
1481 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x8160, 0x81ff, 0), /* \
		0x8160 - 0x817f: reserved \
		0x8180 - 0x81ff: always on */ \
	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /* \
		0x8500 - 0x87ff: gt \
		0x8800 - 0x8c7f: reserved \
		0x8c80 - 0x8cff: gt (DG2 only) */ \
	GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /* \
		0x8d00 - 0x8dff: render (DG2 only) \
		0x8e00 - 0x8fff: reserved */ \
	GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /* \
		0x9000 - 0x947f: gt \
		0x9480 - 0x94cf: reserved */ \
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x9560, 0x967f, 0), /* \
		0x9560 - 0x95ff: always on \
		0x9600 - 0x967f: reserved */ \
	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /* \
		0x9680 - 0x96ff: render (DG2 only) \
		0x9700 - 0x97ff: reserved */ \
	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /* \
		0x9800 - 0xb4ff: gt \
		0xb500 - 0xbfff: reserved \
		0xc000 - 0xcfff: gt */ \
	GEN_FW_RANGE(0xd000, 0xd7ff, 0), \
	GEN_FW_RANGE(0xd800, 0xd87f, FW_RANGE_D800), \
	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /* \
		0xdd00 - 0xddff: gt \
		0xde00 - 0xde7f: reserved */ \
	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /* \
		0xde80 - 0xdfff: render \
		0xe000 - 0xe0ff: reserved \
		0xe100 - 0xe8ff: render */ \
	GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /* \
		0xe900 - 0xe9ff: gt \
		0xea00 - 0xefff: reserved \
		0xf000 - 0xffff: gt */ \
	GEN_FW_RANGE(0x10000, 0x12fff, 0), /* \
		0x10000 - 0x11fff: reserved \
		0x12000 - 0x127ff: always on \
		0x12800 - 0x12fff: reserved */ \
	GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0), /* DG2 only */ \
	GEN_FW_RANGE(0x13200, 0x13fff, FORCEWAKE_MEDIA_VDBOX2), /* \
		0x13200 - 0x133ff: VD2 (DG2 only) \
		0x13400 - 0x13fff: reserved */ \
	GEN_FW_RANGE(0x14000, 0x141ff, FORCEWAKE_MEDIA_VDBOX0), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14200, 0x143ff, FORCEWAKE_MEDIA_VDBOX2), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14400, 0x145ff, FORCEWAKE_MEDIA_VDBOX4), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14600, 0x147ff, FORCEWAKE_MEDIA_VDBOX6), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /* \
		0x15000 - 0x15fff: gt (DG2 only) \
		0x16000 - 0x16dff: reserved */ \
	GEN_FW_RANGE(0x16e00, 0x1ffff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x20000, 0x21fff, FORCEWAKE_MEDIA_VDBOX0), /* \
		0x20000 - 0x20fff: VD0 (XEHPSDV only) \
		0x21000 - 0x21fff: reserved */ \
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x24000, 0x2417f, 0), /* \
		0x24000 - 0x2407f: always on \
		0x24080 - 0x2417f: reserved */ \
	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /* \
		0x24180 - 0x241ff: gt \
		0x24200 - 0x249ff: reserved */ \
	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /* \
		0x24a00 - 0x24a7f: render \
		0x24a80 - 0x251ff: reserved */ \
	GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /* \
		0x25200 - 0x252ff: gt \
		0x25300 - 0x25fff: reserved */ \
	GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /* \
		0x26000 - 0x27fff: render \
		0x28000 - 0x29fff: reserved \
		0x2a000 - 0x2ffff: undocumented */ \
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x40000, 0x1bffff, 0), \
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /* \
		0x1c0000 - 0x1c2bff: VD0 \
		0x1c2c00 - 0x1c2cff: reserved \
		0x1c2d00 - 0x1c2dff: VD0 \
		0x1c2e00 - 0x1c3eff: VD0 (DG2 only) \
		0x1c3f00 - 0x1c3fff: VD0 */ \
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /* \
		0x1c4000 - 0x1c6bff: VD1 \
		0x1c6c00 - 0x1c6cff: reserved \
		0x1c6d00 - 0x1c6dff: VD1 \
		0x1c6e00 - 0x1c7fff: reserved */ \
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /* \
		0x1c8000 - 0x1ca0ff: VE0 \
		0x1ca100 - 0x1cbfff: reserved */ \
	GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0), \
	GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2), \
	GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4), \
	GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6), \
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /* \
		0x1d0000 - 0x1d2bff: VD2 \
		0x1d2c00 - 0x1d2cff: reserved \
		0x1d2d00 - 0x1d2dff: VD2 \
		0x1d2e00 - 0x1d3dff: VD2 (DG2 only) \
		0x1d3e00 - 0x1d3eff: reserved \
		0x1d3f00 - 0x1d3fff: VD2 */ \
	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /* \
		0x1d4000 - 0x1d6bff: VD3 \
		0x1d6c00 - 0x1d6cff: reserved \
		0x1d6d00 - 0x1d6dff: VD3 \
		0x1d6e00 - 0x1d7fff: reserved */ \
	GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /* \
		0x1d8000 - 0x1da0ff: VE1 \
		0x1da100 - 0x1dffff: reserved */ \
	GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /* \
		0x1e0000 - 0x1e2bff: VD4 \
		0x1e2c00 - 0x1e2cff: reserved \
		0x1e2d00 - 0x1e2dff: VD4 \
		0x1e2e00 - 0x1e3eff: reserved \
		0x1e3f00 - 0x1e3fff: VD4 */ \
	GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /* \
		0x1e4000 - 0x1e6bff: VD5 \
		0x1e6c00 - 0x1e6cff: reserved \
		0x1e6d00 - 0x1e6dff: VD5 \
		0x1e6e00 - 0x1e7fff: reserved */ \
	GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /* \
		0x1e8000 - 0x1ea0ff: VE2 \
		0x1ea100 - 0x1effff: reserved */ \
	GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /* \
		0x1f0000 - 0x1f2bff: VD6 \
		0x1f2c00 - 0x1f2cff: reserved \
		0x1f2d00 - 0x1f2dff: VD6 \
		0x1f2e00 - 0x1f3eff: reserved \
		0x1f3f00 - 0x1f3fff: VD6 */ \
	GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /* \
		0x1f4000 - 0x1f6bff: VD7 \
		0x1f6c00 - 0x1f6cff: reserved \
		0x1f6d00 - 0x1f6dff: VD7 \
		0x1f6e00 - 0x1f7fff: reserved */ \
	GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),

static const struct intel_forcewake_range __xehp_fw_ranges[] = {
	XEHP_FWRANGES(FORCEWAKE_GT)
};

static const struct intel_forcewake_range __dg2_fw_ranges[] = {
	XEHP_FWRANGES(FORCEWAKE_RENDER)
};
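
/*
 * Both tables above expand XEHP_FWRANGES(); the only difference between
 * them is the domain passed for the macro's FW_RANGE_D800 slot (the
 * 0xd800 - 0xd87f range), which is grabbed via FORCEWAKE_GT on XeHP SDV
 * but via FORCEWAKE_RENDER on DG2.
 */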

static const struct intel_forcewake_range __pvc_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, 0),
	GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xc00, 0xfff, 0),
	GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x813f, FORCEWAKE_GT), /*
		0x4000 - 0x4aff: gt
		0x4b00 - 0x4fff: reserved
		0x5000 - 0x51ff: gt
		0x5200 - 0x52ff: reserved
		0x5300 - 0x53ff: gt
		0x5400 - 0x7fff: reserved
		0x8000 - 0x813f: gt */
	GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8180, 0x81ff, 0),
	GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
		0x8200 - 0x82ff: gt
		0x8300 - 0x84ff: reserved
		0x8500 - 0x887f: gt
		0x8880 - 0x8a7f: reserved
		0x8a80 - 0x8aff: gt
		0x8b00 - 0x8fff: reserved
		0x9000 - 0x947f: gt
		0x9480 - 0x94cf: reserved */
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x967f, 0), /*
		0x9560 - 0x95ff: always on
		0x9600 - 0x967f: reserved */
	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
		0x9680 - 0x96ff: render
		0x9700 - 0x97ff: reserved */
	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
		0x9800 - 0xb4ff: gt
		0xb500 - 0xbfff: reserved
		0xc000 - 0xcfff: gt */
	GEN_FW_RANGE(0xd000, 0xd3ff, 0),
	GEN_FW_RANGE(0xd400, 0xdbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
		0xdd00 - 0xddff: gt
		0xde00 - 0xde7f: reserved */
	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
		0xde80 - 0xdeff: render
		0xdf00 - 0xe1ff: reserved
		0xe200 - 0xe7ff: render
		0xe800 - 0xe8ff: reserved */
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT), /*
		0xe900 - 0xe9ff: gt
		0xea00 - 0xebff: reserved
		0xec00 - 0xffff: gt
		0x10000 - 0x11fff: reserved */
	GEN_FW_RANGE(0x12000, 0x12fff, 0), /*
		0x12000 - 0x127ff: always on
		0x12800 - 0x12fff: reserved */
	GEN_FW_RANGE(0x13000, 0x19fff, FORCEWAKE_GT), /*
		0x13000 - 0x135ff: gt
		0x13600 - 0x147ff: reserved
		0x14800 - 0x153ff: gt
		0x15400 - 0x19fff: reserved */
	GEN_FW_RANGE(0x1a000, 0x21fff, FORCEWAKE_RENDER), /*
		0x1a000 - 0x1ffff: render
		0x20000 - 0x21fff: reserved */
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
		0x24000 - 0x2407f: always on
		0x24080 - 0x2417f: reserved */
	GEN_FW_RANGE(0x24180, 0x25fff, FORCEWAKE_GT), /*
		0x24180 - 0x241ff: gt
		0x24200 - 0x251ff: reserved
		0x25200 - 0x252ff: gt
		0x25300 - 0x25fff: reserved */
	GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /*
		0x26000 - 0x27fff: render
		0x28000 - 0x2ffff: reserved */
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1c0000 - 0x1c2bff: VD0
		0x1c2c00 - 0x1c2cff: reserved
		0x1c2d00 - 0x1c2dff: VD0
		0x1c2e00 - 0x1c3eff: reserved
		0x1c3f00 - 0x1c3fff: VD0 */
	GEN_FW_RANGE(0x1c4000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX1), /*
		0x1c4000 - 0x1c6aff: VD1
		0x1c6b00 - 0x1c7eff: reserved
		0x1c7f00 - 0x1c7fff: VD1
		0x1c8000 - 0x1cffff: reserved */
	GEN_FW_RANGE(0x1d0000, 0x23ffff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x1d0000 - 0x1d2aff: VD2
		0x1d2b00 - 0x1d3eff: reserved
		0x1d3f00 - 0x1d3fff: VD2
		0x1d4000 - 0x23ffff: reserved */
	GEN_FW_RANGE(0x240000, 0x3dffff, 0),
	GEN_FW_RANGE(0x3e0000, 0x3effff, FORCEWAKE_GT),
};

static const struct intel_forcewake_range __mtl_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, 0),
	GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xc00, 0xfff, 0),
	GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
		0x4000 - 0x48ff: render
		0x4900 - 0x51ff: reserved */
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
		0x5200 - 0x53ff: render
		0x5400 - 0x54ff: reserved
		0x5500 - 0x7fff: render */
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER), /*
		0x8140 - 0x815f: render
		0x8160 - 0x817f: reserved */
	GEN_FW_RANGE(0x8180, 0x81ff, 0),
	GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
		0x8200 - 0x87ff: gt
		0x8800 - 0x8dff: reserved
		0x8e00 - 0x8f7f: gt
		0x8f80 - 0x8fff: reserved
		0x9000 - 0x947f: gt
		0x9480 - 0x94cf: reserved */
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x967f, 0), /*
		0x9560 - 0x95ff: always on
		0x9600 - 0x967f: reserved */
	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
		0x9680 - 0x96ff: render
		0x9700 - 0x97ff: reserved */
	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
		0x9800 - 0xb4ff: gt
		0xb500 - 0xbfff: reserved
		0xc000 - 0xcfff: gt */
	GEN_FW_RANGE(0xd000, 0xd7ff, 0), /*
		0xd000 - 0xd3ff: always on
		0xd400 - 0xd7ff: reserved */
	GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
		0xdd00 - 0xddff: gt
		0xde00 - 0xde7f: reserved */
	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
		0xde80 - 0xdfff: render
		0xe000 - 0xe0ff: reserved
		0xe100 - 0xe8ff: render */
	GEN_FW_RANGE(0xe900, 0xe9ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xea00, 0x147ff, 0), /*
		0xea00 - 0x11fff: reserved
		0x12000 - 0x127ff: always on
		0x12800 - 0x147ff: reserved */
	GEN_FW_RANGE(0x14800, 0x19fff, FORCEWAKE_GT), /*
		0x14800 - 0x153ff: gt
		0x15400 - 0x19fff: reserved */
	GEN_FW_RANGE(0x1a000, 0x21fff, FORCEWAKE_RENDER), /*
		0x1a000 - 0x1bfff: render
		0x1c000 - 0x21fff: reserved */
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2ffff, 0), /*
		0x24000 - 0x2407f: always on
		0x24080 - 0x2ffff: reserved */
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT)
};

/*
 * Note that the register ranges here are the final offsets after
 * translation of the GSI block to the 0x380000 offset.
 *
 * NOTE: There are a couple MCR ranges near the bottom of this table
 * that need to power up either VD0 or VD2 depending on which replicated
 * instance of the register we're trying to access. Our forcewake logic
 * at the moment doesn't have a good way to take steering into consideration,
 * and the driver doesn't even access any registers in those ranges today,
 * so for now we just mark those ranges as FORCEWAKE_ALL. That will ensure
 * proper operation if we do start using the ranges in the future, and we
 * can determine at that time whether it's worth adding extra complexity to
 * the forcewake handling to take steering into consideration.
 */
static const struct intel_forcewake_range __xelpmp_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x115fff, 0), /* render GT range */
	GEN_FW_RANGE(0x116000, 0x11ffff, FORCEWAKE_GSC), /*
		0x116000 - 0x117fff: gsc
		0x118000 - 0x119fff: reserved
		0x11a000 - 0x11efff: gsc
		0x11f000 - 0x11ffff: reserved */
	GEN_FW_RANGE(0x120000, 0x1bffff, 0), /* non-GT range */
	GEN_FW_RANGE(0x1c0000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1c0000 - 0x1c3dff: VD0
		0x1c3e00 - 0x1c3eff: reserved
		0x1c3f00 - 0x1c3fff: VD0
		0x1c4000 - 0x1c7fff: reserved */
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
		0x1c8000 - 0x1ca0ff: VE0
		0x1ca100 - 0x1cbfff: reserved */
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1cc000 - 0x1cdfff: VD0
		0x1ce000 - 0x1cffff: reserved */
	GEN_FW_RANGE(0x1d0000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x1d0000 - 0x1d3dff: VD2
		0x1d3e00 - 0x1d3eff: reserved
		0x1d4000 - 0x1d7fff: VD2 */
	GEN_FW_RANGE(0x1d8000, 0x1da0ff, FORCEWAKE_MEDIA_VEBOX1),
	GEN_FW_RANGE(0x1da100, 0x380aff, 0), /*
		0x1da100 - 0x23ffff: reserved
		0x240000 - 0x37ffff: non-GT range
		0x380000 - 0x380aff: reserved */
	GEN_FW_RANGE(0x380b00, 0x380bff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x380c00, 0x380fff, 0),
	GEN_FW_RANGE(0x381000, 0x38817f, FORCEWAKE_GT), /*
		0x381000 - 0x381fff: gt
		0x382000 - 0x383fff: reserved
		0x384000 - 0x384aff: gt
		0x384b00 - 0x3851ff: reserved
		0x385200 - 0x3871ff: gt
		0x387200 - 0x387fff: reserved
		0x388000 - 0x38813f: gt
		0x388140 - 0x38817f: reserved */
	GEN_FW_RANGE(0x388180, 0x3882ff, 0), /*
		0x388180 - 0x3881ff: always on
		0x388200 - 0x3882ff: reserved */
	GEN_FW_RANGE(0x388300, 0x38955f, FORCEWAKE_GT), /*
		0x388300 - 0x38887f: gt
		0x388880 - 0x388fff: reserved
		0x389000 - 0x38947f: gt
		0x389480 - 0x38955f: reserved */
	GEN_FW_RANGE(0x389560, 0x389fff, 0), /*
		0x389560 - 0x3895ff: always on
		0x389600 - 0x389fff: reserved */
	GEN_FW_RANGE(0x38a000, 0x38cfff, FORCEWAKE_GT), /*
		0x38a000 - 0x38afff: gt
		0x38b000 - 0x38bfff: reserved
		0x38c000 - 0x38cfff: gt */
	GEN_FW_RANGE(0x38d000, 0x38d11f, 0),
	GEN_FW_RANGE(0x38d120, 0x391fff, FORCEWAKE_GT), /*
		0x38d120 - 0x38dfff: gt
		0x38e000 - 0x38efff: reserved
		0x38f000 - 0x38ffff: gt
		0x390000 - 0x391fff: reserved */
	GEN_FW_RANGE(0x392000, 0x392fff, 0), /*
		0x392000 - 0x3927ff: always on
		0x392800 - 0x392fff: reserved */
	GEN_FW_RANGE(0x393000, 0x3931ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x393200, 0x39323f, FORCEWAKE_ALL), /* instance-based, see note above */
	GEN_FW_RANGE(0x393240, 0x3933ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x393400, 0x3934ff, FORCEWAKE_ALL), /* instance-based, see note above */
	GEN_FW_RANGE(0x393500, 0x393c7f, 0), /*
		0x393500 - 0x393bff: reserved
		0x393c00 - 0x393c7f: always on */
	GEN_FW_RANGE(0x393c80, 0x393dff, FORCEWAKE_GT),
};
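
/*
 * The range tables above are kept sorted by offset so that the domain(s)
 * for a register can be found with a binary search. A simplified,
 * illustrative sketch of such a lookup (the driver's real helper differs
 * in detail):
 *
 *	static enum forcewake_domains
 *	lookup_fw_domain(const struct intel_forcewake_range *table,
 *			 unsigned int count, u32 offset)
 *	{
 *		while (count) {
 *			const struct intel_forcewake_range *e = &table[count / 2];
 *
 *			if (offset < e->start)
 *				count /= 2;
 *			else if (offset > e->end)
 *				table = e + 1, count -= count / 2 + 1;
 *			else
 *				return e->domains;
 *		}
 *
 *		return 0;
 *	}
 */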

static void
ilk_dummy_write(struct intel_uncore *uncore)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_uncore_write32(uncore, RING_MI_MODE(RENDER_RING_BASE), 0);
}

static void
__unclaimed_reg_debug(struct intel_uncore *uncore,
		      const i915_reg_t reg,
		      const bool read)
{
	if (drm_WARN(&uncore->i915->drm,
		     check_for_unclaimed_mmio(uncore),
		     "Unclaimed %s register 0x%x\n",
		     read ? "read from" : "write to",
		     i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		uncore->i915->params.mmio_debug--;
}

static void
__unclaimed_previous_reg_debug(struct intel_uncore *uncore,
			       const i915_reg_t reg,
			       const bool read)
{
	if (check_for_unclaimed_mmio(uncore))
		drm_dbg(&uncore->i915->drm,
			"Unclaimed access detected before %s register 0x%x\n",
			read ? "read from" : "write to",
			i915_mmio_reg_offset(reg));
}

static inline void
unclaimed_reg_debug(struct intel_uncore *uncore,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug)
		return;

	/* interrupts are disabled and re-enabled around uncore->lock usage */
	lockdep_assert_held(&uncore->lock);

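	/*
	 * The "before" call takes uncore->debug->lock and the matching
	 * "after" call drops it, so the lock is held across the mmio
	 * access bracketed by this pair of calls.
	 */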
	if (before) {
		spin_lock(&uncore->debug->lock);
		__unclaimed_previous_reg_debug(uncore, reg, read);
	} else {
		__unclaimed_reg_debug(uncore, reg, read);
		spin_unlock(&uncore->debug->lock);
	}
}

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	u##x val = __raw_uncore_read##x(uncore, reg); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val; \
}
__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(uncore); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(uncore, reg, true, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

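	/*
	 * Arm each domain's auto-release timer before grabbing the wake:
	 * the timer callback (intel_uncore_fw_release_timer) drops the
	 * reference again once the domain has gone idle, so forcewake taken
	 * on this path needs no explicit put from the caller.
	 */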
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
		fw_domain_arm_timer(domain);

	fw_domains_get(uncore, fw_domains);
}

static inline void __force_wake_auto(struct intel_uncore *uncore,
				     enum forcewake_domains fw_domains)
{
	GEM_BUG_ON(!fw_domains);

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= uncore->fw_domains;
	fw_domains &= ~uncore->fw_domains_active;

	if (fw_domains)
		___force_wake_auto(uncore, fw_domains);
}

#define __gen_fwtable_read(x) \
static u##x \
fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
{ \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN6_READ_FOOTER; \
}

static enum forcewake_domains
fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) {
	return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
}

__gen_fwtable_read(8)
__gen_fwtable_read(16)
__gen_fwtable_read(32)
__gen_fwtable_read(64)

#undef __gen_fwtable_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(uncore, reg, false, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#define __gen_fwtable_write(x) \
static void \
fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static enum forcewake_domains
fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
}

__gen_fwtable_write(8)
__gen_fwtable_write(16)
__gen_fwtable_write(32)

#undef __gen_fwtable_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define __vgpu_write(x) \
static void \
vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	__raw_uncore_write##x(uncore, reg, val); \
}
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)

#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_writeb = x##_write8; \
	(uncore)->funcs.mmio_writew = x##_write16; \
	(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_readb = x##_read8; \
	(uncore)->funcs.mmio_readw = x##_read16; \
	(uncore)->funcs.mmio_readl = x##_read32; \
	(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)

#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
} while (0)

static int __fw_domain_init(struct intel_uncore *uncore,
			    enum forcewake_domain_id domain_id,
			    i915_reg_t reg_set,
			    i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
	GEM_BUG_ON(uncore->fw_domain[domain_id]);

	if (i915_inject_probe_failure(uncore->i915))
		return -ENOMEM;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));

	d->uncore = uncore;
	d->wake_count = 0;
	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset;
	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset;

	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));
	BUILD_BUG_ON(FORCEWAKE_GSC != (1 << FW_DOMAIN_ID_GSC));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	uncore->fw_domains |= BIT(domain_id);

	fw_domain_reset(d);

	uncore->fw_domain[domain_id] = d;

	return 0;
}

static void fw_domain_fini(struct intel_uncore *uncore,
			   enum forcewake_domain_id domain_id)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);

	d = fetch_and_zero(&uncore->fw_domain[domain_id]);
	if (!d)
		return;

	uncore->fw_domains &= ~BIT(domain_id);
	drm_WARN_ON(&uncore->i915->drm, d->wake_count);
	drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
	kfree(d);
}

static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
{
	struct intel_uncore_forcewake_domain *d;
	int tmp;

	for_each_fw_domain(d, uncore, tmp)
		fw_domain_fini(uncore, d->id);
}

static const struct intel_uncore_fw_get uncore_get_fallback = {
	.force_wake_get = fw_domains_get_with_fallback
};

static const struct intel_uncore_fw_get uncore_get_normal = {
	.force_wake_get = fw_domains_get_normal,
};

static const struct intel_uncore_fw_get uncore_get_thread_status = {
	.force_wake_get = fw_domains_get_with_thread_status
};

static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret = 0;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

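/*
 * Latch only the first failure: once ret is non-zero the ?: below
 * short-circuits and the remaining fw_domain_init() calls become no-ops.
 */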
#define fw_domain_init(uncore__, id__, set__, ack__) \
	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))

	if (GRAPHICS_VER(i915) >= 11) {
		intel_engine_mask_t emask;
		int i;

		/* we'll prune the domains of missing engines later */
		emask = uncore->gt->info.engine_mask;

		uncore->fw_get_funcs = &uncore_get_fallback;
		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
			fw_domain_init(uncore, FW_DOMAIN_ID_GT,
				       FORCEWAKE_GT_GEN9,
				       FORCEWAKE_ACK_GT_MTL);
		else
			fw_domain_init(uncore, FW_DOMAIN_ID_GT,
				       FORCEWAKE_GT_GEN9,
				       FORCEWAKE_ACK_GT_GEN9);

		if (RCS_MASK(uncore->gt) || CCS_MASK(uncore->gt))
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE_RENDER_GEN9,
				       FORCEWAKE_ACK_RENDER_GEN9);

		for (i = 0; i < I915_MAX_VCS; i++) {
			if (!__HAS_ENGINE(emask, _VCS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
		}
		for (i = 0; i < I915_MAX_VECS; i++) {
			if (!__HAS_ENGINE(emask, _VECS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
		}

		if (uncore->gt->type == GT_MEDIA)
			fw_domain_init(uncore, FW_DOMAIN_ID_GSC,
				       FORCEWAKE_REQ_GSC, FORCEWAKE_ACK_GSC);
	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
		uncore->fw_get_funcs = &uncore_get_fallback;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
			       FORCEWAKE_GT_GEN9,
			       FORCEWAKE_ACK_GT_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		uncore->fw_get_funcs = &uncore_get_normal;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		uncore->fw_get_funcs = &uncore_get_thread_status;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(i915)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		uncore->fw_get_funcs = &uncore_get_thread_status;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_uncore_write32(uncore, FORCEWAKE, 0);
		__raw_posting_read(uncore, ECOBUS);

		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
		if (ret)
			goto out;

		spin_lock_irq(&uncore->lock);
		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
		ecobus = __raw_uncore_read32(uncore, ECOBUS);
		fw_domains_put(uncore, FORCEWAKE_RENDER);
		spin_unlock_irq(&uncore->lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
			drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (GRAPHICS_VER(i915) == 6) {
		uncore->fw_get_funcs = &uncore_get_thread_status;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

#undef fw_domain_init

	/* All future platforms are expected to require complex power gating */
	drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);

out:
	if (ret)
		intel_uncore_fw_domains_fini(uncore);

	return ret;
}

#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
{ \
	(uncore)->fw_domains_table = \
	    (struct intel_forcewake_range *)(d); \
	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
}

#define ASSIGN_SHADOW_TABLE(uncore, d) \
{ \
	(uncore)->shadowed_reg_table = d; \
	(uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \
}

static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct intel_uncore *uncore = container_of(nb,
						   struct intel_uncore, pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * forcewake all now to make sure that we don't need to do a
		 * forcewake later which on systems where this notifier gets
		 * called requires the punit to access the shared pmic i2c
		 * bus, which will be busy after this notification, leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding a RPM
		 * wake reference -> disable wakeref asserts for the time of
		 * the access.
		 */
		disable_rpm_wakeref_asserts(uncore->rpm);
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(uncore->rpm);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}

static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
{
	iounmap(regs);
}

int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
{
	struct drm_i915_private *i915 = uncore->i915;
	int mmio_size;

	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to keep this ioremap from
	 * clobbering the GTT, which we map with ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 * For dgfx chips register range is expanded to 4MB, and this larger
	 * range is also used for integrated gpus beginning with Meteor Lake.
	 */
	if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
		mmio_size = 4 * 1024 * 1024;
	else if (GRAPHICS_VER(i915) >= 5)
		mmio_size = 2 * 1024 * 1024;
	else
		mmio_size = 512 * 1024;

	uncore->regs = ioremap(phys_addr, mmio_size);
	if (uncore->regs == NULL) {
		drm_err(&i915->drm, "failed to map registers\n");
		return -EIO;
	}

	return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, uncore->regs);
}

void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct intel_gt *gt)
{
	spin_lock_init(&uncore->lock);
	uncore->i915 = gt->i915;
	uncore->gt = gt;
	uncore->rpm = &gt->i915->runtime_pm;
}

static void uncore_raw_init(struct intel_uncore *uncore)
{
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));

	if (intel_vgpu_active(uncore->i915)) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
	} else if (GRAPHICS_VER(uncore->i915) == 5) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
	} else {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
	}
}

static int uncore_media_forcewake_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;

	if (MEDIA_VER(i915) >= 13) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __xelpmp_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, xelpmp_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else {
		MISSING_CASE(MEDIA_VER(i915));
		return -ENODEV;
	}

	return 0;
}

static int uncore_forcewake_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	ret = intel_uncore_fw_domains_init(uncore);
	if (ret)
		return ret;
	forcewake_early_sanitize(uncore, 0);

	ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);

	if (uncore->gt->type == GT_MEDIA)
		return uncore_media_forcewake_init(uncore);

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __mtl_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, mtl_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __pvc_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, pvc_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) >= 12) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) == 11) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_CHERRYVIEW(i915)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) == 8) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_VALLEYVIEW(i915)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
	} else if (IS_GRAPHICS_VER(i915, 6, 7)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
	}

	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

	return 0;
}

int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	/*
	 * The boot firmware initializes local memory and assesses its health.
	 * If memory training fails, the punit will have been instructed to
	 * keep the GT powered down; we won't be able to communicate with it
	 * and we should not continue with driver initialization.
	 */
	if (IS_DGFX(i915) &&
	    !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
		drm_err(&i915->drm, "LMEM not initialized by firmware\n");
		return -ENODEV;
	}

	if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
		uncore->flags |= UNCORE_HAS_FORCEWAKE;

	if (!intel_uncore_has_forcewake(uncore)) {
		uncore_raw_init(uncore);
	} else {
		ret = uncore_forcewake_init(uncore);
		if (ret)
			return ret;
	}

	/* make sure fw funcs are set if and only if we have fw */
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);

	if (HAS_FPGA_DBG_UNCLAIMED(i915))
		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

	if (IS_GRAPHICS_VER(i915, 6, 7))
		uncore->flags |= UNCORE_HAS_FIFO;

	/* clear out unclaimed reg detection bit */
	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");

	return 0;
}

/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them, to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
					  struct intel_gt *gt)
{
	enum forcewake_domains fw_domains = uncore->fw_domains;
	enum forcewake_domain_id domain_id;
	int i;

	if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
		return;

	for (i = 0; i < I915_MAX_VCS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;

		if (HAS_ENGINE(gt, _VCS(i)))
			continue;

		/*
		 * Starting with XeHP, the power well for an even-numbered
		 * VDBOX is also used for shared units within the
		 * media slice such as SFC. So even if the engine
		 * itself is fused off, we still need to initialize
		 * the forcewake domain if any of the other engines
		 * in the same media slice are present.
		 */
		if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50) && i % 2 == 0) {
			if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
				continue;

			if (HAS_ENGINE(gt, _VECS(i / 2)))
				continue;
		}

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}

	for (i = 0; i < I915_MAX_VECS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;

		if (HAS_ENGINE(gt, _VECS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}

	if ((fw_domains & BIT(FW_DOMAIN_ID_GSC)) && !HAS_ENGINE(gt, GSC0))
		fw_domain_fini(uncore, FW_DOMAIN_ID_GSC);
}

/*
 * The driver-initiated FLR is the highest level of reset that we can trigger
 * from within the driver. It is different from the PCI FLR in that it doesn't
 * fully reset the SGUnit and doesn't modify the PCI config space and therefore
 * it doesn't require a re-enumeration of the PCI BARs. However, the
 * driver-initiated FLR does still cause a reset of both GT and display and a
 * memory wipe of local and stolen memory, so recovery would require a full HW
 * re-init and saving/restoring (or re-populating) the wiped memory. Since we
 * perform the FLR as the very last action before releasing access to the HW
 * during the driver release flow, we don't attempt recovery at all, because
 * if/when a new instance of i915 is bound to the device it will do a full
 * re-init anyway.
 */
static void driver_initiated_flr(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	const unsigned int flr_timeout_ms = 3000; /* specs recommend a 3s wait */
	int ret;

	drm_dbg(&i915->drm, "Triggering Driver-FLR\n");

	/*
	 * Make sure any pending FLR requests have cleared by waiting for the
	 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
	 * to make sure it's not still set from a prior attempt (it's a write to
	 * clear bit).
	 * Note that we should never be in a situation where a previous attempt
	 * is still pending (unless the HW is totally dead), but better to be
	 * safe in case something unexpected happens.
	 */
	ret = intel_wait_for_register_fw(uncore, GU_CNTL, DRIVERFLR, 0, flr_timeout_ms);
	if (ret) {
		drm_err(&i915->drm,
			"Failed to wait for Driver-FLR bit to clear! %d\n",
			ret);
		return;
	}
	intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);

	/* Trigger the actual Driver-FLR */
	intel_uncore_rmw_fw(uncore, GU_CNTL, 0, DRIVERFLR);

	ret = intel_wait_for_register_fw(uncore, GU_DEBUG,
					 DRIVERFLR_STATUS, DRIVERFLR_STATUS,
					 flr_timeout_ms);
	if (ret) {
		drm_err(&i915->drm, "wait for Driver-FLR completion failed! %d\n", ret);
		return;
	}

	intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
}

/* Called via drm-managed action */
void intel_uncore_fini_mmio(struct drm_device *dev, void *data)
{
	struct intel_uncore *uncore = data;

	if (intel_uncore_has_forcewake(uncore)) {
		iosf_mbi_punit_acquire();
		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
			&uncore->pmic_bus_access_nb);
		intel_uncore_forcewake_reset(uncore);
		intel_uncore_fw_domains_fini(uncore);
		iosf_mbi_punit_release();
	}

	if (intel_uncore_needs_flr_on_fini(uncore))
		driver_initiated_flr(uncore);
}

/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold the register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read_fw(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 reg_value = 0;
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);
	GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}
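
/*
 * Illustrative usage sketch (SOME_STATUS_REG and BUSY_BIT are hypothetical
 * names): a caller that already holds the relevant forcewake might poll for
 * an idle bit with a 500us atomic wait and no sleeping fallback:
 *
 *	u32 val;
 *	int err = __intel_wait_for_register_fw(uncore, SOME_STATUS_REG,
 *					       BUSY_BIT, 0, 500, 0, &val);
 *
 * A non-zero err means -ETIMEDOUT; val holds the last value read either way.
 */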

/**
 * __intel_wait_for_register - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold the register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 reg_value;
	int ret;

	might_sleep_if(slow_timeout_ms);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	ret = __intel_wait_for_register_fw(uncore,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
								       reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	if (out_value)
		*out_value = reg_value;

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret;

	if (!uncore->debug)
		return false;

	spin_lock_irq(&uncore->debug->lock);
	ret = check_for_unclaimed_mmio(uncore);
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
	bool ret = false;

	if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug))
		return false;

	spin_lock_irq(&uncore->debug->lock);

	if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
		goto out;

	if (unlikely(check_for_unclaimed_mmio(uncore))) {
		if (!uncore->i915->params.mmio_debug) {
			drm_dbg(&uncore->i915->drm,
				"Unclaimed register detected, "
				"enabling oneshot unclaimed register reporting. "
				"Please use i915.mmio_debug=N for more information.\n");
			uncore->i915->params.mmio_debug++;
		}
		uncore->debug->unclaimed_mmio_check--;
		ret = true;
	}

out:
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 *				    a register
 * @uncore: pointer to struct intel_uncore
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains required to be taken with for example
 * intel_uncore_forcewake_get for the specified register to be accessible in the
 * specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
 * callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	drm_WARN_ON(&uncore->i915->drm, !op);

	if (!intel_uncore_has_forcewake(uncore))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = uncore->funcs.read_fw_domains(uncore, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);

	drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);

	return fw_domains;
}
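
/*
 * Illustrative usage sketch: bracketing a burst of raw accesses with exactly
 * the domains a register needs, rather than FORCEWAKE_ALL:
 *
 *	enum forcewake_domains fw =
 *		intel_uncore_forcewake_for_reg(uncore, reg,
 *					       FW_REG_READ | FW_REG_WRITE);
 *
 *	intel_uncore_forcewake_get(uncore, fw);
 *	... raw mmio accesses to reg ...
 *	intel_uncore_forcewake_put(uncore, fw);
 */
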
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif