Lines matching defs:uncore (drivers/gpu/drm/i915/intel_uncore.c)

42 fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
44 uncore->fw_get_funcs->force_wake_get(uncore, fw_domains);
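The single call through uncore->fw_get_funcs is the dispatch point for the forcewake acquire strategy, which is selected once at init time. A minimal standalone sketch of that function-pointer indirection; everything here (get_normal, normal_funcs) is an invented stand-in, not driver code:

#include <stdio.h>

enum fw_domains { FW_RENDER = 1 << 0, FW_GT = 1 << 1 };

struct uncore;

struct fw_get_funcs {
        void (*force_wake_get)(struct uncore *u, unsigned int domains);
};

struct uncore {
        const struct fw_get_funcs *fw_get_funcs;
};

static void get_normal(struct uncore *u, unsigned int domains)
{
        (void)u;
        printf("normal get: mask 0x%x\n", domains);
}

static const struct fw_get_funcs normal_funcs = {
        .force_wake_get = get_normal,
};

/* Mirrors fw_domains_get(): one indirection point, bound at init time. */
static void fw_domains_get(struct uncore *u, unsigned int domains)
{
        u->fw_get_funcs->force_wake_get(u, domains);
}

int main(void)
{
        struct uncore u = { .fw_get_funcs = &normal_funcs };

        fw_domains_get(&u, FW_RENDER | FW_GT);
        return 0;
}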
53 i915->uncore.debug = &i915->mmio_debug;
56 static void mmio_debug_suspend(struct intel_uncore *uncore)
58 if (!uncore->debug)
61 spin_lock(&uncore->debug->lock);
64 if (!uncore->debug->suspend_count++) {
65 uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check;
66 uncore->debug->unclaimed_mmio_check = 0;
69 spin_unlock(&uncore->debug->lock);
72 static bool check_for_unclaimed_mmio(struct intel_uncore *uncore);
74 static void mmio_debug_resume(struct intel_uncore *uncore)
76 if (!uncore->debug)
79 spin_lock(&uncore->debug->lock);
81 if (!--uncore->debug->suspend_count)
82 uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check;
84 if (check_for_unclaimed_mmio(uncore))
85 drm_info(&uncore->i915->drm,
88 spin_unlock(&uncore->debug->lock);
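Lines 56-88 implement a refcounted suspend/resume bracket: only the first suspender saves and clears unclaimed_mmio_check, and only the last resumer restores it. A minimal userspace sketch of the pattern, assuming a single lock guards both the counter and the saved flag (a pthread mutex stands in for the spinlock):

#include <pthread.h>
#include <stdio.h>

struct mmio_debug {
        pthread_mutex_t lock;
        int suspend_count;
        int unclaimed_mmio_check;       /* 0 = checking disabled */
        int saved_mmio_check;
};

static void debug_suspend(struct mmio_debug *dbg)
{
        pthread_mutex_lock(&dbg->lock);
        /* Only the first suspender saves and clears the check flag. */
        if (!dbg->suspend_count++) {
                dbg->saved_mmio_check = dbg->unclaimed_mmio_check;
                dbg->unclaimed_mmio_check = 0;
        }
        pthread_mutex_unlock(&dbg->lock);
}

static void debug_resume(struct mmio_debug *dbg)
{
        pthread_mutex_lock(&dbg->lock);
        /* Only the last resumer restores the saved flag. */
        if (!--dbg->suspend_count)
                dbg->unclaimed_mmio_check = dbg->saved_mmio_check;
        pthread_mutex_unlock(&dbg->lock);
}

int main(void)
{
        struct mmio_debug dbg = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .unclaimed_mmio_check = 1,
        };

        debug_suspend(&dbg);
        debug_suspend(&dbg);    /* nested: flag stays saved */
        debug_resume(&dbg);
        debug_resume(&dbg);     /* flag restored to 1 here */
        printf("check = %d\n", dbg.unclaimed_mmio_check);
        return 0;
}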
136 if (GRAPHICS_VER(d->uncore->i915) >= 12)
145 GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
146 d->uncore->fw_domains_timer |= d->mask;
184 drm_err(&d->uncore->i915->drm,
188 drm_err(&d->uncore->i915->drm,
192 add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
237 drm_dbg(&d->uncore->i915->drm,
267 drm_err(&d->uncore->i915->drm,
270 add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
291 fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
296 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
298 for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
303 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
306 uncore->fw_domains_active |= fw_domains;
310 fw_domains_get_with_fallback(struct intel_uncore *uncore,
316 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
318 for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
323 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
326 uncore->fw_domains_active |= fw_domains;
330 fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
335 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
337 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
340 uncore->fw_domains_active &= ~fw_domains;
344 fw_domains_reset(struct intel_uncore *uncore,
353 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
355 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
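The get/put pair above tracks which domains are awake by setting and clearing bits in fw_domains_active, with GEM_BUG_ON rejecting any mask outside the domains the device actually has. A sketch of that bitmask bookkeeping, with assert() standing in for GEM_BUG_ON and the per-domain ack waits elided:

#include <assert.h>
#include <stdio.h>

struct uncore {
        unsigned int fw_domains;        /* domains present on this device */
        unsigned int fw_domains_active; /* domains currently awake */
};

static void fw_domains_get(struct uncore *u, unsigned int mask)
{
        assert(!(mask & ~u->fw_domains)); /* like GEM_BUG_ON() above */
        /* per-domain wait-for-ack would happen here */
        u->fw_domains_active |= mask;
}

static void fw_domains_put(struct uncore *u, unsigned int mask)
{
        assert(!(mask & ~u->fw_domains));
        u->fw_domains_active &= ~mask;
}

int main(void)
{
        struct uncore u = { .fw_domains = 0x3 };

        fw_domains_get(&u, 0x1);
        fw_domains_get(&u, 0x2);
        fw_domains_put(&u, 0x1);
        printf("active = 0x%x\n", u.fw_domains_active); /* prints 0x2 */
        return 0;
}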
359 static inline u32 gt_thread_status(struct intel_uncore *uncore)
363 val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
369 static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
375 drm_WARN_ONCE(&uncore->i915->drm,
376 wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
380 static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
383 fw_domains_get_normal(uncore, fw_domains);
386 __gen6_gt_wait_for_thread_c0(uncore);
389 static inline u32 fifo_free_entries(struct intel_uncore *uncore)
391 u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
396 static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
402 if (IS_VALLEYVIEW(uncore->i915))
403 n = fifo_free_entries(uncore);
405 n = uncore->fifo_count;
408 if (wait_for_atomic((n = fifo_free_entries(uncore)) >
411 drm_dbg(&uncore->i915->drm,
417 uncore->fifo_count = n - 1;
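__gen6_gt_wait_for_fifo keeps a cached credit count in uncore->fifo_count and only re-reads GTFIFOCTL once the cache drops to the reserved threshold. A standalone sketch of the cached-credit pattern; read_free_entries() and FIFO_RESERVED are invented stand-ins, and the real slow path bounds the wait with a timeout rather than spinning forever:

#include <stdio.h>

#define FIFO_RESERVED 2

static unsigned int hw_free = 8; /* stand-in for the GTFIFOCTL count field */

static unsigned int read_free_entries(void)
{
        return hw_free;
}

struct uncore {
        unsigned int fifo_count;
};

static void wait_for_fifo(struct uncore *u)
{
        unsigned int n = u->fifo_count;

        if (n <= FIFO_RESERVED) {
                /* slow path: poll the hardware until entries free up */
                while ((n = read_free_entries()) <= FIFO_RESERVED)
                        ; /* real code bounds this wait and warns on timeout */
        }
        u->fifo_count = n - 1; /* the upcoming write consumes one entry */
}

int main(void)
{
        struct uncore u = { .fifo_count = read_free_entries() };

        for (int i = 0; i < 5; i++)
                wait_for_fifo(&u);
        printf("cached credits left: %u\n", u.fifo_count);
        return 0;
}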
425 struct intel_uncore *uncore = domain->uncore;
428 assert_rpm_device_not_suspended(uncore->rpm);
433 spin_lock_irqsave(&uncore->lock, irqflags);
435 uncore->fw_domains_timer &= ~domain->mask;
439 fw_domains_put(uncore, domain->mask);
441 spin_unlock_irqrestore(&uncore->lock, irqflags);
448 intel_uncore_forcewake_reset(struct intel_uncore *uncore)
457 /* Hold uncore.lock across reset to prevent any register access
466 for_each_fw_domain(domain, uncore, tmp) {
474 spin_lock_irqsave(&uncore->lock, irqflags);
476 for_each_fw_domain(domain, uncore, tmp) {
485 drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
489 spin_unlock_irqrestore(&uncore->lock, irqflags);
493 drm_WARN_ON(&uncore->i915->drm, active_domains);
495 fw = uncore->fw_domains_active;
497 fw_domains_put(uncore, fw);
499 fw_domains_reset(uncore, uncore->fw_domains);
500 assert_forcewakes_inactive(uncore);
502 spin_unlock_irqrestore(&uncore->lock, irqflags);
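intel_uncore_forcewake_reset retries while any domain's auto-release timer is still in flight, dropping uncore->lock between attempts so the timers can fire, and warns if they never drain. A toy version of that bounded retry loop; active_domains() merely simulates timers expiring one per iteration:

#include <stdio.h>

#define RESET_RETRIES 10

static unsigned int pending = 3; /* stand-in for timers yet to expire */

static unsigned int active_domains(void)
{
        if (pending)
                pending--;      /* pretend one timer fires per attempt */
        return pending;
}

static void forcewake_reset(void)
{
        int retry = RESET_RETRIES;
        unsigned int active;

        while ((active = active_domains()) && --retry)
                ; /* real code drops uncore->lock here so timers can run */

        if (active)
                fprintf(stderr, "timed out waiting for forcewake timers\n");
        /* with no timers left, drop all held domains and zero the state */
}

int main(void)
{
        forcewake_reset();
        return 0;
}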
508 fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
512 dbg = __raw_uncore_read32(uncore, FPGA_DBG);
529 drm_err(&uncore->i915->drm,
532 __raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
538 vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
542 cer = __raw_uncore_read32(uncore, CLAIM_ER);
546 __raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
552 gen6_check_for_fifo_debug(struct intel_uncore *uncore)
556 fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
559 drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x%08x\n", fifodbg);
560 __raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
567 check_for_unclaimed_mmio(struct intel_uncore *uncore)
571 lockdep_assert_held(&uncore->debug->lock);
573 if (uncore->debug->suspend_count)
576 if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
577 ret |= fpga_check_for_unclaimed_mmio(uncore);
579 if (intel_uncore_has_dbg_unclaimed(uncore))
580 ret |= vlv_check_for_unclaimed_mmio(uncore);
582 if (intel_uncore_has_fifo(uncore))
583 ret |= gen6_check_for_fifo_debug(uncore);
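check_for_unclaimed_mmio ORs together the per-platform checkers, each of which reads a sticky debug register and writes the clearing value only when something latched. A sketch of one such check-then-clear step; the register is simulated with a plain variable:

#include <stdbool.h>
#include <stdio.h>

static unsigned int dbg_reg = 0x4; /* pretend an unclaimed access latched */

static bool check_dbg(void)
{
        unsigned int v = dbg_reg;

        if (!v)
                return false;
        dbg_reg = 0;    /* write-to-clear in the real hardware */
        return true;
}

static bool check_for_unclaimed(void)
{
        bool ret = false;

        /* real code gates each checker on a capability flag */
        ret |= check_dbg();
        return ret;
}

int main(void)
{
        /* first call reports and clears; second finds nothing */
        printf("first check: %d, second: %d\n",
               check_for_unclaimed(), check_for_unclaimed());
        return 0;
}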
588 static void forcewake_early_sanitize(struct intel_uncore *uncore,
591 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
594 if (IS_CHERRYVIEW(uncore->i915)) {
595 __raw_uncore_write32(uncore, GTFIFOCTL,
596 __raw_uncore_read32(uncore, GTFIFOCTL) |
602 intel_uncore_forcewake_reset(uncore);
604 spin_lock_irq(&uncore->lock);
605 fw_domains_get(uncore, restore_forcewake);
607 if (intel_uncore_has_fifo(uncore))
608 uncore->fifo_count = fifo_free_entries(uncore);
609 spin_unlock_irq(&uncore->lock);
614 void intel_uncore_suspend(struct intel_uncore *uncore)
616 if (!intel_uncore_has_forcewake(uncore))
621 &uncore->pmic_bus_access_nb);
622 uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
626 void intel_uncore_resume_early(struct intel_uncore *uncore)
630 if (intel_uncore_unclaimed_mmio(uncore))
631 drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");
633 if (!intel_uncore_has_forcewake(uncore))
636 restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
637 forcewake_early_sanitize(uncore, restore_forcewake);
639 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
642 void intel_uncore_runtime_resume(struct intel_uncore *uncore)
644 if (!intel_uncore_has_forcewake(uncore))
647 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
650 static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
656 fw_domains &= uncore->fw_domains;
658 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
666 fw_domains_get(uncore, fw_domains);
671 * @uncore: the intel_uncore structure
682 void intel_uncore_forcewake_get(struct intel_uncore *uncore,
687 if (!uncore->fw_get_funcs)
690 assert_rpm_wakelock_held(uncore->rpm);
692 spin_lock_irqsave(&uncore->lock, irqflags);
693 __intel_uncore_forcewake_get(uncore, fw_domains);
694 spin_unlock_irqrestore(&uncore->lock, irqflags);
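Underneath intel_uncore_forcewake_get, each domain carries a wake_count so that only the 0 -> 1 transition touches hardware; nested gets just bump the counter, and the matching put either releases or (in the delayed path) arms the auto-release timer. A sketch of that per-domain refcount, with printf marking where hardware access would occur:

#include <stdio.h>

struct fw_domain {
        unsigned int wake_count;
};

static void domain_get(struct fw_domain *d)
{
        if (!d->wake_count++)
                printf("hw wake\n");    /* fw_domains_get() in the real code */
}

static void domain_put(struct fw_domain *d)
{
        if (!--d->wake_count)
                printf("hw release\n"); /* or arm the auto-release timer */
}

int main(void)
{
        struct fw_domain d = { 0 };

        domain_get(&d);
        domain_get(&d); /* nested: no hardware access */
        domain_put(&d);
        domain_put(&d); /* last put releases */
        return 0;
}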
699 * @uncore: the intel_uncore structure
705 void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
707 spin_lock_irq(&uncore->lock);
708 if (!uncore->user_forcewake_count++) {
709 intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
710 mmio_debug_suspend(uncore);
712 spin_unlock_irq(&uncore->lock);
717 * @uncore: the intel_uncore structure
722 void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
724 spin_lock_irq(&uncore->lock);
725 if (!--uncore->user_forcewake_count) {
726 mmio_debug_resume(uncore);
727 intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
729 spin_unlock_irq(&uncore->lock);
734 * @uncore: the intel_uncore structure
738 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
740 void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
743 lockdep_assert_held(&uncore->lock);
745 if (!uncore->fw_get_funcs)
748 __intel_uncore_forcewake_get(uncore, fw_domains);
751 static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
758 fw_domains &= uncore->fw_domains;
760 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
769 !(domain->uncore->fw_domains_timer & domain->mask))
772 fw_domains_put(uncore, domain->mask);
778 * @uncore: the intel_uncore structure
784 void intel_uncore_forcewake_put(struct intel_uncore *uncore,
789 if (!uncore->fw_get_funcs)
792 spin_lock_irqsave(&uncore->lock, irqflags);
793 __intel_uncore_forcewake_put(uncore, fw_domains, false);
794 spin_unlock_irqrestore(&uncore->lock, irqflags);
797 void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
802 if (!uncore->fw_get_funcs)
805 spin_lock_irqsave(&uncore->lock, irqflags);
806 __intel_uncore_forcewake_put(uncore, fw_domains, true);
807 spin_unlock_irqrestore(&uncore->lock, irqflags);
812 * @uncore: the intel_uncore structure
815 void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
821 if (!uncore->fw_get_funcs)
824 fw_domains &= uncore->fw_domains;
825 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
834 * @uncore: the intel_uncore structure
838 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
840 void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
843 lockdep_assert_held(&uncore->lock);
845 if (!uncore->fw_get_funcs)
848 __intel_uncore_forcewake_put(uncore, fw_domains, false);
851 void assert_forcewakes_inactive(struct intel_uncore *uncore)
853 if (!uncore->fw_get_funcs)
856 drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
858 uncore->fw_domains_active);
861 void assert_forcewakes_active(struct intel_uncore *uncore,
870 if (!uncore->fw_get_funcs)
873 spin_lock_irq(&uncore->lock);
875 assert_rpm_wakelock_held(uncore->rpm);
877 fw_domains &= uncore->fw_domains;
878 drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
880 fw_domains, fw_domains & ~uncore->fw_domains_active);
886 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
890 if (uncore->fw_domains_timer & domain->mask)
893 if (drm_WARN(&uncore->i915->drm, actual < expect,
899 spin_unlock_irq(&uncore->lock);
941 find_fw_domain(struct intel_uncore *uncore, u32 offset)
946 offset += uncore->gsi_offset;
949 uncore->fw_domains_table,
950 uncore->fw_domains_table_entries,
962 return uncore->fw_domains;
964 drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
966 entry->domains & ~uncore->fw_domains, offset);
1158 static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
1160 if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
1164 offset += uncore->gsi_offset;
1167 uncore->shadowed_reg_table,
1168 uncore->shadowed_reg_table_entries,
1173 gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
1178 #define __fwtable_reg_read_fw_domains(uncore, offset) \
1182 __fwd = find_fw_domain(uncore, offset); \
1186 #define __fwtable_reg_write_fw_domains(uncore, offset) \
1190 if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
1191 __fwd = find_fw_domain(uncore, __offset); \
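find_fw_domain and is_shadowed both resolve an MMIO offset against a table (fw_domains_table, shadowed_reg_table) kept sorted by start offset. A standalone sketch of that lookup using bsearch(3); the table contents here are invented:

#include <stdio.h>
#include <stdlib.h>

struct fw_range {
        unsigned int start, end, domains;
};

static const struct fw_range table[] = {
        { 0x0000, 0x1fff, 0x0 }, /* no forcewake needed */
        { 0x2000, 0x3fff, 0x1 },
        { 0x4000, 0x5fff, 0x2 },
};

static int cmp(const void *key, const void *elt)
{
        unsigned int offset = *(const unsigned int *)key;
        const struct fw_range *r = elt;

        if (offset < r->start)
                return -1;
        if (offset > r->end)
                return 1;
        return 0;
}

static unsigned int lookup_domains(unsigned int offset)
{
        const struct fw_range *r =
                bsearch(&offset, table, sizeof(table) / sizeof(table[0]),
                        sizeof(table[0]), cmp);

        return r ? r->domains : 0;
}

int main(void)
{
        printf("0x2004 -> domains 0x%x\n", lookup_domains(0x2004));
        return 0;
}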
1263 GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
1297 GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
1743 ilk_dummy_write(struct intel_uncore *uncore)
1748 __raw_uncore_write32(uncore, RING_MI_MODE(RENDER_RING_BASE), 0);
1752 __unclaimed_reg_debug(struct intel_uncore *uncore,
1756 if (drm_WARN(&uncore->i915->drm,
1757 check_for_unclaimed_mmio(uncore),
1762 uncore->i915->params.mmio_debug--;
1766 __unclaimed_previous_reg_debug(struct intel_uncore *uncore,
1770 if (check_for_unclaimed_mmio(uncore))
1771 drm_dbg(&uncore->i915->drm,
1778 unclaimed_reg_debug_header(struct intel_uncore *uncore,
1781 if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug)
1784 /* interrupts are disabled and re-enabled around uncore->lock usage */
1785 lockdep_assert_held(&uncore->lock);
1787 spin_lock(&uncore->debug->lock);
1788 __unclaimed_previous_reg_debug(uncore, reg, read);
1794 unclaimed_reg_debug_footer(struct intel_uncore *uncore,
1797 /* interrupts are disabled and re-enabled around uncore->lock usage */
1798 lockdep_assert_held(&uncore->lock);
1800 __unclaimed_reg_debug(uncore, reg, read);
1801 spin_unlock(&uncore->debug->lock);
1806 vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1807 u##x val = __raw_uncore_read##x(uncore, reg); \
1818 assert_rpm_wakelock_held(uncore->rpm);
1826 gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1828 val = __raw_uncore_read##x(uncore, reg); \
1834 gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1836 ilk_dummy_write(uncore); \
1837 val = __raw_uncore_read##x(uncore, reg); \
1861 assert_rpm_wakelock_held(uncore->rpm); \
1862 spin_lock_irqsave(&uncore->lock, irqflags); \
1863 unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, true)
1867 unclaimed_reg_debug_footer(uncore, reg, true); \
1868 spin_unlock_irqrestore(&uncore->lock, irqflags); \
1872 static noinline void ___force_wake_auto(struct intel_uncore *uncore,
1878 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
1880 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
1883 fw_domains_get(uncore, fw_domains);
1886 static inline void __force_wake_auto(struct intel_uncore *uncore,
1892 fw_domains &= uncore->fw_domains;
1893 fw_domains &= ~uncore->fw_domains_active;
1896 ___force_wake_auto(uncore, fw_domains);
1901 fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
1905 fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
1907 __force_wake_auto(uncore, fw_engine); \
1908 val = __raw_uncore_read##x(uncore, reg); \
1913 fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) {
1914 return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
1928 assert_rpm_wakelock_held(uncore->rpm); \
1934 gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1936 __raw_uncore_write##x(uncore, reg, val); \
1942 gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1944 ilk_dummy_write(uncore); \
1945 __raw_uncore_write##x(uncore, reg, val); \
1967 assert_rpm_wakelock_held(uncore->rpm); \
1968 spin_lock_irqsave(&uncore->lock, irqflags); \
1969 unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, false)
1973 unclaimed_reg_debug_footer(uncore, reg, false); \
1974 spin_unlock_irqrestore(&uncore->lock, irqflags)
1978 gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1981 __gen6_gt_wait_for_fifo(uncore); \
1982 __raw_uncore_write##x(uncore, reg, val); \
1991 fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1994 fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
1996 __force_wake_auto(uncore, fw_engine); \
1997 __raw_uncore_write##x(uncore, reg, val); \
2002 fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
2004 return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
2017 vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
2019 __raw_uncore_write##x(uncore, reg, val); \
2025 #define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
2027 (uncore)->funcs.mmio_writeb = x##_write8; \
2028 (uncore)->funcs.mmio_writew = x##_write16; \
2029 (uncore)->funcs.mmio_writel = x##_write32; \
2032 #define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
2034 (uncore)->funcs.mmio_readb = x##_read8; \
2035 (uncore)->funcs.mmio_readw = x##_read16; \
2036 (uncore)->funcs.mmio_readl = x##_read32; \
2037 (uncore)->funcs.mmio_readq = x##_read64; \
2040 #define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
2042 ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
2043 (uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
2046 #define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
2048 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
2049 (uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
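The ASSIGN_*_MMIO_VFUNCS macros use ## token pasting so a single invocation wires a whole family of generated accessors (x##_read8, x##_read32, ...) into the funcs vtable. A compilable miniature of the same trick, with invented names:

#include <stdio.h>

struct funcs {
        unsigned char (*read8)(unsigned int off);
        unsigned int  (*read32)(unsigned int off);
};

static unsigned char raw_read8(unsigned int off)
{
        (void)off;
        return 0x11;
}

static unsigned int raw_read32(unsigned int off)
{
        (void)off;
        return 0x11223344;
}

/* One macro assigns the whole accessor family for implementation "x". */
#define ASSIGN_READ_VFUNCS(f, x) do {   \
        (f)->read8  = x##_read8;        \
        (f)->read32 = x##_read32;       \
} while (0)

int main(void)
{
        struct funcs f;

        ASSIGN_READ_VFUNCS(&f, raw); /* expands to raw_read8 / raw_read32 */
        printf("0x%x\n", f.read32(0));
        return 0;
}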
2052 static int __fw_domain_init(struct intel_uncore *uncore,
2060 GEM_BUG_ON(uncore->fw_domain[domain_id]);
2062 if (i915_inject_probe_failure(uncore->i915))
2069 drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
2070 drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));
2072 d->uncore = uncore;
2074 d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset;
2075 d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset;
2101 uncore->fw_domains |= BIT(domain_id);
2105 uncore->fw_domain[domain_id] = d;
2110 static void fw_domain_fini(struct intel_uncore *uncore,
2117 d = fetch_and_zero(&uncore->fw_domain[domain_id]);
2121 uncore->fw_domains &= ~BIT(domain_id);
2122 drm_WARN_ON(&uncore->i915->drm, d->wake_count);
2123 drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
2127 static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
2132 for_each_fw_domain(d, uncore, tmp)
2133 fw_domain_fini(uncore, d->id);
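fw_domain_fini uses fetch_and_zero to take ownership of the domain pointer while simultaneously clearing the slot, so the teardown runs at most once per domain. A userspace equivalent of that kernel helper, written as a GCC statement expression like the original:

#include <stdio.h>

#define fetch_and_zero(ptr) ({                          \
        __typeof__(*(ptr)) __val = *(ptr);              \
        *(ptr) = (__typeof__(*(ptr)))0;                 \
        __val;                                          \
})

int main(void)
{
        static int domain = 7;
        int *slot = &domain;
        int *d = fetch_and_zero(&slot); /* take ownership, clear the slot */

        printf("value %d, slot now %p\n", *d, (void *)slot);
        return 0;
}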
2148 static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
2150 struct drm_i915_private *i915 = uncore->i915;
2153 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
2163 emask = uncore->gt->info.engine_mask;
2165 uncore->fw_get_funcs = &uncore_get_fallback;
2167 fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2171 fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2175 if (RCS_MASK(uncore->gt) || CCS_MASK(uncore->gt))
2176 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2184 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
2192 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
2197 if (uncore->gt->type == GT_MEDIA)
2198 fw_domain_init(uncore, FW_DOMAIN_ID_GSC,
2201 uncore->fw_get_funcs = &uncore_get_fallback;
2202 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2205 fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2208 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
2211 uncore->fw_get_funcs = &uncore_get_normal;
2212 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2214 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
2217 uncore->fw_get_funcs = &uncore_get_thread_status;
2218 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2232 uncore->fw_get_funcs = &uncore_get_thread_status;
2241 __raw_uncore_write32(uncore, FORCEWAKE, 0);
2242 __raw_posting_read(uncore, ECOBUS);
2244 ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2249 spin_lock_irq(&uncore->lock);
2250 fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
2251 ecobus = __raw_uncore_read32(uncore, ECOBUS);
2252 fw_domains_put(uncore, FORCEWAKE_RENDER);
2253 spin_unlock_irq(&uncore->lock);
2258 fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
2259 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2263 uncore->fw_get_funcs = &uncore_get_thread_status;
2264 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2271 drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);
2275 intel_uncore_fw_domains_fini(uncore);
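Lines 2232-2264 probe at runtime whether multithreaded forcewake actually works on IVB: grab the render domain, read ECOBUS, and if the MT bit did not stick, tear the domain down and re-register the legacy implementation. A toy of that probe-then-fallback selection; the probe here is simulated:

#include <stdbool.h>
#include <stdio.h>

static bool hw_supports_mt; /* the simulated hardware capability */

static bool probe_mt_forcewake(void)
{
        /* stands in for "enable MT forcewake, then read back ECOBUS" */
        return hw_supports_mt;
}

static void mt_get(void)     { puts("mt forcewake get"); }
static void legacy_get(void) { puts("legacy forcewake get"); }

int main(void)
{
        /* bind the implementation once, based on what the probe saw */
        void (*get)(void) = probe_mt_forcewake() ? mt_get : legacy_get;

        get();
        return 0;
}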
2280 #define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
2282 (uncore)->fw_domains_table = \
2284 (uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
2287 #define ASSIGN_SHADOW_TABLE(uncore, d) \
2289 (uncore)->shadowed_reg_table = d; \
2290 (uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \
2296 struct intel_uncore *uncore = container_of(nb,
2314 disable_rpm_wakeref_asserts(uncore->rpm);
2315 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
2316 enable_rpm_wakeref_asserts(uncore->rpm);
2319 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
2331 int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
2333 struct drm_i915_private *i915 = uncore->i915;
2353 uncore->regs = ioremap(phys_addr, mmio_size);
2354 if (uncore->regs == NULL) {
2360 (void __force *)uncore->regs);
2363 void intel_uncore_init_early(struct intel_uncore *uncore,
2366 spin_lock_init(&uncore->lock);
2367 uncore->i915 = gt->i915;
2368 uncore->gt = gt;
2369 uncore->rpm = &gt->i915->runtime_pm;
2372 static void uncore_raw_init(struct intel_uncore *uncore)
2374 GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
2376 if (intel_vgpu_active(uncore->i915)) {
2377 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
2378 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
2379 } else if (GRAPHICS_VER(uncore->i915) == 5) {
2380 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
2381 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
2383 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
2384 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
2388 static int uncore_media_forcewake_init(struct intel_uncore *uncore)
2390 struct drm_i915_private *i915 = uncore->i915;
2393 ASSIGN_FW_DOMAINS_TABLE(uncore, __xelpmp_fw_ranges);
2394 ASSIGN_SHADOW_TABLE(uncore, xelpmp_shadowed_regs);
2395 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2404 static int uncore_forcewake_init(struct intel_uncore *uncore)
2406 struct drm_i915_private *i915 = uncore->i915;
2409 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
2411 ret = intel_uncore_fw_domains_init(uncore);
2414 forcewake_early_sanitize(uncore, 0);
2416 ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
2418 if (uncore->gt->type == GT_MEDIA)
2419 return uncore_media_forcewake_init(uncore);
2422 ASSIGN_FW_DOMAINS_TABLE(uncore, __mtl_fw_ranges);
2423 ASSIGN_SHADOW_TABLE(uncore, mtl_shadowed_regs);
2424 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2426 ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
2427 ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
2428 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2430 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
2431 ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
2432 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2434 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
2435 ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
2436 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2438 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
2439 ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2440 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2442 ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
2443 ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2444 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2446 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
2447 ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2448 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2450 ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
2451 ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
2453 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
2454 ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
2457 uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
2458 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
2463 static int sanity_check_mmio_access(struct intel_uncore *uncore)
2465 struct drm_i915_private *i915 = uncore->i915;
2484 #define COND (__raw_uncore_read32(uncore, FORCEWAKE_MT) != ~0)
2493 int intel_uncore_init_mmio(struct intel_uncore *uncore)
2495 struct drm_i915_private *i915 = uncore->i915;
2498 ret = sanity_check_mmio_access(uncore);
2509 !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
2515 uncore->flags |= UNCORE_HAS_FORCEWAKE;
2517 if (!intel_uncore_has_forcewake(uncore)) {
2518 uncore_raw_init(uncore);
2520 ret = uncore_forcewake_init(uncore);
2526 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs);
2527 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
2528 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
2531 uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
2534 uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;
2537 uncore->flags |= UNCORE_HAS_FIFO;
2540 if (intel_uncore_unclaimed_mmio(uncore))
2541 drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");
2551 void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
2554 enum forcewake_domains fw_domains = uncore->fw_domains;
2558 if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
2575 if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 55) && i % 2 == 0) {
2584 fw_domain_fini(uncore, domain_id);
2594 fw_domain_fini(uncore, domain_id);
2598 fw_domain_fini(uncore, FW_DOMAIN_ID_GSC);
2614 static void driver_initiated_flr(struct intel_uncore *uncore)
2616 struct drm_i915_private *i915 = uncore->i915;
2631 ret = intel_wait_for_register_fw(uncore, GU_CNTL, DRIVERFLR, 0, flr_timeout_ms);
2638 intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
2641 intel_uncore_rmw_fw(uncore, GU_CNTL, 0, DRIVERFLR);
2644 ret = intel_wait_for_register_fw(uncore, GU_CNTL,
2653 ret = intel_wait_for_register_fw(uncore, GU_DEBUG,
2662 intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
2668 struct intel_uncore *uncore = data;
2670 if (intel_uncore_has_forcewake(uncore)) {
2673 &uncore->pmic_bus_access_nb);
2674 intel_uncore_forcewake_reset(uncore);
2675 intel_uncore_fw_domains_fini(uncore);
2679 if (intel_uncore_needs_flr_on_fini(uncore))
2680 driver_initiated_flr(uncore);
2685 * @uncore: the struct intel_uncore
2696 * (intel_uncore_read_fw(uncore, reg) & mask) == value
2709 int __intel_wait_for_register_fw(struct intel_uncore *uncore,
2718 #define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
2741 * @uncore: the struct intel_uncore
2752 * (intel_uncore_read(uncore, reg) & mask) == value
2758 int __intel_wait_for_register(struct intel_uncore *uncore,
2767 intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
2773 spin_lock_irq(&uncore->lock);
2774 intel_uncore_forcewake_get__locked(uncore, fw);
2776 ret = __intel_wait_for_register_fw(uncore,
2780 intel_uncore_forcewake_put__locked(uncore, fw);
2781 spin_unlock_irq(&uncore->lock);
2784 ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
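Both wait helpers reduce to polling until (read(reg) & mask) == value, first in a tight atomic loop and then in a sleeping one. A sketch of the core poll loop, with a simple retry bound in place of the driver's two-phase us/ms timeout; the register read is simulated:

#include <stdio.h>

static unsigned int fake_reg;

static unsigned int read_reg(void)
{
        fake_reg++;             /* simulate hardware making progress */
        return fake_reg;
}

static int wait_for_register(unsigned int mask, unsigned int value,
                             unsigned int *out, int retries)
{
        unsigned int v;

        while (retries--) {
                v = read_reg();
                if ((v & mask) == value) {
                        if (out)
                                *out = v; /* report the final value read */
                        return 0;
                }
        }
        return -1; /* -ETIMEDOUT in the real code */
}

int main(void)
{
        unsigned int v;

        if (!wait_for_register(0xf, 0x8, &v, 100))
                printf("condition met, reg = 0x%x\n", v);
        return 0;
}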
2798 bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
2802 if (!uncore->debug)
2805 spin_lock_irq(&uncore->debug->lock);
2806 ret = check_for_unclaimed_mmio(uncore);
2807 spin_unlock_irq(&uncore->debug->lock);
2813 intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
2817 if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug))
2820 spin_lock_irq(&uncore->debug->lock);
2822 if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
2825 if (unlikely(check_for_unclaimed_mmio(uncore))) {
2826 if (!uncore->i915->params.mmio_debug) {
2827 drm_dbg(&uncore->i915->drm,
2831 uncore->i915->params.mmio_debug++;
2833 uncore->debug->unclaimed_mmio_check--;
2838 spin_unlock_irq(&uncore->debug->lock);
2846 * @uncore: pointer to struct intel_uncore
2858 intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
2863 drm_WARN_ON(&uncore->i915->drm, !op);
2865 if (!intel_uncore_has_forcewake(uncore))
2869 fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
2872 fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
2874 drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);