Lines matching refs:uncore in drivers/gpu/drm/i915/intel_uncore.h (i915 uncore MMIO/forcewake API)

92 	void (*force_wake_get)(struct intel_uncore *uncore, enum forcewake_domains domains);
97 	enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore, i915_reg_t r);
99 	enum forcewake_domains (*write_fw_domains)(struct intel_uncore *uncore, i915_reg_t r);
102 	u8 (*mmio_readb)(struct intel_uncore *uncore, i915_reg_t r, bool trace);
104 	u16 (*mmio_readw)(struct intel_uncore *uncore, i915_reg_t r, bool trace);
106 	u32 (*mmio_readl)(struct intel_uncore *uncore, i915_reg_t r, bool trace);
108 	u64 (*mmio_readq)(struct intel_uncore *uncore, i915_reg_t r, bool trace);
111 	void (*mmio_writeb)(struct intel_uncore *uncore, i915_reg_t r, u8 val, bool trace);
113 	void (*mmio_writew)(struct intel_uncore *uncore, i915_reg_t r, u16 val, bool trace);
115 	void (*mmio_writel)(struct intel_uncore *uncore, i915_reg_t r, u32 val, bool trace);
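These pointers form the per-platform MMIO vtable of struct intel_uncore_funcs; the generated accessors further down (lines 358/365) dispatch through them. A minimal sketch of that dispatch, assuming only the call shape visible in this listing:

/* Sketch only: how a 32-bit read funnels through the vtable; "reg" is
 * any i915_reg_t. The real entry point is intel_uncore_read() below.
 */
static inline u32 example_vtable_readl(struct intel_uncore *uncore, i915_reg_t reg)
{
	return uncore->funcs.mmio_readl(uncore, reg, true); /* true = trace */
}
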
180 struct intel_uncore *uncore;
204 intel_uncore_has_forcewake(const struct intel_uncore *uncore)
206 return uncore->flags & UNCORE_HAS_FORCEWAKE;
210 intel_uncore_has_fpga_dbg_unclaimed(const struct intel_uncore *uncore)
212 return uncore->flags & UNCORE_HAS_FPGA_DBG_UNCLAIMED;
216 intel_uncore_has_dbg_unclaimed(const struct intel_uncore *uncore)
218 return uncore->flags & UNCORE_HAS_DBG_UNCLAIMED;
222 intel_uncore_has_fifo(const struct intel_uncore *uncore)
224 return uncore->flags & UNCORE_HAS_FIFO;
228 intel_uncore_needs_flr_on_fini(const struct intel_uncore *uncore)
230 return uncore->flags & UNCORE_NEEDS_FLR_ON_FINI;
234 intel_uncore_set_flr_on_fini(struct intel_uncore *uncore)
236 return uncore->flags |= UNCORE_NEEDS_FLR_ON_FINI;
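A hedged usage sketch for the flag predicates above: guard forcewake handling on platforms that lack it. FORCEWAKE_ALL is the catch-all domain mask from enum forcewake_domains; the body comment marks where real MMIO traffic would go.

/* Sketch: skip the forcewake dance on platforms without forcewake. */
static void example_guarded_access(struct intel_uncore *uncore)
{
	if (intel_uncore_has_forcewake(uncore))
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* ... MMIO accesses ... */

	if (intel_uncore_has_forcewake(uncore))
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
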
240 void intel_uncore_init_early(struct intel_uncore *uncore, struct intel_gt *gt);
242 int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr);
243 int intel_uncore_init_mmio(struct intel_uncore *uncore);
244 void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore, struct intel_gt *gt);
246 bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
247 bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
248 void intel_uncore_cleanup_mmio(struct intel_uncore *uncore);
250 void intel_uncore_suspend(struct intel_uncore *uncore);
251 void intel_uncore_resume_early(struct intel_uncore *uncore);
252 void intel_uncore_runtime_resume(struct intel_uncore *uncore);
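A rough ordering sketch for the lifecycle entry points above, as their names suggest; the actual call sites live in the i915 probe/remove paths and error handling is elided:

/* Sketch of probe-time ordering (error handling elided):
 *   intel_uncore_init_early(uncore, gt);        software-only early setup
 *   intel_uncore_setup_mmio(uncore, phys_addr); map the register BAR
 *   intel_uncore_init_mmio(uncore);             probe forcewake domains, pick accessors
 * and on the way down:
 *   intel_uncore_cleanup_mmio(uncore);          unmap
 * with intel_uncore_suspend()/resume_early()/runtime_resume() bracketing
 * power-management transitions in between.
 */
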
254 void assert_forcewakes_inactive(struct intel_uncore *uncore);
255 void assert_forcewakes_active(struct intel_uncore *uncore, enum forcewake_domains fw_domains);
260 enum forcewake_domains intel_uncore_forcewake_for_reg(struct intel_uncore *uncore, i915_reg_t reg, unsigned int op);
265 void intel_uncore_forcewake_get(struct intel_uncore *uncore, enum forcewake_domains domains);
267 void intel_uncore_forcewake_put(struct intel_uncore *uncore, enum forcewake_domains domains);
269 void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore, enum forcewake_domains domains);
271 void intel_uncore_forcewake_flush(struct intel_uncore *uncore, enum forcewake_domains fw_domains);
275 * Like above but the caller must manage the uncore.lock itself.
278 void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore, enum forcewake_domains domains);
280 void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore, enum forcewake_domains domains);
283 void intel_uncore_forcewake_user_get(struct intel_uncore *uncore);
284 void intel_uncore_forcewake_user_put(struct intel_uncore *uncore);
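Typical usage brackets a burst of _fw accesses with a get/put pair, using intel_uncore_forcewake_for_reg() to wake only the domains a given register needs; this mirrors the pattern intel_uncore_read64_2x32() below uses. A sketch:

/* Sketch: wake exactly the domains a register needs, read, release. */
static u32 example_fw_read(struct intel_uncore *uncore, i915_reg_t reg)
{
	enum forcewake_domains fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 val;

	intel_uncore_forcewake_get(uncore, fw);
	val = intel_uncore_read_fw(uncore, reg);
	intel_uncore_forcewake_put(uncore, fw);

	return val;
}
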
286 int __intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t reg, u32 mask, u32 value, unsigned int fast_timeout_us, unsigned int slow_timeout_ms, u32 *out_value);
294 intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t reg, u32 mask, u32 value, unsigned int timeout_ms)
300 	return __intel_wait_for_register(uncore, reg, mask, value, 2, timeout_ms, NULL);
304 int __intel_wait_for_register_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 mask, u32 value, unsigned int fast_timeout_us, unsigned int slow_timeout_ms, u32 *out_value);
312 intel_wait_for_register_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 mask, u32 value, unsigned int timeout_ms)
318 	return __intel_wait_for_register_fw(uncore, reg, mask, value, 2, timeout_ms, NULL);
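In the wrappers above, the hard-coded 2 is the fast, atomic poll budget in microseconds before the helper falls back to a sleeping wait of up to timeout_ms; passing NULL discards the final register value. A sketch, where EXAMPLE_STATUS_REG and EXAMPLE_READY are placeholders, not real i915 names:

/* Sketch: poll until a (placeholder) ready bit asserts, up to 100 ms. */
static int example_wait_ready(struct intel_uncore *uncore)
{
	return intel_wait_for_register(uncore, EXAMPLE_STATUS_REG,
				       EXAMPLE_READY, EXAMPLE_READY,
				       100);
}
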
326 static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, i915_reg_t reg) \
331 offset += uncore->gsi_offset; \
332 return read##s__(uncore->regs + offset); \
336 static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, i915_reg_t reg, u##x__ val) \
341 offset += uncore->gsi_offset; \
342 write##s__(val, uncore->regs + offset); \
358 static inline u##x__ intel_uncore_##name__(struct intel_uncore *uncore, i915_reg_t reg) \
361 return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \
365 static inline void intel_uncore_##name__(struct intel_uncore *uncore, i915_reg_t reg, u##x__ val) \
368 uncore->funcs.mmio_write##s__(uncore, reg, val, (trace__)); \
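The __raw_* helpers above add only gsi_offset and touch the hardware directly, while the generated intel_uncore_<name> accessors dispatch through uncore->funcs and can handle forcewake and tracing. A usage sketch, with EXAMPLE_REG as a placeholder i915_reg_t:

/* Sketch: traced, forcewake-aware access vs. caller-managed _fw access. */
static void example_accessors(struct intel_uncore *uncore)
{
	u32 val = intel_uncore_read(uncore, EXAMPLE_REG);

	intel_uncore_write(uncore, EXAMPLE_REG, val | BIT(0));

	/* _fw variants assume the caller already holds the needed forcewake: */
	val = intel_uncore_read_fw(uncore, EXAMPLE_REG);
	(void)val;
}
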
387 * ... we do not support intel_uncore_write64, or uncore->funcs.mmio_writeq.
412 * spin_lock_irq(&uncore->lock);
418 * spin_unlock_irq(&uncore->lock);
427 * therefore generally be serialised, by either the dev_priv->uncore.lock or a more localised lock guarding all access to that bank of registers.
435 static inline u32 intel_uncore_rmw(struct intel_uncore *uncore, i915_reg_t reg, u32 clear, u32 set)
440 old = intel_uncore_read(uncore, reg);
442 intel_uncore_write(uncore, reg, val);
446 static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clear, u32 set)
451 old = intel_uncore_read_fw(uncore, reg);
454 intel_uncore_write_fw(uncore, reg, val);
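intel_uncore_rmw() computes (old & ~clear) | set, writes it back, and (in this version) returns the old value; intel_uncore_rmw_fw() is the same operation under caller-held forcewake. A sketch with placeholder masks:

/* Sketch: clear a (placeholder) field, then set a new value in its place. */
static void example_rmw(struct intel_uncore *uncore)
{
	u32 old = intel_uncore_rmw(uncore, EXAMPLE_REG,
				   EXAMPLE_FIELD_MASK,   /* bits to clear */
				   EXAMPLE_FIELD_VALUE); /* bits to set */
	(void)old; /* previous register value, if the caller wants it */
}
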
458 intel_uncore_read64_2x32(struct intel_uncore *uncore, i915_reg_t lower_reg, i915_reg_t upper_reg)
465 	fw_domains = intel_uncore_forcewake_for_reg(uncore, lower_reg, FW_REG_READ);
468 	fw_domains |= intel_uncore_forcewake_for_reg(uncore, upper_reg, FW_REG_READ);
471 spin_lock_irqsave(&uncore->lock, flags);
472 intel_uncore_forcewake_get__locked(uncore, fw_domains);
474 upper = intel_uncore_read_fw(uncore, upper_reg);
477 lower = intel_uncore_read_fw(uncore, lower_reg);
478 upper = intel_uncore_read_fw(uncore, upper_reg);
481 intel_uncore_forcewake_put__locked(uncore, fw_domains);
482 spin_unlock_irqrestore(&uncore->lock, flags);
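intel_uncore_read64_2x32() takes forcewake once under uncore->lock, then re-reads the upper half until it is stable, so a carry between the two 32-bit reads cannot corrupt the result. A usage sketch with a placeholder register pair:

/* Sketch: a 64-bit counter exposed as two (placeholder) 32-bit halves. */
static u64 example_read_counter(struct intel_uncore *uncore)
{
	return intel_uncore_read64_2x32(uncore, EXAMPLE_CTR_LOW,
					EXAMPLE_CTR_HIGH);
}
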
487 static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore, i915_reg_t reg, u32 val, u32 mask, u32 expected_val)
493 intel_uncore_write(uncore, reg, val);
494 reg_val = intel_uncore_read(uncore, reg);
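The helper writes val, reads the register back, and reports an error when (readback & mask) != expected_val, catching e.g. an enable bit the hardware silently dropped. A sketch with a placeholder bit:

/* Sketch: verify that a (placeholder) enable bit actually latched. */
static int example_enable(struct intel_uncore *uncore)
{
	return intel_uncore_write_and_verify(uncore, EXAMPLE_REG,
					     EXAMPLE_ENABLE,  /* val */
					     EXAMPLE_ENABLE,  /* mask */
					     EXAMPLE_ENABLE); /* expected */
}
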
499 static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore)
501 return uncore->regs;
506 * interrupt handlers so that the pointer indirection on uncore->regs can be computed once (and presumably cached in a register) instead of generating extra loads for each MMIO access.
512 * these macros do not account for uncore->gsi_offset. Any caller that needs to use these macros on a GSI register must add uncore->gsi_offset manually.
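The pattern those comments describe: fetch uncore->regs once via intel_uncore_regs(), then use the raw_reg_read()/raw_reg_write() macros defined nearby, applying gsi_offset by hand if a GSI register is involved. A sketch with a placeholder interrupt register:

/* Sketch of the IRQ-path idiom: one pointer load, raw accesses after. */
static u32 example_irq_ack(struct intel_uncore *uncore)
{
	void __iomem * const regs = intel_uncore_regs(uncore);
	u32 iir = raw_reg_read(regs, EXAMPLE_IIR);

	if (iir)
		raw_reg_write(regs, EXAMPLE_IIR, iir); /* ack by write-back */

	return iir;
}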