/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022-2023 Intel Corporation
 */
5
6#ifndef _XE_DEVICE_TYPES_H_
7#define _XE_DEVICE_TYPES_H_
8
9#include <linux/pci.h>
10
11#include <drm/drm_device.h>
12#include <drm/drm_file.h>
13#include <drm/ttm/ttm_device.h>
14
15#include "xe_devcoredump_types.h"
16#include "xe_heci_gsc.h"
17#include "xe_gt_types.h"
18#include "xe_lmtt_types.h"
19#include "xe_memirq_types.h"
20#include "xe_platform_types.h"
21#include "xe_pt_types.h"
22#include "xe_sriov_types.h"
23#include "xe_step_types.h"
24
25#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
26#include "soc/intel_pch.h"
27#include "intel_display_core.h"
28#include "intel_display_device.h"
29#endif
30
struct xe_ggtt;
struct xe_pat_ops;

/* Sentinel BO offset value meaning "no valid offset assigned". */
#define XE_BO_INVALID_OFFSET	LONG_MAX

/* Major IP version is the verx100 encoding divided by 100 (e.g. 1270 -> 12). */
#define GRAPHICS_VER(xe) ((xe)->info.graphics_verx100 / 100)
#define MEDIA_VER(xe) ((xe)->info.media_verx100 / 100)
#define GRAPHICS_VERx100(xe) ((xe)->info.graphics_verx100)
#define MEDIA_VERx100(xe) ((xe)->info.media_verx100)
#define IS_DGFX(xe) ((xe)->info.is_dgfx)
#define HAS_HECI_GSCFI(xe) ((xe)->info.has_heci_gscfi)

/* VRAM flag bits stored in xe_device.info.vram_flags. */
#define XE_VRAM_FLAGS_NEED64K		BIT(0)

#define XE_GT0		0
#define XE_GT1		1
#define XE_MAX_TILES_PER_DEVICE	(XE_GT1 + 1)

/* ASIDs are 20 bits wide; this is the exclusive upper bound. */
#define XE_MAX_ASID	(BIT(20))

/*
 * Match a platform whose graphics stepping lies in [min_step, max_step).
 * Note that max_step is exclusive.
 */
#define IS_PLATFORM_STEP(_xe, _platform, min_step, max_step)	\
	((_xe)->info.platform == (_platform) &&			\
	 (_xe)->info.step.graphics >= (min_step) &&		\
	 (_xe)->info.step.graphics < (max_step))
/* As IS_PLATFORM_STEP(), but additionally matching the subplatform. */
#define IS_SUBPLATFORM_STEP(_xe, _platform, sub, min_step, max_step)	\
	((_xe)->info.platform == (_platform) &&				\
	 (_xe)->info.subplatform == (sub) &&				\
	 (_xe)->info.step.graphics >= (min_step) &&			\
	 (_xe)->info.step.graphics < (max_step))

/*
 * Convert a tile pointer to its owning xe_device. _Generic is used so that
 * a const tile yields a const device pointer, preserving const-correctness.
 */
#define tile_to_xe(tile__)								\
	_Generic(tile__,								\
		 const struct xe_tile * : (const struct xe_device *)((tile__)->xe),	\
		 struct xe_tile * : (tile__)->xe)
65
66/**
67 * struct xe_mem_region - memory region structure
68 * This is used to describe a memory region in xe
69 * device, such as HBM memory or CXL extension memory.
70 */
struct xe_mem_region {
	/** @io_start: IO start address of this VRAM instance */
	resource_size_t io_start;
	/**
	 * @io_size: IO size of this VRAM instance
	 *
	 * This represents how much of this VRAM we can access
	 * via the CPU through the VRAM BAR. This can be smaller
	 * than @usable_size, in which case only part of VRAM is CPU
	 * accessible (typically the first 256M). This
	 * configuration is known as small-bar.
	 */
	resource_size_t io_size;
	/** @dpa_base: This memory region's DPA (device physical address) base */
	resource_size_t dpa_base;
	/**
	 * @usable_size: usable size of VRAM
	 *
	 * Usable size of VRAM excluding reserved portions
	 * (e.g stolen mem)
	 */
	resource_size_t usable_size;
	/**
	 * @actual_physical_size: Actual VRAM size
	 *
	 * Actual VRAM size including reserved portions
	 * (e.g stolen mem)
	 */
	resource_size_t actual_physical_size;
	/** @mapping: pointer to VRAM mappable space (CPU-visible, limited to @io_size) */
	void __iomem *mapping;
};
103
104/**
105 * struct xe_tile - hardware tile structure
106 *
107 * From a driver perspective, a "tile" is effectively a complete GPU, containing
108 * an SGunit, 1-2 GTs, and (for discrete platforms) VRAM.
109 *
110 * Multi-tile platforms effectively bundle multiple GPUs behind a single PCI
111 * device and designate one "root" tile as being responsible for external PCI
112 * communication.  PCI BAR0 exposes the GGTT and MMIO register space for each
113 * tile in a stacked layout, and PCI BAR2 exposes the local memory associated
114 * with each tile similarly.  Device-wide interrupts can be enabled/disabled
115 * at the root tile, and the MSTR_TILE_INTR register will report which tiles
116 * have interrupts that need servicing.
117 */
struct xe_tile {
	/** @xe: Backpointer to the xe_device this tile belongs to */
	struct xe_device *xe;

	/** @id: ID of the tile */
	u8 id;

	/**
	 * @primary_gt: Primary GT
	 */
	struct xe_gt *primary_gt;

	/**
	 * @media_gt: Media GT
	 *
	 * Only present on devices with media version >= 13.
	 */
	struct xe_gt *media_gt;

	/**
	 * @mmio: MMIO info for a tile.
	 *
	 * Each tile has its own 16MB space in BAR0, laid out as:
	 * * 0-4MB: registers
	 * * 4MB-8MB: reserved
	 * * 8MB-16MB: global GTT
	 */
	struct {
		/** @mmio.size: size of tile's MMIO space */
		size_t size;

		/** @mmio.regs: pointer to tile's MMIO space (starting with registers) */
		void __iomem *regs;
	} mmio;

	/**
	 * @mmio_ext: MMIO-extension info for a tile.
	 *
	 * Each tile has its own additional 256MB (28-bit) MMIO-extension space.
	 */
	struct {
		/** @mmio_ext.size: size of tile's additional MMIO-extension space */
		size_t size;

		/** @mmio_ext.regs: pointer to tile's additional MMIO-extension space */
		void __iomem *regs;
	} mmio_ext;

	/** @mem: memory management info for tile */
	struct {
		/**
		 * @mem.vram: VRAM info for tile.
		 *
		 * Although VRAM is associated with a specific tile, it can
		 * still be accessed by all tiles' GTs.
		 */
		struct xe_mem_region vram;

		/** @mem.vram_mgr: VRAM TTM manager */
		struct xe_ttm_vram_mgr *vram_mgr;

		/** @mem.ggtt: Global graphics translation table */
		struct xe_ggtt *ggtt;

		/**
		 * @mem.kernel_bb_pool: Pool from which batchbuffers are allocated.
		 *
		 * Media GT shares a pool with its primary GT.
		 */
		struct xe_sa_manager *kernel_bb_pool;
	} mem;

	/**
	 * @sriov: tile level virtualization data
	 *
	 * Union: only the member matching the device's SR-IOV mode
	 * (PF or VF) is valid.
	 */
	union {
		struct {
			/** @sriov.pf.lmtt: Local Memory Translation Table. */
			struct xe_lmtt lmtt;
		} pf;
		struct {
			/** @sriov.vf.memirq: Memory Based Interrupts. */
			struct xe_memirq memirq;
		} vf;
	} sriov;

	/** @migrate: Migration helper for vram blits and clearing */
	struct xe_migrate *migrate;

	/** @sysfs: sysfs' kobj used by xe_tile_sysfs */
	struct kobject *sysfs;
};
208
209/**
210 * struct xe_device - Top level struct of XE device
211 */
struct xe_device {
	/** @drm: drm device */
	struct drm_device drm;

	/** @devcoredump: device coredump */
	struct xe_devcoredump devcoredump;

	/** @info: device info */
	struct intel_device_info {
		/** @info.graphics_name: graphics IP name */
		const char *graphics_name;
		/** @info.media_name: media IP name */
		const char *media_name;
		/** @info.tile_mmio_ext_size: size of MMIO extension space, per-tile */
		u32 tile_mmio_ext_size;
		/** @info.graphics_verx100: graphics IP version */
		u32 graphics_verx100;
		/** @info.media_verx100: media IP version */
		u32 media_verx100;
		/** @info.mem_region_mask: mask of valid memory regions */
		u32 mem_region_mask;
		/** @info.platform: XE platform enum */
		enum xe_platform platform;
		/** @info.subplatform: XE subplatform enum */
		enum xe_subplatform subplatform;
		/** @info.devid: device ID */
		u16 devid;
		/** @info.revid: device revision */
		u8 revid;
		/** @info.step: stepping information for each IP */
		struct xe_step_info step;
		/** @info.dma_mask_size: DMA address bits */
		u8 dma_mask_size;
		/** @info.vram_flags: Vram flags (XE_VRAM_FLAGS_*) */
		u8 vram_flags;
		/** @info.tile_count: Number of tiles */
		u8 tile_count;
		/** @info.gt_count: Total number of GTs for entire device */
		u8 gt_count;
		/** @info.vm_max_level: Max VM level */
		u8 vm_max_level;
		/** @info.va_bits: Maximum bits of a virtual address */
		u8 va_bits;

		/** @info.is_dgfx: is discrete device */
		u8 is_dgfx:1;
		/** @info.has_asid: Has address space ID */
		u8 has_asid:1;
		/** @info.force_execlist: Forced execlist submission */
		u8 force_execlist:1;
		/** @info.has_flat_ccs: Whether flat CCS metadata is used */
		u8 has_flat_ccs:1;
		/** @info.has_llc: Device has a shared CPU+GPU last level cache */
		u8 has_llc:1;
		/** @info.has_mmio_ext: Device has extra MMIO address range */
		u8 has_mmio_ext:1;
		/** @info.has_range_tlb_invalidation: Has range based TLB invalidations */
		u8 has_range_tlb_invalidation:1;
		/** @info.has_sriov: Supports SR-IOV */
		u8 has_sriov:1;
		/** @info.has_usm: Device has unified shared memory support */
		u8 has_usm:1;
		/** @info.enable_display: display enabled */
		u8 enable_display:1;
		/** @info.skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
		u8 skip_mtcfg:1;
		/** @info.skip_pcode: skip access to PCODE uC */
		u8 skip_pcode:1;
		/** @info.has_heci_gscfi: device has heci gscfi */
		u8 has_heci_gscfi:1;
		/** @info.skip_guc_pc: Skip GuC based PM feature init */
		u8 skip_guc_pc:1;

#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
		/* Runtime info consumed by the shared i915 display code */
		struct {
			/* raw clock frequency; NOTE(review): units unclear from here — confirm */
			u32 rawclk_freq;
		} i915_runtime;
#endif
	} info;

	/** @irq: device interrupt state */
	struct {
		/** @irq.lock: lock for processing irq's on this device */
		spinlock_t lock;

		/** @irq.enabled: interrupts enabled on this device */
		bool enabled;
	} irq;

	/** @ttm: ttm device */
	struct ttm_device ttm;

	/** @mmio: mmio info for device */
	struct {
		/** @mmio.size: size of MMIO space for device */
		size_t size;
		/** @mmio.regs: pointer to MMIO space for device */
		void __iomem *regs;
	} mmio;

	/** @mem: memory info for device */
	struct {
		/** @mem.vram: VRAM info for device */
		struct xe_mem_region vram;
		/** @mem.sys_mgr: system TTM manager */
		struct ttm_resource_manager sys_mgr;
	} mem;

	/** @sriov: device level virtualization data */
	struct {
		/** @sriov.__mode: SR-IOV mode (Don't access directly!) */
		enum xe_sriov_mode __mode;
		/** @sriov.wq: workqueue used by the virtualization workers */
		struct workqueue_struct *wq;
	} sriov;

	/** @clients: drm clients info */
	struct {
		/** @clients.lock: Protects drm clients info */
		spinlock_t lock;

		/** @clients.count: number of drm clients */
		u64 count;
	} clients;

	/** @usm: unified memory state */
	struct {
		/** @usm.asid_to_vm: map an ASID to its VM */
		struct xarray asid_to_vm;
		/** @usm.next_asid: next ASID, used to cyclical alloc asids */
		u32 next_asid;
		/** @usm.num_vm_in_fault_mode: number of VM in fault mode */
		u32 num_vm_in_fault_mode;
		/** @usm.num_vm_in_non_fault_mode: number of VM in non-fault mode */
		u32 num_vm_in_non_fault_mode;
		/** @usm.lock: protects USM state */
		struct mutex lock;
	} usm;

	/** @pinned: pinned BO state */
	struct {
		/** @pinned.lock: protects pinned BO list state */
		spinlock_t lock;
		/** @pinned.kernel_bo_present: pinned kernel BO that are present */
		struct list_head kernel_bo_present;
		/** @pinned.evicted: pinned BO that have been evicted */
		struct list_head evicted;
		/** @pinned.external_vram: pinned external BO in vram */
		struct list_head external_vram;
	} pinned;

	/** @ufence_wq: user fence wait queue */
	wait_queue_head_t ufence_wq;

	/** @preempt_fence_wq: used to serialize preempt fences */
	struct workqueue_struct *preempt_fence_wq;

	/** @ordered_wq: used to serialize compute mode resume */
	struct workqueue_struct *ordered_wq;

	/** @unordered_wq: used to serialize unordered work, mostly display */
	struct workqueue_struct *unordered_wq;

	/** @tiles: device tiles */
	struct xe_tile tiles[XE_MAX_TILES_PER_DEVICE];

	/**
	 * @mem_access: keep track of memory access in the device, possibly
	 * triggering additional actions when they occur.
	 */
	struct {
		/** @mem_access.ref: ref count of memory accesses */
		atomic_t ref;

		/**
		 * @mem_access.vram_userfault: Encapsulate vram_userfault
		 * related stuff
		 */
		struct {
			/**
			 * @mem_access.vram_userfault.lock: Protects access to
			 * @mem_access.vram_userfault.list. Using mutex instead
			 * of spinlock as lock is applied to entire list
			 * operation which may sleep
			 */
			struct mutex lock;

			/**
			 * @mem_access.vram_userfault.list: Keep list of userfaulted
			 * vram bo, which require to release their mmap mappings
			 * at runtime suspend path
			 */
			struct list_head list;
		} vram_userfault;
	} mem_access;

	/**
	 * @pat: Encapsulate PAT related stuff
	 */
	struct {
		/** @pat.ops: Internal operations to abstract platforms */
		const struct xe_pat_ops *ops;
		/** @pat.table: PAT table to program in the HW */
		const struct xe_pat_table_entry *table;
		/** @pat.n_entries: Number of PAT entries */
		int n_entries;
		/** @pat.idx: PAT index for each cache level, used to look up @pat.table */
		u32 idx[__XE_CACHE_LEVEL_COUNT];
	} pat;

	/** @d3cold: Encapsulate d3cold related stuff */
	struct {
		/** @d3cold.capable: Indicates if root port is d3cold capable */
		bool capable;

		/** @d3cold.allowed: Indicates if d3cold is a valid device state */
		bool allowed;

		/** @d3cold.power_lost: Indicates if card has really lost power. */
		bool power_lost;

		/**
		 * @d3cold.vram_threshold:
		 *
		 * This represents the permissible threshold (in megabytes)
		 * for vram save/restore. d3cold will be disallowed
		 * when vram usage is above or equals the threshold value,
		 * to avoid the vram save/restore latency.
		 * Default threshold value is 300mb.
		 */
		u32 vram_threshold;
		/** @d3cold.lock: protect vram_threshold */
		struct mutex lock;
	} d3cold;

	/**
	 * @pm_callback_task: Track the active task that is running in either
	 * the runtime_suspend or runtime_resume callbacks.
	 */
	struct task_struct *pm_callback_task;

	/** @hwmon: hwmon subsystem integration */
	struct xe_hwmon *hwmon;

	/** @heci_gsc: graphics security controller */
	struct xe_heci_gsc heci_gsc;

	/** @needs_flr_on_fini: requests function-reset on fini */
	bool needs_flr_on_fini;

	/* private: */

#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
	/*
	 * Any fields below this point are the ones used by display.
	 * They are temporarily added here so xe_device can be disguised as
	 * drm_i915_private during build. After cleanup these should go away,
	 * migrating to the right sub-structs
	 */
	struct intel_display display;
	enum intel_pch pch_type;
	u16 pch_id;

	struct dram_info {
		bool wm_lv_0_adjust_needed;
		u8 num_channels;
		bool symmetric_memory;
		enum intel_dram_type {
			INTEL_DRAM_UNKNOWN,
			INTEL_DRAM_DDR3,
			INTEL_DRAM_DDR4,
			INTEL_DRAM_LPDDR3,
			INTEL_DRAM_LPDDR4,
			INTEL_DRAM_DDR5,
			INTEL_DRAM_LPDDR5,
		} type;
		u8 num_qgv_points;
		u8 num_psf_gv_points;
	} dram_info;

	/*
	 * edram size in MB.
	 * Cannot be determined by PCIID. You must always read a register.
	 */
	u32 edram_size_mb;

	/* To shut up runtime pm macros.. */
	struct xe_runtime_pm {} runtime_pm;

	/* For pcode */
	struct mutex sb_lock;

	/* Should be in struct intel_display */
	u32 skl_preferred_vco_freq, max_dotclk_freq, hti_state;
	u8 snps_phy_failed_calibration;
	struct drm_atomic_state *modeset_restore_state;
	struct list_head global_obj_list;

	union {
		/* only to allow build, not used functionally */
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	bool display_irqs_enabled;
	u32 enabled_irq_mask;

	struct intel_uncore {
		spinlock_t lock;
	} uncore;

	/* only to allow build, not used functionally */
	struct {
		unsigned int hpll_freq;
		unsigned int czclk_freq;
		unsigned int fsb_freq, mem_freq, is_ddr3;
		u8 vblank_enabled;
	};
	struct {
		const char *dmc_firmware_path;
	} params;

	void *pxp;
#endif
};
537
538/**
539 * struct xe_file - file handle for XE driver
540 */
struct xe_file {
	/** @xe: xe device this file handle belongs to */
	struct xe_device *xe;

	/** @drm: base DRM file */
	struct drm_file *drm;

	/** @vm: VM state for file */
	struct {
		/** @vm.xa: xarray to store VMs */
		struct xarray xa;
		/** @vm.lock: protects file VM state */
		struct mutex lock;
	} vm;

	/** @exec_queue: Submission exec queue state for file */
	struct {
		/** @exec_queue.xa: xarray to store engines */
		struct xarray xa;
		/** @exec_queue.lock: protects file engine state */
		struct mutex lock;
	} exec_queue;

	/** @client: drm client */
	struct xe_drm_client *client;
};
567
568#endif
569