/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_GT_TYPES__
#define __INTEL_GT_TYPES__

#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "uc/intel_uc.h"
#include "intel_gsc.h"

#include "i915_vma.h"
#include "i915_perf_types.h"
#include "intel_engine_types.h"
#include "intel_gt_buffer_pool_types.h"
#include "intel_hwconfig.h"
#include "intel_llc_types.h"
#include "intel_reset_types.h"
#include "intel_rc6_types.h"
#include "intel_rps_types.h"
#include "intel_migrate_types.h"
#include "intel_wakeref.h"
#include "intel_wopcm.h"

struct drm_i915_private;
struct i915_ggtt;
struct intel_engine_cs;
struct intel_uncore;

struct intel_mmio_range {
	u32 start;
	u32 end;
};

/*
 * The hardware has multiple kinds of multicast register ranges that need
 * special register steering (and future platforms are expected to add
 * additional types).
 *
 * During driver startup, we initialize the steering control register to
 * direct reads to a slice/subslice that are valid for the 'subslice' class
 * of multicast registers.  If another type of steering does not have any
 * overlap in valid steering targets with 'subslice' style registers, we will
 * need to explicitly re-steer reads of registers of the other type.
 *
 * Only the replication types that may need additional non-default steering
 * are listed here.
 */
enum intel_steering_type {
	L3BANK,
	MSLICE,
	LNCF,
	GAM,
	DSS,
	OADDRM,

	/*
	 * On some platforms there are multiple types of MCR registers that
	 * will always return a non-terminated value at instance (0, 0).  We'll
	 * lump those all into a single category to keep things simple.
	 */
	INSTANCE0,

	NUM_STEERING_TYPES
};
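
/*
 * Illustrative sketch (not part of the upstream header): a register offset
 * is classified by walking the per-type range table that struct intel_gt
 * stores in ->steering_table[] below.  Each table is terminated by a
 * zero-initialized sentinel entry, so a lookup along these lines is roughly
 * what the MCR steering code is expected to do when deciding whether a read
 * must be explicitly re-steered:
 *
 *	const struct intel_mmio_range *entry;
 *
 *	for (entry = gt->steering_table[type]; entry->end; entry++) {
 *		if (offset >= entry->start && offset <= entry->end)
 *			return true;
 *	}
 *	return false;
 */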

enum intel_submission_method {
	INTEL_SUBMISSION_RING,
	INTEL_SUBMISSION_ELSP,
	INTEL_SUBMISSION_GUC,
};

struct gt_defaults {
	u32 min_freq;
	u32 max_freq;

	u8 rps_up_threshold;
	u8 rps_down_threshold;
};

enum intel_gt_type {
	GT_PRIMARY,
	GT_TILE,
	GT_MEDIA,
};

struct intel_gt {
	struct drm_i915_private *i915;
	const char *name;
	enum intel_gt_type type;

	struct intel_uncore *uncore;
	struct i915_ggtt *ggtt;

	struct intel_uc uc;
	struct intel_gsc gsc;
	struct intel_wopcm wopcm;

	struct {
		/* Serialize global tlb invalidations */
		struct mutex invalidate_lock;

		/*
		 * Batch TLB invalidations
		 *
		 * After unbinding a PTE, we need to ensure the TLBs are
		 * invalidated prior to releasing the physical pages. But we
		 * only need one such invalidation for all unbinds, so we
		 * track how many TLB invalidations have been performed since
		 * the PTE was unbound and only emit an extra invalidate if no
		 * full barrier has passed in the meantime.
		 */
		seqcount_mutex_t seqno;
	} tlb;
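
	/*
	 * Illustrative sketch (not part of the upstream header) of how the
	 * seqcount above batches invalidations: the unbind path samples the
	 * sequence before releasing pages, and the invalidation path only
	 * performs a real flush if no full invalidation has completed since
	 * that sample.  tlb_seqno_passed() and do_full_tlb_invalidate() are
	 * hypothetical helper names used only for the example:
	 *
	 *	u32 seqno = raw_read_seqcount(&gt->tlb.seqno);
	 *	...
	 *	mutex_lock(&gt->tlb.invalidate_lock);
	 *	if (!tlb_seqno_passed(gt, seqno)) {
	 *		do_full_tlb_invalidate(gt);
	 *		write_seqcount_invalidate(&gt->tlb.seqno);
	 *	}
	 *	mutex_unlock(&gt->tlb.invalidate_lock);
	 */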

	struct i915_wa_list wa_list;

	struct intel_gt_timelines {
		spinlock_t lock; /* protects active_list */
		struct list_head active_list;
	} timelines;

	struct intel_gt_requests {
		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
		 * fire periodically while the ring is running. When it
		 * fires, go retire requests.
		 */
		struct delayed_work retire_work;
	} requests;
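
	/*
	 * Illustrative sketch (not part of the upstream header): the retire
	 * worker typically re-arms itself while the GT is awake, so requests
	 * still get retired without keeping the user interrupt enabled.  The
	 * one-second interval and worker name below are illustrative only:
	 *
	 *	static void retire_worker(struct work_struct *work)
	 *	{
	 *		struct intel_gt *gt =
	 *			container_of(work, typeof(*gt),
	 *				     requests.retire_work.work);
	 *
	 *		intel_gt_retire_requests(gt);
	 *		schedule_delayed_work(&gt->requests.retire_work,
	 *				      round_jiffies_up_relative(HZ));
	 *	}
	 */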

	struct {
		struct llist_head list;
		struct work_struct work;
	} watchdog;

	struct intel_wakeref wakeref;
	atomic_t user_wakeref;

	struct list_head closed_vma;
	spinlock_t closed_lock; /* guards the list of closed_vma */

	ktime_t last_init_time;
	struct intel_reset reset;

	/**
	 * Is the GPU currently considered idle, or busy executing
	 * userspace requests? Whilst idle, we allow runtime power
	 * management to power down the hardware and display clocks.
	 * In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	intel_wakeref_t awake;

	u32 clock_frequency;
	u32 clock_period_ns;

	struct intel_llc llc;
	struct intel_rc6 rc6;
	struct intel_rps rps;

	spinlock_t *irq_lock;
	u32 gt_imr;
	u32 pm_ier;
	u32 pm_imr;

	u32 pm_guc_events;

	struct {
		bool active;

		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqcount_mutex_t lock;

		/**
		 * @total: Total time the GT was busy.
		 *
		 * Accumulated time not counting the most recent block in
		 * cases where the GT is currently busy (active is true).
		 */
		ktime_t total;

		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active being false, busy as active
		 * being true.
		 */
		ktime_t start;
	} stats;
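
	/*
	 * Illustrative sketch (not part of the upstream header): consumers
	 * are expected to read the busyness counters under the seqcount so
	 * an in-progress update is retried rather than observed half-way:
	 *
	 *	unsigned int seq;
	 *	ktime_t total;
	 *
	 *	do {
	 *		seq = read_seqcount_begin(&gt->stats.lock);
	 *		total = gt->stats.total;
	 *		if (gt->stats.active)
	 *			total = ktime_add(total,
	 *					  ktime_sub(ktime_get(),
	 *						    gt->stats.start));
	 *	} while (read_seqcount_retry(&gt->stats.lock, seq));
	 */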

	struct intel_engine_cs *engine[I915_NUM_ENGINES];
	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
					    [MAX_ENGINE_INSTANCE + 1];
	enum intel_submission_method submission_method;

	/*
	 * Default address space (either GGTT or ppGTT depending on arch).
	 *
	 * Reserved for exclusive use by the kernel.
	 */
	struct i915_address_space *vm;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 *
	 * Buffers older than 1s are periodically reaped from the pool,
	 * or may be reclaimed by the shrinker before then.
	 */
	struct intel_gt_buffer_pool buffer_pool;

	struct i915_vma *scratch;

	struct intel_migrate migrate;

	const struct intel_mmio_range *steering_table[NUM_STEERING_TYPES];

	struct {
		u8 groupid;
		u8 instanceid;
	} default_steering;

	/**
	 * @mcr_lock: Protects the MCR steering register
	 *
	 * Protects the MCR steering register (e.g., GEN8_MCR_SELECTOR).
	 * Should be taken before uncore->lock in cases where both are desired.
	 */
	spinlock_t mcr_lock;
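
	/*
	 * Illustrative sketch (not part of the upstream header) of the
	 * documented nesting order when both locks are needed:
	 *
	 *	spin_lock_irqsave(&gt->mcr_lock, flags);
	 *	spin_lock(&gt->uncore->lock);
	 *	... steer, then access the multicast register ...
	 *	spin_unlock(&gt->uncore->lock);
	 *	spin_unlock_irqrestore(&gt->mcr_lock, flags);
	 */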

	/*
	 * Base of per-tile GTTMMADR where we can derive the MMIO and the GGTT.
	 */
	phys_addr_t phys_addr;

	struct intel_gt_info {
		unsigned int id;

		intel_engine_mask_t engine_mask;

		u32 l3bank_mask;

		u8 num_engines;

		/* General presence of SFC units */
		u8 sfc_mask;

		/* Media engine access to SFC per instance */
		u8 vdbox_sfc_access;

		/* Slice/subslice/EU info */
		struct sseu_dev_info sseu;

		unsigned long mslice_mask;

		/** @hwconfig: hardware configuration data */
		struct intel_hwconfig hwconfig;
	} info;

	struct {
		u8 uc_index;
		u8 wb_index; /* Only used on HAS_L3_CCS_READ() platforms */
	} mocs;

	/* gt/gtN sysfs */
	struct kobject sysfs_gt;

	/* sysfs defaults per gt */
	struct gt_defaults defaults;
	struct kobject *sysfs_defaults;

	struct i915_perf_gt perf;

	/** link: &ggtt.gt_list */
	struct list_head ggtt_link;
};

struct intel_gt_definition {
	enum intel_gt_type type;
	char *name;
	u32 mapping_base;
	u32 gsi_offset;
	intel_engine_mask_t engine_mask;
};
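
/*
 * Illustrative sketch (not part of the upstream header): platforms with an
 * extra GT describe it with a sentinel-terminated table of these
 * definitions, which GT probing walks to instantiate additional struct
 * intel_gt instances.  The values below are placeholders, not real platform
 * data:
 *
 *	static const struct intel_gt_definition example_extra_gts[] = {
 *		{
 *			.type = GT_MEDIA,
 *			.name = "Standalone Media GT",
 *			.gsi_offset = 0x380000,
 *			.engine_mask = BIT(VECS0) | BIT(VCS0),
 *		},
 *		{}
 *	};
 */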

enum intel_gt_scratch_field {
	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,
};
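
/*
 * Illustrative sketch (not part of the upstream header): each field value is
 * a byte offset into the GT scratch page, so the GGTT address emitted into a
 * batch is expected to be derived along these lines:
 *
 *	u32 addr = i915_ggtt_offset(gt->scratch) +
 *		   INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH;
 */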

#define intel_gt_support_legacy_fencing(gt) ((gt)->ggtt->num_fences > 0)

#endif /* __INTEL_GT_TYPES__ */