// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_mocs.h"

#include "regs/xe_gt_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
#include "xe_sriov.h"
#include "xe_step_types.h"

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define mocs_dbg drm_dbg
#else
__printf(2, 3)
static inline void mocs_dbg(const struct drm_device *dev,
			    const char *format, ...)
{ /* noop */ }
#endif

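/*
 * Flags returned by get_mocs_settings(), indicating which MOCS register sets
 * (global MOCS and/or LNCF MOCS) need to be programmed on this platform.
 */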
enum {
	HAS_GLOBAL_MOCS = BIT(0),
	HAS_LNCF_MOCS = BIT(1),
};

struct xe_mocs_entry {
	u32 control_value;
	u16 l3cc_value;
	u16 used;
};

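/*
 * struct xe_mocs_info - MOCS table description for a platform
 * @size: number of defined entries in @table
 * @n_entries: total number of hardware MOCS entries to program
 * @table: platform-specific array of MOCS settings
 * @uc_index: index of the uncached MOCS entry
 * @wb_index: index of the write-back MOCS entry
 * @unused_entries_index: entry used to program undefined/reserved table rows
 */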
struct xe_mocs_info {
	unsigned int size;
	unsigned int n_entries;
	const struct xe_mocs_entry *table;
	u8 uc_index;
	u8 wb_index;
	u8 unused_entries_index;
};

/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
#define _LE_CACHEABILITY(value)	((value) << 0)
#define _LE_TGT_CACHE(value)	((value) << 2)
#define LE_LRUM(value)		((value) << 4)
#define LE_AOM(value)		((value) << 6)
#define LE_RSC(value)		((value) << 7)
#define LE_SCC(value)		((value) << 8)
#define LE_PFM(value)		((value) << 11)
#define LE_SCF(value)		((value) << 14)
#define LE_COS(value)		((value) << 15)
#define LE_SSE(value)		((value) << 17)

/* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
#define L3_ESC(value)		((value) << 0)
#define L3_SCC(value)		((value) << 1)
#define _L3_CACHEABILITY(value)	((value) << 4)
#define L3_GLBGO(value)		((value) << 6)
#define L3_LKUP(value)		((value) << 7)

/* Defines for the tables (GLOB_MOCS_0 - GLOB_MOCS_16) */
#define IG_PAT				REG_BIT(8)
#define L3_CACHE_POLICY_MASK		REG_GENMASK(5, 4)
#define L4_CACHE_POLICY_MASK		REG_GENMASK(3, 2)

/* Helper defines */
#define XELP_NUM_MOCS_ENTRIES	64  /* 62-63 are reserved, but configured. */
#define PVC_NUM_MOCS_ENTRIES	3
#define MTL_NUM_MOCS_ENTRIES    16
#define XE2_NUM_MOCS_ENTRIES	16

/* (e)LLC caching options */
/*
 * Note: LE_0_PAGETABLE works only up to Gen11; for newer gens it means
 * the same as LE_UC
 */
#define LE_0_PAGETABLE		_LE_CACHEABILITY(0)
#define LE_1_UC			_LE_CACHEABILITY(1)
#define LE_2_WT			_LE_CACHEABILITY(2)
#define LE_3_WB			_LE_CACHEABILITY(3)

/* Target cache */
#define LE_TC_0_PAGETABLE	_LE_TGT_CACHE(0)
#define LE_TC_1_LLC		_LE_TGT_CACHE(1)
#define LE_TC_2_LLC_ELLC	_LE_TGT_CACHE(2)
#define LE_TC_3_LLC_ELLC_ALT	_LE_TGT_CACHE(3)

/* L3 caching options */
#define L3_0_DIRECT		_L3_CACHEABILITY(0)
#define L3_1_UC			_L3_CACHEABILITY(1)
#define L3_2_RESERVED		_L3_CACHEABILITY(2)
#define L3_3_WB			_L3_CACHEABILITY(3)

/* L4 caching options */
#define L4_0_WB			REG_FIELD_PREP(L4_CACHE_POLICY_MASK, 0)
#define L4_1_WT			REG_FIELD_PREP(L4_CACHE_POLICY_MASK, 1)
#define L4_3_UC			REG_FIELD_PREP(L4_CACHE_POLICY_MASK, 3)

#define XE2_L3_0_WB		REG_FIELD_PREP(L3_CACHE_POLICY_MASK, 0)
/* XD: WB Transient Display */
#define XE2_L3_1_XD		REG_FIELD_PREP(L3_CACHE_POLICY_MASK, 1)
#define XE2_L3_3_UC		REG_FIELD_PREP(L3_CACHE_POLICY_MASK, 3)

#define MOCS_ENTRY(__idx, __control_value, __l3cc_value) \
	[__idx] = { \
		.control_value = __control_value, \
		.l3cc_value = __l3cc_value, \
		.used = 1, \
	}
/*
 * MOCS tables
 *
 * These are the MOCS tables that are programmed across all the rings.
 * The control value is programmed to all the rings that support the
 * MOCS registers, while the l3cc_values are only programmed to the
 * LNCFCMOCS0 - LNCFCMOCS31 registers.
 *
 * These tables are intended to be kept reasonably consistent across
 * HW platforms, and for ICL+, be identical across OSes. To achieve
 * that, the list of entries is published as part of bspec.
 *
 * Entries not part of the following tables are undefined as far as userspace is
 * concerned and shouldn't be relied upon. The last few entries are reserved by
 * the hardware. They should be initialized according to bspec and never used.
 *
 * NOTE1: These tables are part of bspec and defined as part of the hardware
 * interface. It is expected that, for a specific hardware platform, existing
 * entries will remain constant and the table will only be updated by adding
 * new entries, filling unused positions.
 *
 * NOTE2: Reserved and unspecified MOCS indices have been set to L3 WB. These
 * reserved entries should never be used. They may be changed to
 * lower-performance variants with better coherency in the future if more
 * entries are needed.
 */

static const struct xe_mocs_entry gen12_mocs_desc[] = {
	/* Base - L3 + LLC */
	MOCS_ENTRY(2,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_3_WB),
	/* Base - Uncached */
	MOCS_ENTRY(3,
		   LE_1_UC | LE_TC_1_LLC,
		   L3_1_UC),
	/* Base - L3 */
	MOCS_ENTRY(4,
		   LE_1_UC | LE_TC_1_LLC,
		   L3_3_WB),
	/* Base - LLC */
	MOCS_ENTRY(5,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_1_UC),
	/* Age 0 - LLC */
	MOCS_ENTRY(6,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(1),
		   L3_1_UC),
	/* Age 0 - L3 + LLC */
	MOCS_ENTRY(7,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(1),
		   L3_3_WB),
	/* Age: Don't Chg. - LLC */
	MOCS_ENTRY(8,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2),
		   L3_1_UC),
	/* Age: Don't Chg. - L3 + LLC */
	MOCS_ENTRY(9,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2),
		   L3_3_WB),
	/* No AOM - LLC */
	MOCS_ENTRY(10,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1),
		   L3_1_UC),
	/* No AOM - L3 + LLC */
	MOCS_ENTRY(11,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1),
		   L3_3_WB),
	/* No AOM; Age 0 - LLC */
	MOCS_ENTRY(12,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1),
		   L3_1_UC),
	/* No AOM; Age 0 - L3 + LLC */
	MOCS_ENTRY(13,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1),
		   L3_3_WB),
	/* No AOM; Age:DC - LLC */
	MOCS_ENTRY(14,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1),
		   L3_1_UC),
	/* No AOM; Age:DC - L3 + LLC */
	MOCS_ENTRY(15,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1),
		   L3_3_WB),
	/* Self-Snoop - L3 + LLC */
	MOCS_ENTRY(18,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3),
		   L3_3_WB),
	/* Skip Caching - L3 + LLC(12.5%) */
	MOCS_ENTRY(19,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(7),
		   L3_3_WB),
	/* Skip Caching - L3 + LLC(25%) */
	MOCS_ENTRY(20,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(3),
		   L3_3_WB),
	/* Skip Caching - L3 + LLC(50%) */
	MOCS_ENTRY(21,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(1),
		   L3_3_WB),
	/* Skip Caching - L3 + LLC(75%) */
	MOCS_ENTRY(22,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(3),
		   L3_3_WB),
	/* Skip Caching - L3 + LLC(87.5%) */
	MOCS_ENTRY(23,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(7),
		   L3_3_WB),
	/* Implicitly enable L1 - HDC:L1 + L3 + LLC */
	MOCS_ENTRY(48,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_3_WB),
	/* Implicitly enable L1 - HDC:L1 + L3 */
	MOCS_ENTRY(49,
		   LE_1_UC | LE_TC_1_LLC,
		   L3_3_WB),
	/* Implicitly enable L1 - HDC:L1 + LLC */
	MOCS_ENTRY(50,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_1_UC),
	/* Implicitly enable L1 - HDC:L1 */
	MOCS_ENTRY(51,
		   LE_1_UC | LE_TC_1_LLC,
		   L3_1_UC),
	/* HW Special Case (CCS) */
	MOCS_ENTRY(60,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_1_UC),
	/* HW Special Case (Displayable) */
	MOCS_ENTRY(61,
		   LE_1_UC | LE_TC_1_LLC,
		   L3_3_WB),
	/* HW Reserved - SW program but never use */
	MOCS_ENTRY(62,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_1_UC),
	/* HW Reserved - SW program but never use */
	MOCS_ENTRY(63,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_1_UC)
};

static const struct xe_mocs_entry dg1_mocs_desc[] = {
	/* UC */
	MOCS_ENTRY(1, 0, L3_1_UC),
	/* WB - L3 */
	MOCS_ENTRY(5, 0, L3_3_WB),
	/* WB - L3 50% */
	MOCS_ENTRY(6, 0, L3_ESC(1) | L3_SCC(1) | L3_3_WB),
	/* WB - L3 25% */
	MOCS_ENTRY(7, 0, L3_ESC(1) | L3_SCC(3) | L3_3_WB),
	/* WB - L3 12.5% */
	MOCS_ENTRY(8, 0, L3_ESC(1) | L3_SCC(7) | L3_3_WB),

	/* HDC:L1 + L3 */
	MOCS_ENTRY(48, 0, L3_3_WB),
	/* HDC:L1 */
	MOCS_ENTRY(49, 0, L3_1_UC),

	/* HW Reserved */
	MOCS_ENTRY(60, 0, L3_1_UC),
	MOCS_ENTRY(61, 0, L3_1_UC),
	MOCS_ENTRY(62, 0, L3_1_UC),
	MOCS_ENTRY(63, 0, L3_1_UC),
};

static const struct xe_mocs_entry dg2_mocs_desc[] = {
	/* UC - Coherent; GO:L3 */
	MOCS_ENTRY(0, 0, L3_1_UC | L3_LKUP(1)),
	/* UC - Coherent; GO:Memory */
	MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
	/* UC - Non-Coherent; GO:Memory */
	MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)),

	/* WB - LC */
	MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
};

static const struct xe_mocs_entry pvc_mocs_desc[] = {
	/* Error */
	MOCS_ENTRY(0, 0, L3_3_WB),

	/* UC */
	MOCS_ENTRY(1, 0, L3_1_UC),

	/* WB */
	MOCS_ENTRY(2, 0, L3_3_WB),
};

static const struct xe_mocs_entry mtl_mocs_desc[] = {
	/* Error - Reserved for Non-Use */
	MOCS_ENTRY(0,
		   0,
		   L3_LKUP(1) | L3_3_WB),
	/* Cached - L3 + L4 */
	MOCS_ENTRY(1,
		   IG_PAT,
		   L3_LKUP(1) | L3_3_WB),
	/* L4 - GO:L3 */
	MOCS_ENTRY(2,
		   IG_PAT,
		   L3_LKUP(1) | L3_1_UC),
	/* Uncached - GO:L3 */
	MOCS_ENTRY(3,
		   IG_PAT | L4_3_UC,
		   L3_LKUP(1) | L3_1_UC),
	/* L4 - GO:Mem */
	MOCS_ENTRY(4,
		   IG_PAT,
		   L3_LKUP(1) | L3_GLBGO(1) | L3_1_UC),
	/* Uncached - GO:Mem */
	MOCS_ENTRY(5,
		   IG_PAT | L4_3_UC,
		   L3_LKUP(1) | L3_GLBGO(1) | L3_1_UC),
	/* L4 - L3:NoLKUP; GO:L3 */
	MOCS_ENTRY(6,
		   IG_PAT,
		   L3_1_UC),
	/* Uncached - L3:NoLKUP; GO:L3 */
	MOCS_ENTRY(7,
		   IG_PAT | L4_3_UC,
		   L3_1_UC),
	/* L4 - L3:NoLKUP; GO:Mem */
	MOCS_ENTRY(8,
		   IG_PAT,
		   L3_GLBGO(1) | L3_1_UC),
	/* Uncached - L3:NoLKUP; GO:Mem */
	MOCS_ENTRY(9,
		   IG_PAT | L4_3_UC,
		   L3_GLBGO(1) | L3_1_UC),
	/* Display - L3; L4:WT */
	MOCS_ENTRY(14,
		   IG_PAT | L4_1_WT,
		   L3_LKUP(1) | L3_3_WB),
	/* CCS - Non-Displayable */
	MOCS_ENTRY(15,
		   IG_PAT,
		   L3_GLBGO(1) | L3_1_UC),
};

static const struct xe_mocs_entry xe2_mocs_table[] = {
	/* Defer to PAT */
	MOCS_ENTRY(0, XE2_L3_0_WB | L4_3_UC, 0),
	/* Cached L3, Uncached L4 */
	MOCS_ENTRY(1, IG_PAT | XE2_L3_0_WB | L4_3_UC, 0),
	/* Uncached L3, Cached L4 */
	MOCS_ENTRY(2, IG_PAT | XE2_L3_3_UC | L4_0_WB, 0),
	/* Uncached L3 + L4 */
	MOCS_ENTRY(3, IG_PAT | XE2_L3_3_UC | L4_3_UC, 0),
	/* Cached L3 + L4 */
	MOCS_ENTRY(4, IG_PAT | XE2_L3_0_WB | L4_0_WB, 0),
};

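/*
 * Select the MOCS table for the current platform and fill in @info.  Returns
 * a mask of HAS_GLOBAL_MOCS / HAS_LNCF_MOCS flags indicating which register
 * sets need to be programmed, or 0 if no valid table is available.
 */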
static unsigned int get_mocs_settings(struct xe_device *xe,
				      struct xe_mocs_info *info)
{
	unsigned int flags = 0;

	memset(info, 0, sizeof(struct xe_mocs_info));

	switch (xe->info.platform) {
	case XE_LUNARLAKE:
		info->size = ARRAY_SIZE(xe2_mocs_table);
		info->table = xe2_mocs_table;
		info->n_entries = XE2_NUM_MOCS_ENTRIES;
		info->uc_index = 3;
		info->wb_index = 4;
		info->unused_entries_index = 4;
		break;
	case XE_PVC:
		info->size = ARRAY_SIZE(pvc_mocs_desc);
		info->table = pvc_mocs_desc;
		info->n_entries = PVC_NUM_MOCS_ENTRIES;
		info->uc_index = 1;
		info->wb_index = 2;
		info->unused_entries_index = 2;
		break;
	case XE_METEORLAKE:
		info->size = ARRAY_SIZE(mtl_mocs_desc);
		info->table = mtl_mocs_desc;
		info->n_entries = MTL_NUM_MOCS_ENTRIES;
		info->uc_index = 9;
		info->unused_entries_index = 1;
		break;
	case XE_DG2:
		info->size = ARRAY_SIZE(dg2_mocs_desc);
		info->table = dg2_mocs_desc;
		info->uc_index = 1;
		info->n_entries = XELP_NUM_MOCS_ENTRIES;
		info->unused_entries_index = 3;
		break;
	case XE_DG1:
		info->size = ARRAY_SIZE(dg1_mocs_desc);
		info->table = dg1_mocs_desc;
		info->uc_index = 1;
		info->n_entries = XELP_NUM_MOCS_ENTRIES;
		info->unused_entries_index = 5;
		break;
	case XE_TIGERLAKE:
	case XE_ROCKETLAKE:
	case XE_ALDERLAKE_S:
	case XE_ALDERLAKE_P:
	case XE_ALDERLAKE_N:
		info->size = ARRAY_SIZE(gen12_mocs_desc);
		info->table = gen12_mocs_desc;
		info->n_entries = XELP_NUM_MOCS_ENTRIES;
		info->uc_index = 3;
		info->unused_entries_index = 2;
		break;
	default:
		drm_err(&xe->drm, "Platform that should have a MOCS table does not.\n");
		return 0;
	}

	/*
	 * Index 0 is a reserved/unused table entry on most platforms, but
	 * even on those where it does represent a legitimate MOCS entry, it
	 * never represents the "most cached, least coherent" behavior we want
	 * to populate undefined table rows with.  So if unused_entries_index
	 * is still 0 at this point, we'll assume that it was omitted by
	 * mistake in the switch statement above.
	 */
	xe_assert(xe, info->unused_entries_index != 0);

	if (XE_WARN_ON(info->size > info->n_entries)) {
		info->table = NULL;
		return 0;
	}

	if (!IS_DGFX(xe) || GRAPHICS_VER(xe) >= 20)
		flags |= HAS_GLOBAL_MOCS;
	if (GRAPHICS_VER(xe) < 20)
		flags |= HAS_LNCF_MOCS;

	return flags;
}

/*
 * Get control_value from MOCS entry.  If the table entry is not defined, the
 * settings from unused_entries_index will be returned.
 */
static u32 get_entry_control(const struct xe_mocs_info *info,
			     unsigned int index)
{
	if (index < info->size && info->table[index].used)
		return info->table[index].control_value;
	return info->table[info->unused_entries_index].control_value;
}

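/*
 * Program the global MOCS control registers.  One control value is written
 * per table index; on platforms with graphics version above 12.50 these
 * registers are replicated and written via multicast MCR, otherwise plain
 * MMIO writes are used.
 */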
static void __init_mocs_table(struct xe_gt *gt,
			      const struct xe_mocs_info *info)
{
	struct xe_device *xe = gt_to_xe(gt);

	unsigned int i;
	u32 mocs;

	mocs_dbg(&xe->drm, "entries:%d\n", info->n_entries);
	drm_WARN_ONCE(&xe->drm, !info->unused_entries_index,
		      "Unused entries index should have been defined\n");
	for (i = 0; i < info->n_entries; i++) {
		mocs = get_entry_control(info, i);

		mocs_dbg(&xe->drm, "GLOB_MOCS[%d] 0x%x 0x%x\n", i,
			 XELP_GLOBAL_MOCS(i).addr, mocs);

		if (GRAPHICS_VERx100(xe) > 1250)
			xe_gt_mcr_multicast_write(gt, XEHP_GLOBAL_MOCS(i), mocs);
		else
			xe_mmio_write32(gt, XELP_GLOBAL_MOCS(i), mocs);
	}
}

/*
 * Get l3cc_value from MOCS entry.  If the table entry is not defined, the
 * settings from unused_entries_index will be returned.
 */
static u16 get_entry_l3cc(const struct xe_mocs_info *info,
			  unsigned int index)
{
	if (index < info->size && info->table[index].used)
		return info->table[index].l3cc_value;
	return info->table[info->unused_entries_index].l3cc_value;
}

static u32 l3cc_combine(u16 low, u16 high)
{
	return low | (u32)high << 16;
}

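/*
 * Program the LNCF MOCS (l3cc) registers.  Each 32-bit LNCFCMOCS register
 * holds two 16-bit table entries: entry 2*i in the low half and entry
 * 2*i + 1 in the high half, which is why the loop below walks the table in
 * pairs.
 */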
static void init_l3cc_table(struct xe_gt *gt,
			    const struct xe_mocs_info *info)
{
	unsigned int i;
	u32 l3cc;

	mocs_dbg(&gt_to_xe(gt)->drm, "entries:%d\n", info->n_entries);
	for (i = 0; i < (info->n_entries + 1) / 2; i++) {
		l3cc = l3cc_combine(get_entry_l3cc(info, 2 * i),
				    get_entry_l3cc(info, 2 * i + 1));

		mocs_dbg(&gt_to_xe(gt)->drm, "LNCFCMOCS[%d] 0x%x 0x%x\n", i,
			 XELP_LNCFCMOCS(i).addr, l3cc);

		if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
			xe_gt_mcr_multicast_write(gt, XEHP_LNCFCMOCS(i), l3cc);
		else
			xe_mmio_write32(gt, XELP_LNCFCMOCS(i), l3cc);
	}
}

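/*
 * Cache the platform's uncached and write-back MOCS indices in the GT so
 * that other parts of the driver can reference them independently of the
 * MOCS registers being programmed in xe_mocs_init().
 */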
void xe_mocs_init_early(struct xe_gt *gt)
{
	struct xe_mocs_info table;

	get_mocs_settings(gt_to_xe(gt), &table);
	gt->mocs.uc_index = table.uc_index;
	gt->mocs.wb_index = table.wb_index;
}

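/*
 * Program this GT's MOCS table into the global MOCS and/or LNCF MOCS
 * registers, as indicated by the flags returned from get_mocs_settings().
 * Nothing is programmed when running as an SR-IOV VF.
 */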
void xe_mocs_init(struct xe_gt *gt)
{
	struct xe_mocs_info table;
	unsigned int flags;

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return;

	/*
	 * MOCS settings are split between "GLOB_MOCS" and/or "LNCFCMOCS"
	 * registers depending on platform.
	 *
	 * These registers should be programmed before GuC initialization
	 * since their values will affect some of the memory transactions
	 * performed by the GuC.
	 */
	flags = get_mocs_settings(gt_to_xe(gt), &table);
	mocs_dbg(&gt_to_xe(gt)->drm, "flag:0x%x\n", flags);

	if (flags & HAS_GLOBAL_MOCS)
		__init_mocs_table(gt, &table);
	if (flags & HAS_LNCF_MOCS)
		init_l3cc_table(gt, &table);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_mocs.c"
#endif