/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_region_lmem.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
#include "i915_pci.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, and so the user finds that their system has less memory
 * available than they put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

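/*
 * Carve a @node out of the stolen drm_mm within [@start, @end), holding
 * stolen_lock so concurrent allocations stay consistent. Returns -ENODEV
 * if stolen was never initialized on this platform.
 */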
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (GRAPHICS_VER(i915) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return ret;
}

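/*
 * Allocate anywhere in stolen above I915_GEM_STOLEN_BIAS, skipping the
 * bottom of the region, which may be in use outside the driver's control.
 */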
int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(i915, node,
						    size, alignment,
						    I915_GEM_STOLEN_BIAS,
						    U64_MAX);
}

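/*
 * Return a previously allocated @node to the stolen drm_mm, under the same
 * lock used for insertion.
 */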
void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}

static bool valid_stolen_size(struct drm_i915_private *i915, struct resource *dsm)
{
	return (dsm->start != 0 || HAS_LMEMBAR_SMEM_STOLEN(i915)) && dsm->end > dsm->start;
}

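/*
 * Trim the DSM range so it does not overlap the GGTT on old (gen <= 4,
 * pre-G33) platforms that may place the GTT inside stolen memory, keeping
 * the larger of the two remaining chunks.
 */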
static int adjust_stolen(struct drm_i915_private *i915,
			 struct resource *dsm)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	if (!valid_stolen_size(i915, dsm))
		return -EINVAL;

	/*
	 * Make sure we don't clobber the GTT if it's within stolen memory
	 *
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */
	if (GRAPHICS_VER(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
		if (GRAPHICS_VER(i915) == 4)
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res = DEFINE_RES_MEM(ggtt_start, ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			drm_dbg(&i915->drm,
				"GTT within stolen memory at %pR\n",
				&ggtt_res);
			drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
				dsm);
		}
	}

	if (!valid_stolen_size(i915, dsm))
		return -EINVAL;

	return 0;
}

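/*
 * Claim the stolen range in the iomem resource tree so that nothing else
 * (e.g. the PCI core) hands it out. Not needed when stolen lives behind
 * LMEMBAR, since that address range is local to the GPU.
 */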
static int request_smem_stolen(struct drm_i915_private *i915,
			       struct resource *dsm)
{
	struct resource *r;

	/*
	 * With stolen lmem, we don't need to request system memory for the
	 * address range since it's local to the gpu.
	 *
	 * Starting with MTL, on IGFX devices the stolen memory is exposed via
	 * LMEMBAR and shall be considered similar to stolen lmem.
	 */
	if (HAS_LMEM(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
		return 0;

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt, but this time requesting the region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There's also BIOS with off-by-one on the other end.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && GRAPHICS_VER(i915) != 3) {
			drm_err(&i915->drm,
				"conflict detected with stolen region: %pR\n",
				dsm);

			return -EBUSY;
		}
	}

	return 0;
}

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
	if (!drm_mm_initialized(&i915->mm.stolen))
		return;

	drm_mm_takedown(&i915->mm.stolen);
}

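/*
 * The *_get_stolen_reserved() helpers below decode the per-platform
 * STOLEN_RESERVED registers into a base and size for the reserved (WOPCM)
 * portion at the top of DSM.
 */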
static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
		IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5,
		 "ILK stolen reserved found? 0x%08x\n",
		 reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	drm_WARN_ON(&i915->drm,
		    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		fallthrough;
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}

static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	/* Wa_14019821291 */
	if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) {
		/*
		 * This workaround is primarily implemented by the BIOS.  We
		 * just need to figure out whether the BIOS has applied the
		 * workaround (meaning the programmed address falls within
		 * the DSM) and, if so, reserve that part of the DSM to
		 * prevent accidental reuse.  The DSM location should be just
		 * below the WOPCM.
		 */
		u64 gscpsmi_base = intel_uncore_read64_2x32(uncore,
							    MTL_GSCPSMI_BASEADDR_LSB,
							    MTL_GSCPSMI_BASEADDR_MSB);
		if (gscpsmi_base >= i915->dsm.stolen.start &&
		    gscpsmi_base < i915->dsm.stolen.end) {
			*base = gscpsmi_base;
			*size = i915->dsm.stolen.end - gscpsmi_base;
			return;
		}
	}

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}

	if (HAS_LMEMBAR_SMEM_STOLEN(i915))
		/* the base is initialized to stolen top so subtract size to get base */
		*base -= *size;
	else
		*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
}

/*
 * Initialize i915->dsm.reserved to contain the reserved space within the Data
 * Stolen Memory. This is a range at the top of DSM that is reserved, not to
 * be used by the driver, so it must be excluded from the region passed to the
 * allocator later. In the spec this is also referred to as WOPCM.
 *
 * Our expectation is that the reserved space is at the top of the stolen
 * region, as it has been the case for every platform, and *never* at the
 * bottom, so the calculation here can be simplified.
 */
static int init_reserved_stolen(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_size;
	int ret = 0;

	stolen_top = i915->dsm.stolen.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	if (GRAPHICS_VER(i915) >= 11) {
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 8) {
		if (IS_LP(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 7) {
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 6) {
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) {
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
	}

	/* No reserved stolen */
	if (reserved_base == stolen_top)
		goto bail_out;

	if (!reserved_base) {
		drm_err(&i915->drm,
			"inconsistent reservation %pa + %pa; ignoring\n",
			&reserved_base, &reserved_size);
		ret = -EINVAL;
		goto bail_out;
	}

	i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&i915->dsm.stolen, &i915->dsm.reserved)) {
		drm_err(&i915->drm,
			"Stolen reserved area %pR outside stolen memory %pR\n",
			&i915->dsm.reserved, &i915->dsm.stolen);
		ret = -EINVAL;
		goto bail_out;
	}

	return 0;

bail_out:
	i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, 0);

	return ret;
}

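/*
 * Common stolen setup: validate and claim the DSM range, carve out the
 * reserved (WOPCM) area at the top, and initialize the drm_mm allocator
 * over whatever remains usable.
 */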
static int i915_gem_init_stolen(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;

	mutex_init(&i915->mm.stolen_lock);

	if (intel_vgpu_active(i915)) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return -ENOSPC;
	}

	if (i915_vtd_active(i915) && GRAPHICS_VER(i915) < 8) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return -ENOSPC;
	}

	if (adjust_stolen(i915, &mem->region))
		return -ENOSPC;

	if (request_smem_stolen(i915, &mem->region))
		return -ENOSPC;

	i915->dsm.stolen = mem->region;

	if (init_reserved_stolen(i915))
		return -ENOSPC;

	/* Exclude the reserved region from driver use */
	mem->region.end = i915->dsm.reserved.start - 1;
	mem->io = DEFINE_RES_MEM(mem->io.start,
				 min(resource_size(&mem->io),
				     resource_size(&mem->region)));

	i915->dsm.usable_size = resource_size(&mem->region);

	drm_dbg(&i915->drm,
		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
		(u64)resource_size(&i915->dsm.stolen) >> 10,
		(u64)i915->dsm.usable_size >> 10);

	if (i915->dsm.usable_size == 0)
		return -ENOSPC;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->dsm.usable_size);

	/*
	 * Access to stolen lmem beyond a certain size on the MTL A0 stepping
	 * would crash the machine. Disable stolen lmem for userspace access
	 * by setting usable_size to zero.
	 */
	if (IS_METEORLAKE(i915) && INTEL_REVID(i915) == 0x0)
		i915->dsm.usable_size = 0;

	return 0;
}

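/*
 * Scribble a poison pattern over a stolen range by mapping it a page at a
 * time through the GGTT error-capture PTE. Debug builds only; helps catch
 * use of stale stolen contents.
 */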
static void dbg_poison(struct i915_ggtt *ggtt,
		       dma_addr_t addr, resource_size_t size,
		       u8 x)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	if (!drm_mm_node_allocated(&ggtt->error_capture))
		return;

	if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
		return; /* beware stop_machine() inversion */

	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	mutex_lock(&ggtt->error_mutex);
	while (size) {
		void __iomem *s;

		ggtt->vm.insert_page(&ggtt->vm, addr,
				     ggtt->error_capture.start,
				     i915_gem_get_pat_index(ggtt->vm.i915,
							    I915_CACHE_NONE),
				     0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);
		memset_io(s, x, PAGE_SIZE);
		io_mapping_unmap(s);

		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
	mutex_unlock(&ggtt->error_mutex);
#endif
}

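/*
 * Stolen memory has no struct pages, so build a single-entry sg_table whose
 * dma address points straight at the physical stolen range.
 */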
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm.stolen)));

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)i915->dsm.stolen.start + offset;
	sg_dma_len(sg) = size;

	return st;
}

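/*
 * get_pages/put_pages for stolen objects: the backing store already exists,
 * so these only wrap it in an sg_table and poison it on debug builds.
 */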
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	dbg_poison(to_gt(i915)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_INUSE);

	__i915_gem_object_set_pages(obj, pages);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	/* Should only be called from i915_gem_object_release_stolen() */

	dbg_poison(to_gt(i915)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_FREE);

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);
	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);

	i915_gem_object_release_memory_region(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.name = "i915_gem_object_stolen",
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

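/*
 * Turn a reserved drm_mm node into a GEM object: contiguous by construction,
 * CPU/GTT readable, with cache coherency chosen by LLC presence.
 */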
static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
					   struct drm_i915_gem_object *obj,
					   struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	unsigned int cache_level;
	unsigned int flags;
	int err;

	/*
	 * Stolen objects are always physically contiguous since we just
	 * allocate one big block underneath using the drm_mm range allocator.
	 */
	flags = I915_BO_ALLOC_CONTIGUOUS;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (WARN_ON(!i915_gem_object_trylock(obj, NULL)))
		return -EBUSY;

	i915_gem_object_init_memory_region(obj, mem);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		i915_gem_object_release_memory_region(obj);
	i915_gem_object_unlock(obj);

	return err;
}

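/*
 * Memory-region init_object() hook: place the object at a fixed @offset when
 * one is given (the preallocated/BIOS framebuffer case), otherwise let the
 * allocator pick a slot.
 */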
static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
					struct drm_i915_gem_object *obj,
					resource_size_t offset,
					resource_size_t size,
					resource_size_t page_size,
					unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	if (size == 0)
		return -EINVAL;

	/*
	 * With discrete devices, where we lack a mappable aperture, there is
	 * no possible way to ever access this memory on the CPU side.
	 */
	if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !resource_size(&mem->io) &&
	    !(flags & I915_BO_ALLOC_GPU_ONLY))
		return -ENOSPC;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return -ENOMEM;

	if (offset != I915_BO_INVALID_OFFSET) {
		drm_dbg(&i915->drm,
			"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
			&offset, &size);

		stolen->start = offset;
		stolen->size = size;
		mutex_lock(&i915->mm.stolen_lock);
		ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
		mutex_unlock(&i915->mm.stolen_lock);
	} else {
		ret = i915_gem_stolen_insert_node(i915, stolen, size,
						  mem->min_page_size);
	}
	if (ret)
		goto err_free;

	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
	if (ret)
		goto err_remove;

	return 0;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return ret;
}

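/*
 * Create a GEM object backed by stolen memory at an allocator-chosen offset.
 * Returns an ERR_PTR on failure, e.g. when stolen was never initialized.
 */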
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
}

static int init_stolen_smem(struct intel_memory_region *mem)
{
	int err;

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	err = i915_gem_init_stolen(mem);
	if (err)
		drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");

	return 0;
}

static int release_stolen_smem(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
	.init = init_stolen_smem,
	.release = release_stolen_smem,
	.init_object = _i915_gem_object_stolen_init,
};

static int init_stolen_lmem(struct intel_memory_region *mem)
{
	int err;

	if (GEM_WARN_ON(resource_size(&mem->region) == 0))
		return 0;

	err = i915_gem_init_stolen(mem);
	if (err) {
		drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");
		return 0;
	}

	if (resource_size(&mem->io) &&
	    !io_mapping_init_wc(&mem->iomap, mem->io.start, resource_size(&mem->io))) {
		/* err is still 0 here; report the iomap failure explicitly */
		err = -EIO;
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	i915_gem_cleanup_stolen(mem->i915);
	return err;
}

static int release_stolen_lmem(struct intel_memory_region *mem)
{
	if (resource_size(&mem->io))
		io_mapping_fini(&mem->iomap);
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
	.init = init_stolen_lmem,
	.release = release_stolen_lmem,
	.init_object = _i915_gem_object_stolen_init,
};

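/*
 * Decode the GMS field of the GGC register into the DSM size in MB: values
 * 0x0-0x4 are in 32MB units, 0xf0-0xfe in 4MB units. Also sanity-check that
 * GGMS reads back as the expected fixed 8MB encoding.
 */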
static int mtl_get_gms_size(struct intel_uncore *uncore)
{
	u16 ggc, gms;

	ggc = intel_uncore_read16(uncore, GGC);

	/* check GGMS, should be fixed 0x3 (8MB) */
	if ((ggc & GGMS_MASK) != GGMS_MASK)
		return -EIO;

	/* return valid GMS value, -EIO if invalid */
	gms = REG_FIELD_GET(GMS_MASK, ggc);
	switch (gms) {
	case 0x0 ... 0x04:
		return gms * 32;
	case 0xf0 ... 0xfe:
		return (gms - 0xf0 + 1) * 4;
	default:
		MISSING_CASE(gms);
		return -EIO;
	}
}

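/*
 * Set up the stolen region for devices whose DSM lives behind LMEMBAR
 * (discrete cards and MTL-style integrated parts), working out the DSM
 * base/size and whether it is CPU-accessible through the BAR.
 */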
struct intel_memory_region *
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_uncore *uncore = &i915->uncore;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	resource_size_t dsm_size, dsm_base, lmem_size;
	struct intel_memory_region *mem;
	resource_size_t io_start, io_size;
	resource_size_t min_page_size;
	int ret;

	if (WARN_ON_ONCE(instance))
		return ERR_PTR(-ENODEV);

	if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
		return ERR_PTR(-ENXIO);

	if (HAS_LMEMBAR_SMEM_STOLEN(i915) || IS_DG1(i915)) {
		lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
	} else {
		resource_size_t lmem_range;

		lmem_range = intel_gt_mcr_read_any(to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
		lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
		lmem_size *= SZ_1G;
	}

	if (HAS_LMEMBAR_SMEM_STOLEN(i915)) {
		/*
		 * MTL's DSM size is in the GGC register. MTL also uses
		 * offsets from GSMBASE in its PTEs, so i915 sets
		 * dsm_base = 8MB when setting up the stolen region, since
		 * DSMBASE = GSMBASE + 8MB.
		 */
		ret = mtl_get_gms_size(uncore);
		if (ret < 0) {
			drm_err(&i915->drm, "invalid MTL GGC register setting\n");
			return ERR_PTR(ret);
		}

		dsm_base = SZ_8M;
		dsm_size = (resource_size_t)(ret * SZ_1M);

		GEM_BUG_ON(pci_resource_len(pdev, GEN12_LMEM_BAR) != SZ_256M);
		GEM_BUG_ON((dsm_base + dsm_size) > lmem_size);
	} else {
		/* Use DSM base address instead for stolen memory */
		dsm_base = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
		if (WARN_ON(lmem_size < dsm_base))
			return ERR_PTR(-ENODEV);
		dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
	}

	if (i915_direct_stolen_access(i915)) {
		drm_dbg(&i915->drm, "Using direct DSM access\n");
		io_start = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
		io_size = dsm_size;
	} else if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
		io_start = 0;
		io_size = 0;
	} else {
		io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
		io_size = dsm_size;
	}

	min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
						I915_GTT_PAGE_SIZE_4K;

	mem = intel_memory_region_create(i915, dsm_base, dsm_size,
					 min_page_size,
					 io_start, io_size,
					 type, instance,
					 &i915_region_stolen_lmem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-local");

	mem->private = true;

	return mem;
}

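/*
 * Set up the stolen region for integrated parts where DSM is plain system
 * memory, using the range discovered early in intel_graphics_stolen_res.
 */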
struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_memory_region *mem;

	mem = intel_memory_region_create(i915,
					 intel_graphics_stolen_res.start,
					 resource_size(&intel_graphics_stolen_res),
					 PAGE_SIZE, 0, 0, type, instance,
					 &i915_region_stolen_smem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-system");

	mem->private = true;

	return mem;
}

bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_object_stolen_ops;
}

bool i915_gem_stolen_initialized(const struct drm_i915_private *i915)
{
	return drm_mm_initialized(&i915->mm.stolen);
}

u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915)
{
	return i915->dsm.stolen.start;
}

u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915)
{
	return resource_size(&i915->dsm.stolen);
}

u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
				 const struct drm_mm_node *node)
{
	return i915->dsm.stolen.start + i915_gem_stolen_node_offset(node);
}

bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node)
{
	return drm_mm_node_allocated(node);
}

u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node)
{
	return node->start;
}

u64 i915_gem_stolen_node_size(const struct drm_mm_node *node)
{
	return node->size;
}