/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_object_frontbuffer.h"
#include "i915_gem_region.h"
#include "i915_gem_tiling.h"
#include "i915_scatterlist.h"

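/*
 * Replace the object's shmem backing store with a single contiguous,
 * DMA-coherent allocation: every page is copied into the new buffer, which
 * is then published as a one-element scatterlist. The buffer's kernel
 * address is stashed in the scatterlist page pointer, so from here on the
 * object is no longer struct-page backed.
 */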
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct scatterlist *sg;
	struct sg_table *st;
	dma_addr_t dma;
	void *vaddr;
	void *dst;
	int i;

	/* Contiguous chunk, with a single scatterlist element */
	if (overflows_type(obj->base.size, sg->length))
		return -E2BIG;

	if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/*
	 * Always aligning to the object size allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	vaddr = dma_alloc_coherent(obj->base.dev->dev,
				   roundup_pow_of_two(obj->base.size),
				   &dma, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_pci;

	if (sg_alloc_table(st, 1, GFP_KERNEL))
		goto err_st;

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

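	/*
	 * Stash the kernel address of the coherent buffer in the page
	 * pointer; sg_page() hands it back to the put_pages/pread/pwrite
	 * paths below.
	 */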
	sg_assign_page(sg, (struct page *)vaddr);
	sg_dma_address(sg) = dma;
	sg_dma_len(sg) = obj->base.size;

	dst = vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			goto err_sg;

		memcpy_from_page(dst, page, 0, PAGE_SIZE);
		drm_clflush_virt_range(dst, PAGE_SIZE);

		put_page(page);
		dst += PAGE_SIZE;
	}

	intel_gt_chipset_flush(to_gt(i915));

	/* We're no longer struct page backed */
	obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
	__i915_gem_object_set_pages(obj, st);

	return 0;

err_sg:
	sg_free_table(st);
err_st:
	kfree(st);
err_pci:
	dma_free_coherent(obj->base.dev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
	return -ENOMEM;
}

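/*
 * Tear down the contiguous allocation. If the object is dirty, copy each
 * page back out to the shmem filp (flushing the CPU cache first, since the
 * GPU may have written the buffer) before freeing the table and buffer.
 */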
void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	dma_addr_t dma = sg_dma_address(pages->sgl);
	void *vaddr = sg_page(pages->sgl);

	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		void *src = vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				/* keep src in step with the page index */
				src += PAGE_SIZE;
				continue;
			}

			drm_clflush_virt_range(src, PAGE_SIZE);
			memcpy_to_page(page, 0, src, PAGE_SIZE);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);

			src += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	dma_free_coherent(obj->base.dev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
}

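/*
 * pwrite into the contiguous buffer. Note that sg_page() returns the
 * stashed kernel address rather than a real struct page, so it must be
 * converted back to a byte pointer before the user's offset is applied.
 */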
int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args)
{
	void *vaddr = (void *)sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	/*
	 * We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	intel_gt_chipset_flush(to_gt(i915));

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
	return 0;
}

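/*
 * pread from the contiguous buffer, clflushing first so that any writes
 * the GPU made behind the CPU cache are visible.
 */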
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args)
{
	void *vaddr = (void *)sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	drm_clflush_virt_range(vaddr, args->size);
	if (copy_to_user(user_data, vaddr, args->size))
		return -EFAULT;

	return 0;
}

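/*
 * Exchange the object's shmem pages for the contiguous allocation, keeping
 * the old page set on hand so it can be reinstated if the copy fails.
 */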
static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	pages = __i915_gem_object_unset_pages(obj);

	err = i915_gem_object_get_pages_phys(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_object_put_pages_shmem(obj, pages);

	i915_gem_object_release_memory_region(obj);
	return 0;

err_xfer:
	if (!IS_ERR_OR_NULL(pages))
		__i915_gem_object_set_pages(obj, pages);
	return err;
}

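/*
 * Pin the object into a single contiguous chunk of memory, typically for
 * hardware that needs physically contiguous buffers (e.g. the physically
 * addressed cursor on old platforms). Purgeable, tiled, mapped or
 * already-pinned objects are rejected.
 */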
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	int err;

	assert_object_held(obj);

	if (align > obj->base.size)
		return -EINVAL;

	if (!i915_gem_object_is_shmem(obj))
		return -EINVAL;

	if (!i915_gem_object_has_struct_page(obj))
		return 0;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(obj->base.dev,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	if (i915_gem_object_has_tiling_quirk(obj))
		return -EFAULT;

	if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	return i915_gem_object_shmem_to_phys(obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif