// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static struct dma_heap *sys_heap;
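
/*
 * Per-buffer state: the owning heap, the list of device attachments and the
 * mutex protecting it, the buffer length, the sg_table describing the backing
 * pages, and the kernel-mapping refcount with its cached vmap address.
 */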
struct system_heap_buffer {
	struct dma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct sg_table sg_table;
	int vmap_cnt;
	void *vaddr;
};
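
/*
 * Per-attachment state: the attaching device, that device's private copy of
 * the sg_table, its node on the buffer's attachment list, and whether the
 * table is currently DMA-mapped.
 */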
struct dma_heap_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
	bool mapped;
};
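
/*
 * High-order attempts must fail fast and quietly: they skip direct reclaim
 * and warnings so that a failed large allocation simply falls back to the
 * next smaller order, with order-0 as the allocation of last resort.
 */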
#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO)
#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
				| __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
/*
 * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
 * to match with the sizes often found in IOMMUs. Using order 4 pages instead
 * of order 0 pages can significantly improve the performance of many IOMMUs
 * by reducing TLB pressure and time spent updating page tables.
 */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)
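
/*
 * Duplicate the buffer's sg_table so that each attachment gets a private
 * copy it can DMA-map without disturbing the buffer's own table.
 */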
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sgtable_sg(table, sg, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
		new_sg = sg_next(new_sg);
	}

	return new_table;
}
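
/*
 * Give the new attachment its own copy of the sg_table and add it to the
 * buffer's attachment list so CPU-access syncs can reach every device.
 */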
static int system_heap_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	struct sg_table *table;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(&buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}
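
/* Remove the attachment from the buffer's list and free its private sg_table. */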
static void system_heap_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(a->table);
	kfree(a->table);
	kfree(a);
}
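
/* DMA-map the attachment's private sg_table for the attaching device. */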
static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(ret);

	a->mapped = true;
	return table;
}

static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *table,
				      enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}
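
/*
 * Before CPU access, invalidate any kernel vmap range and sync every mapped
 * attachment for the CPU; after CPU access, flush the vmap range and sync
 * the attachments back for the devices.
 */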
static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
						enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					      enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}
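
/*
 * Map the backing pages into userspace one PAGE_SIZE at a time, starting at
 * the VMA's page offset and stopping when the end of the VMA is reached.
 */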
static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table = &buffer->sg_table;
	unsigned long addr = vma->vm_start;
	struct sg_page_iter piter;
	int ret;

	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
		struct page *page = sg_page_iter_page(&piter);

		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}
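
/* Collect the backing pages into an array and vmap them into the kernel. */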
static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{
	struct sg_table *table = &buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;
	struct sg_page_iter piter;
	void *vaddr;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	for_each_sgtable_page(table, &piter, 0) {
		WARN_ON(tmp - pages >= npages);
		*tmp++ = sg_page_iter_page(&piter);
	}

	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}
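
/*
 * Return a kernel mapping of the buffer, creating it on first use and
 * refcounting it so repeated vmap calls share the cached address.
 */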
static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = system_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}
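
/*
 * Release the dma-buf: free the backing pages at their original allocation
 * order, tear down the sg_table and free the buffer bookkeeping.
 */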
static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table;
	struct scatterlist *sg;
	int i;

	table = &buffer->sg_table;
	for_each_sgtable_sg(table, sg, i) {
		struct page *page = sg_page(sg);

		__free_pages(page, compound_order(page));
	}
	sg_free_table(table);
	kfree(buffer);
}

static const struct dma_buf_ops system_heap_buf_ops = {
	.attach = system_heap_attach,
	.detach = system_heap_detach,
	.map_dma_buf = system_heap_map_dma_buf,
	.unmap_dma_buf = system_heap_unmap_dma_buf,
	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
	.mmap = system_heap_mmap,
	.vmap = system_heap_vmap,
	.vunmap = system_heap_vunmap,
	.release = system_heap_dma_buf_release,
};
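
/*
 * Allocate the largest chunk (1MB, 64K, then 4K) that still fits within the
 * remaining size and does not exceed max_order, so a failed high-order
 * allocation falls back to smaller pages instead of failing outright.
 */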
static struct page *alloc_largest_available(unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < (PAGE_SIZE << orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_pages(order_flags[i], orders[i]);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}
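
/*
 * Heap allocation callback: gather pages largest-order first, build the
 * buffer's sg_table from them and export the result as a new dma-buf.
 */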
static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
					    unsigned long len,
					    unsigned long fd_flags,
					    unsigned long heap_flags)
{
	struct system_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	unsigned long size_remaining = len;
	unsigned int max_order = orders[0];
	struct dma_buf *dmabuf;
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i, ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->heap = heap;
	buffer->len = len;

	INIT_LIST_HEAD(&pages);
	i = 0;
	while (size_remaining > 0) {
		/*
		 * Avoid trying to allocate memory if the process
		 * has been killed by SIGKILL
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto free_buffer;
		}

		page = alloc_largest_available(size_remaining, max_order);
		if (!page)
			goto free_buffer;

		list_add_tail(&page->lru, &pages);
		size_remaining -= page_size(page);
		max_order = compound_order(page);
		i++;
	}

	table = &buffer->sg_table;
	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_buffer;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, page_size(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &system_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	for_each_sgtable_sg(table, sg, i) {
		struct page *p = sg_page(sg);

		__free_pages(p, compound_order(p));
	}
	sg_free_table(table);
free_buffer:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		__free_pages(page, compound_order(page));
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops system_heap_ops = {
	.allocate = system_heap_allocate,
};
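
/* Register the "system" heap with the dma-heap framework at module init. */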
static int system_heap_create(void)
{
	struct dma_heap_export_info exp_info;

	exp_info.name = "system";
	exp_info.ops = &system_heap_ops;
	exp_info.priv = NULL;

	sys_heap = dma_heap_add(&exp_info);
	if (IS_ERR(sys_heap))
		return PTR_ERR(sys_heap);

	return 0;
}
module_init(system_heap_create);