// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for DMA ops implementations.  These generally rely on the fact that
 * the allocated memory contains normal pages in the direct kernel mapping.
 */
#include <linux/dma-map-ops.h>
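
/*
 * Resolve the kernel virtual address of a DMA buffer to its backing
 * struct page.  Remapped buffers (e.g. non-coherent allocations) live in
 * vmalloc space and need vmalloc_to_page(); everything else sits in the
 * direct kernel mapping, where virt_to_page() is enough.
 */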
static struct page *dma_common_vaddr_to_page(void *cpu_addr)
{
	if (is_vmalloc_addr(cpu_addr))
		return vmalloc_to_page(cpu_addr);
	return virt_to_page(cpu_addr);
}

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	struct page *page = dma_common_vaddr_to_page(cpu_addr);
	int ret;
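
	/*
	 * The buffer consists of physically contiguous pages (see the
	 * comment at the top of this file), so a single scatterlist entry
	 * covering the whole page-aligned size is sufficient.
	 */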
	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	struct page *page = dma_common_vaddr_to_page(cpu_addr);
	int ret = -ENXIO;
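
	/*
	 * dma_pgprot() adjusts the protection bits (e.g. to uncached or
	 * write-combine) so that the userspace mapping matches the
	 * attributes of the kernel-side allocation.
	 */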
	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
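
	/*
	 * Buffers from a per-device coherent pool are handled entirely by
	 * dma_mmap_from_dev_coherent(), including the return value.
	 */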
	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;
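
	/*
	 * vm_pgoff is an offset in pages into the buffer; reject mappings
	 * that start or end beyond the allocation.
	 */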
	if (off >= count || user_count > count - off)
		return -ENXIO;

	return remap_pfn_range(vma, vma->vm_start,
			page_to_pfn(page) + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* CONFIG_MMU */
}
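
/*
 * Illustrative sketch only: DMA ops implementations typically plug the two
 * helpers above straight into their struct dma_map_ops ("foo_dma_ops" is a
 * made-up name here):
 *
 *	static const struct dma_map_ops foo_dma_ops = {
 *		...
 *		.mmap		= dma_common_mmap,
 *		.get_sgtable	= dma_common_get_sgtable,
 *	};
 */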

struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct page *page;
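
	/*
	 * Prefer a CMA-backed contiguous allocation; if none is available,
	 * fall back to the page allocator on the device's NUMA node.
	 */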
	page = dma_alloc_contiguous(dev, size, gfp);
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size));
	if (!page)
		return NULL;
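
	/*
	 * DMA_ATTR_SKIP_CPU_SYNC: the freshly allocated buffer contains no
	 * data to sync yet, and users of this allocation are expected to
	 * manage ownership with explicit dma_sync_*() calls anyway.
	 */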
	*dma_handle = ops->map_page(dev, page, 0, size, dir,
				    DMA_ATTR_SKIP_CPU_SYNC);
	if (*dma_handle == DMA_MAPPING_ERROR) {
		dma_free_contiguous(dev, page, size);
		return NULL;
	}

	memset(page_address(page), 0, size);
	return page;
}
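
/*
 * Free pages obtained from dma_common_alloc_pages(); the same size and
 * direction must be passed back.  Illustrative sketch of how the pair is
 * typically wired into a dma_map_ops instance ("foo_dma_ops" is a made-up
 * name):
 *
 *	static const struct dma_map_ops foo_dma_ops = {
 *		...
 *		.alloc_pages	= dma_common_alloc_pages,
 *		.free_pages	= dma_common_free_pages,
 *	};
 */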
void dma_common_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
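
	/*
	 * ->unmap_page is an optional dma_map_ops method, hence the NULL
	 * check before undoing the mapping made at allocation time.
	 */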
	if (ops->unmap_page)
		ops->unmap_page(dev, dma_handle, size, dir,
				DMA_ATTR_SKIP_CPU_SYNC);
	dma_free_contiguous(dev, page, size);
}