// SPDX-License-Identifier: GPL-2.0+
/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * Contiguous Memory Allocator
 *
 *   The Contiguous Memory Allocator (CMA) makes it possible to
 *   allocate big contiguous chunks of memory after the system has
 *   booted.
 *
 * Why is it needed?
 *
 *   Various devices on embedded systems have no scatter-gather and/or
 *   IO map support and require contiguous blocks of memory to
 *   operate.  They include devices such as cameras, hardware video
 *   coders, etc.
 *
 *   Such devices often require big memory buffers (a full HD frame
 *   is, for instance, more than 2 megapixels large, i.e. more than 6
 *   MB of memory), which makes mechanisms such as kmalloc() or
 *   alloc_page() ineffective.
 *
 *   At the same time, a solution where a big memory region is
 *   reserved for a device is suboptimal since often more memory is
 *   reserved than strictly required and, moreover, the memory is
 *   inaccessible to the page allocator even if device drivers don't
 *   use it.
 *
 *   CMA tries to solve this issue by operating on memory regions
 *   from which only movable pages can be allocated.  This way, the
 *   kernel can use the memory for pagecache and, when a device driver
 *   requests it, the allocated pages can be migrated away.
 */

#define pr_fmt(fmt) "cma: " fmt

#include <asm/page.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/dma-map-ops.h>
#include <linux/cma.h>
#include <linux/nospec.h>

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

struct cma *dma_contiguous_default_area;

/*
 * Default global CMA area size can be defined in kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their
 * system should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes __initconst =
	(phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline __initdata = -1;
static phys_addr_t base_cmdline __initdata;
static phys_addr_t limit_cmdline __initdata;

static int __init early_cma(char *p)
{
	if (!p) {
		pr_err("Config string not provided\n");
		return -EINVAL;
	}

	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);
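
/*
 * Illustrative command lines accepted by the parser above; the format is
 * cma=size[@base[-limit]], with the usual memparse() suffixes such as M
 * and G (the addresses below are made up for the example):
 *
 *	cma=64M				64 MiB anywhere below the limit
 *	cma=64M@0x20000000		64 MiB placed exactly at 512 MiB
 *	cma=64M@0x20000000-0x40000000	64 MiB between 512 MiB and 1 GiB
 */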

#ifdef CONFIG_DMA_NUMA_CMA

static struct cma *dma_contiguous_numa_area[MAX_NUMNODES];
static phys_addr_t numa_cma_size[MAX_NUMNODES] __initdata;
static struct cma *dma_contiguous_pernuma_area[MAX_NUMNODES];
static phys_addr_t pernuma_size_bytes __initdata;

static int __init early_numa_cma(char *p)
{
	int nid, count = 0;
	unsigned long tmp;
	char *s = p;

	while (*s) {
		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
			break;

		if (s[count] == ':') {
			if (tmp >= MAX_NUMNODES)
				break;
			nid = array_index_nospec(tmp, MAX_NUMNODES);

			s += count + 1;
			tmp = memparse(s, &s);
			numa_cma_size[nid] = tmp;

			if (*s == ',')
				s++;
			else
				break;
		} else
			break;
	}

	return 0;
}
early_param("numa_cma", early_numa_cma);
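
/*
 * Illustrative numa_cma= usage, matching the <node>:<size>[,<node>:<size>]
 * format parsed above (node numbers and sizes are made up):
 *
 *	numa_cma=0:128M			128 MiB on node 0
 *	numa_cma=0:128M,1:64M		128 MiB on node 0, 64 MiB on node 1
 */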

static int __init early_cma_pernuma(char *p)
{
	pernuma_size_bytes = memparse(p, &p);
	return 0;
}
early_param("cma_pernuma", early_cma_pernuma);
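
/*
 * Illustrative cma_pernuma= usage: a single size reserved on every online
 * node, e.g. cma_pernuma=16M asks for a 16 MiB area per node.
 */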
#endif

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	unsigned long total_pages = PHYS_PFN(memblock_phys_mem_size());

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}
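
/*
 * Worked example (assuming 4 KiB pages): with 2 GiB of memory, total_pages
 * is 524288; CONFIG_CMA_SIZE_PERCENTAGE=10 yields 52428 pages, i.e. roughly
 * 204 MiB reserved for the global area.
 */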

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif

#ifdef CONFIG_DMA_NUMA_CMA
static void __init dma_numa_cma_reserve(void)
{
	int nid;

	for_each_node(nid) {
		int ret;
		char name[CMA_MAX_NAME];
		struct cma **cma;

		if (!node_online(nid)) {
			if (pernuma_size_bytes || numa_cma_size[nid])
				pr_warn("invalid node %d specified\n", nid);
			continue;
		}

		if (pernuma_size_bytes) {
			cma = &dma_contiguous_pernuma_area[nid];
			snprintf(name, sizeof(name), "pernuma%d", nid);
			ret = cma_declare_contiguous_nid(0, pernuma_size_bytes, 0, 0,
							 0, false, name, cma, nid);
			if (ret)
				pr_warn("%s: reservation failed: err %d, node %d\n",
					__func__, ret, nid);
		}

		if (numa_cma_size[nid]) {
			cma = &dma_contiguous_numa_area[nid];
			snprintf(name, sizeof(name), "numa%d", nid);
			ret = cma_declare_contiguous_nid(0, numa_cma_size[nid], 0, 0, 0, false,
							 name, cma, nid);
			if (ret)
				pr_warn("%s: reservation failed: err %d, node %d\n",
					__func__, ret, nid);
		}
	}
}
#else
static inline void __init dma_numa_cma_reserve(void)
{
}
#endif

/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	dma_numa_cma_reserve();

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}
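
/*
 * A hedged sketch of the expected call site: 32-bit ARM, for instance, calls
 * this from arm_memblock_init() after its other early reservations, roughly:
 *
 *	void __init arm_memblock_init(const struct machine_desc *mdesc)
 *	{
 *		...
 *		dma_contiguous_reserve(arm_dma_limit);
 *		...
 *	}
 */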

void __weak
dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
}

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas for
 * specific devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
					"reserved", res_cma);
	if (ret)
		return ret;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(cma_get_base(*res_cma),
				cma_get_size(*res_cma));

	return 0;
}
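
/*
 * A minimal sketch of platform code carving out a private area with the
 * helper above (the function and variable names are hypothetical):
 *
 *	static struct cma *foo_cma;
 *
 *	static void __init foo_reserve_cma(void)
 *	{
 *		if (dma_contiguous_reserve_area(SZ_16M, 0, 0, &foo_cma, false))
 *			pr_warn("foo: CMA reservation failed\n");
 *	}
 */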

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation.
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device specific contiguous memory area if available, or the default
 * global one otherwise. Requires the architecture specific dev_get_cma_area()
 * helper function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int align, bool no_warn)
{
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return cma_release(dev_get_cma_area(dev), pages, count);
}
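
/*
 * Illustrative driver-side usage of the pair above (the device pointer and
 * sizes are hypothetical): @count is in pages and @align is a PAGE_SIZE
 * order, so a 4 MiB buffer aligned to 1 MiB could be obtained with:
 *
 *	size_t count = SZ_4M >> PAGE_SHIFT;
 *	struct page *pages;
 *
 *	pages = dma_alloc_from_contiguous(dev, count, get_order(SZ_1M), false);
 *	if (pages)
 *		dma_release_from_contiguous(dev, pages, count);
 */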

static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)
{
	unsigned int align = min(get_order(size), CONFIG_CMA_ALIGNMENT);

	return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN);
}

/**
 * dma_alloc_contiguous() - allocate contiguous pages
 * @dev:   Pointer to device for which the allocation is performed.
 * @size:  Requested allocation size.
 * @gfp:   Allocation flags.
 *
 * This function tries the device specific contiguous memory area first, then
 * the per-NUMA CMA areas; if those allocations fail, it falls back to the
 * default global area.
 *
 * Note that single-page allocations bypass the per-NUMA and global areas:
 * addresses within one page are always contiguous, so there is no need to
 * consume CMA pages for them, and skipping them also reduces fragmentation.
 */
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
{
#ifdef CONFIG_DMA_NUMA_CMA
	int nid = dev_to_node(dev);
#endif

	/* CMA can be used only in the context which permits sleeping */
	if (!gfpflags_allow_blocking(gfp))
		return NULL;
	if (dev->cma_area)
		return cma_alloc_aligned(dev->cma_area, size, gfp);
	if (size <= PAGE_SIZE)
		return NULL;

#ifdef CONFIG_DMA_NUMA_CMA
	if (nid != NUMA_NO_NODE && !(gfp & (GFP_DMA | GFP_DMA32))) {
		struct cma *cma = dma_contiguous_pernuma_area[nid];
		struct page *page;

		if (cma) {
			page = cma_alloc_aligned(cma, size, gfp);
			if (page)
				return page;
		}

		cma = dma_contiguous_numa_area[nid];
		if (cma) {
			page = cma_alloc_aligned(cma, size, gfp);
			if (page)
				return page;
		}
	}
#endif
	if (!dma_contiguous_default_area)
		return NULL;

	return cma_alloc_aligned(dma_contiguous_default_area, size, gfp);
}

/**
 * dma_free_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @page:  Pointer to the allocated pages.
 * @size:  Size of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_contiguous(). Since
 * cma_release() returns false when the provided pages do not belong to a
 * contiguous area and true otherwise, this function falls back to
 * __free_pages() on a false return.
 */
void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* if dev has its own cma, free page from there */
	if (dev->cma_area) {
		if (cma_release(dev->cma_area, page, count))
			return;
	} else {
		/*
		 * otherwise, page is from either per-numa cma or default cma
		 */
#ifdef CONFIG_DMA_NUMA_CMA
		if (cma_release(dma_contiguous_pernuma_area[page_to_nid(page)],
					page, count))
			return;
		if (cma_release(dma_contiguous_numa_area[page_to_nid(page)],
					page, count))
			return;
#endif
		if (cma_release(dma_contiguous_default_area, page, count))
			return;
	}

	/* not in any cma, free from buddy */
	__free_pages(page, get_order(size));
}
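
/*
 * Illustrative usage of the pair above (hypothetical caller inside the DMA
 * API internals): pass a byte size and a blocking-capable gfp mask. With a
 * non-blocking mask such as GFP_ATOMIC, dma_alloc_contiguous() returns NULL
 * and the caller must fall back to another allocator.
 *
 *	struct page *page;
 *
 *	page = dma_alloc_contiguous(dev, PAGE_ALIGN(size), GFP_KERNEL);
 *	if (page)
 *		dma_free_contiguous(dev, page, PAGE_ALIGN(size));
 */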

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#undef pr_fmt
#define pr_fmt(fmt) fmt

static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	dev->cma_area = rmem->priv;
	return 0;
}

static void rmem_cma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev->cma_area = NULL;
}

static const struct reserved_mem_ops rmem_cma_ops = {
	.device_init	= rmem_cma_device_init,
	.device_release = rmem_cma_device_release,
};

static int __init rmem_cma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;
	bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
	struct cma *cma;
	int err;

	if (size_cmdline != -1 && default_cma) {
		pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n",
			rmem->name);
		return -EBUSY;
	}

	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if (!IS_ALIGNED(rmem->base | rmem->size, CMA_MIN_ALIGNMENT_BYTES)) {
		pr_err("Reserved memory: incorrect alignment of CMA region\n");
		return -EINVAL;
	}

	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
	if (err) {
		pr_err("Reserved memory: unable to setup CMA region\n");
		return err;
	}
	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(rmem->base, rmem->size);

	if (default_cma)
		dma_contiguous_default_area = cma;

	rmem->ops = &rmem_cma_ops;
	rmem->priv = cma;

	pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);

	return 0;
}
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
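
/*
 * Example device-tree node matched by the declaration above: the region must
 * be "reusable" and must not be "no-map", per the checks in rmem_cma_setup()
 * (the size here is illustrative):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		linux,cma {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;
 *			linux,cma-default;
 *		};
 *	};
 */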
#endif