// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "internal.h"
#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in units of order_per_bit.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
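
/*
 * Illustrative note (added for clarity, not from the original source): the
 * bitmap tracks the area at a granularity of 2^order_per_bit pages per bit.
 * For example, with order_per_bit = 2 (4 pages per bit), base_pfn = 0x10006
 * and a request with align_order = 4:
 *
 *	cma_bitmap_aligned_mask()        -> (1 << (4 - 2)) - 1 = 3
 *	cma_bitmap_aligned_offset()      -> (0x10006 & 0xf) >> 2 = 1
 *	cma_bitmap_pages_to_bits(cma, 5) -> ALIGN(5, 4) >> 2 = 2
 *
 * i.e. a 5-page request consumes two bits, and the bitmap search in
 * cma_alloc() must account for the base PFN's offset from the requested
 * alignment.
 */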

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned long count)
{
	unsigned long bitmap_no, bitmap_count;
	unsigned long flags;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	spin_lock_irqsave(&cma->lock, flags);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	spin_unlock_irqrestore(&cma->lock, flags);
}

static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	/*
	 * alloc_contig_range() requires the pfn range specified to be in the
	 * same zone. Simplify by forcing the entire CMA resv range to be in the
	 * same zone.
	 */
	WARN_ON_ONCE(!pfn_valid(base_pfn));
	zone = page_zone(pfn_to_page(base_pfn));
	for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
		WARN_ON_ONCE(!pfn_valid(pfn));
		if (page_zone(pfn_to_page(pfn)) != zone)
			goto not_in_zone;
	}

	for (pfn = base_pfn; pfn < base_pfn + cma->count;
	     pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));

	spin_lock_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	/* Expose all pages to the buddy, they are useless for CMA. */
	if (!cma->reserve_pages_on_error) {
		for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
			free_reserved_page(pfn_to_page(pfn));
	}
	totalcma_pages -= cma->count;
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
	return;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

void __init cma_reserve_pages_on_error(struct cma *cma)
{
	cma->reserve_pages_on_error = true;
}

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(CMA_MIN_ALIGNMENT_PAGES, 1 << order_per_bit))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, "%s", name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
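
/*
 * Usage sketch (illustrative only, not part of this file; the setup function
 * and the "my_area" name are hypothetical). A caller that has already carved
 * out a suitably aligned memblock reservation could register it with:
 *
 *	static struct cma *my_cma;
 *
 *	static int __init my_reserved_mem_setup(phys_addr_t base, phys_addr_t size)
 *	{
 *		// base/size must already be reserved in memblock and aligned
 *		// to CMA_MIN_ALIGNMENT_BYTES.
 *		return cma_init_reserved_mem(base, size, 0, "my_area", &my_cma);
 *	}
 */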

/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_NUMA))
		nid = NUMA_NO_NODE;

	/* Sanitise input arguments. */
	alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * If there is enough memory, try a bottom-up allocation first.
		 * It will place the new cma area close to the start of the node
		 * and guarantee that the compaction is moving pages out of the
		 * cma area and not into it.
		 * Avoid using first 4GB to not interfere with constrained zones
		 * like DMA/DMA32.
		 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
			memblock_set_bottom_up(true);
			addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
							limit, nid, true);
			memblock_set_bottom_up(false);
		}
#endif

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (!addr && base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped or accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa on node %d\n", (unsigned long)size / SZ_1M,
		&base, nid);
	return 0;

free_mem:
	memblock_phys_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB on node %d\n", (unsigned long)size / SZ_1M,
	       nid);
	return ret;
}
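
/*
 * Usage sketch (illustrative only; the size and name are hypothetical): an
 * architecture could reserve a 16 MiB CMA area anywhere below 4 GiB during
 * early boot with
 *
 *	struct cma *cma;
 *	int ret;
 *
 *	ret = cma_declare_contiguous_nid(0, SZ_16M, SZ_4G, 0, 0, false,
 *					 "example", &cma, NUMA_NO_NODE);
 *
 * which lets memblock pick a suitably aligned base and registers the area for
 * activation by cma_init_reserved_areas().
 */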

static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	spin_lock_irq(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	spin_unlock_irq(&cma->lock);
}
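
/*
 * Example of the output this produces (values are illustrative, with
 * order_per_bit == 0 so bits correspond 1:1 to pages):
 *
 *	cma: number of available pages: 4@12+256@1024=> 260 free of 2048 total pages
 */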

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates pages from a specific contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, unsigned long count,
		       unsigned int align, bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	unsigned long i;
	struct page *page = NULL;
	int ret = -ENOMEM;
	const char *name = cma ? cma->name : NULL;

	trace_cma_alloc_start(name, count, align);

	if (!cma || !cma->count || !cma->bitmap)
		return page;

	pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__,
		(void *)cma, cma->name, count, align);

	if (!count)
		return page;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return page;

	for (;;) {
		spin_lock_irq(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			spin_unlock_irq(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		spin_unlock_irq(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
				     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at pfn 0x%lx %p is busy, retrying\n",
			 __func__, pfn, pfn_to_page(pfn));

		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
					   count, align);
		/* try again with a slightly different memory target */
		start = bitmap_no + mask + 1;
	}

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(nth_page(page, i));
	}

	if (ret && !no_warn) {
		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
				   __func__, cma->name, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	trace_cma_alloc_finish(name, pfn, page, count, align, ret);
	if (page) {
		count_vm_event(CMA_ALLOC_SUCCESS);
		cma_sysfs_account_success_pages(cma, count);
	} else {
		count_vm_event(CMA_ALLOC_FAIL);
		cma_sysfs_account_fail_pages(cma, count);
	}

	return page;
}
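
/*
 * Usage sketch (illustrative only; the surrounding driver context is
 * hypothetical). With 4 KiB pages, this requests 256 pages aligned to 1 MiB
 * and releases them again:
 *
 *	struct page *page;
 *
 *	page = cma_alloc(cma, 256, 8, false);	// align = 8 -> 2^8 pages = 1 MiB
 *	if (!page)
 *		return -ENOMEM;
 *	// ... use the pages ...
 *	cma_release(cma, page, 256);
 *
 * Most users reach this path indirectly, e.g. via the DMA contiguous
 * allocator, rather than by calling cma_alloc() directly.
 */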

bool cma_pages_valid(struct cma *cma, const struct page *pages,
		     unsigned long count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
		pr_debug("%s(page %p, count %lu)\n", __func__,
						(void *)pages, count);
		return false;
	}

	return true;
}

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages,
		 unsigned long count)
{
	unsigned long pfn;

	if (!cma_pages_valid(cma, pages, count))
		return false;

	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	cma_sysfs_account_release_pages(cma, count);
	trace_cma_release(cma->name, pfn, pages, count);

	return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}

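/*
 * Usage sketch (illustrative only; the callback name is hypothetical):
 *
 *	static int print_area(struct cma *cma, void *data)
 *	{
 *		phys_addr_t base = cma_get_base(cma);
 *
 *		pr_info("%s: base %pa, size %lu bytes\n",
 *			cma_get_name(cma), &base, cma_get_size(cma));
 *		return 0;	// non-zero stops the iteration
 *	}
 *
 *	cma_for_each_area(print_area, NULL);
 */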