/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/elf-randomize.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

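/*
 * Align an address so that it shares its cache colour with the file
 * offset: round the address up to the aliasing boundary, then add the
 * colour bits of the page offset.
 */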
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

enum mmap_allocation_direction {UP, DOWN};

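/*
 * Find a free area for a new mapping, honouring the cache colouring
 * constraints of shared and file-backed mappings.  Depending on @dir the
 * search runs bottom-up from mmap_base or top-down below it, with a
 * bottom-up fallback if the top-down search fails.
 */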
static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

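	/*
	 * File-backed and shared mappings must be colour aligned so that
	 * all virtual mappings of a page fall on the same cache colour,
	 * avoiding dcache aliasing.
	 */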
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

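	/* Let the generic gap search find a suitably aligned free area. */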
	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this, but sched.h declares the function as
 * extern, so making it static here would result in a build error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

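/*
 * Return true if @kaddr lies in the directly mapped kernel segment and is
 * backed by a valid page frame.
 */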
bool __virt_addr_valid(const volatile void *kaddr)
{
	unsigned long vaddr = (unsigned long)kaddr;

	if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
		return false;

	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);