// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmap.c
 *
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/types.h>

#include <asm/cpufeature.h>
#include <asm/page.h>

static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_READONLY,
	[VM_WRITE | VM_READ]				= PAGE_READONLY,
	/* PAGE_EXECONLY if Enhanced PAN */
	[VM_EXEC]					= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_WRITE]				= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_READONLY_EXEC,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	/* PAGE_EXECONLY if Enhanced PAN */
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_EXEC
};
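
/*
 * Illustrative example: a private PROT_READ|PROT_WRITE mapping indexes
 * the table with VM_WRITE | VM_READ and starts out as PAGE_READONLY;
 * write access is only granted later, by the copy-on-write fault path.
 * Only the VM_SHARED | VM_WRITE entries map to a writable protection up
 * front.
 */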

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This might go
 * away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	/*
	 * Check whether addr is covered by a memory region without the
	 * MEMBLOCK_NOMAP attribute, and whether that region covers the
	 * entire range. In theory, this could lead to false negatives
	 * if the range is covered by distinct but adjacent memory regions
	 * that only differ in other attributes. However, few such
	 * attributes have been defined, and it is debatable whether it
	 * follows that /dev/mem read() calls should be able to traverse
	 * such boundaries.
	 */
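	/*
	 * Note the asymmetry below: memblock_is_region_memory() succeeds
	 * only if the whole [addr, addr + size) range sits in a single
	 * memblock.memory region, whereas memblock_is_map_memory() checks
	 * the MEMBLOCK_NOMAP attribute just for the region containing addr.
	 */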
	return memblock_is_region_memory(addr, size) &&
	       memblock_is_map_memory(addr);
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
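	/*
	 * E.g. with 48-bit physical addressing, PHYS_MASK is
	 * (1UL << 48) - 1 and ~PHYS_MASK covers bits 48-63: the mapping is
	 * rejected if its end, (pfn << PAGE_SHIFT) + size, sets any of
	 * those bits.
	 */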
	return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
}

static int __init adjust_protection_map(void)
{
	/*
	 * With Enhanced PAN we can honour the execute-only permissions as
	 * there is no PAN override with such mappings.
	 */
	if (cpus_have_cap(ARM64_HAS_EPAN)) {
		protection_map[VM_EXEC] = PAGE_EXECONLY;
		protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
	}

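	/*
	 * With LPA2, descriptor bits [9:8] hold output address bits
	 * [51:50] rather than the shareability field, and shareability is
	 * controlled through TCR_EL1 instead, so PTE_SHARED must not be
	 * set in the page protections.
	 */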
	if (lpa2_is_enabled())
		for (int i = 0; i < ARRAY_SIZE(protection_map); i++)
			pgprot_val(protection_map[i]) &= ~PTE_SHARED;

	return 0;
}
arch_initcall(adjust_protection_map);
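
/*
 * Illustrative effect of the above: with EPAN, mmap(NULL, len, PROT_EXEC,
 * MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) yields a mapping that user space can
 * execute but not read, and which PAN also keeps the kernel from reading
 * through the user address.
 */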

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	pteval_t prot = pgprot_val(protection_map[vm_flags &
				   (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);

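	/*
	 * PTE_GP marks a BTI guarded page: indirect branches into it must
	 * land on a BTI-compatible landing pad.
	 */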
	if (vm_flags & VM_ARM64_BTI)
		prot |= PTE_GP;

	/*
	 * There are two conditions required for returning a Normal Tagged
	 * memory type: (1) the user requested it via PROT_MTE passed to
	 * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
	 * register (1) as VM_MTE in the vma->vm_flags and (2) as
	 * VM_MTE_ALLOWED. Note that the latter can only be set during the
	 * mmap() call since mprotect() does not accept MAP_* flags.
	 * Checking for VM_MTE only is sufficient since arch_validate_flags()
	 * does not permit (VM_MTE & !VM_MTE_ALLOWED).
	 */
	if (vm_flags & VM_MTE)
		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);

	return __pgprot(prot);
}
EXPORT_SYMBOL(vm_get_page_prot);
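
/*
 * Typical caller, e.g. the core mmap path in mm/mmap.c:
 *
 *	vma->vm_page_prot = vm_get_page_prot(vm_flags);
 *
 * so the BTI and MTE attributes above end up in every PTE installed for
 * the VMA.
 */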