// Copyright 2016 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <unittest/unittest.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>
#include <zircon/syscalls/object.h>

#if defined(__x86_64__)
#include <cpuid.h>
#endif

namespace {

#if defined(__x86_64__)

// This is based on code from kernel/ which isn't usable by code in system/.
enum { X86_CPUID_ADDR_WIDTH = 0x80000008 };

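// Returns the number of bits in a linear (virtual) address: CPUID leaf
// 0x80000008 reports this width in bits 15:8 of EAX.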
uint32_t x86_linear_address_width() {
    uint32_t eax, ebx, ecx, edx;
    __cpuid(X86_CPUID_ADDR_WIDTH, eax, ebx, ecx, edx);
    return (eax >> 8) & 0xff;
}

#endif

bool address_space_limits_test() {
    BEGIN_TEST;

#if defined(__x86_64__)
    size_t page_size = getpagesize();
    zx_handle_t vmo;
    EXPECT_EQ(ZX_OK, zx_vmo_create(page_size, 0, &vmo), "vm_object_create");
    EXPECT_NE(vmo, ZX_HANDLE_INVALID, "vm_object_create");

    // This is the lowest non-canonical address on x86-64.  We want to
    // make sure that userland cannot map a page immediately below
    // this address.  See docs/sysret_problem.md for an explanation of
    // the reason.
    uintptr_t noncanon_addr =
        ((uintptr_t) 1) << (x86_linear_address_width() - 1);
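    // For example, with 48-bit linear addresses this is 0x0000800000000000,
    // the start of the non-canonical hole.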

    zx_info_vmar_t vmar_info;
    zx_status_t status = zx_object_get_info(zx_vmar_root_self(), ZX_INFO_VMAR,
                                            &vmar_info, sizeof(vmar_info),
                                            NULL, NULL);
    EXPECT_EQ(ZX_OK, status, "get_info");

    // Check that we cannot map a page ending at |noncanon_addr|.
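    // With ZX_VM_SPECIFIC, the offset passed to zx_vmar_map() is relative to
    // the base of the VMAR, so subtract |vmar_info.base| from the absolute
    // address we want.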
    size_t offset = noncanon_addr - page_size - vmar_info.base;
    uintptr_t addr;
    status = zx_vmar_map(
        zx_vmar_root_self(),
        ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
        offset, vmo, 0, page_size, &addr);
    EXPECT_EQ(ZX_ERR_INVALID_ARGS, status, "vm_map");

    // Check that we can map at the next address down.  This helps to
    // verify that the previous check didn't fail for some unexpected
    // reason.
    offset = noncanon_addr - page_size * 2 - vmar_info.base;
    status = zx_vmar_map(
        zx_vmar_root_self(),
        ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
        offset, vmo, 0, page_size, &addr);
    EXPECT_EQ(ZX_OK, status, "vm_map");

    // Check that ZX_VM_SPECIFIC fails on already-mapped locations.
    // Otherwise, the previous mapping could have overwritten
    // something that was in use, which could cause problems later.
    status = zx_vmar_map(
        zx_vmar_root_self(),
        ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
        offset, vmo, 0, page_size, &addr);
    EXPECT_EQ(ZX_ERR_NO_MEMORY, status, "vm_map");

    // Clean up.
    status = zx_vmar_unmap(zx_vmar_root_self(), addr, page_size);
    EXPECT_EQ(ZX_OK, status, "vm_unmap");
    status = zx_handle_close(vmo);
    EXPECT_EQ(ZX_OK, status, "handle_close");
#endif

    END_TEST;
}

bool mmap_zerofilled_test() {
    BEGIN_TEST;

    char* addr = (char*)mmap(NULL, 16384, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
    ASSERT_NE(MAP_FAILED, addr, "mmap failed");
    for (size_t i = 0; i < 16384; i++) {
        EXPECT_EQ('\0', addr[i], "non-zero memory found");
    }
    int unmap_result = munmap(addr, 16384);
    EXPECT_EQ(0, unmap_result, "munmap should have succeeded");

    END_TEST;
}

bool mmap_len_test() {
    BEGIN_TEST;

    uint32_t* addr = (uint32_t*)mmap(NULL, 0, PROT_READ, MAP_PRIVATE|MAP_ANON, -1, 0);
    auto test_errno = errno;
    EXPECT_EQ(MAP_FAILED, addr, "mmap should fail when len == 0");
    EXPECT_EQ(EINVAL, test_errno, "mmap errno should be EINVAL when len == 0");

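    // No address space can satisfy a request of PTRDIFF_MAX bytes or more,
    // so mmap should report resource exhaustion rather than a bad argument.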
    addr = (uint32_t*)mmap(NULL, PTRDIFF_MAX, PROT_READ, MAP_PRIVATE|MAP_ANON, -1, 0);
    test_errno = errno;
    EXPECT_EQ(MAP_FAILED, addr, "mmap should fail when len >= PTRDIFF_MAX");
    EXPECT_EQ(ENOMEM, test_errno, "mmap errno should be ENOMEM when len >= PTRDIFF_MAX");

    END_TEST;
}

bool mmap_offset_test() {
    BEGIN_TEST;

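    // The offset must be a multiple of the page size; an offset of 4 is not,
    // so mmap should reject it with EINVAL.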
    uint32_t* addr = (uint32_t*)mmap(NULL, sizeof(uint32_t), PROT_READ, MAP_PRIVATE|MAP_ANON, -1, 4);
    auto test_errno = errno;
    EXPECT_EQ(MAP_FAILED, addr, "mmap should fail for unaligned offset");
    EXPECT_EQ(EINVAL, test_errno, "mmap errno should be EINVAL for unaligned offset");

    END_TEST;
}

// Define a little fragment of code that we can copy.
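// It implements add(a, b): on x86-64 the SysV ABI passes the arguments in
// %rdi and %rsi and returns the result in %rax; on AArch64 the arguments
// arrive in x0 and x1 and the result is returned in x0.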
extern "C" const uint8_t begin_add[], end_add[];
__asm__(".pushsection .rodata.add_code\n"
        ".globl begin_add\n"
        "begin_add:"
#ifdef __x86_64__
        "mov %rdi, %rax\n"
        "add %rsi, %rax\n"
        "ret\n"
#elif defined(__aarch64__)
        "add x0, x0, x1\n"
        "ret\n"
#else
# error "what machine?"
#endif
        ".globl end_add\n"
        "end_add:"
        ".popsection");

bool mmap_PROT_EXEC_test() {
    BEGIN_TEST;

    // Allocate a page that will later be made executable.
    size_t page_size = getpagesize();
    void* addr = mmap(NULL, page_size, PROT_READ|PROT_WRITE,
                      MAP_PRIVATE|MAP_ANON, -1, 0);
    EXPECT_NE(MAP_FAILED, addr,
              "mmap should have succeeded for PROT_READ|PROT_WRITE");

    // Copy over code from our address space into the newly allocated memory.
    ASSERT_LE(static_cast<size_t>(end_add - begin_add), page_size);
    memcpy(addr, begin_add, end_add - begin_add);

    // Mark the code executable.
    int result = mprotect(addr, page_size, PROT_READ|PROT_EXEC);
    EXPECT_EQ(0, result, "Unable to mark pages PROT_READ|PROT_EXEC");

    // Execute the code from our new location.
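    // Cast through uintptr_t: C++ does not portably allow converting an
    // object pointer (void*) directly to a function pointer.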
    auto add_func = reinterpret_cast<int (*)(int, int)>(
        reinterpret_cast<uintptr_t>(addr));
    int add_result = add_func(1, 2);

    // Check that the result of adding 1+2 is 3.
    EXPECT_EQ(3, add_result);

    // Deallocate pages.
    result = munmap(addr, page_size);
    EXPECT_EQ(0, result, "munmap unexpectedly failed");

    END_TEST;
}

bool mmap_prot_test() {
    BEGIN_TEST;

    volatile uint32_t* addr = (uint32_t*)mmap(NULL, sizeof(uint32_t), PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
    EXPECT_NE(MAP_FAILED, addr, "mmap should have succeeded for PROT_NONE");

    addr = (uint32_t*)mmap(NULL, sizeof(uint32_t), PROT_READ, MAP_PRIVATE|MAP_ANON, -1, 0);
    EXPECT_NE(MAP_FAILED, addr, "mmap failed for read-only alloc");

    // A private read-only anonymous mapping is somewhat pointless, but we
    // should still be able to read from it.
    EXPECT_EQ(*addr, *addr, "could not read from mmaped address");

    addr = (uint32_t*)mmap(NULL, sizeof(uint32_t), PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
    EXPECT_NE(MAP_FAILED, addr, "mmap failed for read-write alloc");

    // Now we test writing to the mapped memory, and verify that we can read
    // it back.
    *addr = 5678u;
    EXPECT_EQ(5678u, *addr, "writing to address returned by mmap failed");

    END_TEST;
}

bool mmap_flags_test() {
    BEGIN_TEST;

    uint32_t* addr = (uint32_t*)mmap(NULL, sizeof(uint32_t), PROT_READ, MAP_ANON, -1, 0);
    auto test_errno = errno;
    EXPECT_EQ(MAP_FAILED, addr, "mmap should fail without MAP_PRIVATE or MAP_SHARED");
    EXPECT_EQ(EINVAL, test_errno, "mmap errno should be EINVAL with bad flags");

    addr = (uint32_t*)mmap(NULL, sizeof(uint32_t), PROT_READ, MAP_PRIVATE|MAP_SHARED|MAP_ANON, -1, 0);
    test_errno = errno;
    EXPECT_EQ(MAP_FAILED, addr, "mmap should fail with both MAP_PRIVATE and MAP_SHARED");
    EXPECT_EQ(EINVAL, test_errno, "mmap errno should be EINVAL with bad flags");

    addr = (uint32_t*)mmap(NULL, sizeof(uint32_t), PROT_READ, MAP_PRIVATE|MAP_ANON, -1, 0);
    EXPECT_NE(MAP_FAILED, addr, "mmap failed with MAP_PRIVATE flags");

    addr = (uint32_t*)mmap(NULL, sizeof(uint32_t), PROT_READ, MAP_SHARED|MAP_ANON, -1, 0);
    EXPECT_NE(MAP_FAILED, addr, "mmap failed with MAP_SHARED flags");

    END_TEST;
}

bool mprotect_test() {
    BEGIN_TEST;

    uint32_t* addr = (uint32_t*)mmap(NULL, sizeof(uint32_t), PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
    ASSERT_NE(MAP_FAILED, addr, "mmap failed to map");

    int page_size = getpagesize();
    // Should be able to write.
    *addr = 10;
    EXPECT_EQ(10u, *addr, "read after write failed");

    int status = mprotect(addr, page_size, PROT_READ);
    EXPECT_EQ(0, status, "mprotect failed to downgrade to read-only");

    ASSERT_DEATH([](void* crashaddr) {
        uint32_t* intptr = static_cast<uint32_t*>(crashaddr);
        *intptr = 12;
    }, addr, "write to addr should have caused a crash");

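    // Zircon does not support write-only mappings, so requesting PROT_WRITE
    // alone is expected to fail with ENOTSUP.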
    status = mprotect(addr, page_size, PROT_WRITE);
    auto test_errno = errno;
    EXPECT_EQ(-1, status, "mprotect should fail for write-only");
    EXPECT_EQ(ENOTSUP, test_errno, "mprotect should return ENOTSUP for write-only");

    status = mprotect(addr, page_size, PROT_NONE);
    EXPECT_EQ(0, status, "mprotect should succeed for PROT_NONE");

    END_TEST;
}

} // namespace

BEGIN_TEST_CASE(memory_mapping_tests)
RUN_TEST(address_space_limits_test);
RUN_TEST(mmap_zerofilled_test);
RUN_TEST(mmap_len_test);
RUN_TEST(mmap_offset_test);
RUN_TEST(mmap_PROT_EXEC_test);
RUN_TEST(mmap_prot_test);
RUN_TEST(mmap_flags_test);
RUN_TEST_ENABLE_CRASH_HANDLER(mprotect_test);
END_TEST_CASE(memory_mapping_tests)

#ifndef BUILD_COMBINED_TESTS
int main(int argc, char** argv) {
    bool success = unittest_run_all_tests(argc, argv);
    return success ? 0 : -1;
}
#endif