// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2019, Michael Ellerman, IBM Corp.
//
// Test that out-of-bounds reads/writes behave as expected.

#include <setjmp.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include "utils.h"

// Old distros (Ubuntu 16.04 at least) don't define this
#ifndef SEGV_BNDERR
#define SEGV_BNDERR	3
#endif

// The 64-bit kernel's address space always starts here
#define PAGE_OFFSET	(0xcul << 60)

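// Upper bound of the kernel's virtual address range; set in test()
// according to the MMU type and page size.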
static unsigned long kernel_virt_end;

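// Filled in by the SEGV handler and examined once siglongjmp() returns
// control to bad_access().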
static volatile int fault_code;
static volatile unsigned long fault_addr;
static jmp_buf setjmp_env;

static void segv_handler(int n, siginfo_t *info, void *ctxt_v)
{
	fault_code = info->si_code;
	fault_addr = (unsigned long)info->si_addr;
	siglongjmp(setjmp_env, 1);
}

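// Perform a one-byte read or write at p, which is expected to SEGV.
// Returns 0 if the resulting fault looks sane, non-zero otherwise.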
int bad_access(char *p, bool write)
{
	char x = 0;

	fault_code = 0;
	fault_addr = 0;

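	// sigsetjmp() returns 0 on the initial call; when the access faults,
	// segv_handler() siglongjmp()s back here with a non-zero value.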
	if (sigsetjmp(setjmp_env, 1) == 0) {
		if (write)
			*p = 1;
		else
			x = *p;

		printf("Bad - no SEGV! (%c)\n", x);
		return 1;
	}

	// If we see MAPERR that means we took a page fault rather than an SLB
	// miss. We only expect to take page faults for addresses within the
	// valid kernel range.
	FAIL_IF(fault_code == SEGV_MAPERR &&
		(fault_addr < PAGE_OFFSET || fault_addr >= kernel_virt_end));

	FAIL_IF(fault_code != SEGV_MAPERR && fault_code != SEGV_BNDERR);

	return 0;
}

static int test(void)
{
	unsigned long i, j, addr, region_shift, page_shift, page_size;
	struct sigaction sig;
	bool hash_mmu;

	sig = (struct sigaction) {
		.sa_sigaction = segv_handler,
		.sa_flags = SA_SIGINFO,
	};

	FAIL_IF(sigaction(SIGSEGV, &sig, NULL) != 0);

	FAIL_IF(using_hash_mmu(&hash_mmu));

	page_size = sysconf(_SC_PAGESIZE);
	if (page_size == (64 * 1024))
		page_shift = 16;
	else
		page_shift = 12;

	if (page_size == (64 * 1024) || !hash_mmu) {
		region_shift = 52;

		// We have 7 512T regions (4 for the kernel linear map,
		// plus vmalloc, io and vmemmap)
		kernel_virt_end = PAGE_OFFSET + (7 * (512ul << 40));
	} else if (page_size == (4 * 1024) && hash_mmu) {
		region_shift = 46;

		// We have 7 64T regions (4 for the kernel linear map,
		// plus vmalloc, io and vmemmap)
		kernel_virt_end = PAGE_OFFSET + (7 * (64ul << 40));
	} else {
		FAIL_IF(true);
	}

	printf("Using %s MMU, PAGE_SIZE = %dKB, start address 0x%016lx\n",
	       hash_mmu ? "hash" : "radix",
	       (1 << page_shift) >> 10,
	       1ul << region_shift);

	// This generates access patterns like:
	//   0x0010000000000000
	//   0x0010000000010000
	//   0x0010000000020000
	//   ...
	//   0x0014000000000000
	//   0x0018000000000000
	//   0x0020000000000000
	//   0x0020000000010000
	//   0x0020000000020000
	//   ...
	//   0xf400000000000000
	//   0xf800000000000000

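	// i selects a top-level region, j a power-of-two offset within it;
	// the inner loop stops once the offset would spill into the next region.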
	for (i = 1; i <= ((0xful << 60) >> region_shift); i++) {
		for (j = page_shift - 1; j < 60; j++) {
			unsigned long base, delta;

			base  = i << region_shift;
			delta = 1ul << j;

			if (delta >= base)
				break;

			// Round down to a page boundary
			addr = (base | delta) & ~((1ul << page_shift) - 1);

			FAIL_IF(bad_access((char *)addr, false));
			FAIL_IF(bad_access((char *)addr, true));
		}
	}

	return 0;
}

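// Walking every region can take a while, hence the generous timeout below.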
int main(void)
{
	test_harness_set_timeout(300);
	return test_harness(test, "bad_accesses");
}