1//===-- sanitizer_stacktrace_test.cpp -------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
10//
11//===----------------------------------------------------------------------===//
12
13#include "sanitizer_common/sanitizer_stacktrace.h"
14
15#include <string.h>
16
17#include <algorithm>
18#include <string>
19
20#include "gmock/gmock.h"
21#include "gtest/gtest.h"
22#include "sanitizer_common/sanitizer_common.h"
23#include "sanitizer_internal_defs.h"
24
25using testing::ContainsRegex;
26using testing::MatchesRegex;
27
28namespace __sanitizer {
29
// Fixture that builds a synthetic frame-pointer chain in a private mapping so
// the fast (FP-based) unwinder can be exercised deterministically.
class FastUnwindTest : public ::testing::Test {
 protected:
  virtual void SetUp();
  virtual void TearDown();

  // Runs trace.UnwindFast() over the fake stack built in SetUp().
  void UnwindFast();

  void *mapping;             // 2-page MmapOrDie region; page 0 is no-access.
  uhwptr *fake_stack;        // Start of the fake fp/retaddr pairs (page 1).
  const uptr fake_stack_size = 10;
  uhwptr start_pc;           // Synthetic PC passed as the innermost frame.

  uhwptr fake_bp;            // Starting frame pointer for the unwind.
  uhwptr fake_top;           // Upper stack limit handed to UnwindFast.
  uhwptr fake_bottom;        // Lower stack limit handed to UnwindFast.
  BufferedStackTrace trace;  // Receives the unwound frames.

  // Per-arch distance (in words) from a saved fp slot to the next frame, and
  // from fake_stack to the starting bp.
#if defined(__loongarch__) || defined(__riscv)
  const uptr kFpOffset = 4;
  const uptr kBpOffset = 2;
#else
  const uptr kFpOffset = 2;
  const uptr kBpOffset = 0;
#endif

 private:
  CommonFlags tmp_flags_;  // Snapshot of common_flags(), restored in TearDown.
};
58
59static uptr PC(uptr idx) {
60  return (1<<20) + idx;
61}
62
// Builds the fake call stack (alternating fp/retaddr words) and saves the
// current common flags so TearDown() can restore them.
void FastUnwindTest::SetUp() {
  size_t ps = GetPageSize();
  // Two pages; the first is made inaccessible so stray reads below the fake
  // stack fault instead of silently succeeding.
  mapping = MmapOrDie(2 * ps, "FastUnwindTest");
  MprotectNoAccess((uptr)mapping, ps);

  // Unwinder may peek 1 word down from the starting FP.
  fake_stack = (uhwptr *)((uptr)mapping + ps + sizeof(uhwptr));

  // Fill an array of pointers with fake fp+retaddr pairs.  Frame pointers have
  // even indices.
  for (uptr i = 0; i + 1 < fake_stack_size; i += 2) {
    fake_stack[i] = (uptr)&fake_stack[i + kFpOffset];  // fp
    fake_stack[i+1] = PC(i + 1); // retaddr
  }
  // Mark the last fp point back up to terminate the stack trace.
  fake_stack[RoundDownTo(fake_stack_size - 1, 2)] = (uhwptr)&fake_stack[0];

  // Top is two slots past the end because UnwindFast subtracts two.
  fake_top = (uhwptr)&fake_stack[fake_stack_size + kFpOffset];
  // Bottom is one slot before the start because UnwindFast uses >.
  fake_bottom = (uhwptr)mapping;
  fake_bp = (uptr)&fake_stack[kBpOffset];
  start_pc = PC(0);

  // Snapshot flags; individual tests may override them.
  tmp_flags_.CopyFrom(*common_flags());
}
89
90void FastUnwindTest::TearDown() {
91  size_t ps = GetPageSize();
92  UnmapOrDie(mapping, 2 * ps);
93
94  // Restore default flags.
95  OverrideCommonFlags(tmp_flags_);
96}
97
98#if SANITIZER_CAN_FAST_UNWIND
99
100#ifdef __sparc__
101// Fake stacks don't meet SPARC UnwindFast requirements.
102#define SKIP_ON_SPARC(x) DISABLED_##x
103#else
104#define SKIP_ON_SPARC(x) x
105#endif
106
// Runs the frame-pointer unwinder over the fake stack built in SetUp(),
// filling `trace` with up to kStackTraceMax frames.
void FastUnwindTest::UnwindFast() {
  trace.UnwindFast(start_pc, fake_bp, fake_top, fake_bottom, kStackTraceMax);
}
110
111TEST_F(FastUnwindTest, SKIP_ON_SPARC(Basic)) {
112  UnwindFast();
113  // Should get all on-stack retaddrs and start_pc.
114  EXPECT_EQ(6U, trace.size);
115  EXPECT_EQ(start_pc, trace.trace[0]);
116  for (uptr i = 1; i <= 5; i++) {
117    EXPECT_EQ(PC(i*2 - 1), trace.trace[i]);
118  }
119}
120
121// From: https://github.com/google/sanitizers/issues/162
122TEST_F(FastUnwindTest, SKIP_ON_SPARC(FramePointerLoop)) {
123  // Make one fp point to itself.
124  fake_stack[4] = (uhwptr)&fake_stack[4];
125  UnwindFast();
126  // Should get all on-stack retaddrs up to the 4th slot and start_pc.
127  EXPECT_EQ(4U, trace.size);
128  EXPECT_EQ(start_pc, trace.trace[0]);
129  for (uptr i = 1; i <= 3; i++) {
130    EXPECT_EQ(PC(i*2 - 1), trace.trace[i]);
131  }
132}
133
134TEST_F(FastUnwindTest, SKIP_ON_SPARC(MisalignedFramePointer)) {
135  // Make one fp misaligned.
136  fake_stack[4] += 3;
137  UnwindFast();
138  // Should get all on-stack retaddrs up to the 4th slot and start_pc.
139  EXPECT_EQ(4U, trace.size);
140  EXPECT_EQ(start_pc, trace.trace[0]);
141  for (uptr i = 1; i < 4U; i++) {
142    EXPECT_EQ(PC(i*2 - 1), trace.trace[i]);
143  }
144}
145
// With max_depth == 1 only the starting PC should be recorded, but the top
// frame's BP must still be captured.
TEST_F(FastUnwindTest, OneFrameStackTrace) {
  trace.Unwind(start_pc, fake_bp, nullptr, true, 1);
  EXPECT_EQ(1U, trace.size);
  EXPECT_EQ(start_pc, trace.trace[0]);
  EXPECT_EQ((uhwptr)&fake_stack[kBpOffset], trace.top_frame_bp);
}
152
// With max_depth == 0 nothing at all should be recorded, including the BP.
TEST_F(FastUnwindTest, ZeroFramesStackTrace) {
  trace.Unwind(start_pc, fake_bp, nullptr, true, 0);
  EXPECT_EQ(0U, trace.size);
  EXPECT_EQ(0U, trace.top_frame_bp);
}
158
// A next-FP that points below the current FP (but inside the stack limits)
// must terminate the unwind after the current frame's retaddr.
TEST_F(FastUnwindTest, SKIP_ON_SPARC(FPBelowPrevFP)) {
  // The next FP points to unreadable memory inside the stack limits, but below
  // current FP.
  fake_stack[0] = (uhwptr)&fake_stack[-50];
  fake_stack[1] = PC(1);
  UnwindFast();
  EXPECT_EQ(2U, trace.size);
  EXPECT_EQ(PC(0), trace.trace[0]);
  EXPECT_EQ(PC(1), trace.trace[1]);
}
169
170TEST_F(FastUnwindTest, SKIP_ON_SPARC(CloseToZeroFrame)) {
171  // Make one pc a NULL pointer.
172  fake_stack[5] = 0x0;
173  UnwindFast();
174  // The stack should be truncated at the NULL pointer (and not include it).
175  EXPECT_EQ(3U, trace.size);
176  EXPECT_EQ(start_pc, trace.trace[0]);
177  for (uptr i = 1; i < 3U; i++) {
178    EXPECT_EQ(PC(i*2 - 1), trace.trace[i]);
179  }
180}
181
// The PrintTo tests reuse the fake-stack fixture; the alias only renames it.
using StackPrintTest = FastUnwindTest;
183
184TEST_F(StackPrintTest, SKIP_ON_SPARC(ContainsFullTrace)) {
185  // Override stack trace format to make testing code independent of default
186  // flag values.
187  CommonFlags flags;
188  flags.CopyFrom(*common_flags());
189  flags.stack_trace_format = "#%n %p";
190  OverrideCommonFlags(flags);
191
192  UnwindFast();
193
194  char buf[3000];
195  trace.PrintTo(buf, sizeof(buf));
196  EXPECT_THAT(std::string(buf),
197              MatchesRegex("(#[0-9]+ 0x[0-9a-f]+\n){" +
198                           std::to_string(trace.size) + "}\n"));
199}
200
// PrintTo into a too-small buffer must produce a NUL-terminated prefix of the
// full output rather than overflowing or garbling it.
TEST_F(StackPrintTest, SKIP_ON_SPARC(TruncatesContents)) {
  UnwindFast();

  char buf[3000];
  uptr actual_len = trace.PrintTo(buf, sizeof(buf));
  ASSERT_LT(actual_len, sizeof(buf));

  char tinybuf[10];
  trace.PrintTo(tinybuf, sizeof(tinybuf));

  // This is the truncation case.
  ASSERT_GT(actual_len, sizeof(tinybuf));

  // The truncated contents should be a prefix of the full contents.
  size_t lastpos = sizeof(tinybuf) - 1;
  EXPECT_EQ(strncmp(buf, tinybuf, lastpos), 0);
  EXPECT_EQ(tinybuf[lastpos], '\0');

  // Full buffer has more contents...
  EXPECT_NE(buf[lastpos], '\0');
}
222
// Printing without unwinding first (trace.size == 0) must emit the
// placeholder text rather than crash or print garbage.
TEST_F(StackPrintTest, SKIP_ON_SPARC(WorksWithEmptyStack)) {
  char buf[3000];
  trace.PrintTo(buf, sizeof(buf));
  EXPECT_NE(strstr(buf, "<empty stack>"), nullptr);
}
228
229TEST_F(StackPrintTest, SKIP_ON_SPARC(ReturnsCorrectLength)) {
230  UnwindFast();
231
232  char buf[3000];
233  uptr len = trace.PrintTo(buf, sizeof(buf));
234  size_t actual_len = strlen(buf);
235  ASSERT_LT(len, sizeof(buf));
236  EXPECT_EQ(len, actual_len);
237
238  char tinybuf[5];
239  len = trace.PrintTo(tinybuf, sizeof(tinybuf));
240  size_t truncated_len = strlen(tinybuf);
241  ASSERT_GE(len, sizeof(tinybuf));
242  EXPECT_EQ(len, actual_len);
243  EXPECT_EQ(truncated_len, sizeof(tinybuf) - 1);
244}
245
// A zero-sized destination is legal: nothing is written, but the would-be
// length must still be reported (snprintf-style contract).
TEST_F(StackPrintTest, SKIP_ON_SPARC(AcceptsZeroSize)) {
  UnwindFast();
  char buf[1];
  EXPECT_GT(trace.PrintTo(buf, 0), 0u);
}
251
// Alias so gtest treats the following as a death test suite ("DeathTest"
// suffix), while reusing the same fixture.
using StackPrintDeathTest = StackPrintTest;
253
// Passing a NULL destination buffer is a hard error: PrintTo must abort.
TEST_F(StackPrintDeathTest, SKIP_ON_SPARC(RequiresNonNullBuffer)) {
  UnwindFast();
  EXPECT_DEATH(trace.PrintTo(NULL, 100), "");
}
258
259#endif // SANITIZER_CAN_FAST_UNWIND
260
261TEST(SlowUnwindTest, ShortStackTrace) {
262  BufferedStackTrace stack;
263  uptr pc = StackTrace::GetCurrentPc();
264  uptr bp = GET_CURRENT_FRAME();
265  stack.Unwind(pc, bp, nullptr, false, /*max_depth=*/0);
266  EXPECT_EQ(0U, stack.size);
267  EXPECT_EQ(0U, stack.top_frame_bp);
268  stack.Unwind(pc, bp, nullptr, false, /*max_depth=*/1);
269  EXPECT_EQ(1U, stack.size);
270  EXPECT_EQ(pc, stack.trace[0]);
271  EXPECT_EQ(bp, stack.top_frame_bp);
272}
273
TEST(GetCurrentPc, Basic) {
  // Test that PCs obtained via GET_CURRENT_PC()
  // and StackTrace::GetCurrentPc() are all different
  // and are close to the function start.
  struct Local {
    static NOINLINE void Test() {
      // Capture order matters: each entry must come from a distinct call site
      // inside this function, after its entry point.
      const uptr pcs[] = {
          (uptr)&Local::Test,
          GET_CURRENT_PC(),
          StackTrace::GetCurrentPc(),
          StackTrace::GetCurrentPc(),
      };
      // Log the values first so a failure below is easy to diagnose.
      for (uptr i = 0; i < ARRAY_SIZE(pcs); i++)
        Printf("pc%zu: 0x%zx\n", i, pcs[i]);
      for (uptr i = 1; i < ARRAY_SIZE(pcs); i++) {
        // Each captured PC lies after the function start...
        EXPECT_GT(pcs[i], pcs[0]);
        // ...but within 1000 bytes of it (i.e. inside this small function).
        EXPECT_LT(pcs[i], pcs[0] + 1000);
        // All captured PCs are pairwise distinct.
        for (uptr j = 0; j < i; j++) EXPECT_NE(pcs[i], pcs[j]);
      }
    }
  };
  Local::Test();
}
297
// Dummy implementation. This should never be called, but is required to link
// non-optimized builds of this test.
void BufferedStackTrace::UnwindImpl(uptr pc, uptr bp, void *context,
                                    bool request_fast, u32 max_depth) {
  // Deliberately aborts if reached: the tests above only exercise the
  // UnwindFast/Unwind entry points backed by the real runtime.
  UNIMPLEMENTED();
}
304
305}  // namespace __sanitizer
306