//===-- sanitizer_stacktrace.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_stacktrace.h"

#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_platform.h"
#include "sanitizer_ptrauth.h"

namespace __sanitizer {

// Given the address of an instruction (typically a call site recovered from a
// return address), returns an address inside the *following* instruction.
// On fixed-width ISAs this is a constant offset; on RISC-V the instruction
// length must be decoded from the low bits of the first encoding byte.
// Returns 0 if the instruction length cannot be determined (RISC-V only).
uptr StackTrace::GetNextInstructionPc(uptr pc) {
#if defined(__sparc__) || defined(__mips__)
  // SPARC/MIPS calls are followed by a branch delay slot, so the next
  // independent instruction is two 4-byte slots ahead.
  return pc + 8;
#elif defined(__powerpc__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__hexagon__)
  // Fixed 4-byte instructions. STRIP_PAC_PC removes the arm64e pointer
  // authentication bits before doing arithmetic (a no-op elsewhere).
  return STRIP_PAC_PC((void *)pc) + 4;
#elif SANITIZER_RISCV64
  // RISC-V instructions are 2/4/6/8 bytes long, signalled by the low bits of
  // the first byte of the encoding.
  // Current check order is 4 -> 2 -> 6 -> 8
  u8 InsnByte = *(u8 *)(pc);
  if (((InsnByte & 0x3) == 0x3) && ((InsnByte & 0x1c) != 0x1c)) {
    // xxxxxxxxxxxbbb11 | 32 bit | bbb != 111
    return pc + 4;
  }
  if ((InsnByte & 0x3) != 0x3) {
    // xxxxxxxxxxxxxxaa | 16 bit | aa != 11
    return pc + 2;
  }
  // RISC-V encoding allows instructions to be up to 8 bytes long
  if ((InsnByte & 0x3f) == 0x1f) {
    // xxxxxxxxxx011111 | 48 bit |
    return pc + 6;
  }
  if ((InsnByte & 0x7f) == 0x3f) {
    // xxxxxxxxx0111111 | 64 bit |
    return pc + 8;
  }
  // bail-out if could not figure out the instruction size
  return 0;
#else
  // Variable-width ISAs (e.g. x86): +1 is enough to land past the first byte
  // of the call; exact next-instruction decoding is not needed here.
  return pc + 1;
#endif
}

// Returns the PC of the call site of this function's caller.
uptr StackTrace::GetCurrentPc() {
  return GET_CALLER_PC();
}

// Initializes the trace buffer from an existing array of |cnt| PCs,
// optionally prepending |extra_top_pc| as the new top-of-stack entry.
// Resets top_frame_bp since the copied trace carries no frame pointer.
void BufferedStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
  size = cnt + !!extra_top_pc;
  CHECK_LE(size, kStackTraceMax);
  internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
  if (extra_top_pc)
    trace_buffer[cnt] = extra_top_pc;
  top_frame_bp = 0;
}

// Sparc implementation is in its own file.
#if !defined(__sparc__)

// In GCC on ARM bp points to saved lr, not fp, so we should check the next
// cell in stack to be a saved frame pointer. GetCanonicFrame returns the
// pointer to saved frame pointer in any case.
static inline uhwptr *GetCanonicFrame(uptr bp,
                                      uptr stack_top,
                                      uptr stack_bottom) {
  CHECK_GT(stack_top, stack_bottom);
#ifdef __arm__
  if (!IsValidFrame(bp, stack_top, stack_bottom)) return 0;
  uhwptr *bp_prev = (uhwptr *)bp;
  // LLVM-style frame: bp already points at the saved frame pointer.
  if (IsValidFrame((uptr)bp_prev[0], stack_top, stack_bottom)) return bp_prev;
  // The next frame pointer does not look right. This could be a GCC frame, step
  // back by 1 word and try again.
  if (IsValidFrame((uptr)bp_prev[-1], stack_top, stack_bottom))
    return bp_prev - 1;
  // Nope, this does not look right either. This means the frame after next does
  // not have a valid frame pointer, but we can still extract the caller PC.
  // Unfortunately, there is no way to decide between GCC and LLVM frame
  // layouts. Assume GCC.
  return bp_prev - 1;
#else
  return (uhwptr*)bp;
#endif
}

// Frame-pointer-based unwinder: walks the chain of saved frame pointers
// between stack_bottom and stack_top, recording up to max_depth return
// addresses.  trace_buffer[0] is always |pc|; bp is the starting frame
// pointer.  Requires max_depth >= 2 (entry PC plus at least one frame).
void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
                                    uptr stack_bottom, u32 max_depth) {
  // TODO(yln): add arg sanity check for stack_top/stack_bottom
  CHECK_GE(max_depth, 2);
  const uptr kPageSize = GetPageSizeCached();
  trace_buffer[0] = pc;
  size = 1;
  if (stack_top < 4096) return;  // Sanity check for stack top.
  uhwptr *frame = GetCanonicFrame(bp, stack_top, stack_bottom);
  // Lowest possible address that makes sense as the next frame pointer.
  // Goes up as we walk the stack.
  uptr bottom = stack_bottom;
  // Avoid infinite loop when frame == frame[0] by using frame > prev_frame.
  // (|bottom| is raised to the current frame on each iteration, so the walk
  // is strictly monotonic toward stack_top and must terminate.)
  while (IsValidFrame((uptr)frame, stack_top, bottom) &&
         IsAligned((uptr)frame, sizeof(*frame)) &&
         size < max_depth) {
#ifdef __powerpc__
    // PowerPC ABIs specify that the return address is saved on the
    // *caller's* stack frame. Thus we must dereference the back chain
    // to find the caller frame before extracting it.
    uhwptr *caller_frame = (uhwptr*)frame[0];
    if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||
        !IsAligned((uptr)caller_frame, sizeof(uhwptr)))
      break;
    // For most ABIs the offset where the return address is saved is two
    // register sizes. The exception is the SVR4 ABI, which uses an
    // offset of only one register size.
#ifdef _CALL_SYSV
    uhwptr pc1 = caller_frame[1];
#else
    uhwptr pc1 = caller_frame[2];
#endif
#elif defined(__s390__)
    // s390 ABI: return address is in the register save area at slot 14.
    uhwptr pc1 = frame[14];
#elif defined(__riscv)
    // frame[-1] contains the return address
    uhwptr pc1 = frame[-1];
#else
    // Default layout: saved return address lives one word above the saved
    // frame pointer.  Strip arm64e pointer-authentication bits if present.
    uhwptr pc1 = STRIP_PAC_PC((void *)frame[1]);
#endif
    // Let's assume that any pointer in the 0th page (i.e. <0x1000 on i386 and
    // x86_64) is invalid and stop unwinding here.  If we're adding support for
    // a platform where this isn't true, we need to reconsider this check.
    if (pc1 < kPageSize)
      break;
    // Skip duplicating the entry PC that was already stored at index 0.
    if (pc1 != pc) {
      trace_buffer[size++] = (uptr) pc1;
    }
    // Raise the floor so the next frame must be strictly above this one.
    bottom = (uptr)frame;
#if defined(__riscv)
    // frame[-2] contain fp of the previous frame
    uptr new_bp = (uptr)frame[-2];
#else
    uptr new_bp = (uptr)frame[0];
#endif
    frame = GetCanonicFrame(new_bp, stack_top, bottom);
  }
}

#endif  // !defined(__sparc__)

// Drops the topmost |count| frames from the trace, shifting the remaining
// entries down.  |count| must be strictly less than the current size so at
// least one frame survives.
void BufferedStackTrace::PopStackFrames(uptr count) {
  CHECK_LT(count, size);
  size -= count;
  for (uptr i = 0; i < size; ++i) {
    trace_buffer[i] = trace_buffer[i + count];
  }
}

// Absolute difference of two addresses without unsigned-underflow issues.
static uptr Distance(uptr a, uptr b) { return a < b ? b - a : a - b; }

// Returns the index of the trace entry closest to |pc| (index 0 is never
// considered unless the trace has a single frame — the search starts with
// best = 0 but only compares from index 1 onward).
uptr BufferedStackTrace::LocatePcInTrace(uptr pc) {
  uptr best = 0;
  for (uptr i = 1; i < size; ++i) {
    if (Distance(trace[i], pc) < Distance(trace[best], pc)) best = i;
  }
  return best;
}

}  // namespace __sanitizer