//===-- asan_poisoning.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Shadow memory poisoning by ASan RTL and by user application.
//===----------------------------------------------------------------------===//

#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_flags.h"

namespace __asan {

void PoisonShadow(uptr addr, uptr size, u8 value) {
  if (!flags()->poison_heap) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
  CHECK(REAL(memset));
  FastPoisonShadow(addr, size, value);
}

void PoisonShadowPartialRightRedzone(uptr addr,
                                     uptr size,
                                     uptr redzone_size,
                                     u8 value) {
  if (!flags()->poison_heap) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
}

struct ShadowSegmentEndpoint {
  u8 *chunk;
  s8 offset;  // in [0, SHADOW_GRANULARITY)
  s8 value;   // = *chunk;

  explicit ShadowSegmentEndpoint(uptr address) {
    chunk = (u8*)MemToShadow(address);
    offset = address & (SHADOW_GRANULARITY - 1);
    value = *chunk;
  }
};

void FlushUnneededASanShadowMemory(uptr p, uptr size) {
  // Since asan's mapping is compacting, the shadow chunk may not be
  // page-aligned, so we only flush the page-aligned portion.
  uptr page_size = GetPageSizeCached();
  uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
  uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}

void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
  uptr end = ptr + size;
  if (common_flags()->verbosity) {
    Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
           poison ? "" : "un", ptr, end, size);
    if (common_flags()->verbosity >= 2)
      PRINT_CURRENT_STACK();
  }
  CHECK(size);
  CHECK_LE(size, 4096);
  CHECK(IsAligned(end, SHADOW_GRANULARITY));
  if (!IsAligned(ptr, SHADOW_GRANULARITY)) {
    *(u8 *)MemToShadow(ptr) =
        poison ? static_cast<u8>(ptr % SHADOW_GRANULARITY) : 0;
    ptr |= SHADOW_GRANULARITY - 1;
    ptr++;
  }
  for (; ptr < end; ptr += SHADOW_GRANULARITY)
    *(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0;
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// Current implementation of __asan_(un)poison_memory_region doesn't check
// that the user program (un)poisons the memory it owns. It poisons memory
// conservatively, and unpoisons progressively to make sure the asan shadow
// mapping invariant is preserved (see detailed mapping description here:
// http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm).
//
// * if user asks to poison region [left, right), the program poisons
//   at least [left, AlignDown(right)).
// * if user asks to unpoison region [left, right), the program unpoisons
//   at most [AlignDown(left), right).
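//
// Worked example (assuming the default SHADOW_GRANULARITY of 8): poisoning
// [0x1003, 0x1011) is guaranteed to poison [0x1003, 0x1010); whether the
// trailing byte at 0x1010 also gets poisoned depends on the current shadow
// value of its granule. Unpoisoning [0x1003, 0x1011) unpoisons no more than
// [0x1000, 0x1011), i.e. it may also unpoison the three bytes preceding the
// requested region.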
void __asan_poison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(1, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK(beg.offset < end.offset);
    s8 value = beg.value;
    CHECK(value == end.value);
    // We can only poison memory if the byte in end.offset is unaddressable.
    // No need to re-poison memory if it is poisoned already.
    if (value > 0 && value <= end.offset) {
      if (beg.offset > 0) {
        *beg.chunk = Min(value, beg.offset);
      } else {
        *beg.chunk = kAsanUserPoisonedMemoryMagic;
      }
    }
    return;
  }
  CHECK(beg.chunk < end.chunk);
  if (beg.offset > 0) {
    // Mark bytes from beg.offset as unaddressable.
    if (beg.value == 0) {
      *beg.chunk = beg.offset;
    } else {
      *beg.chunk = Min(beg.value, beg.offset);
    }
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
  // Poison if byte in end.offset is unaddressable.
  if (end.value > 0 && end.value <= end.offset) {
    *end.chunk = kAsanUserPoisonedMemoryMagic;
  }
}

void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(1, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK(beg.offset < end.offset);
    s8 value = beg.value;
    CHECK(value == end.value);
    // We unpoison memory bytes up to end.offset if they are not
    // unpoisoned already.
    if (value != 0) {
      *beg.chunk = Max(value, end.offset);
    }
    return;
  }
  CHECK(beg.chunk < end.chunk);
  if (beg.offset > 0) {
    *beg.chunk = 0;
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
  if (end.offset > 0 && end.value != 0) {
    *end.chunk = Max(end.value, end.offset);
  }
}
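
// Usage sketch (illustrative, not part of the runtime): a user-level
// allocator can poison chunks it returns to its free list and unpoison them
// again on reuse, e.g.
//   __asan_poison_memory_region(chunk, chunk_size);    // on deallocation
//   __asan_unpoison_memory_region(chunk, chunk_size);  // on allocation
// The public <sanitizer/asan_interface.h> header also exposes these calls
// through the ASAN_(UN)POISON_MEMORY_REGION macros, which are intended to
// compile away when the code is built without ASan.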

int __asan_address_is_poisoned(void const volatile *addr) {
  return __asan::AddressIsPoisoned((uptr)addr);
}

uptr __asan_region_is_poisoned(uptr beg, uptr size) {
  if (!size) return 0;
  uptr end = beg + size;
  if (!AddrIsInMem(beg)) return beg;
  if (!AddrIsInMem(end)) return end;
  CHECK_LT(beg, end);
  uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
  uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
  uptr shadow_beg = MemToShadow(aligned_b);
  uptr shadow_end = MemToShadow(aligned_e);
  // First check the first and the last application bytes,
  // then check the SHADOW_GRANULARITY-aligned region by calling
  // mem_is_zero on the corresponding shadow.
  if (!__asan::AddressIsPoisoned(beg) &&
      !__asan::AddressIsPoisoned(end - 1) &&
      (shadow_end <= shadow_beg ||
       __sanitizer::mem_is_zero((const char *)shadow_beg,
                                shadow_end - shadow_beg)))
    return 0;
  // The fast check failed, so we have a poisoned byte somewhere.
  // Find it slowly.
  for (; beg < end; beg++)
    if (__asan::AddressIsPoisoned(beg))
      return beg;
  UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
  return 0;
}

#define CHECK_SMALL_REGION(p, size, isWrite)                    \
  do {                                                          \
    uptr __p = reinterpret_cast<uptr>(p);                       \
    uptr __size = size;                                         \
    if (UNLIKELY(__asan::AddressIsPoisoned(__p) ||              \
                 __asan::AddressIsPoisoned(__p + __size - 1))) {\
      GET_CURRENT_PC_BP_SP;                                     \
      uptr __bad = __asan_region_is_poisoned(__p, __size);      \
      __asan_report_error(pc, bp, sp, __bad, isWrite, __size);  \
    }                                                           \
  } while (false)

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u32 __sanitizer_unaligned_load32(const uu32 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u64 __sanitizer_unaligned_load64(const uu64 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_cxx_array_cookie(uptr p) {
  if (SANITIZER_WORDSIZE != 64) return;
  if (!flags()->poison_array_cookie) return;
  uptr s = MEM_TO_SHADOW(p);
  *reinterpret_cast<u8*>(s) = kAsanArrayCookieMagic;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_load_cxx_array_cookie(uptr *p) {
  if (SANITIZER_WORDSIZE != 64) return *p;
  if (!flags()->poison_array_cookie) return *p;
  uptr s = MEM_TO_SHADOW(reinterpret_cast<uptr>(p));
  u8 sval = *reinterpret_cast<u8*>(s);
  if (sval == kAsanArrayCookieMagic) return *p;
  // If sval is not kAsanArrayCookieMagic it can only be freed memory,
  // which means that we are going to get double-free. So, return 0 to avoid
  // infinite loop of destructors. We don't want to report a double-free here
  // though, so print a warning just in case.
  // CHECK_EQ(sval, kAsanHeapFreeMagic);
  if (sval == kAsanHeapFreeMagic) {
    Report("AddressSanitizer: loaded array cookie from freed memory; "
           "expect a double-free report\n");
    return 0;
  }
  // The cookie may remain unpoisoned if e.g. it comes from a custom
  // operator new defined inside a class.
  return *p;
}
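
// Background sketch (illustrative; assumes the Itanium C++ ABI): for
//   Foo *a = new Foo[n];  // Foo has a non-trivial destructor
// the compiler stores the element count n in a cookie word just in front of
// the array, and delete[] a reads it back (through
// __asan_load_cxx_array_cookie above) to know how many destructors to run.
// Poisoning the cookie's shadow lets ASan flag stray writes to it.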

// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that the left border of the region to be poisoned is properly
// aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
  if (size == 0) return;
  uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1);
  PoisonShadow(addr, aligned_size,
               do_poison ? kAsanStackUseAfterScopeMagic : 0);
  if (size == aligned_size)
    return;
  s8 end_offset = (s8)(size - aligned_size);
  s8* shadow_end = (s8*)MemToShadow(addr + aligned_size);
  s8 end_value = *shadow_end;
  if (do_poison) {
    // If possible, mark all the bytes mapping to the last shadow byte as
    // unaddressable.
    if (end_value > 0 && end_value <= end_offset)
      *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
  } else {
    // If necessary, mark the first few bytes mapping to the last shadow byte
    // as addressable.
    if (end_value != 0)
      *shadow_end = Max(end_value, end_offset);
  }
}

void __asan_poison_stack_memory(uptr addr, uptr size) {
  VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, true);
}

void __asan_unpoison_stack_memory(uptr addr, uptr size) {
  VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, false);
}

void __sanitizer_annotate_contiguous_container(const void *beg_p,
                                               const void *end_p,
                                               const void *old_mid_p,
                                               const void *new_mid_p) {
  if (!flags()->detect_container_overflow) return;
  VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
          new_mid_p);
  uptr beg = reinterpret_cast<uptr>(beg_p);
  uptr end = reinterpret_cast<uptr>(end_p);
  uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
  uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
  uptr granularity = SHADOW_GRANULARITY;
  if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
        IsAligned(beg, granularity))) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid,
                                                 &stack);
  }
  CHECK_LE(end - beg,
           FIRST_32_SECOND_64(1UL << 30, 1UL << 34));  // Sanity check.

  uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
  uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
  uptr d1 = RoundDownTo(old_mid, granularity);
  // uptr d2 = RoundUpTo(old_mid, granularity);
  // Currently we should be in this state:
  //   [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
  // Make a quick sanity check that we are indeed in this state.
  //
  // FIXME: Two of these three checks are disabled until we fix
  // https://code.google.com/p/address-sanitizer/issues/detail?id=258.
  // if (d1 != d2)
  //   CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
  if (a + granularity <= d1)
    CHECK_EQ(*(u8*)MemToShadow(a), 0);
  // if (d2 + granularity <= c && c <= end)
  //   CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
  //            kAsanContiguousContainerOOBMagic);

  uptr b1 = RoundDownTo(new_mid, granularity);
  uptr b2 = RoundUpTo(new_mid, granularity);
  // New state:
  //   [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good.
  PoisonShadow(a, b1 - a, 0);
  PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
  if (b1 != b2) {
    CHECK_EQ(b2 - b1, granularity);
    *(u8*)MemToShadow(b1) = static_cast<u8>(new_mid - b1);
  }
}
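
// Usage sketch (illustrative): a vector-like container with storage
// [data, data + capacity) growing from size to new_size bytes of payload
// would announce the change with
//   __sanitizer_annotate_contiguous_container(
//       data, data + capacity, data + size, data + new_size);
// so that the unused tail [data + new_size, data + capacity) stays poisoned
// and out-of-bounds accesses to it are reported.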

int __sanitizer_verify_contiguous_container(const void *beg_p,
                                            const void *mid_p,
                                            const void *end_p) {
  if (!flags()->detect_container_overflow) return 1;
  uptr beg = reinterpret_cast<uptr>(beg_p);
  uptr end = reinterpret_cast<uptr>(end_p);
  uptr mid = reinterpret_cast<uptr>(mid_p);
  CHECK_LE(beg, mid);
  CHECK_LE(mid, end);
  // Check some bytes starting from beg, some bytes around mid, and some bytes
  // ending with end.
  uptr kMaxRangeToCheck = 32;
  uptr r1_beg = beg;
  uptr r1_end = Min(beg + kMaxRangeToCheck, mid);
  uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
  uptr r2_end = Min(end, mid + kMaxRangeToCheck);
  uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
  uptr r3_end = end;
  for (uptr i = r1_beg; i < r1_end; i++)
    if (AddressIsPoisoned(i))
      return 0;
  for (uptr i = r2_beg; i < mid; i++)
    if (AddressIsPoisoned(i))
      return 0;
  for (uptr i = mid; i < r2_end; i++)
    if (!AddressIsPoisoned(i))
      return 0;
  for (uptr i = r3_beg; i < r3_end; i++)
    if (!AddressIsPoisoned(i))
      return 0;
  return 1;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true);
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false);
}

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool WordIsPoisoned(uptr addr) {
  return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0);
}
}  // namespace __lsan