//===-- tsan_rtl_access.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Definitions of memory access and function entry/exit entry points.
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"

namespace __tsan {

namespace v3 {

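// Try to append a compact memory access event to the current trace part.
// Returns false if the trace part is full; the caller is expected to call
// TraceSwitchPart() and retry (see TraceMemoryAccessRange below).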
ALWAYS_INLINE USED bool TryTraceMemoryAccess(ThreadState *thr, uptr pc,
                                             uptr addr, uptr size,
                                             AccessType typ) {
  DCHECK(size == 1 || size == 2 || size == 4 || size == 8);
  if (!kCollectHistory)
    return true;
  EventAccess *ev;
  if (UNLIKELY(!TraceAcquire(thr, &ev)))
    return false;
  u64 size_log = size == 1 ? 0 : size == 2 ? 1 : size == 4 ? 2 : 3;
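  // The PC is stored as a delta from the previously traced PC, biased by
  // 2^(kPCBits-1) so that both forward and backward deltas fit into the
  // unsigned kPCBits-wide field.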
  uptr pc_delta = pc - thr->trace_prev_pc + (1 << (EventAccess::kPCBits - 1));
  thr->trace_prev_pc = pc;
  if (LIKELY(pc_delta < (1 << EventAccess::kPCBits))) {
    ev->is_access = 1;
    ev->is_read = !!(typ & kAccessRead);
    ev->is_atomic = !!(typ & kAccessAtomic);
    ev->size_log = size_log;
    ev->pc_delta = pc_delta;
    DCHECK_EQ(ev->pc_delta, pc_delta);
    ev->addr = CompressAddr(addr);
    TraceRelease(thr, ev);
    return true;
  }
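  // The PC delta does not fit into the compact event;
  // fall back to an extended access event that stores the full PC.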
  auto *evex = reinterpret_cast<EventAccessExt *>(ev);
  evex->is_access = 0;
  evex->is_func = 0;
  evex->type = EventType::kAccessExt;
  evex->is_read = !!(typ & kAccessRead);
  evex->is_atomic = !!(typ & kAccessAtomic);
  evex->size_log = size_log;
  evex->addr = CompressAddr(addr);
  evex->pc = pc;
  TraceRelease(thr, evex);
  return true;
}

ALWAYS_INLINE USED bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc,
                                                  uptr addr, uptr size,
                                                  AccessType typ) {
  if (!kCollectHistory)
    return true;
  EventAccessRange *ev;
  if (UNLIKELY(!TraceAcquire(thr, &ev)))
    return false;
  thr->trace_prev_pc = pc;
  ev->is_access = 0;
  ev->is_func = 0;
  ev->type = EventType::kAccessRange;
  ev->is_read = !!(typ & kAccessRead);
  ev->is_free = !!(typ & kAccessFree);
  ev->size_lo = size;
  ev->pc = CompressAddr(pc);
  ev->addr = CompressAddr(addr);
  ev->size_hi = size >> EventAccessRange::kSizeLoBits;
  TraceRelease(thr, ev);
  return true;
}

void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                            AccessType typ) {
  if (LIKELY(TryTraceMemoryAccessRange(thr, pc, addr, size, typ)))
    return;
  TraceSwitchPart(thr);
  UNUSED bool res = TryTraceMemoryAccessRange(thr, pc, addr, size, typ);
  DCHECK(res);
}

void TraceFunc(ThreadState *thr, uptr pc) {
  if (LIKELY(TryTraceFunc(thr, pc)))
    return;
  TraceSwitchPart(thr);
  UNUSED bool res = TryTraceFunc(thr, pc);
  DCHECK(res);
}

void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
                    StackID stk) {
  DCHECK(type == EventType::kLock || type == EventType::kRLock);
  if (!kCollectHistory)
    return;
  EventLock ev;
  ev.is_access = 0;
  ev.is_func = 0;
  ev.type = type;
  ev.pc = CompressAddr(pc);
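  // The stack ID does not fit into a single bit-field,
  // so it is split into low and high parts.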
  ev.stack_lo = stk;
  ev.stack_hi = stk >> EventLock::kStackIDLoBits;
  ev._ = 0;
  ev.addr = CompressAddr(addr);
  TraceEvent(thr, ev);
}

void TraceMutexUnlock(ThreadState *thr, uptr addr) {
  if (!kCollectHistory)
    return;
  EventUnlock ev;
  ev.is_access = 0;
  ev.is_func = 0;
  ev.type = EventType::kUnlock;
  ev._ = 0;
  ev.addr = CompressAddr(addr);
  TraceEvent(thr, ev);
}

void TraceTime(ThreadState *thr) {
  if (!kCollectHistory)
    return;
  EventTime ev;
  ev.is_access = 0;
  ev.is_func = 0;
  ev.type = EventType::kTime;
  ev.sid = static_cast<u64>(thr->sid);
  ev.epoch = static_cast<u64>(thr->epoch);
  ev._ = 0;
  TraceEvent(thr, ev);
}

}  // namespace v3

ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t *)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t *)sp, s, memory_order_relaxed);
}

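// Store the current access into the shadow slot and clear *s, so that
// subsequent calls from the unrolled shadow update code store an empty (zero)
// shadow value instead of duplicating the access in multiple slots.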
ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

extern "C" void __tsan_report_race();

ALWAYS_INLINE
void HandleRace(ThreadState *thr, u64 *shadow_mem, Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#if !SANITIZER_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

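// The access recorded in the old shadow value happens-before the current
// thread if the thread's vector clock entry for old's thread id has already
// reached old's epoch.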
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

ALWAYS_INLINE
void MemoryAccessImpl1(ThreadState *thr, uptr addr, int kAccessSizeLog,
                       bool kAccessIsWrite, bool kIsAtomic, u64 *shadow_mem,
                       Shadow cur) {
  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();
  bool stored = false;

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Distinguishing larger and
  // smaller accesses as well would allow replacing some 'candidates'
  // with 'same' or 'replace', but it is just not worth it
  // (performance- and complexity-wise).

  Shadow old(0);

  // In release mode we manually unroll the loop,
  // because empirically gcc generates better code this way.
  // However, we can't afford unrolling in debug mode, because the function
  // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
  // threads, which is not enough for the unrolled loop.
#if SANITIZER_DEBUG
  for (int idx = 0; idx < 4; idx++) {
#  include "tsan_update_shadow_word.inc"
  }
#else
  int idx = 0;
#  include "tsan_update_shadow_word.inc"
  idx = 1;
  if (stored) {
#  include "tsan_update_shadow_word.inc"
  } else {
#  include "tsan_update_shadow_word.inc"
  }
  idx = 2;
  if (stored) {
#  include "tsan_update_shadow_word.inc"
  } else {
#  include "tsan_update_shadow_word.inc"
  }
  idx = 3;
  if (stored) {
#  include "tsan_update_shadow_word.inc"
  } else {
#  include "tsan_update_shadow_word.inc"
  }
#endif

  // We did not find any races and have already stored
  // the current access info, so we are done.
  if (LIKELY(stored))
    return;
  // Choose a random candidate slot and replace it.
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  return;
RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           AccessType typ) {
  DCHECK(!(typ & kAccessAtomic));
  const bool kAccessIsWrite = !(typ & kAccessRead);
  const bool kIsAtomic = false;
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
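    // Use the largest access size whose bytes all fall within a single
    // 8-byte shadow cell.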
    if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}

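// Returns true if one of the shadow slots already records an equal or
// stronger access by the same thread after the last synchronization
// (a previous write covers a later read), so the current access can be
// skipped without losing any potential reports.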
ALWAYS_INLINE
bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  Shadow cur(a);
  for (uptr i = 0; i < kShadowCnt; i++) {
    Shadow old(LoadShadow(&s[i]));
    if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
        old.TidWithIgnore() == cur.TidWithIgnore() &&
        old.epoch() > sync_epoch && old.IsAtomic() == cur.IsAtomic() &&
        old.IsRead() <= cur.IsRead())
      return true;
  }
  return false;
}

#if TSAN_VECTORIZE
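// SHUF builds an _mm_shuffle_ps immediate: each index selects a 32-bit lane,
// i0/i1 pick the two low result lanes from v0 and i2/i3 pick the two high
// result lanes from v1 (each index occupies 2 bits of the immediate).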
#  define SHUF(v0, v1, i0, i1, i2, i3)                    \
    _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(v0), \
                                    _mm_castsi128_ps(v1), \
                                    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
ALWAYS_INLINE
bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  // This is an optimized version of ContainsSameAccessSlow.
  // load current access into access[0:63]
  const m128 access = _mm_cvtsi64_si128(a);
  // duplicate high part of access in addr0:
  // addr0[0:31]        = access[32:63]
  // addr0[32:63]       = access[32:63]
  // addr0[64:95]       = access[32:63]
  // addr0[96:127]      = access[32:63]
  const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
  // load 4 shadow slots
  const m128 shadow0 = _mm_load_si128((__m128i *)s);
  const m128 shadow1 = _mm_load_si128((__m128i *)s + 1);
  // load high parts of 4 shadow slots into addr_vect:
  // addr_vect[0:31]    = shadow0[32:63]
  // addr_vect[32:63]   = shadow0[96:127]
  // addr_vect[64:95]   = shadow1[32:63]
  // addr_vect[96:127]  = shadow1[96:127]
  m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
  if (!is_write) {
    // set IsRead bit in addr_vect
    const m128 rw_mask1 = _mm_cvtsi64_si128(1 << 15);
    const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
    addr_vect = _mm_or_si128(addr_vect, rw_mask);
  }
  // addr0 == addr_vect?
  const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
  // epoch1[0:63]       = sync_epoch
  const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
  // epoch[0:31]        = sync_epoch[0:31]
  // epoch[32:63]       = sync_epoch[0:31]
  // epoch[64:95]       = sync_epoch[0:31]
  // epoch[96:127]      = sync_epoch[0:31]
  const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
  // load low parts of shadow cell epochs into epoch_vect:
  // epoch_vect[0:31]   = shadow0[0:31]
  // epoch_vect[32:63]  = shadow0[64:95]
  // epoch_vect[64:95]  = shadow1[0:31]
  // epoch_vect[96:127] = shadow1[64:95]
  const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
  // epoch_vect > sync_epoch?
  const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
  // addr_res & epoch_res
  const m128 res = _mm_and_si128(addr_res, epoch_res);
  // mask[0] = res[7]
  // mask[1] = res[15]
  // ...
  // mask[15] = res[127]
  const int mask = _mm_movemask_epi8(res);
  return mask != 0;
}
#endif

ALWAYS_INLINE
bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
#if TSAN_VECTORIZE
  bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
  // NOTE: this check can fail if the shadow is concurrently mutated
  // by other threads. But it still can be useful if you modify
  // ContainsSameAccessFast and want to ensure that it's not completely broken.
  // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
  return res;
#else
  return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
#endif
}

ALWAYS_INLINE USED void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
                                     int kAccessSizeLog, bool kAccessIsWrite,
                                     bool kIsAtomic) {
  RawShadow *shadow_mem = MemToShadow(addr);
  DPrintf2(
      "#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void *)pc, (void *)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1], (uptr)shadow_mem[2],
      (uptr)shadow_mem[3]);
#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem(shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem(shadow_mem));
  }
#endif

  if (!SANITIZER_GO && !kAccessIsWrite && *shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    return;
  }

  FastState fast_state = thr->fast_state;
  if (UNLIKELY(fast_state.GetIgnoreBit())) {
    return;
  }

  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

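  // Fast path: if a shadow slot already records an equal or stronger access
  // since the last synchronization, this access cannot add new races.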
  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(), thr->fast_synch_epoch,
                                kAccessIsWrite))) {
    return;
  }

  if (kCollectHistory) {
    fast_state.IncrementEpoch();
    thr->fast_state = fast_state;
    TraceAddEvent(thr, fast_state, EventTypeMop, pc);
    cur.IncrementEpoch();
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
                    shadow_mem, cur);
}

// Called by MemoryAccessRange in tsan_rtl_thread.cpp
ALWAYS_INLINE USED void MemoryAccessImpl(ThreadState *thr, uptr addr,
                                         int kAccessSizeLog,
                                         bool kAccessIsWrite, bool kIsAtomic,
                                         u64 *shadow_mem, Shadow cur) {
  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(), thr->fast_synch_epoch,
                                kAccessIsWrite))) {
    return;
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
                    shadow_mem, cur);
}

static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps a 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
  if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
    RawShadow *p = MemToShadow(addr);
    CHECK(IsShadowMem(p));
    CHECK(IsShadowMem(p + size * kShadowCnt / kShadowCell - 1));
    // FIXME: may overwrite a part outside the region
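    // Each kShadowCell-sized app cell maps to kShadowCnt shadow slots:
    // write val into the first slot and clear the rest.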
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++) p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = GetPageSizeCached();
    RawShadow *begin = MemToShadow(addr);
    RawShadow *end = begin + size / kShadowCell * kShadowCnt;
    RawShadow *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++) *p++ = 0;
    }
    // Reset middle part.
    RawShadow *p1 = p;
    p = RoundDown(end, kPageSize);
    if (!MmapFixedSuperNoReserve((uptr)p1, (uptr)p - (uptr)p1))
      Die();
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++) *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (the user does not necessarily
  // touch the whole range) and is most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
                                         uptr size) {
  if (thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, addr, size);
  else
    MemoryResetRange(thr, pc, addr, size);
}

void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                       bool is_write) {
  if (size == 0)
    return;

  RawShadow *shadow_mem = MemToShadow(addr);
  DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n", thr->tid,
           (void *)pc, (void *)addr, (int)size, is_write);

#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsAppMem(addr + size - 1)) {
    Printf("Access to non app mem %zx\n", addr + size - 1);
    DCHECK(IsAppMem(addr + size - 1));
  }
  if (!IsShadowMem(shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem(shadow_mem));
  }
  if (!IsShadowMem(shadow_mem + size * kShadowCnt / 8 - 1)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem + size * kShadowCnt / 8 - 1,
           addr + size - 1);
    DCHECK(IsShadowMem(shadow_mem + size * kShadowCnt / 8 - 1));
  }
#endif

  if (*shadow_mem == kShadowRodata) {
    DCHECK(!is_write);
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;

  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

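  // Process the range as 1-byte accesses for the unaligned beginning and
  // ending, and as 8-byte accesses for the aligned middle, advancing
  // shadow_mem by one cell (kShadowCnt slots) per 8 application bytes.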
  bool unaligned = (addr % kShadowCell) != 0;

  // Handle unaligned beginning, if any.
  for (; addr % kShadowCell && size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, shadow_mem,
                     cur);
  }
  if (unaligned)
    shadow_mem += kShadowCnt;
  // Handle middle part, if any.
  for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
    int const kAccessSizeLog = 3;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, shadow_mem,
                     cur);
    shadow_mem += kShadowCnt;
  }
  // Handle ending, if any.
  for (; size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, shadow_mem,
                     cur);
  }
}

}  // namespace __tsan

#if !SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#  include "tsan_interface.inc"
#endif
605