//===-- tsan_fd.cc --------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
11
12#include "tsan_fd.h"
13#include "tsan_rtl.h"
14#include <sanitizer_common/sanitizer_atomic.h>
15
16namespace __tsan {
17
// Two-level fd descriptor table: kTableSizeL1 top-level slots, each pointing
// to a lazily allocated array of kTableSizeL2 FdDesc entries (see fddesc()).
const int kTableSizeL1 = 1024;
const int kTableSizeL2 = 1024;
// Largest fd value (exclusive) that can be tracked; larger fds are treated
// as bogus (see bogusfd()).
const int kTableSize = kTableSizeL1 * kTableSizeL2;
21
// Reference-counted synchronization object associated with an fd.
// rc == (u64)-1 marks an immortal object (the statics in FdContext,
// set up by FdInit()); ref()/unref() skip counting for those.
struct FdSync {
  atomic_uint64_t rc;
};
25
// Per-fd descriptor stored in the two-level table.
struct FdDesc {
  FdSync *sync;        // sync object for acquire/release on this fd; may be 0
  int creation_tid;    // thread that created the fd
  u32 creation_stack;  // stack id of the creation point (CurrentStackId)
};
31
// Global fd tracking state.
struct FdContext {
  // L1 of the two-level descriptor table; each entry points to a lazily
  // allocated FdDesc[kTableSizeL2] array (published via CAS in fddesc()).
  atomic_uintptr_t tab[kTableSizeL1];
  // Addresses used for synchronization.
  FdSync globsync;  // shared by all fds when flags()->io_sync == 2
  FdSync filesync;  // shared by all file fds
  FdSync socksync;  // shared by all socket fds
  u64 connectsync;  // address used to synchronize connect() -> accept()
};

static FdContext fdctx;
42
43static bool bogusfd(int fd) {
44  // Apparently a bogus fd value.
45  return fd < 0 || fd >= kTableSize;
46}
47
48static FdSync *allocsync(ThreadState *thr, uptr pc) {
49  FdSync *s = (FdSync*)user_alloc(thr, pc, sizeof(FdSync), kDefaultAlignment,
50      false);
51  atomic_store(&s->rc, 1, memory_order_relaxed);
52  return s;
53}
54
55static FdSync *ref(FdSync *s) {
56  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1)
57    atomic_fetch_add(&s->rc, 1, memory_order_relaxed);
58  return s;
59}
60
61static void unref(ThreadState *thr, uptr pc, FdSync *s) {
62  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) {
63    if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) {
64      CHECK_NE(s, &fdctx.globsync);
65      CHECK_NE(s, &fdctx.filesync);
66      CHECK_NE(s, &fdctx.socksync);
67      user_free(thr, pc, s, false);
68    }
69  }
70}
71
// Returns the descriptor for fd, lazily allocating and publishing the
// corresponding L2 table. Safe against concurrent callers: CAS losers
// free their allocation and adopt the winner's table.
static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
  CHECK_GE(fd, 0);
  CHECK_LT(fd, kTableSize);
  atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
  uptr l1 = atomic_load(pl1, memory_order_consume);
  if (l1 == 0) {
    uptr size = kTableSizeL2 * sizeof(FdDesc);
    // We need this to reside in user memory to properly catch races on it.
    void *p = user_alloc(thr, pc, size, kDefaultAlignment, false);
    internal_memset(p, 0, size);
    // NOTE(review): the pc passed here is the address of this function
    // rather than the caller's pc — presumably an intentional marker for
    // the reset origin; confirm against other MemoryResetRange callers.
    MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
    if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
      l1 = (uptr)p;
    else
      // Another thread published its table first; free ours and use theirs
      // (atomic_compare_exchange_strong stored the winner's value in l1).
      user_free(thr, pc, p, false);
  }
  return &((FdDesc*)l1)[fd % kTableSizeL2];  // NOLINT
}
90
// Binds fd to sync object s and records the creation context.
// s must be already ref'ed; ownership of that reference transfers here
// (it is either stored in the descriptor or dropped, depending on
// flags()->io_sync: 0 - no fd sync, 1 - per-fd sync object, 2 - all fds
// synchronize through the single global object).
static void init(ThreadState *thr, uptr pc, int fd, FdSync *s) {
  FdDesc *d = fddesc(thr, pc, fd);
  // As a matter of fact, we don't intercept all close calls.
  // See e.g. libc __res_iclose().
  if (d->sync) {
    // Drop the stale sync object left by an unintercepted close.
    unref(thr, pc, d->sync);
    d->sync = 0;
  }
  if (flags()->io_sync == 0) {
    unref(thr, pc, s);
  } else if (flags()->io_sync == 1) {
    d->sync = s;
  } else if (flags()->io_sync == 2) {
    unref(thr, pc, s);
    d->sync = &fdctx.globsync;
  }
  d->creation_tid = thr->tid;
  d->creation_stack = CurrentStackId(thr, pc);
  // To catch races between fd usage and open.
  MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
}
113
114void FdInit() {
115  atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed);
116  atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed);
117  atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed);
118}
119
120void FdOnFork(ThreadState *thr, uptr pc) {
121  // On fork() we need to reset all fd's, because the child is going
122  // close all them, and that will cause races between previous read/write
123  // and the close.
124  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
125    FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
126    if (tab == 0)
127      break;
128    for (int l2 = 0; l2 < kTableSizeL2; l2++) {
129      FdDesc *d = &tab[l2];
130      MemoryResetRange(thr, pc, (uptr)d, 8);
131    }
132  }
133}
134
135bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
136  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
137    FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
138    if (tab == 0)
139      break;
140    if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
141      int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
142      FdDesc *d = &tab[l2];
143      *fd = l1 * kTableSizeL1 + l2;
144      *tid = d->creation_tid;
145      *stack = d->creation_stack;
146      return true;
147    }
148  }
149  return false;
150}
151
152void FdAcquire(ThreadState *thr, uptr pc, int fd) {
153  if (bogusfd(fd))
154    return;
155  FdDesc *d = fddesc(thr, pc, fd);
156  FdSync *s = d->sync;
157  DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
158  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
159  if (s)
160    Acquire(thr, pc, (uptr)s);
161}
162
163void FdRelease(ThreadState *thr, uptr pc, int fd) {
164  if (bogusfd(fd))
165    return;
166  FdDesc *d = fddesc(thr, pc, fd);
167  FdSync *s = d->sync;
168  DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
169  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
170  if (s)
171    Release(thr, pc, (uptr)s);
172}
173
174void FdAccess(ThreadState *thr, uptr pc, int fd) {
175  DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
176  if (bogusfd(fd))
177    return;
178  FdDesc *d = fddesc(thr, pc, fd);
179  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
180}
181
182void FdClose(ThreadState *thr, uptr pc, int fd) {
183  DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
184  if (bogusfd(fd))
185    return;
186  FdDesc *d = fddesc(thr, pc, fd);
187  // To catch races between fd usage and close.
188  MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
189  // We need to clear it, because if we do not intercept any call out there
190  // that creates fd, we will hit false postives.
191  MemoryResetRange(thr, pc, (uptr)d, 8);
192  unref(thr, pc, d->sync);
193  d->sync = 0;
194  d->creation_tid = 0;
195  d->creation_stack = 0;
196}
197
198void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
199  DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
200  if (bogusfd(fd))
201    return;
202  init(thr, pc, fd, &fdctx.filesync);
203}
204
205void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd) {
206  DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
207  if (bogusfd(oldfd) || bogusfd(newfd))
208    return;
209  // Ignore the case when user dups not yet connected socket.
210  FdDesc *od = fddesc(thr, pc, oldfd);
211  MemoryRead(thr, pc, (uptr)od, kSizeLog8);
212  FdClose(thr, pc, newfd);
213  init(thr, pc, newfd, ref(od->sync));
214}
215
216void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
217  DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd);
218  FdSync *s = allocsync(thr, pc);
219  init(thr, pc, rfd, ref(s));
220  init(thr, pc, wfd, ref(s));
221  unref(thr, pc, s);
222}
223
224void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
225  DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
226  if (bogusfd(fd))
227    return;
228  init(thr, pc, fd, allocsync(thr, pc));
229}
230
231void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
232  DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
233  if (bogusfd(fd))
234    return;
235  init(thr, pc, fd, 0);
236}
237
238void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
239  DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
240  if (bogusfd(fd))
241    return;
242  init(thr, pc, fd, 0);
243}
244
245void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
246  DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
247  if (bogusfd(fd))
248    return;
249  init(thr, pc, fd, allocsync(thr, pc));
250}
251
252void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
253  DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
254  if (bogusfd(fd))
255    return;
256  // It can be a UDP socket.
257  init(thr, pc, fd, &fdctx.socksync);
258}
259
260void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
261  DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd);
262  if (bogusfd(fd))
263    return;
264  // Synchronize connect->accept.
265  Acquire(thr, pc, (uptr)&fdctx.connectsync);
266  init(thr, pc, newfd, &fdctx.socksync);
267}
268
269void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) {
270  DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd);
271  if (bogusfd(fd))
272    return;
273  // Synchronize connect->accept.
274  Release(thr, pc, (uptr)&fdctx.connectsync);
275}
276
277void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
278  DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd);
279  if (bogusfd(fd))
280    return;
281  init(thr, pc, fd, &fdctx.socksync);
282}
283
284uptr File2addr(const char *path) {
285  (void)path;
286  static u64 addr;
287  return (uptr)&addr;
288}
289
290uptr Dir2addr(const char *path) {
291  (void)path;
292  static u64 addr;
293  return (uptr)&addr;
294}
295
296}  //  namespace __tsan
297