//===-- xray_interface.cpp --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// Implementation of the API functions.
//
//===----------------------------------------------------------------------===//
14353944Sdim
15353944Sdim#include "xray_interface_internal.h"
16353944Sdim
17353944Sdim#include <cstdint>
18353944Sdim#include <cstdio>
19353944Sdim#include <errno.h>
20353944Sdim#include <limits>
21353944Sdim#include <string.h>
22353944Sdim#include <sys/mman.h>
23353944Sdim
24353944Sdim#if SANITIZER_FUCHSIA
25353944Sdim#include <zircon/process.h>
26353944Sdim#include <zircon/sanitizer.h>
27353944Sdim#include <zircon/status.h>
28353944Sdim#include <zircon/syscalls.h>
29353944Sdim#endif
30353944Sdim
31353944Sdim#include "sanitizer_common/sanitizer_addrhashmap.h"
32353944Sdim#include "sanitizer_common/sanitizer_common.h"
33353944Sdim
34353944Sdim#include "xray_defs.h"
35353944Sdim#include "xray_flags.h"
36353944Sdim
// Globals owned by another TU of the XRay runtime (presumably the init code —
// TODO confirm): the initialization flag, the sled map, and the mutex that
// guards reads/writes of the sled map.
extern __sanitizer::SpinMutex XRayInstrMapMutex;
extern __sanitizer::atomic_uint8_t XRayInitialized;
extern __xray::XRaySledMap XRayInstrMap;
40353944Sdim
41353944Sdimnamespace __xray {
42353944Sdim
// Byte size of a single XRay instrumentation sled on the target architecture.
// Used below to compute the extent of the memory region whose protection must
// be changed before patching (max sled address + cSledLength).
#if defined(__x86_64__)
static const int16_t cSledLength = 12;
#elif defined(__aarch64__)
static const int16_t cSledLength = 32;
#elif defined(__arm__)
static const int16_t cSledLength = 28;
#elif SANITIZER_MIPS32
static const int16_t cSledLength = 48;
#elif SANITIZER_MIPS64
static const int16_t cSledLength = 64;
#elif defined(__powerpc64__)
static const int16_t cSledLength = 8;
#else
#error "Unsupported CPU Architecture"
#endif /* CPU architecture */
58353944Sdim
// This is the function to call when we encounter the entry or exit sleds.
atomic_uintptr_t XRayPatchedFunction{0};

// This is the function to call from the arg1-enabled sleds/trampolines.
atomic_uintptr_t XRayArgLogger{0};

// This is the function to call when we encounter a custom event log call.
atomic_uintptr_t XRayPatchedCustomEvent{0};

// This is the function to call when we encounter a typed event log call.
atomic_uintptr_t XRayPatchedTypedEvent{0};

// This is the global status to determine whether we are currently
// patching/unpatching.
atomic_uint8_t XRayPatching{0};

// Metadata recorded for each registered typed-event descriptor string.
struct TypeDescription {
  // Id handed back to the caller of __xray_register_event_type.
  uint32_t type_id;
  // Length of the description string, capped at 1024 (see strnlen use below).
  std::size_t description_string_length;
};

using TypeDescriptorMapType = AddrHashMap<TypeDescription, 11>;
// An address map from immutable descriptors to type ids.
TypeDescriptorMapType TypeDescriptorAddressMap{};

// Monotonically increasing source of fresh type ids for the map above.
atomic_uint32_t TypeEventDescriptorCounter{0};
85353944Sdim
// MProtectHelper is an RAII wrapper for calls to mprotect(...) that will
// undo any successful mprotect(...) changes. This is used to make a page
// writeable and executable, and upon destruction if it was successful in
// doing so returns the page into a read-only and executable page.
//
// This is only used specifically for runtime-patching of the XRay
// instrumentation points. This assumes that the executable pages are
// originally read-and-execute only.
class MProtectHelper {
  void *PageAlignedAddr; // start of the region; must be page-aligned
  std::size_t MProtectLen;
  bool MustCleanup; // true only after a successful protection change

public:
  // Records the region to (un)protect. On Fuchsia the length is rounded up to
  // a whole number of pages because the zircon syscall requires it; elsewhere
  // PageSize is unused and mprotect handles sub-page lengths itself.
  explicit MProtectHelper(void *PageAlignedAddr,
                          std::size_t MProtectLen,
                          std::size_t PageSize) XRAY_NEVER_INSTRUMENT
      : PageAlignedAddr(PageAlignedAddr),
        MProtectLen(MProtectLen),
        MustCleanup(false) {
#if SANITIZER_FUCHSIA
    MProtectLen = RoundUpTo(MProtectLen, PageSize);
#endif
  }

  // Makes the region writeable (and executable). Returns 0/-1 in the style of
  // mprotect; only on success is the destructor's cleanup armed.
  int MakeWriteable() XRAY_NEVER_INSTRUMENT {
#if SANITIZER_FUCHSIA
    auto R = __sanitizer_change_code_protection(
        reinterpret_cast<uintptr_t>(PageAlignedAddr), MProtectLen, true);
    if (R != ZX_OK) {
      Report("XRay: cannot change code protection: %s\n",
             _zx_status_get_string(R));
      return -1;
    }
    MustCleanup = true;
    return 0;
#else
    auto R = mprotect(PageAlignedAddr, MProtectLen,
                      PROT_READ | PROT_WRITE | PROT_EXEC);
    if (R != -1)
      MustCleanup = true;
    return R;
#endif
  }

  // Restores read+execute protection iff MakeWriteable succeeded. Failures
  // here are reported (Fuchsia) or ignored (POSIX) — we cannot throw from a
  // destructor.
  ~MProtectHelper() XRAY_NEVER_INSTRUMENT {
    if (MustCleanup) {
#if SANITIZER_FUCHSIA
      auto R = __sanitizer_change_code_protection(
          reinterpret_cast<uintptr_t>(PageAlignedAddr), MProtectLen, false);
      if (R != ZX_OK) {
        Report("XRay: cannot change code protection: %s\n",
               _zx_status_get_string(R));
      }
#else
      mprotect(PageAlignedAddr, MProtectLen, PROT_READ | PROT_EXEC);
#endif
    }
  }
};
146353944Sdim
147353944Sdimnamespace {
148353944Sdim
149353944Sdimbool patchSled(const XRaySledEntry &Sled, bool Enable,
150353944Sdim               int32_t FuncId) XRAY_NEVER_INSTRUMENT {
151353944Sdim  bool Success = false;
152353944Sdim  switch (Sled.Kind) {
153353944Sdim  case XRayEntryType::ENTRY:
154353944Sdim    Success = patchFunctionEntry(Enable, FuncId, Sled, __xray_FunctionEntry);
155353944Sdim    break;
156353944Sdim  case XRayEntryType::EXIT:
157353944Sdim    Success = patchFunctionExit(Enable, FuncId, Sled);
158353944Sdim    break;
159353944Sdim  case XRayEntryType::TAIL:
160353944Sdim    Success = patchFunctionTailExit(Enable, FuncId, Sled);
161353944Sdim    break;
162353944Sdim  case XRayEntryType::LOG_ARGS_ENTRY:
163353944Sdim    Success = patchFunctionEntry(Enable, FuncId, Sled, __xray_ArgLoggerEntry);
164353944Sdim    break;
165353944Sdim  case XRayEntryType::CUSTOM_EVENT:
166353944Sdim    Success = patchCustomEvent(Enable, FuncId, Sled);
167353944Sdim    break;
168353944Sdim  case XRayEntryType::TYPED_EVENT:
169353944Sdim    Success = patchTypedEvent(Enable, FuncId, Sled);
170353944Sdim    break;
171353944Sdim  default:
172353944Sdim    Report("Unsupported sled kind '%d' @%04x\n", Sled.Address, int(Sled.Kind));
173353944Sdim    return false;
174353944Sdim  }
175353944Sdim  return Success;
176353944Sdim}
177353944Sdim
178353944SdimXRayPatchingStatus patchFunction(int32_t FuncId,
179353944Sdim                                 bool Enable) XRAY_NEVER_INSTRUMENT {
180353944Sdim  if (!atomic_load(&XRayInitialized,
181353944Sdim                                memory_order_acquire))
182353944Sdim    return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.
183353944Sdim
184353944Sdim  uint8_t NotPatching = false;
185353944Sdim  if (!atomic_compare_exchange_strong(
186353944Sdim          &XRayPatching, &NotPatching, true, memory_order_acq_rel))
187353944Sdim    return XRayPatchingStatus::ONGOING; // Already patching.
188353944Sdim
189353944Sdim  // Next, we look for the function index.
190353944Sdim  XRaySledMap InstrMap;
191353944Sdim  {
192353944Sdim    SpinMutexLock Guard(&XRayInstrMapMutex);
193353944Sdim    InstrMap = XRayInstrMap;
194353944Sdim  }
195353944Sdim
196353944Sdim  // If we don't have an index, we can't patch individual functions.
197353944Sdim  if (InstrMap.Functions == 0)
198353944Sdim    return XRayPatchingStatus::NOT_INITIALIZED;
199353944Sdim
200353944Sdim  // FuncId must be a positive number, less than the number of functions
201353944Sdim  // instrumented.
202353944Sdim  if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions) {
203353944Sdim    Report("Invalid function id provided: %d\n", FuncId);
204353944Sdim    return XRayPatchingStatus::FAILED;
205353944Sdim  }
206353944Sdim
207353944Sdim  // Now we patch ths sleds for this specific function.
208353944Sdim  auto SledRange = InstrMap.SledsIndex[FuncId - 1];
209353944Sdim  auto *f = SledRange.Begin;
210353944Sdim  auto *e = SledRange.End;
211353944Sdim
212353944Sdim  bool SucceedOnce = false;
213353944Sdim  while (f != e)
214353944Sdim    SucceedOnce |= patchSled(*f++, Enable, FuncId);
215353944Sdim
216353944Sdim  atomic_store(&XRayPatching, false,
217353944Sdim                            memory_order_release);
218353944Sdim
219353944Sdim  if (!SucceedOnce) {
220353944Sdim    Report("Failed patching any sled for function '%d'.", FuncId);
221353944Sdim    return XRayPatchingStatus::FAILED;
222353944Sdim  }
223353944Sdim
224353944Sdim  return XRayPatchingStatus::SUCCESS;
225353944Sdim}
226353944Sdim
227353944Sdim// controlPatching implements the common internals of the patching/unpatching
228353944Sdim// implementation. |Enable| defines whether we're enabling or disabling the
229353944Sdim// runtime XRay instrumentation.
230353944SdimXRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
231353944Sdim  if (!atomic_load(&XRayInitialized,
232353944Sdim                                memory_order_acquire))
233353944Sdim    return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.
234353944Sdim
235353944Sdim  uint8_t NotPatching = false;
236353944Sdim  if (!atomic_compare_exchange_strong(
237353944Sdim          &XRayPatching, &NotPatching, true, memory_order_acq_rel))
238353944Sdim    return XRayPatchingStatus::ONGOING; // Already patching.
239353944Sdim
240353944Sdim  uint8_t PatchingSuccess = false;
241353944Sdim  auto XRayPatchingStatusResetter =
242353944Sdim      at_scope_exit([&PatchingSuccess] {
243353944Sdim        if (!PatchingSuccess)
244353944Sdim          atomic_store(&XRayPatching, false,
245353944Sdim                                    memory_order_release);
246353944Sdim      });
247353944Sdim
248353944Sdim  XRaySledMap InstrMap;
249353944Sdim  {
250353944Sdim    SpinMutexLock Guard(&XRayInstrMapMutex);
251353944Sdim    InstrMap = XRayInstrMap;
252353944Sdim  }
253353944Sdim  if (InstrMap.Entries == 0)
254353944Sdim    return XRayPatchingStatus::NOT_INITIALIZED;
255353944Sdim
256353944Sdim  uint32_t FuncId = 1;
257353944Sdim  uint64_t CurFun = 0;
258353944Sdim
259353944Sdim  // First we want to find the bounds for which we have instrumentation points,
260353944Sdim  // and try to get as few calls to mprotect(...) as possible. We're assuming
261353944Sdim  // that all the sleds for the instrumentation map are contiguous as a single
262353944Sdim  // set of pages. When we do support dynamic shared object instrumentation,
263353944Sdim  // we'll need to do this for each set of page load offsets per DSO loaded. For
264353944Sdim  // now we're assuming we can mprotect the whole section of text between the
265353944Sdim  // minimum sled address and the maximum sled address (+ the largest sled
266353944Sdim  // size).
267353944Sdim  auto MinSled = InstrMap.Sleds[0];
268353944Sdim  auto MaxSled = InstrMap.Sleds[InstrMap.Entries - 1];
269353944Sdim  for (std::size_t I = 0; I < InstrMap.Entries; I++) {
270353944Sdim    const auto &Sled = InstrMap.Sleds[I];
271353944Sdim    if (Sled.Address < MinSled.Address)
272353944Sdim      MinSled = Sled;
273353944Sdim    if (Sled.Address > MaxSled.Address)
274353944Sdim      MaxSled = Sled;
275353944Sdim  }
276353944Sdim
277353944Sdim  const size_t PageSize = flags()->xray_page_size_override > 0
278353944Sdim                              ? flags()->xray_page_size_override
279353944Sdim                              : GetPageSizeCached();
280353944Sdim  if ((PageSize == 0) || ((PageSize & (PageSize - 1)) != 0)) {
281353944Sdim    Report("System page size is not a power of two: %lld\n", PageSize);
282353944Sdim    return XRayPatchingStatus::FAILED;
283353944Sdim  }
284353944Sdim
285353944Sdim  void *PageAlignedAddr =
286353944Sdim      reinterpret_cast<void *>(MinSled.Address & ~(PageSize - 1));
287353944Sdim  size_t MProtectLen =
288353944Sdim      (MaxSled.Address - reinterpret_cast<uptr>(PageAlignedAddr)) + cSledLength;
289353944Sdim  MProtectHelper Protector(PageAlignedAddr, MProtectLen, PageSize);
290353944Sdim  if (Protector.MakeWriteable() == -1) {
291353944Sdim    Report("Failed mprotect: %d\n", errno);
292353944Sdim    return XRayPatchingStatus::FAILED;
293353944Sdim  }
294353944Sdim
295353944Sdim  for (std::size_t I = 0; I < InstrMap.Entries; ++I) {
296353944Sdim    auto &Sled = InstrMap.Sleds[I];
297353944Sdim    auto F = Sled.Function;
298353944Sdim    if (CurFun == 0)
299353944Sdim      CurFun = F;
300353944Sdim    if (F != CurFun) {
301353944Sdim      ++FuncId;
302353944Sdim      CurFun = F;
303353944Sdim    }
304353944Sdim    patchSled(Sled, Enable, FuncId);
305353944Sdim  }
306353944Sdim  atomic_store(&XRayPatching, false,
307353944Sdim                            memory_order_release);
308353944Sdim  PatchingSuccess = true;
309353944Sdim  return XRayPatchingStatus::SUCCESS;
310353944Sdim}
311353944Sdim
312353944SdimXRayPatchingStatus mprotectAndPatchFunction(int32_t FuncId,
313353944Sdim                                            bool Enable) XRAY_NEVER_INSTRUMENT {
314353944Sdim  XRaySledMap InstrMap;
315353944Sdim  {
316353944Sdim    SpinMutexLock Guard(&XRayInstrMapMutex);
317353944Sdim    InstrMap = XRayInstrMap;
318353944Sdim  }
319353944Sdim
320353944Sdim  // FuncId must be a positive number, less than the number of functions
321353944Sdim  // instrumented.
322353944Sdim  if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions) {
323353944Sdim    Report("Invalid function id provided: %d\n", FuncId);
324353944Sdim    return XRayPatchingStatus::FAILED;
325353944Sdim  }
326353944Sdim
327353944Sdim  const size_t PageSize = flags()->xray_page_size_override > 0
328353944Sdim                              ? flags()->xray_page_size_override
329353944Sdim                              : GetPageSizeCached();
330353944Sdim  if ((PageSize == 0) || ((PageSize & (PageSize - 1)) != 0)) {
331353944Sdim    Report("Provided page size is not a power of two: %lld\n", PageSize);
332353944Sdim    return XRayPatchingStatus::FAILED;
333353944Sdim  }
334353944Sdim
335353944Sdim  // Here we compute the minumum sled and maximum sled associated with a
336353944Sdim  // particular function ID.
337353944Sdim  auto SledRange = InstrMap.SledsIndex[FuncId - 1];
338353944Sdim  auto *f = SledRange.Begin;
339353944Sdim  auto *e = SledRange.End;
340353944Sdim  auto MinSled = *f;
341353944Sdim  auto MaxSled = *(SledRange.End - 1);
342353944Sdim  while (f != e) {
343353944Sdim    if (f->Address < MinSled.Address)
344353944Sdim      MinSled = *f;
345353944Sdim    if (f->Address > MaxSled.Address)
346353944Sdim      MaxSled = *f;
347353944Sdim    ++f;
348353944Sdim  }
349353944Sdim
350353944Sdim  void *PageAlignedAddr =
351353944Sdim      reinterpret_cast<void *>(MinSled.Address & ~(PageSize - 1));
352353944Sdim  size_t MProtectLen =
353353944Sdim      (MaxSled.Address - reinterpret_cast<uptr>(PageAlignedAddr)) + cSledLength;
354353944Sdim  MProtectHelper Protector(PageAlignedAddr, MProtectLen, PageSize);
355353944Sdim  if (Protector.MakeWriteable() == -1) {
356353944Sdim    Report("Failed mprotect: %d\n", errno);
357353944Sdim    return XRayPatchingStatus::FAILED;
358353944Sdim  }
359353944Sdim  return patchFunction(FuncId, Enable);
360353944Sdim}
361353944Sdim
362353944Sdim} // namespace
363353944Sdim
364353944Sdim} // namespace __xray
365353944Sdim
366353944Sdimusing namespace __xray;
367353944Sdim
368353944Sdim// The following functions are declared `extern "C" {...}` in the header, hence
369353944Sdim// they're defined in the global namespace.
370353944Sdim
371353944Sdimint __xray_set_handler(void (*entry)(int32_t,
372353944Sdim                                     XRayEntryType)) XRAY_NEVER_INSTRUMENT {
373353944Sdim  if (atomic_load(&XRayInitialized,
374353944Sdim                               memory_order_acquire)) {
375353944Sdim
376353944Sdim    atomic_store(&__xray::XRayPatchedFunction,
377353944Sdim                              reinterpret_cast<uintptr_t>(entry),
378353944Sdim                              memory_order_release);
379353944Sdim    return 1;
380353944Sdim  }
381353944Sdim  return 0;
382353944Sdim}
383353944Sdim
384353944Sdimint __xray_set_customevent_handler(void (*entry)(void *, size_t))
385353944Sdim    XRAY_NEVER_INSTRUMENT {
386353944Sdim  if (atomic_load(&XRayInitialized,
387353944Sdim                               memory_order_acquire)) {
388353944Sdim    atomic_store(&__xray::XRayPatchedCustomEvent,
389353944Sdim                              reinterpret_cast<uintptr_t>(entry),
390353944Sdim                              memory_order_release);
391353944Sdim    return 1;
392353944Sdim  }
393353944Sdim  return 0;
394353944Sdim}
395353944Sdim
396353944Sdimint __xray_set_typedevent_handler(void (*entry)(
397353944Sdim    uint16_t, const void *, size_t)) XRAY_NEVER_INSTRUMENT {
398353944Sdim  if (atomic_load(&XRayInitialized,
399353944Sdim                               memory_order_acquire)) {
400353944Sdim    atomic_store(&__xray::XRayPatchedTypedEvent,
401353944Sdim                              reinterpret_cast<uintptr_t>(entry),
402353944Sdim                              memory_order_release);
403353944Sdim    return 1;
404353944Sdim  }
405353944Sdim  return 0;
406353944Sdim}
407353944Sdim
// Uninstalls the function entry/exit handler by setting it to null.
int __xray_remove_handler() XRAY_NEVER_INSTRUMENT {
  return __xray_set_handler(nullptr);
}
411353944Sdim
// Uninstalls the custom-event handler by setting it to null.
int __xray_remove_customevent_handler() XRAY_NEVER_INSTRUMENT {
  return __xray_set_customevent_handler(nullptr);
}
415353944Sdim
// Uninstalls the typed-event handler by setting it to null.
int __xray_remove_typedevent_handler() XRAY_NEVER_INSTRUMENT {
  return __xray_set_typedevent_handler(nullptr);
}
419353944Sdim
// Returns a process-unique id for the given event-type descriptor string.
// NOTE: the map is keyed on the descriptor's *address*, not its contents, so
// callers must pass a pointer to an immutable string; two distinct pointers
// with identical contents get two different ids.
uint16_t __xray_register_event_type(
    const char *const event_type) XRAY_NEVER_INSTRUMENT {
  // The Handle holds the bucket lock for its lifetime; created() is true only
  // on the first registration of this address.
  TypeDescriptorMapType::Handle h(&TypeDescriptorAddressMap, (uptr)event_type);
  if (h.created()) {
    h->type_id = atomic_fetch_add(
        &TypeEventDescriptorCounter, 1, memory_order_acq_rel);
    // Record the description length, capped at 1024 characters.
    h->description_string_length = strnlen(event_type, 1024);
  }
  return h->type_id;
}
430353944Sdim
// Enables XRay instrumentation by patching all known sleds.
XRayPatchingStatus __xray_patch() XRAY_NEVER_INSTRUMENT {
  return controlPatching(true);
}
434353944Sdim
// Disables XRay instrumentation by unpatching all known sleds.
XRayPatchingStatus __xray_unpatch() XRAY_NEVER_INSTRUMENT {
  return controlPatching(false);
}
438353944Sdim
// Patches the sleds of the single function identified by |FuncId| (1-based).
XRayPatchingStatus __xray_patch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
  return mprotectAndPatchFunction(FuncId, true);
}
442353944Sdim
// Unpatches the sleds of the single function identified by |FuncId| (1-based).
XRayPatchingStatus
__xray_unpatch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
  return mprotectAndPatchFunction(FuncId, false);
}
447353944Sdim
448353944Sdimint __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType, uint64_t)) {
449353944Sdim  if (!atomic_load(&XRayInitialized,
450353944Sdim                                memory_order_acquire))
451353944Sdim    return 0;
452353944Sdim
453353944Sdim  // A relaxed write might not be visible even if the current thread gets
454353944Sdim  // scheduled on a different CPU/NUMA node.  We need to wait for everyone to
455353944Sdim  // have this handler installed for consistency of collected data across CPUs.
456353944Sdim  atomic_store(&XRayArgLogger, reinterpret_cast<uint64_t>(entry),
457353944Sdim                            memory_order_release);
458353944Sdim  return 1;
459353944Sdim}
460353944Sdim
// Uninstalls the arg1-logging handler by setting it to null.
int __xray_remove_handler_arg1() { return __xray_set_handler_arg1(nullptr); }
462353944Sdim
// Returns the address of the instrumented function with the given 1-based id,
// or 0 if the id is out of range. The address comes from the function's first
// indexed sled entry.
uintptr_t __xray_function_address(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
  SpinMutexLock Guard(&XRayInstrMapMutex);
  if (FuncId <= 0 || static_cast<size_t>(FuncId) > XRayInstrMap.Functions)
    return 0;
  return XRayInstrMap.SledsIndex[FuncId - 1].Begin->Function
// On PPC, function entries are always aligned to 16 bytes. The beginning of a
// sled might be a local entry, which is always +8 based on the global entry.
// Always return the global entry.
#ifdef __PPC__
         & ~0xf
#endif
      ;
}
476353944Sdim
// Returns the number of instrumented functions, i.e. the largest valid
// function id (ids are 1-based).
size_t __xray_max_function_id() XRAY_NEVER_INSTRUMENT {
  SpinMutexLock Guard(&XRayInstrMapMutex);
  return XRayInstrMap.Functions;
}
481