//===-- memtag.h ------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_MEMTAG_H_
#define SCUDO_MEMTAG_H_

#include "internal_defs.h"

#if SCUDO_LINUX
#include <sys/auxv.h>
#include <sys/prctl.h>
#if defined(ANDROID_EXPERIMENTAL_MTE)
#include <bionic/mte_kernel.h>
#endif
#endif
namespace scudo {

#if defined(__aarch64__) || defined(SCUDO_FUZZ)

inline constexpr bool archSupportsMemoryTagging() { return true; }
inline constexpr uptr archMemoryTagGranuleSize() { return 16; }

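// With the AArch64 top-byte-ignore (TBI) feature, bits 56-63 of an address
// are ignored on memory access; MTE stores a 4-bit logical tag in bits 56-59.
// For example, for Ptr == 0x0b00007000001000, extractTag(Ptr) yields 0xb and
// untagPointer(Ptr) yields 0x7000001000.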
inline uptr untagPointer(uptr Ptr) { return Ptr & ((1ULL << 56) - 1); }

inline uint8_t extractTag(uptr Ptr) { return (Ptr >> 56) & 0xf; }

#else

inline constexpr bool archSupportsMemoryTagging() { return false; }

inline uptr archMemoryTagGranuleSize() {
  UNREACHABLE("memory tagging not supported");
}

inline uptr untagPointer(uptr Ptr) {
  (void)Ptr;
  UNREACHABLE("memory tagging not supported");
}

inline uint8_t extractTag(uptr Ptr) {
  (void)Ptr;
  UNREACHABLE("memory tagging not supported");
}

#endif

#if defined(__aarch64__)

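// The kernel advertises MTE support to userspace through the HWCAP2_MTE bit
// in the AT_HWCAP2 auxiliary vector entry.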
inline bool systemSupportsMemoryTagging() {
#if defined(ANDROID_EXPERIMENTAL_MTE)
  return getauxval(AT_HWCAP2) & HWCAP2_MTE;
#else
  return false;
#endif
}

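// Tag check faults are only raised if the thread has opted in via
// PR_SET_TAGGED_ADDR_CTRL; this queries whether the thread's tag check fault
// mode is anything other than "none".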
inline bool systemDetectsMemoryTagFaultsTestOnly() {
#if defined(ANDROID_EXPERIMENTAL_MTE)
  return (prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) & PR_MTE_TCF_MASK) !=
         PR_MTE_TCF_NONE;
#else
  return false;
#endif
}

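// While PSTATE.TCO (Tag Check Override) is set, loads and stores on the
// current thread are not tag checked. The helpers below flip the bit
// directly and are only intended for use in tests.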
inline void disableMemoryTagChecksTestOnly() {
  __asm__ __volatile__(".arch_extension mte; msr tco, #1");
}

inline void enableMemoryTagChecksTestOnly() {
  __asm__ __volatile__(".arch_extension mte; msr tco, #0");
}

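// RAII helper that sets PSTATE.TCO on construction and restores its previous
// value on destruction. A hypothetical use (StalePtr is illustrative): read
// through a pointer whose tag may no longer match the memory's tag:
//
//   {
//     ScopedDisableMemoryTagChecks NoTagChecks;
//     Value = *reinterpret_cast<u64 *>(StalePtr); // Would otherwise fault.
//   }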
class ScopedDisableMemoryTagChecks {
  uptr PrevTCO;

public:
  ScopedDisableMemoryTagChecks() {
    __asm__ __volatile__(".arch_extension mte; mrs %0, tco; msr tco, #1"
                         : "=r"(PrevTCO));
  }

  ~ScopedDisableMemoryTagChecks() {
    __asm__ __volatile__(".arch_extension mte; msr tco, %0" : : "r"(PrevTCO));
  }
};

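// Chooses a random tag for Ptr, avoiding any tags whose bit is set in
// ExcludeMask, and tags the region [Ptr, Ptr + roundUpTo(Size, 16)) with it,
// zeroing the memory as a side effect of stzg. Returns the tagged pointer in
// *TaggedBegin and the end of the tagged region in *TaggedEnd.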
inline void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask,
                         uptr *TaggedBegin, uptr *TaggedEnd) {
  void *End;
  __asm__ __volatile__(
      R"(
    .arch_extension mte

    // Set a random tag for Ptr in TaggedPtr. This needs to happen even if
    // Size = 0 so that TaggedPtr ends up pointing at a valid address.
    irg %[TaggedPtr], %[Ptr], %[ExcludeMask]
    mov %[Cur], %[TaggedPtr]

    // Skip the loop if Size = 0. We don't want to do any tagging in this case.
    cbz %[Size], 2f

    // Set the memory tag of the region
    // [TaggedPtr, TaggedPtr + roundUpTo(Size, 16))
    // to the pointer tag stored in TaggedPtr.
    add %[End], %[TaggedPtr], %[Size]

  1:
    stzg %[Cur], [%[Cur]], #16
    cmp %[Cur], %[End]
    b.lt 1b

  2:
  )"
      :
      [TaggedPtr] "=&r"(*TaggedBegin), [Cur] "=&r"(*TaggedEnd), [End] "=&r"(End)
      : [Ptr] "r"(Ptr), [Size] "r"(Size), [ExcludeMask] "r"(ExcludeMask)
      : "memory");
}

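// Returns Ptr retagged with a random tag, with the whole chunk
// [Ptr, Ptr + Size) tagged to match. The granule holding the chunk header
// and (when it lies within the block) the granule past the end of the chunk
// are tagged with 0 so that adjacent out-of-bounds accesses trap; BlockEnd
// is the untagged address one past the block containing the chunk.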
inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr BlockEnd) {
  // Prepare the granule before the chunk to store the chunk header by setting
  // its tag to 0. Normally its tag will already be 0, but in the case where a
  // chunk holding a low alignment allocation is reused for a higher alignment
  // allocation, the chunk may already have a non-zero tag from the previous
  // allocation.
  __asm__ __volatile__(".arch_extension mte; stg %0, [%0, #-16]"
                       :
                       : "r"(Ptr)
                       : "memory");

  uptr TaggedBegin, TaggedEnd;
  setRandomTag(Ptr, Size, 0, &TaggedBegin, &TaggedEnd);

  // Finally, set the tag of the granule past the end of the allocation to 0,
  // to catch linear overflows even if a previous larger allocation used the
  // same block and tag. Only do this if the granule past the end is in our
  // block, because this would otherwise lead to a SEGV if the allocation
  // covers the entire block and our block is at the end of a mapping. The tag
  // of the next block's header granule will be set to 0, so it will serve the
  // purpose of catching linear overflows in this case.
  uptr UntaggedEnd = untagPointer(TaggedEnd);
  if (UntaggedEnd != BlockEnd)
    __asm__ __volatile__(".arch_extension mte; stg %0, [%0]"
                         :
                         : "r"(UntaggedEnd)
                         : "memory");
  return reinterpret_cast<void *>(TaggedBegin);
}

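// Retags a chunk in place after its size changes. OldPtr and NewPtr are the
// tagged addresses one past the old and new end of the allocation: when
// growing, the new granules are tagged (and zeroed) to match OldPtr's tag;
// when shrinking, only the granule past the new end needs to be zero-tagged.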
inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
  uptr RoundOldPtr = roundUpTo(OldPtr, 16);
  if (RoundOldPtr >= NewPtr) {
    // If the allocation is shrinking we just need to set the tag past the end
    // of the allocation to 0. See explanation in prepareTaggedChunk above.
    uptr RoundNewPtr = untagPointer(roundUpTo(NewPtr, 16));
    if (RoundNewPtr != BlockEnd)
      __asm__ __volatile__(".arch_extension mte; stg %0, [%0]"
                           :
                           : "r"(RoundNewPtr)
                           : "memory");
    return;
  }

  __asm__ __volatile__(R"(
    .arch_extension mte

    // Set the memory tag of the region
    // [roundUpTo(OldPtr, 16), roundUpTo(NewPtr, 16))
    // to the pointer tag stored in OldPtr.
  1:
    stzg %[Cur], [%[Cur]], #16
    cmp %[Cur], %[End]
    b.lt 1b

    // Finally, set the tag of the granule past the end of the allocation to 0.
    and %[Cur], %[Cur], #(1 << 56) - 1
    cmp %[Cur], %[BlockEnd]
    b.eq 2f
    stg %[Cur], [%[Cur]]

  2:
  )"
                       : [Cur] "+&r"(RoundOldPtr), [End] "+&r"(NewPtr)
                       : [BlockEnd] "r"(BlockEnd)
                       : "memory");
}

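// Returns Ptr with its tag bits replaced by the allocation tag stored for the
// granule containing Ptr (ldg loads the tag from tag memory into the address
// register).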
inline uptr loadTag(uptr Ptr) {
  uptr TaggedPtr = Ptr;
  __asm__ __volatile__(".arch_extension mte; ldg %0, [%0]"
                       : "+r"(TaggedPtr)
                       :
                       : "memory");
  return TaggedPtr;
}

#else

inline bool systemSupportsMemoryTagging() {
  UNREACHABLE("memory tagging not supported");
}

inline bool systemDetectsMemoryTagFaultsTestOnly() {
  UNREACHABLE("memory tagging not supported");
}

inline void disableMemoryTagChecksTestOnly() {
  UNREACHABLE("memory tagging not supported");
}

inline void enableMemoryTagChecksTestOnly() {
  UNREACHABLE("memory tagging not supported");
}

struct ScopedDisableMemoryTagChecks {
  ScopedDisableMemoryTagChecks() {}
};

inline void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask,
                         uptr *TaggedBegin, uptr *TaggedEnd) {
  (void)Ptr;
  (void)Size;
  (void)ExcludeMask;
  (void)TaggedBegin;
  (void)TaggedEnd;
  UNREACHABLE("memory tagging not supported");
}

inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr BlockEnd) {
  (void)Ptr;
  (void)Size;
  (void)BlockEnd;
  UNREACHABLE("memory tagging not supported");
}

inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
  (void)OldPtr;
  (void)NewPtr;
  (void)BlockEnd;
  UNREACHABLE("memory tagging not supported");
}

inline uptr loadTag(uptr Ptr) {
  (void)Ptr;
  UNREACHABLE("memory tagging not supported");
}

#endif

} // namespace scudo

#endif