--- xray_fdr_log_writer.h (344779)
+++ xray_fdr_log_writer.h (353358)
//===-- xray_fdr_log_writer.h ---------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a function call tracing system.
//
//===----------------------------------------------------------------------===//
#ifndef COMPILER_RT_LIB_XRAY_XRAY_FDR_LOG_WRITER_H_
#define COMPILER_RT_LIB_XRAY_XRAY_FDR_LOG_WRITER_H_

#include "xray_buffer_queue.h"
#include "xray_fdr_log_records.h"
#include <functional>
#include <tuple>
#include <type_traits>
#include <utility>

namespace __xray {

// Copies the raw bytes of each tuple element into a buffer, in order,
// recursing over the element indices; the second overload terminates the
// recursion once Index runs past the end of the tuple.
template <size_t Index> struct SerializerImpl {
  template <class Tuple,
            typename std::enable_if<
                Index<std::tuple_size<
                          typename std::remove_reference<Tuple>::type>::value,
                      int>::type = 0>
  static void serializeTo(char *Buffer, Tuple &&T) {
    auto P = reinterpret_cast<const char *>(&std::get<Index>(T));
    constexpr auto Size = sizeof(std::get<Index>(T));
    internal_memcpy(Buffer, P, Size);
    SerializerImpl<Index + 1>::serializeTo(Buffer + Size,
                                           std::forward<Tuple>(T));
  }

  template <class Tuple,
            typename std::enable_if<
                Index >= std::tuple_size<typename std::remove_reference<
                             Tuple>::type>::value,
                int>::type = 0>
  static void serializeTo(char *, Tuple &&) {}
};

using Serializer = SerializerImpl<0>;

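// For illustration, a minimal sketch (with hypothetical values) of what the
// Serializer does: it copies the raw bytes of each tuple element into a flat
// buffer, back to back, in tuple order, with no padding between elements.
//
//   char Buf[sizeof(uint16_t) + sizeof(uint32_t)];
//   uint16_t A = 0x0102;
//   uint32_t B = 0x03040506;
//   Serializer::serializeTo(Buf, std::make_tuple(A, B));
//   // Buf now holds the two bytes of A followed by the four bytes of B,
//   // in host byte order.
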
// Sums sizeof() over all element types of a tuple, at compile time.
template <class Tuple, size_t Index> struct AggregateSizesImpl {
  static constexpr size_t value =
      sizeof(typename std::tuple_element<Index, Tuple>::type) +
      AggregateSizesImpl<Tuple, Index - 1>::value;
};

template <class Tuple> struct AggregateSizesImpl<Tuple, 0> {
  static constexpr size_t value =
      sizeof(typename std::tuple_element<0, Tuple>::type);
};

template <class Tuple> struct AggregateSizes {
  static constexpr size_t value =
      AggregateSizesImpl<Tuple, std::tuple_size<Tuple>::value - 1>::value;
};

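// A compile-time sanity check, as a sketch of what AggregateSizes computes:
// the sum of sizeof() over the tuple's element types (not sizeof the tuple
// itself, which may include padding).
//
//   static_assert(AggregateSizes<std::tuple<uint16_t, uint32_t>>::value ==
//                     sizeof(uint16_t) + sizeof(uint32_t),
//                 "AggregateSizes sums element sizes");
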
template <MetadataRecord::RecordKinds Kind, class... DataTypes>
MetadataRecord createMetadataRecord(DataTypes &&... Ds) {
  static_assert(AggregateSizes<std::tuple<DataTypes...>>::value <=
                    sizeof(MetadataRecord) - 1,
                "Metadata payload longer than metadata buffer!");
  MetadataRecord R;
  R.Type = 1;
  R.RecordKind = static_cast<uint8_t>(Kind);
  Serializer::serializeTo(R.Data,
                          std::make_tuple(std::forward<DataTypes>(Ds)...));
  return R;
}

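// For illustration, a hedged sketch using the CallArgument kind that appears
// below: the record's first byte encodes Type and RecordKind, and the
// payload bytes are serialized into R.Data.
//
//   uint64_t Arg = 42;
//   MetadataRecord A =
//       createMetadataRecord<MetadataRecord::RecordKinds::CallArgument>(Arg);
//   // The static_assert holds since sizeof(Arg) fits in the payload, and
//   // A.Data now begins with the eight raw bytes of Arg.
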
class FDRLogWriter {
  BufferQueue::Buffer &Buffer;
  char *NextRecord = nullptr;

  template <class T> void writeRecord(const T &R) {
    internal_memcpy(NextRecord, reinterpret_cast<const char *>(&R), sizeof(T));
    NextRecord += sizeof(T);
    // We need this atomic fence here to ensure that other threads attempting
    // to read the bytes in the buffer will see the writes committed before
    // the extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, sizeof(T), memory_order_acq_rel);
  }

public:
  explicit FDRLogWriter(BufferQueue::Buffer &B, char *P)
      : Buffer(B), NextRecord(P) {
    DCHECK_NE(Buffer.Data, nullptr);
    DCHECK_NE(NextRecord, nullptr);
  }

  explicit FDRLogWriter(BufferQueue::Buffer &B)
      : FDRLogWriter(B, static_cast<char *>(B.Data)) {}

  template <MetadataRecord::RecordKinds Kind, class... Data>
  bool writeMetadata(Data &&... Ds) {
    // TODO: Check boundary conditions:
    // 1) Buffer is full, and cannot handle one metadata record.
    // 2) Buffer queue is finalising.
    writeRecord(createMetadataRecord<Kind>(std::forward<Data>(Ds)...));
    return true;
  }

  template <size_t N> size_t writeMetadataRecords(MetadataRecord (&Recs)[N]) {
    constexpr auto Size = sizeof(MetadataRecord) * N;
    internal_memcpy(NextRecord, reinterpret_cast<const char *>(Recs), Size);
    NextRecord += Size;
    // We need this atomic fence here to ensure that other threads attempting
    // to read the bytes in the buffer will see the writes committed before
    // the extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, Size, memory_order_acq_rel);
    return Size;
  }

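  // For illustration, a sketch of batching records, assuming W is a
  // hypothetical FDRLogWriter instance (the CallArgument kind is just a
  // convenient example from this file; any MetadataRecord array works):
  //
  //   MetadataRecord Recs[] = {
  //       createMetadataRecord<MetadataRecord::RecordKinds::CallArgument>(
  //           uint64_t{1}),
  //       createMetadataRecord<MetadataRecord::RecordKinds::CallArgument>(
  //           uint64_t{2}),
  //   };
  //   size_t Written = W.writeMetadataRecords(Recs);
  //   // Written == 2 * sizeof(MetadataRecord).
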
  enum class FunctionRecordKind : uint8_t {
    Enter = 0x00,
    Exit = 0x01,
    TailExit = 0x02,
    EnterArg = 0x03,
  };

  bool writeFunction(FunctionRecordKind Kind, int32_t FuncId, int32_t Delta) {
    FunctionRecord R;
    R.Type = 0;
    R.RecordKind = uint8_t(Kind);
    R.FuncId = FuncId;
    R.TSCDelta = Delta;
    writeRecord(R);
    return true;
  }

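  // A hedged usage sketch (Buf, FuncId, and the deltas are hypothetical; in
  // the real runtime the buffer comes from a BufferQueue and the TSC deltas
  // from the instrumented function's entry/exit hooks):
  //
  //   FDRLogWriter W(Buf);
  //   W.writeFunction(FDRLogWriter::FunctionRecordKind::Enter, FuncId, D0);
  //   W.writeFunction(FDRLogWriter::FunctionRecordKind::Exit, FuncId, D1);
  //
  // Each call appends one FunctionRecord and bumps Buffer.Extents by
  // sizeof(FunctionRecord) after a release fence.
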
  bool writeFunctionWithArg(FunctionRecordKind Kind, int32_t FuncId,
                            int32_t Delta, uint64_t Arg) {
    // We need to write the function with arg into the buffer, and then
    // atomically update the buffer extents. This ensures that any reads
    // synchronised on the buffer extents record will always see the writes
    // that happen before the atomic update.
    FunctionRecord R;
    R.Type = 0;
    R.RecordKind = uint8_t(Kind);
    R.FuncId = FuncId;
    R.TSCDelta = Delta;
    MetadataRecord A =
        createMetadataRecord<MetadataRecord::RecordKinds::CallArgument>(Arg);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&R), sizeof(R))) +
                 sizeof(R);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&A), sizeof(A))) +
                 sizeof(A);
    // We need this atomic fence here to ensure that other threads attempting
    // to read the bytes in the buffer will see the writes committed before
    // the extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, sizeof(R) + sizeof(A),
                     memory_order_acq_rel);
    return true;
  }

  bool writeCustomEvent(int32_t Delta, const void *Event, int32_t EventSize) {
    // We write the metadata record and the custom event data into the buffer
    // first, before we atomically update the extents for the buffer. This
    // allows us to ensure that any threads reading the extents of the buffer
    // will only ever see the full metadata and custom event payload accounted
    // (no partial writes accounted).
    MetadataRecord R =
        createMetadataRecord<MetadataRecord::RecordKinds::CustomEventMarker>(
            EventSize, Delta);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&R), sizeof(R))) +
                 sizeof(R);
    NextRecord = reinterpret_cast<char *>(
                     internal_memcpy(NextRecord, Event, EventSize)) +
                 EventSize;

    // We need this atomic fence here to ensure that other threads attempting
    // to read the bytes in the buffer will see the writes committed before
    // the extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, sizeof(R) + EventSize,
                     memory_order_acq_rel);
    return true;
  }

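  // The layout this produces (continuing the hypothetical sketch above): a
  // CustomEventMarker metadata record carrying (EventSize, Delta), followed
  // immediately by EventSize bytes of opaque payload.
  //
  //   const char Payload[] = "my-event";
  //   W.writeCustomEvent(Delta, Payload, sizeof(Payload));
  //
  // Extents grow by sizeof(MetadataRecord) + sizeof(Payload) in one
  // atomic_fetch_add, so readers never account a half-written event.
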
  bool writeTypedEvent(int32_t Delta, uint16_t EventType, const void *Event,
                       int32_t EventSize) {
    // We do something similar when writing out typed events, see
    // writeCustomEvent(...) above for details.
    MetadataRecord R =
        createMetadataRecord<MetadataRecord::RecordKinds::TypedEventMarker>(
            EventSize, Delta, EventType);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&R), sizeof(R))) +
                 sizeof(R);
    NextRecord = reinterpret_cast<char *>(
                     internal_memcpy(NextRecord, Event, EventSize)) +
                 EventSize;

    // We need this atomic fence here to ensure that other threads attempting
    // to read the bytes in the buffer will see the writes committed before
    // the extents are updated.
    atomic_thread_fence(memory_order_release);
    // Account for both the metadata record and the payload, mirroring
    // writeCustomEvent(...) above; adding only EventSize would undercount
    // the bytes committed to the buffer.
    atomic_fetch_add(Buffer.Extents, sizeof(R) + EventSize,
                     memory_order_acq_rel);
    return true;
  }

  char *getNextRecord() const { return NextRecord; }

  void resetRecord() {
    NextRecord = reinterpret_cast<char *>(Buffer.Data);
    atomic_store(Buffer.Extents, 0, memory_order_release);
  }

  void undoWrites(size_t B) {
    DCHECK_GE(NextRecord - B, reinterpret_cast<char *>(Buffer.Data));
    NextRecord -= B;
    atomic_fetch_sub(Buffer.Extents, B, memory_order_acq_rel);
  }
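
  // A hedged rollback sketch: a caller that appended a record and then
  // decides to retract it (e.g. the queue is finalising) can undo it, given
  // the exact byte count it wrote:
  //
  //   char *Before = W.getNextRecord();
  //   W.writeFunction(FDRLogWriter::FunctionRecordKind::Enter, FuncId, D0);
  //   W.undoWrites(static_cast<size_t>(W.getNextRecord() - Before));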

}; // class FDRLogWriter

} // namespace __xray

#endif // COMPILER_RT_LIB_XRAY_XRAY_FDR_LOG_WRITER_H_