// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "fixture.h"

#include <regex.h>
#include <stdint.h>
#include <string.h>
#include <sys/types.h>

#include <zircon/assert.h>

#include <fbl/algorithm.h>
#include <fbl/array.h>
#include <fbl/string.h>
#include <fbl/string_buffer.h>
#include <fbl/vector.h>
#include <lib/async-loop/cpp/loop.h>
#include <lib/zx/event.h>
#include <trace-reader/reader.h>
#include <trace-reader/reader_internal.h>
#include <trace/handler.h>
#include <unittest/unittest.h>

namespace {

class Fixture : private trace::TraceHandler {
public:
    Fixture(attach_to_thread_t attach_to_thread,
            trace_buffering_mode_t mode, size_t buffer_size)
        : attach_to_thread_(attach_to_thread),
          loop_(attach_to_thread == kAttachToThread
                ? &kAsyncLoopConfigAttachToThread
                : &kAsyncLoopConfigNoAttachToThread),
          buffering_mode_(mode),
          buffer_(new uint8_t[buffer_size], buffer_size) {
        zx_status_t status = zx::event::create(0u, &trace_stopped_);
        ZX_DEBUG_ASSERT(status == ZX_OK);
        status = zx::event::create(0u, &buffer_full_);
        ZX_DEBUG_ASSERT(status == ZX_OK);
    }

    ~Fixture() {
        StopTracing(false);
    }

    void StartTracing() {
        if (trace_running_)
            return;

        trace_running_ = true;

        if (attach_to_thread_ == kNoAttachToThread) {
            loop_.StartThread("trace test");
        }

        // Asynchronously start the engine.
        zx_status_t status = trace_start_engine(loop_.dispatcher(), this,
                                                buffering_mode_,
                                                buffer_.get(), buffer_.size());
        ZX_DEBUG_ASSERT_MSG(status == ZX_OK, "status=%d", status);
    }

    void StopEngine() {
        ZX_DEBUG_ASSERT(trace_running_);
        zx_status_t status = trace_stop_engine(ZX_OK);
        ZX_DEBUG_ASSERT_MSG(status == ZX_OK, "status=%d", status);
    }

    void Shutdown() {
        // Shut down the loop (implicitly joins the thread we started
        // earlier). When this completes we know the trace engine is
        // really stopped.
        loop_.Shutdown();

        ZX_DEBUG_ASSERT(observed_stopped_callback_);

        trace_running_ = false;
    }

    void StopTracing(bool hard_shutdown) {
        if (!trace_running_)
            return;

        // Asynchronously stop the engine.
        // If we're performing a hard shutdown, skip this step and begin immediately
        // tearing down the loop.  The trace engine should stop itself.
        if (!hard_shutdown) {
            StopEngine();

            zx_status_t status;
            while (trace_state() != TRACE_STOPPED) {
                if (attach_to_thread_ == kNoAttachToThread) {
                    status = trace_stopped_.wait_one(
                        ZX_EVENT_SIGNALED, zx::deadline_after(zx::msec(100)),
                        nullptr);
                    ZX_DEBUG_ASSERT_MSG(status == ZX_OK || status == ZX_ERR_TIMED_OUT,
                                        "status=%d", status);
                } else {
                    // Finish up any remaining tasks. The engine may have queued some.
                    status = loop_.RunUntilIdle();
                    ZX_DEBUG_ASSERT_MSG(status == ZX_OK, "status=%d", status);
                }
            }
        }

        Shutdown();
    }

    bool WaitBufferFullNotification() {
        auto status = buffer_full_.wait_one(ZX_EVENT_SIGNALED,
                                            zx::deadline_after(zx::msec(1000)), nullptr);
        buffer_full_.signal(ZX_EVENT_SIGNALED, 0u);
        return status == ZX_OK;
    }

    async::Loop& loop() {
        return loop_;
    }

    zx_status_t disposition() const {
        return disposition_;
    }

    bool observed_notify_buffer_full_callback() const {
        return observed_notify_buffer_full_callback_;
    }

    uint32_t observed_buffer_full_wrapped_count() const {
        return observed_buffer_full_wrapped_count_;
    }

    uint64_t observed_buffer_full_durable_data_end() const {
        return observed_buffer_full_durable_data_end_;
    }

    void ResetBufferFullNotification() {
        observed_notify_buffer_full_callback_ = false;
        observed_buffer_full_wrapped_count_ = 0;
        observed_buffer_full_durable_data_end_ = 0;
    }

    bool ReadRecords(fbl::Vector<trace::Record>* out_records,
                     fbl::Vector<fbl::String>* out_errors) {
        trace::TraceReader reader(
            [out_records](trace::Record record) {
                out_records->push_back(fbl::move(record));
            },
            [out_errors](fbl::String error) {
                out_errors->push_back(fbl::move(error));
            });
        trace::internal::TraceBufferReader buffer_reader(
            [&reader](trace::Chunk chunk) {
                if (!reader.ReadRecords(chunk)) {
                    // Nothing to do, error already recorded.
                }
            },
            [out_errors](fbl::String error) {
                out_errors->push_back(fbl::move(error));
            });
        return buffer_reader.ReadChunks(buffer_.get(), buffer_.size());
    }

private:
    bool IsCategoryEnabled(const char* category) override {
        // All categories which begin with + are enabled.
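        // For example, "+something" is enabled while "something" is not.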
        return category[0] == '+';
    }

    void TraceStopped(async_dispatcher_t* dispatcher,
                      zx_status_t disposition,
                      size_t buffer_bytes_written) override {
        ZX_DEBUG_ASSERT(!observed_stopped_callback_);
        observed_stopped_callback_ = true;
        ZX_DEBUG_ASSERT(dispatcher == loop_.dispatcher());
        disposition_ = disposition;
        buffer_bytes_written_ = buffer_bytes_written;

        trace_stopped_.signal(0u, ZX_EVENT_SIGNALED);

        // The normal provider support does "delete this" here.
        // We don't need nor want it as we still have to verify the results.
    }

    void NotifyBufferFull(uint32_t wrapped_count, uint64_t durable_data_end)
            override {
        observed_notify_buffer_full_callback_ = true;
        observed_buffer_full_wrapped_count_ = wrapped_count;
        observed_buffer_full_durable_data_end_ = durable_data_end;
        buffer_full_.signal(0u, ZX_EVENT_SIGNALED);
    }

    attach_to_thread_t attach_to_thread_;
    async::Loop loop_;
    trace_buffering_mode_t buffering_mode_;
    fbl::Array<uint8_t> buffer_;
    bool trace_running_ = false;
    zx_status_t disposition_ = ZX_ERR_INTERNAL;
    size_t buffer_bytes_written_ = 0u;
    zx::event trace_stopped_;
    zx::event buffer_full_;
    bool observed_stopped_callback_ = false;
    bool observed_notify_buffer_full_callback_ = false;
    uint32_t observed_buffer_full_wrapped_count_ = 0;
    uint64_t observed_buffer_full_durable_data_end_ = 0;
};

Fixture* g_fixture{nullptr};

} // namespace

struct FixtureSquelch {
    // Records the compiled regex.
    regex_t regex;
};

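// Typical usage (illustrative, not exercised in this file): a test calls
// fixture_set_up(), then fixture_start_tracing(), emits trace events, verifies
// them with fixture_compare_records(), and finally calls fixture_tear_down().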
void fixture_set_up(attach_to_thread_t attach_to_thread,
                    trace_buffering_mode_t mode, size_t buffer_size) {
    ZX_DEBUG_ASSERT(!g_fixture);
    g_fixture = new Fixture(attach_to_thread, mode, buffer_size);
}

void fixture_tear_down(void) {
    ZX_DEBUG_ASSERT(g_fixture);
    delete g_fixture;
    g_fixture = nullptr;
}

void fixture_start_tracing() {
    ZX_DEBUG_ASSERT(g_fixture);
    g_fixture->StartTracing();
}

void fixture_stop_tracing() {
    ZX_DEBUG_ASSERT(g_fixture);
    g_fixture->StopTracing(false);
}

void fixture_stop_tracing_hard() {
    ZX_DEBUG_ASSERT(g_fixture);
    g_fixture->StopTracing(true);
}

void fixture_stop_engine() {
    ZX_DEBUG_ASSERT(g_fixture);
    g_fixture->StopEngine();
}

void fixture_shutdown() {
    ZX_DEBUG_ASSERT(g_fixture);
    g_fixture->Shutdown();
}

async_loop_t* fixture_async_loop(void) {
    ZX_DEBUG_ASSERT(g_fixture);
    return g_fixture->loop().loop();
}

zx_status_t fixture_get_disposition(void) {
    ZX_DEBUG_ASSERT(g_fixture);
    return g_fixture->disposition();
}

bool fixture_wait_buffer_full_notification() {
    ZX_DEBUG_ASSERT(g_fixture);
    return g_fixture->WaitBufferFullNotification();
}

uint32_t fixture_get_buffer_full_wrapped_count() {
    ZX_DEBUG_ASSERT(g_fixture);
    return g_fixture->observed_buffer_full_wrapped_count();
}

void fixture_reset_buffer_full_notification() {
    ZX_DEBUG_ASSERT(g_fixture);
    g_fixture->ResetBufferFullNotification();
}

bool fixture_create_squelch(const char* regex_str, FixtureSquelch** out_squelch) {
    // We don't make any assumptions about the copyability of |regex_t|.
    // Therefore we construct it in place.
    auto squelch = new FixtureSquelch;
    if (regcomp(&squelch->regex, regex_str, REG_EXTENDED | REG_NEWLINE) != 0) {
        // Don't leak the allocation if compilation fails.
        delete squelch;
        return false;
    }
    *out_squelch = squelch;
    return true;
}

void fixture_destroy_squelch(FixtureSquelch* squelch) {
    regfree(&squelch->regex);
    delete squelch;
}

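// Returns a copy of |str| in which every match of a parenthesized
// subexpression of |squelch->regex| has been replaced with "<>".
// Illustrative example: with the squelch regex "ts: ([0-9]+)", the input
// "ts: 123456" comes back as "ts: <>".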
fbl::String fixture_squelch(FixtureSquelch* squelch, const char* str) {
    fbl::StringBuffer<1024u> buf;
    const char* cur = str;
    const char* end = str + strlen(str);
    while (*cur) {
        // size must be 1 + number of parenthesized subexpressions
        size_t match_count = squelch->regex.re_nsub + 1;
        regmatch_t match[match_count];
        if (regexec(&squelch->regex, cur, match_count, match, 0) != 0) {
            buf.Append(cur, end - cur);
            break;
        }
        size_t offset = 0u;
        for (size_t i = 1; i < match_count; i++) {
            if (match[i].rm_so == -1)
                continue;
            buf.Append(cur, match[i].rm_so - offset);
            buf.Append("<>");
            cur += match[i].rm_eo - offset;
            offset = match[i].rm_eo;
        }
    }
    return buf;
}

bool fixture_compare_raw_records(const fbl::Vector<trace::Record>& records,
                                 size_t start_record, size_t max_num_records,
                                 const char* expected) {
    BEGIN_HELPER;

    // Append up to |max_num_records| records, starting at |start_record|, to the
    // string buffer, replacing each match of a parenthesized subexpression of the
    // regex with "<>". This is used to strip out timestamps and other varying data
    // that is not controlled by these tests.
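    // Illustrative example: a record line containing "ts: 123 ... 0xdeadbeef"
    // is compared as "ts: <> ... <>".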
    FixtureSquelch* squelch;
    ASSERT_TRUE(fixture_create_squelch(
                    "([0-9]+/[0-9]+)"
                    "|koid\\(([0-9]+)\\)"
                    "|koid: ([0-9]+)"
                    "|ts: ([0-9]+)"
                    "|(0x[0-9a-f]+)",
                    &squelch), "error creating squelch");

    fbl::StringBuffer<16384u> buf;
    size_t num_recs = 0;
    for (size_t i = start_record; i < records.size(); ++i) {
        if (num_recs == max_num_records)
            break;
        const auto& record = records[i];
        fbl::String str = record.ToString();
        buf.Append(fixture_squelch(squelch, str.c_str()));
        buf.Append('\n');
        ++num_recs;
    }
    EXPECT_STR_EQ(expected, buf.c_str(), "unequal cstr");
    fixture_destroy_squelch(squelch);

    END_HELPER;
}

bool fixture_compare_n_records(size_t max_num_records, const char* expected,
                               fbl::Vector<trace::Record>* out_records) {
    ZX_DEBUG_ASSERT(g_fixture);
    BEGIN_HELPER;

    g_fixture->StopTracing(false);

    fbl::Vector<trace::Record> records;
    fbl::Vector<fbl::String> errors;
    EXPECT_TRUE(g_fixture->ReadRecords(&records, &errors), "read error");

    for (const auto& error : errors)
        printf("error: %s\n", error.c_str());
    ASSERT_EQ(0u, errors.size(), "errors encountered");

    ASSERT_GE(records.size(), 1u, "expected an initialization record");
    ASSERT_EQ(trace::RecordType::kInitialization, records[0].type(),
              "expected initialization record");
    EXPECT_EQ(zx_ticks_per_second(),
              records[0].GetInitialization().ticks_per_second);
    records.erase(0);

    EXPECT_TRUE(fixture_compare_raw_records(records, 0, max_num_records, expected));

    if (out_records) {
        *out_records = fbl::move(records);
    }

    END_HELPER;
}

bool fixture_compare_records(const char* expected) {
    return fixture_compare_n_records(SIZE_MAX, expected, nullptr);
}

void fixture_snapshot_buffer_header(trace_buffer_header* header) {
    auto context = trace::TraceProlongedContext::Acquire();
    trace_context_snapshot_buffer_header(context.get(), header);
}