1// Copyright 2017 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "fixture.h"
6
7#include <threads.h>
8
9#include <fbl/function.h>
10#include <fbl/string.h>
11#include <fbl/string_printf.h>
12#include <fbl/vector.h>
13#include <lib/zx/event.h>
14#include <trace/event.h>
15#include <trace/handler.h>
16
17namespace {
18int RunClosure(void* arg) {
19    auto closure = static_cast<fbl::Closure*>(arg);
20    (*closure)();
21    delete closure;
22    return 0;
23}
24
25void RunThread(fbl::Closure closure) {
26    thrd_t thread;
27    int result = thrd_create(&thread, RunClosure,
28                             new fbl::Closure(fbl::move(closure)));
29    ZX_ASSERT(result == thrd_success);
30
31    result = thrd_join(thread, nullptr);
32    ZX_ASSERT(result == thrd_success);
33}
34
// Verifies that a plain start/stop cycle completes with a ZX_OK disposition.
bool test_normal_shutdown() {
    BEGIN_TRACE_TEST;

    fixture_start_tracing();
    fixture_stop_tracing();
    // A clean stop must report success to the trace handler.
    EXPECT_EQ(ZX_OK, fixture_get_disposition());

    END_TRACE_TEST;
}
44
// Verifies that an abrupt ("hard") stop reports ZX_ERR_CANCELED rather
// than success, so clients can distinguish it from a normal shutdown.
bool test_hard_shutdown() {
    BEGIN_TRACE_TEST;

    fixture_start_tracing();
    fixture_stop_tracing_hard();
    EXPECT_EQ(ZX_ERR_CANCELED, fixture_get_disposition());

    END_TRACE_TEST;
}
54
// Verifies trace_state() transitions STOPPED -> STARTED -> STOPPED across
// one start/stop cycle.
bool test_state() {
    BEGIN_TRACE_TEST;

    EXPECT_EQ(TRACE_STOPPED, trace_state());

    fixture_start_tracing();
    EXPECT_EQ(TRACE_STARTED, trace_state());

    fixture_stop_tracing();
    EXPECT_EQ(TRACE_STOPPED, trace_state());

    END_TRACE_TEST;
}
68
// Verifies that trace_is_enabled() tracks whether tracing is active.
bool test_is_enabled() {
    BEGIN_TRACE_TEST;

    EXPECT_FALSE(trace_is_enabled());

    fixture_start_tracing();
    EXPECT_TRUE(trace_is_enabled());

    fixture_stop_tracing();
    EXPECT_FALSE(trace_is_enabled());

    END_TRACE_TEST;
}
82
// Verifies trace_is_category_enabled(): while tracing is active only the
// "+enabled" category reports enabled (the fixture's category config
// appears to admit "+enabled" but not "-disabled" — confirm in fixture.h);
// when tracing is stopped, every category reports disabled.
bool test_is_category_enabled() {
    BEGIN_TRACE_TEST;

    // Tracing not started: nothing is enabled, whatever the category.
    EXPECT_FALSE(trace_is_category_enabled("+enabled"));
    EXPECT_FALSE(trace_is_category_enabled("-disabled"));
    EXPECT_FALSE(trace_is_category_enabled(""));

    fixture_start_tracing();
    EXPECT_TRUE(trace_is_category_enabled("+enabled"));
    EXPECT_FALSE(trace_is_category_enabled("-disabled"));
    EXPECT_FALSE(trace_is_category_enabled(""));

    fixture_stop_tracing();
    EXPECT_FALSE(trace_is_category_enabled("+enabled"));
    EXPECT_FALSE(trace_is_category_enabled("-disabled"));
    EXPECT_FALSE(trace_is_category_enabled(""));

    END_TRACE_TEST;
}
102
// Verifies that trace_generate_nonce() never returns zero and that
// successive calls produce distinct values.
bool test_generate_nonce() {
    BEGIN_TRACE_TEST;

    uint64_t nonce1 = trace_generate_nonce();
    EXPECT_NE(0u, nonce1, "nonce is never 0");

    uint64_t nonce2 = trace_generate_nonce();
    EXPECT_NE(0u, nonce2, "nonce is never 0");

    EXPECT_NE(nonce1, nonce2, "nonce is unique");

    END_TRACE_TEST;
}
116
// Verifies that a registered observer event is signaled on each engine
// state transition: start, stopping, and stopped.
bool test_observer() {
    const size_t kBufferSize = 4096u;

    // This test needs the trace engine to run in the same thread as the test:
    // We need to control when state change signalling happens.
    BEGIN_TRACE_TEST_ETC(kAttachToThread,
                         TRACE_BUFFERING_MODE_ONESHOT, kBufferSize);

    zx::event event;
    EXPECT_EQ(ZX_OK, zx::event::create(0u, &event));

    EXPECT_EQ(ZX_OK, trace_register_observer(event.get()));
    // No state change yet, so the event must not be signaled.
    // (Zero deadline: wait_one here is a non-blocking poll.)
    EXPECT_EQ(ZX_ERR_TIMED_OUT, event.wait_one(ZX_EVENT_SIGNALED, zx::time(), nullptr));

    fixture_start_tracing();
    // Starting tracing signals the observer.
    EXPECT_EQ(ZX_OK, event.wait_one(ZX_EVENT_SIGNALED, zx::time(), nullptr));
    EXPECT_EQ(TRACE_STARTED, trace_state());

    // Clear the signal; it must stay clear until the next transition.
    EXPECT_EQ(ZX_OK, event.signal(ZX_EVENT_SIGNALED, 0u));
    EXPECT_EQ(ZX_ERR_TIMED_OUT, event.wait_one(ZX_EVENT_SIGNALED, zx::time(), nullptr));

    fixture_stop_engine();

    // Now walk the dispatcher loop an event at a time so that we see both
    // the TRACE_STOPPING event and the TRACE_STOPPED event.
    EXPECT_EQ(TRACE_STOPPING, trace_state());
    EXPECT_EQ(ZX_OK, event.wait_one(ZX_EVENT_SIGNALED, zx::time(), nullptr));
    EXPECT_EQ(ZX_OK, event.signal(ZX_EVENT_SIGNALED, 0u));
    zx_status_t status = ZX_OK;
    while (status == ZX_OK && trace_state() != TRACE_STOPPED) {
        // Run at most one pending dispatcher task per iteration.
        status = async_loop_run(fixture_async_loop(), zx_deadline_after(0), true);
        EXPECT_EQ(ZX_OK, status);
        if (trace_state() == TRACE_STOPPED) {
            // The transition to STOPPED must have signaled the observer.
            EXPECT_EQ(ZX_OK, event.wait_one(ZX_EVENT_SIGNALED, zx::time(), nullptr));
        }
    }

    fixture_shutdown();
    EXPECT_EQ(ZX_OK, trace_unregister_observer(event.get()));

    END_TRACE_TEST;
}
159
// Verifies observer registration error paths: double-register is rejected
// with ZX_ERR_INVALID_ARGS, unregistering an unknown observer with
// ZX_ERR_NOT_FOUND.
bool test_observer_errors() {
    BEGIN_TRACE_TEST;

    zx::event event;
    EXPECT_EQ(ZX_OK, zx::event::create(0u, &event));

    EXPECT_EQ(ZX_OK, trace_register_observer(event.get()));
    EXPECT_EQ(ZX_ERR_INVALID_ARGS, trace_register_observer(event.get()));

    EXPECT_EQ(ZX_OK, trace_unregister_observer(event.get()));
    EXPECT_EQ(ZX_ERR_NOT_FOUND, trace_unregister_observer(event.get()));

    END_TRACE_TEST;
}
174
// Verifies that registering the current thread twice within one trace
// context yields the same indexed thread ref, and that only one set of
// records (string, kernel object, thread) is emitted for it.
bool test_register_current_thread() {
    BEGIN_TRACE_TEST;

    fixture_start_tracing();

    trace_thread_ref_t t1, t2;
    {
        auto context = trace::TraceContext::Acquire();

        trace_context_register_current_thread(context.get(), &t1);
        trace_context_register_current_thread(context.get(), &t2);
    }

    // Both refs are indexed (cached) and identical: the second call hit
    // the thread-registration cache rather than emitting new records.
    EXPECT_TRUE(trace_is_indexed_thread_ref(&t1));
    EXPECT_TRUE(trace_is_indexed_thread_ref(&t2));
    EXPECT_EQ(t1.encoded_value, t2.encoded_value);

    // <> in the expectation matches variable fields (koids, etc.).
    ASSERT_RECORDS(R"X(String(index: 1, "process")
KernelObject(koid: <>, type: thread, name: "initial-thread", {process: koid(<>)})
Thread(index: 1, <>)
)X",
                   "");

    END_TRACE_TEST;
}
200
// Verifies that registering the current thread from two different threads
// produces two distinct indexed thread refs and two sets of records.
bool test_register_current_thread_multiple_threads() {
    BEGIN_TRACE_TEST;

    fixture_start_tracing();

    trace_thread_ref_t t1;
    {
        auto context = trace::TraceContext::Acquire();

        trace_context_register_current_thread(context.get(), &t1);
    }

    // Register from a second, freshly created thread.
    trace_thread_ref_t t2;
    RunThread([&t2] {
        auto context = trace::TraceContext::Acquire();

        trace_context_register_current_thread(context.get(), &t2);
    });

    EXPECT_TRUE(trace_is_indexed_thread_ref(&t1));
    EXPECT_TRUE(trace_is_indexed_thread_ref(&t2));
    // Different threads must get different thread-table entries.
    EXPECT_NE(t1.encoded_value, t2.encoded_value);

    ASSERT_RECORDS(R"X(String(index: 1, "process")
KernelObject(koid: <>, type: thread, name: "initial-thread", {process: koid(<>)})
Thread(index: 1, <>)
String(index: 2, "process")
KernelObject(koid: <>, type: thread, name: "thrd_t:<>/TLS=<>", {process: koid(<>)})
Thread(index: 2, <>)
)X",
                   "");

    END_TRACE_TEST;
}
235
// Verifies string-literal registration: empty/null strings produce empty
// refs, distinct literals get distinct indices, and re-registering the
// same literal returns the cached index without emitting a new record.
bool test_register_string_literal() {
    BEGIN_TRACE_TEST;

    fixture_start_tracing();

    trace_string_ref_t empty;
    trace_string_ref_t null;
    trace_string_ref_t a1, a2, a3;
    trace_string_ref_t b1, b2, b3;
    {
        auto context = trace::TraceContext::Acquire();

        trace_context_register_string_literal(context.get(), "", &empty);

        trace_context_register_string_literal(context.get(), nullptr, &null);

        trace_context_register_string_literal(context.get(), "string1", &a1);
        trace_context_register_string_literal(context.get(), "string2", &a2);
        trace_context_register_string_literal(context.get(), "string3", &a3);

        // Re-register the same three literals; these should hit the cache.
        trace_context_register_string_literal(context.get(), "string1", &b1);
        trace_context_register_string_literal(context.get(), "string2", &b2);
        trace_context_register_string_literal(context.get(), "string3", &b3);
    }

    // Empty and null strings both map to the empty string ref.
    EXPECT_TRUE(trace_is_empty_string_ref(&empty));
    EXPECT_TRUE(trace_is_empty_string_ref(&null));

    EXPECT_TRUE(trace_is_indexed_string_ref(&a1));
    EXPECT_TRUE(trace_is_indexed_string_ref(&a2));
    EXPECT_TRUE(trace_is_indexed_string_ref(&a3));

    EXPECT_TRUE(trace_is_indexed_string_ref(&b1));
    EXPECT_TRUE(trace_is_indexed_string_ref(&b2));
    EXPECT_TRUE(trace_is_indexed_string_ref(&b3));

    // Same literal -> same cached index.
    EXPECT_EQ(a1.encoded_value, b1.encoded_value);
    EXPECT_EQ(a2.encoded_value, b2.encoded_value);
    EXPECT_EQ(a3.encoded_value, b3.encoded_value);

    // Distinct literals -> distinct indices.
    EXPECT_NE(a1.encoded_value, a2.encoded_value);
    EXPECT_NE(a1.encoded_value, a3.encoded_value);
    EXPECT_NE(a2.encoded_value, a3.encoded_value);

    // Only three string records expected: one per distinct literal.
    ASSERT_RECORDS(R"X(String(index: 1, "string1")
String(index: 2, "string2")
String(index: 3, "string3")
)X",
                   "");

    END_TRACE_TEST;
}
288
// Verifies that each thread maintains its own string-literal cache:
// the same literals registered from two threads get different indices.
bool test_register_string_literal_multiple_threads() {
    BEGIN_TRACE_TEST;

    fixture_start_tracing();

    trace_string_ref_t a1;
    trace_string_ref_t a2;
    {
        auto context = trace::TraceContext::Acquire();

        trace_context_register_string_literal(context.get(), "string1", &a1);
        trace_context_register_string_literal(context.get(), "string2", &a2);
    }

    // Register the same literals again, but from a second thread.
    trace_string_ref_t b1;
    trace_string_ref_t b2;
    RunThread([&b1, &b2] {
        auto context = trace::TraceContext::Acquire();

        trace_context_register_string_literal(context.get(), "string1", &b1);
        trace_context_register_string_literal(context.get(), "string2", &b2);
    });

    EXPECT_TRUE(trace_is_indexed_string_ref(&a1));
    EXPECT_TRUE(trace_is_indexed_string_ref(&a2));

    EXPECT_TRUE(trace_is_indexed_string_ref(&b1));
    EXPECT_TRUE(trace_is_indexed_string_ref(&b2));

    EXPECT_NE(a1.encoded_value, a2.encoded_value);
    EXPECT_NE(b1.encoded_value, b2.encoded_value);

    // Each thread has its own string pool.
    EXPECT_NE(a1.encoded_value, b1.encoded_value);
    EXPECT_NE(a2.encoded_value, b2.encoded_value);

    // Four string records: two per thread, indices never shared.
    ASSERT_RECORDS(R"X(String(index: 1, "string1")
String(index: 2, "string2")
String(index: 3, "string1")
String(index: 4, "string2")
)X",
                   "");

    END_TRACE_TEST;
}
334
335bool test_register_string_literal_table_overflow() {
336    BEGIN_TRACE_TEST;
337
338    fixture_start_tracing();
339
340    fbl::Vector<fbl::String> strings;
341
342    {
343        auto context = trace::TraceContext::Acquire();
344
345        unsigned n = 0;
346        for (n = 0; n < TRACE_ENCODED_STRING_REF_MAX_INDEX; n++) {
347            trace_string_ref_t r;
348            fbl::String string = fbl::StringPrintf("string%d", n);
349            strings.push_back(string);
350            trace_context_register_string_literal(context.get(), string.c_str(), &r);
351            if (trace_is_inline_string_ref(&r))
352                break;
353        }
354        EXPECT_GT(n, 100); // at least 100 string can be cached per thread
355    }
356
357    END_TRACE_TEST;
358}
359
// Verifies the record allocator's size boundary: allocations up to
// TRACE_ENCODED_RECORD_MAX_LENGTH succeed, anything larger is refused.
bool test_maximum_record_length() {
    BEGIN_TRACE_TEST;

    fixture_start_tracing();

    {
        auto context = trace::TraceContext::Acquire();

        // Sizes at or below the limit must succeed.
        EXPECT_NONNULL(trace_context_alloc_record(context.get(), 0));
        EXPECT_NONNULL(trace_context_alloc_record(context.get(), 8));
        EXPECT_NONNULL(trace_context_alloc_record(context.get(), 16));
        EXPECT_NONNULL(trace_context_alloc_record(
            context.get(), TRACE_ENCODED_RECORD_MAX_LENGTH));

        // Anything past the limit must return null rather than allocate.
        EXPECT_NULL(trace_context_alloc_record(
            context.get(), TRACE_ENCODED_RECORD_MAX_LENGTH + 8));
        EXPECT_NULL(trace_context_alloc_record(
            context.get(), TRACE_ENCODED_RECORD_MAX_LENGTH + 16));
    }

    END_TRACE_TEST;
}
382
// Writes an instant event whose category, name, thread ref, and argument
// (name and value) are all inline rather than table-indexed, and checks
// the resulting record round-trips through the reader.
bool test_event_with_inline_everything() {
    BEGIN_TRACE_TEST;

    fixture_start_tracing();

    trace_string_ref_t cat = trace_make_inline_c_string_ref("cat");
    trace_string_ref_t name = trace_make_inline_c_string_ref("name");
    // Inline thread ref with explicit process/thread koids (123, 456).
    trace_thread_ref_t thread = trace_make_inline_thread_ref(123, 456);
    trace_arg_t args[] = {
        trace_make_arg(trace_make_inline_c_string_ref("argname"),
                       trace_make_string_arg_value(trace_make_inline_c_string_ref("argvalue")))};

    {
        auto context = trace::TraceContext::Acquire();

        trace_context_write_instant_event_record(context.get(), zx_ticks_get(),
                                                 &thread, &cat, &name,
                                                 TRACE_SCOPE_GLOBAL,
                                                 args, fbl::count_of(args));
    }

    // No String/Thread index records expected: everything was inline.
    ASSERT_RECORDS(R"X(Event(ts: <>, pt: <>, category: "cat", name: "name", Instant(scope: global), {argname: string("argvalue")})
)X",
                   "");

    END_TRACE_TEST;
}
410
// Verifies circular buffering: after overfilling the buffer with one kind
// of record and then another, only the second kind should remain (the
// first was overwritten), and no buffer-full notification is raised.
bool test_circular_mode() {
    const size_t kBufferSize = 4096u;
    BEGIN_TRACE_TEST_ETC(kNoAttachToThread,
                         TRACE_BUFFERING_MODE_CIRCULAR, kBufferSize);

    fixture_start_tracing();

    // Fill the buffers with one kind of record, then fill them with another.
    // We should see only the second kind remaining.

    for (size_t i = 0; i < kBufferSize / 8; ++i) {
        TRACE_INSTANT("+enabled", "name", TRACE_SCOPE_GLOBAL,
                      "k1", TA_INT32(1));
    }

    // IWBN to verify the contents of the buffer at this point, but that
    // requires stopping the trace. There's no current way to pause it.

    // Now fill the buffer with a different kind of record.

    for (size_t i = 0; i < kBufferSize / 8; ++i) {
        TRACE_INSTANT("+enabled", "name", TRACE_SCOPE_GLOBAL,
                      "k2", TA_INT32(2));
    }

    // Circular mode wraps instead of filling, so no notification expected.
    // TODO(dje): There is a 1-second wait here. Not sure what to do about it.
    EXPECT_FALSE(fixture_wait_buffer_full_notification());

    // Prepare a squelcher to remove timestamps.
    FixtureSquelch* ts_squelch;
    ASSERT_TRUE(fixture_create_squelch("ts: ([0-9]+)", &ts_squelch));

    // These records come from the durable buffer.
    const char expected_initial_records[] = "\
String(index: 1, \"+enabled\")\n\
String(index: 2, \"k1\")\n\
String(index: 3, \"process\")\n\
KernelObject(koid: <>, type: thread, name: \"initial-thread\", {process: koid(<>)})\n\
Thread(index: 1, <>)\n\
String(index: 4, \"name\")\n\
String(index: 5, \"k2\")\n\
Event(ts: <>, pt: <>, category: \"+enabled\", name: \"name\", Instant(scope: global), {k2: int32(2)})\n\
";

    fbl::Vector<trace::Record> records;
    // Index of the first data (event) record after the durable records above.
    const size_t kDataRecordOffset = 7;
    ASSERT_N_RECORDS(kDataRecordOffset + 1, /*empty*/, expected_initial_records,
                     &records);

    // Verify all trailing records are the same (sans timestamp).
    auto test_record = records[kDataRecordOffset].ToString();
    auto test_str = fixture_squelch(ts_squelch, test_record.c_str());
    for (size_t i = kDataRecordOffset + 1; i < records.size(); ++i) {
        // FIXME(dje): Moved this here from outside the for loop to get things
        // working. Why was it necessary?
        auto test_cstr = test_str.c_str();
        auto record_str = fixture_squelch(ts_squelch,
                                          records[i].ToString().c_str());
        auto record_cstr = record_str.c_str();
        EXPECT_STR_EQ(test_cstr, record_cstr, "bad data record");
    }

    fixture_destroy_squelch(ts_squelch);

    END_TRACE_TEST;
}
477
// Verifies streaming (double-buffered) mode: both rolling buffers fill and
// stay full until one is marked saved; records written while both buffers
// are full are dropped; saving the older buffer lets writing resume into
// it and bumps the wrapped count on the next write.
bool test_streaming_mode() {
    const size_t kBufferSize = 4096u;
    BEGIN_TRACE_TEST_ETC(kNoAttachToThread,
                         TRACE_BUFFERING_MODE_STREAMING, kBufferSize);

    fixture_start_tracing();

    // Fill the buffers with one kind of record.
    // Both buffers should fill since there's no one to save them.

    for (size_t i = 0; i < kBufferSize / 8; ++i) {
        TRACE_INSTANT("+enabled", "name", TRACE_SCOPE_GLOBAL,
                      "k1", TA_INT32(1));
    }

    EXPECT_TRUE(fixture_wait_buffer_full_notification());
    EXPECT_EQ(fixture_get_buffer_full_wrapped_count(), 0);
    fixture_reset_buffer_full_notification();

    // N.B. While we're examining the header we assume tracing is paused.

    trace_buffer_header header;
    fixture_snapshot_buffer_header(&header);

    // Sanity-check the buffer header layout and fill state.
    EXPECT_EQ(header.version, 0);
    EXPECT_EQ(header.buffering_mode, TRACE_BUFFERING_MODE_STREAMING);
    EXPECT_EQ(header.reserved1, 0);
    EXPECT_EQ(header.wrapped_count, 1);
    EXPECT_EQ(header.total_size, kBufferSize);
    EXPECT_NE(header.durable_buffer_size, 0);
    EXPECT_NE(header.rolling_buffer_size, 0);
    // Header + durable buffer + two rolling buffers account for everything.
    EXPECT_EQ(sizeof(header) + header.durable_buffer_size +
              2 * header.rolling_buffer_size, kBufferSize);
    EXPECT_NE(header.durable_data_end, 0);
    EXPECT_LE(header.durable_data_end, header.durable_buffer_size);
    EXPECT_NE(header.rolling_data_end[0], 0);
    EXPECT_LE(header.rolling_data_end[0], header.rolling_buffer_size);
    EXPECT_NE(header.rolling_data_end[1], 0);
    EXPECT_LE(header.rolling_data_end[1], header.rolling_buffer_size);
    // All the records are the same size, so each buffer should end up at
    // the same place.
    EXPECT_EQ(header.rolling_data_end[0], header.rolling_data_end[1]);

    // Try to fill the buffer with a different kind of record.
    // These should all be dropped.

    for (size_t i = 0; i < kBufferSize / 8; ++i) {
        TRACE_INSTANT("+enabled", "name", TRACE_SCOPE_GLOBAL,
                      "k2", TA_INT32(2));
    }

    // TODO(dje): There is a 1-second wait here. Not sure what to do about it.
    EXPECT_FALSE(fixture_wait_buffer_full_notification());

    // Pretend to save the older of the two buffers.
    {
        auto context = trace::TraceProlongedContext::Acquire();
        trace_context_snapshot_buffer_header(context.get(), &header);
    }
    EXPECT_EQ(header.wrapped_count, 1);

    // Buffer zero is older.
    trace_engine_mark_buffer_saved(0, 0);

    {
        auto context = trace::TraceProlongedContext::Acquire();
        trace_context_snapshot_buffer_header(context.get(), &header);
    }
    // Marking the buffer saved resets its data-end pointer.
    EXPECT_EQ(header.rolling_data_end[0], 0);
    // The wrapped count shouldn't be updated until the next record is written.
    EXPECT_EQ(header.wrapped_count, 1);

    // Fill the buffer with a different kind of record.

    for (size_t i = 0; i < kBufferSize / 8; ++i) {
        TRACE_INSTANT("+enabled", "name", TRACE_SCOPE_GLOBAL,
                      "k3", TA_INT32(3));
    }

    EXPECT_TRUE(fixture_wait_buffer_full_notification());
    EXPECT_EQ(fixture_get_buffer_full_wrapped_count(), 1);

    {
        auto context = trace::TraceProlongedContext::Acquire();
        trace_context_snapshot_buffer_header(context.get(), &header);
    }
    EXPECT_EQ(header.wrapped_count, 2);
    EXPECT_NE(header.rolling_data_end[0], 0);
    EXPECT_EQ(header.rolling_data_end[0], header.rolling_data_end[1]);

    // One buffer should now have the first kind of record, and the other
    // should have the new kind of record. And the newer records should be
    // read after the older ones.

    // Squelch timestamps so records can be compared as strings.
    FixtureSquelch* ts_squelch;
    ASSERT_TRUE(fixture_create_squelch("ts: ([0-9]+)", &ts_squelch));

    const char expected_initial_records[] =
        // These records come from the durable buffer.
        "\
String(index: 1, \"+enabled\")\n\
String(index: 2, \"k1\")\n\
String(index: 3, \"process\")\n\
KernelObject(koid: <>, type: thread, name: \"initial-thread\", {process: koid(<>)})\n\
Thread(index: 1, <>)\n\
String(index: 4, \"name\")\n\
String(index: 5, \"k2\")\n\
String(index: 6, \"k3\")\n"
        // This record is the first record in the rolling buffer
        "\
Event(ts: <>, pt: <>, category: \"+enabled\", name: \"name\", Instant(scope: global), {k1: int32(1)})\n";

    // There is no DATA2_RECORD, those records are dropped (buffer is full).
    const char data3_record[] = "\
Event(ts: <>, pt: <>, category: \"+enabled\", name: \"name\", Instant(scope: global), {k3: int32(3)})\n";

    fbl::Vector<trace::Record> records;
    // Index of the first data (event) record after the durable records above.
    const size_t kDataRecordOffset = 8;
    ASSERT_N_RECORDS(kDataRecordOffset + 1, /*empty*/, expected_initial_records,
                     &records);

    // Verify the first set of data records are the same (sans timestamp).
    auto test_record = records[kDataRecordOffset].ToString();
    auto test_str = fixture_squelch(ts_squelch, test_record.c_str());
    size_t num_data_records = 1;
    size_t i;
    for (i = kDataRecordOffset + 1; i < records.size(); ++i) {
        auto test_cstr = test_str.c_str();
        auto record_str = fixture_squelch(ts_squelch,
                                          records[i].ToString().c_str());
        auto record_cstr = record_str.c_str();
        if (strcmp(test_cstr, record_cstr) != 0)
            break;
        ++num_data_records;
    }
    EXPECT_GE(num_data_records, 2);
    // The records are all of equal size, therefore they should evenly fit
    // in the number of bytes written. Buffer 1 holds the older records.
    EXPECT_EQ(header.rolling_data_end[1] % num_data_records, 0);

    // There should be the same number of records remaining.
    EXPECT_EQ(num_data_records, records.size() - i);

    // The next record should be |data3_record|.
    EXPECT_TRUE(fixture_compare_raw_records(records, i, 1, data3_record));

    // All remaining records should match (sans timestamp).
    test_record = records[i].ToString();
    test_str = fixture_squelch(ts_squelch, test_record.c_str());
    for (i = i + 1; i < records.size(); ++i) {
        auto test_cstr = test_str.c_str();
        auto record_str = fixture_squelch(ts_squelch,
                                          records[i].ToString().c_str());
        auto record_cstr = record_str.c_str();
        EXPECT_STR_EQ(test_cstr, record_cstr, "bad data record");
    }

    fixture_destroy_squelch(ts_squelch);

    END_TRACE_TEST;
}
639
640// This test exercises DX-441 where a buffer becomes full and immediately
641// thereafter tracing is stopped. This causes the "please save buffer"
642// processing to run when tracing is not active.
bool test_shutdown_when_full() {
    const size_t kBufferSize = 4096u;

    // This test needs the trace engine to run in the same thread as the test:
    // We need to control when buffer full handling happens.
    BEGIN_TRACE_TEST_ETC(kAttachToThread,
                         TRACE_BUFFERING_MODE_STREAMING, kBufferSize);

    fixture_start_tracing();

    // Keep writing records until we just fill the buffer.
    // Since the engine loop is on the same loop as us, we can't rely on
    // handler notifications: They won't get run.
    {
        auto context = trace::TraceProlongedContext::Acquire();
        for (;;) {
            TRACE_INSTANT("+enabled", "name", TRACE_SCOPE_GLOBAL,
                          "k1", TA_INT32(1));

            // Poll the header directly: wrapped_count > 0 means the first
            // rolling buffer just filled.
            trace_buffer_header header;
            trace_context_snapshot_buffer_header(context.get(), &header);
            if (header.wrapped_count > 0) {
                break;
            }
        }
    }

    // At this point there should be no references to the context except for
    // the engine's. Then when remaining tasks in the loop are run the
    // |trace_engine_request_save_buffer()| task will have no context in
    // which to process the request and should gracefully fail.
    fixture_stop_tracing();

    END_TRACE_TEST;
}
678
679// NOTE: The functions for writing trace records are exercised by other trace tests.
680
681} // namespace
682
683BEGIN_TEST_CASE(engine_tests)
684RUN_TEST(test_normal_shutdown)
685RUN_TEST(test_hard_shutdown)
686RUN_TEST(test_is_enabled)
687RUN_TEST(test_is_category_enabled)
688RUN_TEST(test_generate_nonce)
689RUN_TEST(test_observer)
690RUN_TEST(test_observer_errors)
691RUN_TEST(test_register_current_thread)
692RUN_TEST(test_register_current_thread_multiple_threads)
693RUN_TEST(test_register_string_literal)
694RUN_TEST(test_register_string_literal_multiple_threads)
695RUN_TEST(test_register_string_literal_table_overflow)
696RUN_TEST(test_maximum_record_length)
697RUN_TEST(test_event_with_inline_everything)
698RUN_TEST(test_circular_mode)
699RUN_TEST(test_streaming_mode)
700RUN_TEST(test_shutdown_when_full)
701END_TEST_CASE(engine_tests)
702