// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <string.h>

#include <zircon/compiler.h>
#include <zircon/syscalls.h>

#include <fbl/algorithm.h>
#include <fbl/atomic.h>
#include <fbl/intrusive_hash_table.h>
#include <fbl/unique_ptr.h>
#include <lib/zx/process.h>
#include <lib/zx/thread.h>
#include <trace-engine/fields.h>
#include <trace-engine/handler.h>

#include "context_impl.h"

namespace trace {
namespace {

// The cached koid of this process.
// Initialized on first use.
fbl::atomic<uint64_t> g_process_koid{ZX_KOID_INVALID};

// This thread's koid.
// Initialized on first use.
thread_local zx_koid_t tls_thread_koid{ZX_KOID_INVALID};

zx_koid_t GetKoid(zx_handle_t handle) {
    zx_info_handle_basic_t info;
    zx_status_t status = zx_object_get_info(handle, ZX_INFO_HANDLE_BASIC, &info,
                                            sizeof(info), nullptr, nullptr);
    return status == ZX_OK ? info.koid : ZX_KOID_INVALID;
}

zx_koid_t GetCurrentProcessKoid() {
    zx_koid_t koid = g_process_koid.load(fbl::memory_order_relaxed);
    if (unlikely(koid == ZX_KOID_INVALID)) {
        koid = GetKoid(zx_process_self());
        g_process_koid.store(koid, fbl::memory_order_relaxed); // idempotent
    }
    return koid;
}

zx_koid_t GetCurrentThreadKoid() {
    if (unlikely(tls_thread_koid == ZX_KOID_INVALID)) {
        tls_thread_koid = GetKoid(zx_thread_self());
    }
    return tls_thread_koid;
}

void GetObjectName(zx_handle_t handle, char* name_buf, size_t name_buf_size,
                   trace_string_ref* out_name_ref) {
    zx_status_t status = zx_object_get_property(handle, ZX_PROP_NAME,
                                                name_buf, name_buf_size);
    name_buf[name_buf_size - 1] = 0;
    if (status == ZX_OK) {
        *out_name_ref = trace_make_inline_c_string_ref(name_buf);
    } else {
        *out_name_ref = trace_make_empty_string_ref();
    }
}

// A string table entry.
struct StringEntry : public fbl::SinglyLinkedListable<StringEntry*> {
    // Attempted to assign an index.
    static constexpr uint32_t kAllocIndexAttempted = 1u << 0;
    // Successfully assigned an index.
    static constexpr uint32_t kAllocIndexSucceeded = 1u << 1;
    // Category check performed.
    static constexpr uint32_t kCategoryChecked = 1u << 2;
    // Category is enabled.
    static constexpr uint32_t kCategoryEnabled = 1u << 3;

    // The string literal itself.
    const char* string_literal;

    // Flags for the string entry.
    uint32_t flags;

    // The index with which the string was associated, or 0 if none.
    trace_string_index_t index;

    // Used by the hash table.
    const char* GetKey() const { return string_literal; }
    static size_t GetHash(const char* key) { return reinterpret_cast<uintptr_t>(key); }
};

// A thread table entry.
struct ThreadEntry : public fbl::SinglyLinkedListable<ThreadEntry*> {
    // The thread koid itself.
    zx_koid_t thread_koid;

    // Thread reference for this thread.
    trace_thread_ref_t thread_ref{};

    // Used by the hash table.
    zx_koid_t GetKey() const { return thread_koid; }
    static size_t GetHash(zx_koid_t key) { return key; }
};

// Cached thread and string data for a context.
// Each thread has its own cache of context state to avoid locking overhead
// while writing trace events in the common case. There may be some
// duplicate registration of strings across threads.
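// The cache is keyed by the trace context's generation number; whenever the
// generation changes, GetCurrentContextCache() below resets the cached
// thread reference and clears both tables before reusing the cache.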
struct ContextCache {
    ContextCache() = default;
    ~ContextCache() {
        // Intrusive hash tables must be emptied before they are destroyed.
        string_table.clear();
        thread_table.clear();
    }

    // The generation number of the context which last modified this state.
    uint32_t generation{0u};

    // Thread reference created when this thread was registered.
    trace_thread_ref_t thread_ref{};

    // Maximum number of strings to cache per thread.
    static constexpr size_t kMaxStringEntries = 256;

    // String table.
    // Provides a limited amount of storage for rapidly looking up string literals
    // registered by this thread.
    fbl::HashTable<const char*, StringEntry*> string_table;

    // Storage for the string entries.
    StringEntry string_entries[kMaxStringEntries];

    // Maximum number of external thread references to cache per thread.
    static constexpr size_t kMaxThreadEntries = 4;

    // External thread table.
    // Provides a limited amount of storage for rapidly looking up external threads
    // registered by this thread.
    fbl::HashTable<zx_koid_t, ThreadEntry*> thread_table;

    // Storage for the external thread entries.
    ThreadEntry thread_entries[kMaxThreadEntries];
};
thread_local fbl::unique_ptr<ContextCache> tls_cache{};

ContextCache* GetCurrentContextCache(uint32_t generation) {
    ContextCache* cache = tls_cache.get();
    if (likely(cache)) {
        if (likely(cache->generation == generation))
            return cache;
        if (unlikely(cache->generation > generation))
            return nullptr;
    } else {
        cache = new ContextCache();
        tls_cache.reset(cache);
    }
    cache->generation = generation;
    cache->thread_ref = trace_make_unknown_thread_ref();
    cache->string_table.clear();
    cache->thread_table.clear();
    return cache;
}

StringEntry* CacheStringEntry(uint32_t generation,
                              const char* string_literal) {
    ContextCache* cache = GetCurrentContextCache(generation);
    if (unlikely(!cache))
        return nullptr;

    auto it = cache->string_table.find(string_literal);
    if (likely(it.IsValid()))
        return it.CopyPointer();

    size_t count = cache->string_table.size();
    if (unlikely(count == ContextCache::kMaxStringEntries))
        return nullptr;

    StringEntry* entry = &cache->string_entries[count];
    entry->string_literal = string_literal;
    entry->flags = 0u;
    entry->index = 0u;
    cache->string_table.insert(entry);
    return entry;
}

ThreadEntry* CacheThreadEntry(uint32_t generation, zx_koid_t thread_koid) {
    ContextCache* cache = GetCurrentContextCache(generation);
    if (unlikely(!cache))
        return nullptr;

    auto it = cache->thread_table.find(thread_koid);
    if (likely(it.IsValid()))
        return it.CopyPointer();

    size_t count = cache->thread_table.size();
    if (unlikely(count == ContextCache::kMaxThreadEntries))
        return nullptr;

    ThreadEntry* entry = &cache->thread_entries[count];
    entry->thread_koid = thread_koid;
    entry->thread_ref = trace_make_unknown_thread_ref();
    cache->thread_table.insert(entry);
    return entry;
}

inline constexpr uint64_t MakeRecordHeader(RecordType type, size_t size) {
    return RecordFields::Type::Make(ToUnderlyingType(type)) |
           RecordFields::RecordSize::Make(size >> 3);
}

inline constexpr uint64_t MakeArgumentHeader(ArgumentType type, size_t size,
                                             const trace_string_ref_t* name_ref) {
    return ArgumentFields::Type::Make(ToUnderlyingType(type)) |
           ArgumentFields::ArgumentSize::Make(size >> 3) |
           ArgumentFields::NameRef::Make(name_ref->encoded_value);
}

size_t SizeOfEncodedStringRef(const trace_string_ref_t* string_ref) {
    return trace_is_inline_string_ref(string_ref)
               ? Pad(trace_inline_string_ref_length(string_ref))
               : 0u;
}

size_t SizeOfEncodedThreadRef(const trace_thread_ref_t* thread_ref) {
    // TODO(ZX-1030): Unknown thread refs should not be stored inline.
    return trace_is_inline_thread_ref(thread_ref) || trace_is_unknown_thread_ref(thread_ref)
               ? WordsToBytes(2)
               : 0u;
}

size_t SizeOfEncodedArgValue(const trace_arg_value_t* arg_value) {
    switch (arg_value->type) {
    case TRACE_ARG_NULL:
        return 0u;
    case TRACE_ARG_INT32:
        return 0u; // stored inline
    case TRACE_ARG_UINT32:
        return 0u; // stored inline
    case TRACE_ARG_INT64:
        return WordsToBytes(1);
    case TRACE_ARG_UINT64:
        return WordsToBytes(1);
    case TRACE_ARG_DOUBLE:
        return WordsToBytes(1);
    case TRACE_ARG_STRING:
        return SizeOfEncodedStringRef(&arg_value->string_value_ref);
    case TRACE_ARG_POINTER:
        return WordsToBytes(1);
    case TRACE_ARG_KOID:
        return WordsToBytes(1);
    default:
        // skip unrecognized argument type
        ZX_DEBUG_ASSERT(false);
        return 0u;
    }
}

size_t SizeOfEncodedArg(const trace_arg_t* arg) {
    return sizeof(ArgumentHeader) +
           SizeOfEncodedStringRef(&arg->name_ref) +
           SizeOfEncodedArgValue(&arg->value);
}

size_t SizeOfEncodedArgs(const trace_arg_t* args, size_t num_args) {
    size_t total_size = 0u;
    while (num_args-- != 0u)
        total_size += SizeOfEncodedArg(args++);
    return total_size;
}

// Provides support for writing sequences of 64-bit words into a trace buffer.
class Payload {
public:
    explicit Payload(trace_context_t* context, size_t num_bytes)
        : ptr_(context->AllocRecord(num_bytes)) {}

    explicit Payload(trace_context_t* context, bool rqst_durable, size_t num_bytes)
        : ptr_(rqst_durable && context->UsingDurableBuffer()
                   ? context->AllocDurableRecord(num_bytes)
                   : context->AllocRecord(num_bytes)) {}

    explicit operator bool() const {
        return ptr_ != nullptr;
    }

    Payload& WriteUint64(uint64_t value) {
        *ptr_++ = value;
        return *this;
    }

    Payload& WriteInt64(int64_t value) {
        *reinterpret_cast<int64_t*>(ptr_++) = value;
        return *this;
    }

    Payload& WriteDouble(double value) {
        *reinterpret_cast<double*>(ptr_++) = value;
        return *this;
    }

    void* PrepareWriteBytes(size_t length) {
        auto result = ptr_;
        ptr_ += length / 8u;
        size_t tail = length & 7u;
        if (tail) {
            size_t padding = 8u - tail;
            ptr_++;
            memset(reinterpret_cast<uint8_t*>(ptr_) - padding, 0u, padding);
        }
        return result;
    }

    Payload& WriteBytes(const void* src, size_t length) {
        auto ptr = PrepareWriteBytes(length);
        memcpy(ptr, src, length);
        return *this;
    }

    Payload& WriteStringRef(const trace_string_ref_t* string_ref) {
        if (trace_is_inline_string_ref(string_ref)) {
            WriteBytes(string_ref->inline_string,
                       trace_inline_string_ref_length(string_ref));
        }
        return *this;
    }

    Payload& WriteThreadRef(const trace_thread_ref_t* thread_ref) {
        // TODO(ZX-1030): Unknown thread refs should not be stored inline.
        if (trace_is_inline_thread_ref(thread_ref) || trace_is_unknown_thread_ref(thread_ref)) {
            WriteUint64(thread_ref->inline_process_koid);
            WriteUint64(thread_ref->inline_thread_koid);
        }
        return *this;
    }

    Payload& WriteArg(const trace_arg_t* arg) {
        switch (arg->value.type) {
        case TRACE_ARG_NULL:
            WriteArgumentHeaderAndName(ArgumentType::kNull, &arg->name_ref, 0u, 0u);
            break;
        case TRACE_ARG_INT32:
            WriteArgumentHeaderAndName(ArgumentType::kInt32, &arg->name_ref, 0u,
                                       Int32ArgumentFields::Value::Make(arg->value.int32_value));
            break;
        case TRACE_ARG_UINT32:
            WriteArgumentHeaderAndName(ArgumentType::kUint32, &arg->name_ref, 0u,
                                       Uint32ArgumentFields::Value::Make(arg->value.uint32_value));
            break;
        case TRACE_ARG_INT64:
            WriteArgumentHeaderAndName(ArgumentType::kInt64, &arg->name_ref, WordsToBytes(1), 0u);
            WriteInt64(arg->value.int64_value);
            break;
        case TRACE_ARG_UINT64:
            WriteArgumentHeaderAndName(ArgumentType::kUint64, &arg->name_ref, WordsToBytes(1), 0u);
            WriteUint64(arg->value.uint64_value);
            break;
        case TRACE_ARG_DOUBLE:
            WriteArgumentHeaderAndName(ArgumentType::kDouble, &arg->name_ref, WordsToBytes(1), 0u);
            WriteDouble(arg->value.double_value);
            break;
        case TRACE_ARG_STRING:
            WriteArgumentHeaderAndName(ArgumentType::kString, &arg->name_ref,
                                       SizeOfEncodedStringRef(&arg->value.string_value_ref),
                                       StringArgumentFields::Index::Make(
                                           arg->value.string_value_ref.encoded_value));
            WriteStringRef(&arg->value.string_value_ref);
            break;
        case TRACE_ARG_POINTER:
            WriteArgumentHeaderAndName(ArgumentType::kPointer, &arg->name_ref, WordsToBytes(1), 0u);
            WriteUint64(arg->value.pointer_value);
            break;
        case TRACE_ARG_KOID:
            WriteArgumentHeaderAndName(ArgumentType::kKoid, &arg->name_ref, WordsToBytes(1), 0u);
            WriteUint64(arg->value.koid_value);
            break;
        default:
            // skip unrecognized argument type
            ZX_DEBUG_ASSERT(false);
            break;
        }
        return *this;
    }

    Payload& WriteArgs(const trace_arg_t* args, size_t num_args) {
        while (num_args-- != 0u)
            WriteArg(args++);
        return *this;
    }

private:
    void WriteArgumentHeaderAndName(ArgumentType type,
                                    const trace_string_ref_t* name_ref,
                                    size_t content_size,
                                    uint64_t header_bits) {
        const size_t argument_size = sizeof(ArgumentHeader) +
                                     SizeOfEncodedStringRef(name_ref) +
                                     content_size;
        WriteUint64(MakeArgumentHeader(type, argument_size, name_ref) | header_bits);
        WriteStringRef(name_ref);
    }

    uint64_t* ptr_;
};

Payload WriteEventRecordBase(
    trace_context_t* context,
    EventType event_type,
    trace_ticks_t event_time,
    const trace_thread_ref_t* thread_ref,
    const trace_string_ref_t* category_ref,
    const trace_string_ref_t* name_ref,
    const trace_arg_t* args, size_t num_args,
    size_t content_size) {
    const size_t record_size = sizeof(RecordHeader) +
                               WordsToBytes(1) +
                               SizeOfEncodedThreadRef(thread_ref) +
                               SizeOfEncodedStringRef(category_ref) +
                               SizeOfEncodedStringRef(name_ref) +
                               SizeOfEncodedArgs(args, num_args) +
                               content_size;
    Payload payload(context, record_size);
    if (payload) {
        payload
            .WriteUint64(MakeRecordHeader(RecordType::kEvent, record_size) |
                         EventRecordFields::EventType::Make(ToUnderlyingType(event_type)) |
                         EventRecordFields::ArgumentCount::Make(num_args) |
                         EventRecordFields::ThreadRef::Make(thread_ref->encoded_value) |
                         EventRecordFields::CategoryStringRef::Make(category_ref->encoded_value) |
                         EventRecordFields::NameStringRef::Make(name_ref->encoded_value))
            .WriteUint64(event_time)
            .WriteThreadRef(thread_ref)
            .WriteStringRef(category_ref)
            .WriteStringRef(name_ref)
            .WriteArgs(args, num_args);
    }
    return payload;
}

bool CheckCategory(trace_context_t* context, const char* category) {
    return context->handler()->ops->is_category_enabled(context->handler(), category);
}

// Returns true if write succeeded, false otherwise.
// The write fails if the buffer we use is full.
bool WriteStringRecord(trace_context_t* context, bool rqst_durable_buffer,
                       trace_string_index_t index,
                       const char* string, size_t length) {
    ZX_DEBUG_ASSERT(index != TRACE_ENCODED_STRING_REF_EMPTY);
    ZX_DEBUG_ASSERT(index <= TRACE_ENCODED_STRING_REF_MAX_INDEX);

    if (unlikely(length > TRACE_ENCODED_STRING_REF_MAX_LENGTH))
        length = TRACE_ENCODED_STRING_REF_MAX_LENGTH;

    const size_t record_size = sizeof(trace::RecordHeader) +
                               trace::Pad(length);
    Payload payload(context, rqst_durable_buffer, record_size);
    if (likely(payload)) {
        payload
            .WriteUint64(trace::MakeRecordHeader(trace::RecordType::kString, record_size) |
                         trace::StringRecordFields::StringIndex::Make(index) |
                         trace::StringRecordFields::StringLength::Make(length))
            .WriteBytes(string, length);
        return true;
    }
    return false;
}

// Returns true if write succeeded, false otherwise.
// The write fails if the buffer we use is full.
bool WriteThreadRecord(trace_context_t* context, trace_thread_index_t index,
                       zx_koid_t process_koid, zx_koid_t thread_koid) {
    ZX_DEBUG_ASSERT(index != TRACE_ENCODED_THREAD_REF_INLINE);
    ZX_DEBUG_ASSERT(index <= TRACE_ENCODED_THREAD_REF_MAX_INDEX);

    const size_t record_size =
        sizeof(trace::RecordHeader) + trace::WordsToBytes(2);
    trace::Payload payload(context, true, record_size);
    if (likely(payload)) {
        payload
            .WriteUint64(trace::MakeRecordHeader(trace::RecordType::kThread, record_size) |
                         trace::ThreadRecordFields::ThreadIndex::Make(index))
            .WriteUint64(process_koid)
            .WriteUint64(thread_koid);
        return true;
    }
    return false;
}

// N.B. This may only return false if |check_category| is true.
bool RegisterString(trace_context_t* context,
                    const char* string_literal,
                    bool check_category,
                    trace_string_ref_t* out_ref_optional) {
    if (unlikely(!string_literal || !*string_literal)) {
        if (check_category)
            return false; // NULL and empty strings are not valid categories
        if (out_ref_optional)
            *out_ref_optional = trace_make_empty_string_ref();
        return true;
    }

    StringEntry* entry = CacheStringEntry(context->generation(), string_literal);
    if (likely(entry)) {
        // Fast path: using the thread-local cache.
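        // The results of the category check and the index allocation are
        // memoized in |entry->flags|, so the handler callback and the string
        // record write happen at most once per literal per thread.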
        if (check_category) {
            if (unlikely(!(entry->flags & StringEntry::kCategoryChecked))) {
                if (CheckCategory(context, string_literal)) {
                    entry->flags |= StringEntry::kCategoryChecked |
                                    StringEntry::kCategoryEnabled;
                } else {
                    entry->flags |= StringEntry::kCategoryChecked;
                }
            }
            if (!(entry->flags & StringEntry::kCategoryEnabled)) {
                return false; // category disabled
            }
        }

        if (out_ref_optional) {
            if (unlikely(!(entry->flags & StringEntry::kAllocIndexAttempted))) {
                entry->flags |= StringEntry::kAllocIndexAttempted;
                size_t string_len = strlen(string_literal);
                bool rqst_durable = true;
                // If allocating an index succeeds but writing the record
                // fails, toss the index and return an inline reference. The
                // index is lost anyway, but the result won't be half-complete.
                // The subsequent write of the inlined reference will likely
                // also fail, but that's ok.
                if (likely(context->AllocStringIndex(&entry->index) &&
                           WriteStringRecord(context, rqst_durable, entry->index,
                                             string_literal, string_len))) {
                    entry->flags |= StringEntry::kAllocIndexSucceeded;
                }
            }
            if (likely(entry->flags & StringEntry::kAllocIndexSucceeded)) {
                *out_ref_optional = trace_make_indexed_string_ref(entry->index);
            } else {
                *out_ref_optional = trace_make_inline_c_string_ref(string_literal);
            }
        }
        return true;
    }

    // Slow path.
    // TODO(ZX-1035): Since we can't use the thread-local cache here, cache
    // this registered string on the trace context structure, guarded by a mutex.
    // Make sure to assign it a string index if possible instead of inlining.
    if (check_category && !CheckCategory(context, string_literal)) {
        return false; // category disabled
    }
    if (out_ref_optional) {
        *out_ref_optional = trace_make_inline_c_string_ref(string_literal);
    }
    return true;
}

} // namespace
} // namespace trace

bool trace_context_is_category_enabled(
    trace_context_t* context,
    const char* category_literal) {
    return trace::RegisterString(context, category_literal, true, nullptr);
}

void trace_context_register_string_copy(
    trace_context_t* context,
    const char* string, size_t length,
    trace_string_ref_t* out_ref) {
    // TODO(ZX-1035): Cache the registered strings on the trace context structure,
    // guarded by a mutex.
    trace_string_index_t index;
    bool rqst_durable = true;
    // If allocating an index succeeds but writing the record
    // fails, toss the index and return an inline reference. The
    // index is lost anyway, but the result won't be half-complete.
    // The subsequent write of the inlined reference will likely
    // also fail, but that's ok.
    if (likely(context->AllocStringIndex(&index) &&
               trace::WriteStringRecord(context, rqst_durable, index, string, length))) {
        *out_ref = trace_make_indexed_string_ref(index);
    } else {
        *out_ref = trace_make_inline_string_ref(string, length);
    }
}

void trace_context_register_string_literal(
    trace_context_t* context,
    const char* string_literal,
    trace_string_ref_t* out_ref) {
    bool result = trace::RegisterString(context, string_literal, false, out_ref);
    ZX_DEBUG_ASSERT(result);
}

bool trace_context_register_category_literal(
    trace_context_t* context,
    const char* category_literal,
    trace_string_ref_t* out_ref) {
    return trace::RegisterString(context, category_literal, true, out_ref);
}

void trace_context_register_current_thread(
    trace_context_t* context,
    trace_thread_ref_t* out_ref) {
    trace::ContextCache* cache = trace::GetCurrentContextCache(context->generation());
    if (likely(cache && !trace_is_unknown_thread_ref(&cache->thread_ref))) {
        // Fast path: the thread is already registered.
        *out_ref = cache->thread_ref;
        return;
    }

    trace_string_ref name_ref;
    char name_buf[ZX_MAX_NAME_LEN];
    trace::GetObjectName(zx_thread_self(), name_buf, sizeof(name_buf), &name_ref);
    zx_koid_t process_koid = trace::GetCurrentProcessKoid();
    zx_koid_t thread_koid = trace::GetCurrentThreadKoid();
    trace_context_write_thread_info_record(context, process_koid, thread_koid,
                                           &name_ref);

    if (likely(cache)) {
        trace_thread_index_t index;
        // If allocating an index succeeds but writing the record fails,
        // toss the index and return an inline reference. The index is lost
        // anyway, but the result won't be half-complete. The subsequent
        // write of the inlined reference will likely also fail, but that's ok.
        if (likely(context->AllocThreadIndex(&index) &&
                   trace::WriteThreadRecord(context, index,
                                            process_koid, thread_koid))) {
            cache->thread_ref = trace_make_indexed_thread_ref(index);
        } else {
            cache->thread_ref = trace_make_inline_thread_ref(
                process_koid, thread_koid);
        }
        *out_ref = cache->thread_ref;
        return;
    }

    // Slow path: the context's generation is out of date so we can't
    // cache anything related to the current thread.
    trace_context_register_thread(context,
                                  trace::GetCurrentProcessKoid(),
                                  trace::GetCurrentThreadKoid(),
                                  out_ref);
}

void trace_context_register_thread(
    trace_context_t* context,
    zx_koid_t process_koid, zx_koid_t thread_koid,
    trace_thread_ref_t* out_ref) {
    // TODO(ZX-1035): Since we can't use the thread-local cache here, cache
    // this registered thread on the trace context structure, guarded by a mutex.
    trace_thread_index_t index;
    // If allocating an index succeeds but writing the record fails,
    // toss the index and return an inline reference. The index is lost
    // anyway, but the result won't be half-complete. The subsequent
    // write of the inlined reference will likely also fail, but that's ok.
    if (likely(context->AllocThreadIndex(&index) &&
               trace::WriteThreadRecord(context, index, process_koid, thread_koid))) {
        *out_ref = trace_make_indexed_thread_ref(index);
    } else {
        *out_ref = trace_make_inline_thread_ref(process_koid, thread_koid);
    }
}

void trace_context_register_vthread(
    trace_context_t* context,
    zx_koid_t process_koid,
    const char* vthread_literal,
    trace_vthread_id_t vthread_id,
    trace_thread_ref_t* out_ref) {
    // This flag is used to avoid collisions with regular threads. This is not
    // guaranteed to work, but it is sufficient until we have a koid range that
    // can never be used by regular threads.
    constexpr zx_koid_t kVirtualThreadIdFlag = 0x100000000;
    zx_koid_t vthread_koid = kVirtualThreadIdFlag | vthread_id;

    trace::ThreadEntry* entry = trace::CacheThreadEntry(context->generation(), vthread_koid);
    if (likely(entry && !trace_is_unknown_thread_ref(&entry->thread_ref))) {
        // Fast path: the thread is already registered.
        *out_ref = entry->thread_ref;
        return;
    }

    if (process_koid == ZX_KOID_INVALID) {
        process_koid = trace::GetCurrentProcessKoid();
    }

    trace_string_ref name_ref = trace_make_inline_c_string_ref(vthread_literal);
    trace_context_write_thread_info_record(context, process_koid, vthread_koid,
                                           &name_ref);

    if (likely(entry)) {
        trace_thread_index_t index;
        // If allocating an index succeeds but writing the record fails,
        // toss the index and return an inline reference. The index is lost
        // anyway, but the result won't be half-complete. The subsequent
        // write of the inlined reference will likely also fail, but that's ok.
        if (likely(context->AllocThreadIndex(&index) &&
                   trace::WriteThreadRecord(context, index,
                                            process_koid, vthread_koid))) {
            entry->thread_ref = trace_make_indexed_thread_ref(index);
        } else {
            entry->thread_ref = trace_make_inline_thread_ref(
                process_koid, vthread_koid);
        }
        *out_ref = entry->thread_ref;
        return;
    }

    *out_ref = trace_make_inline_thread_ref(process_koid, vthread_koid);
}

void trace_context_write_blob_record(
    trace_context_t* context,
    trace_blob_type_t type,
    const trace_string_ref_t* name_ref,
    const void* blob, size_t blob_size) {
    const size_t name_string_size = trace::SizeOfEncodedStringRef(name_ref);
    const size_t record_size_less_blob = sizeof(trace::RecordHeader) +
                                         name_string_size;
    const size_t padded_blob_size = trace::Pad(blob_size);
    const size_t max_record_size = trace::RecordFields::kMaxRecordSizeBytes;
    if (record_size_less_blob > max_record_size ||
        padded_blob_size > max_record_size - record_size_less_blob) {
        return;
    }
    const size_t record_size = record_size_less_blob + padded_blob_size;
    trace::Payload payload(context, record_size);
    if (payload) {
        payload
            .WriteUint64(trace::MakeRecordHeader(trace::RecordType::kBlob, record_size) |
                         trace::BlobRecordFields::BlobType::Make(
                             trace::ToUnderlyingType(type)) |
                         trace::BlobRecordFields::NameStringRef::Make(
                             name_ref->encoded_value) |
                         trace::BlobRecordFields::BlobSize::Make(blob_size))
            .WriteStringRef(name_ref)
            .WriteBytes(blob, blob_size);
    }
}

void trace_context_write_kernel_object_record(
    trace_context_t* context,
    bool use_durable,
    zx_koid_t koid, zx_obj_type_t type,
    const trace_string_ref_t* name_ref,
    const trace_arg_t* args, size_t num_args) {
    const size_t record_size = sizeof(trace::RecordHeader) +
                               trace::WordsToBytes(1) +
                               trace::SizeOfEncodedStringRef(name_ref) +
                               trace::SizeOfEncodedArgs(args, num_args);
    trace::Payload payload(context, use_durable, record_size);
    if (payload) {
        payload
            .WriteUint64(trace::MakeRecordHeader(trace::RecordType::kKernelObject, record_size) |
                         trace::KernelObjectRecordFields::ObjectType::Make(type) |
                         trace::KernelObjectRecordFields::NameStringRef::Make(
                             name_ref->encoded_value) |
                         trace::KernelObjectRecordFields::ArgumentCount::Make(num_args))
            .WriteUint64(koid)
            .WriteStringRef(name_ref)
            .WriteArgs(args, num_args);
    }
}

void trace_context_write_kernel_object_record_for_handle(
    trace_context_t* context,
    zx_handle_t handle,
    const trace_arg_t* args, size_t num_args) {
    zx_info_handle_basic_t info;
    zx_status_t status = zx_object_get_info(handle, ZX_INFO_HANDLE_BASIC, &info,
                                            sizeof(info), nullptr, nullptr);
    if (status != ZX_OK)
        return;

    trace_string_ref name_ref;
    char name_buf[ZX_MAX_NAME_LEN];
    trace::GetObjectName(handle, name_buf, sizeof(name_buf), &name_ref);

    zx_obj_type_t obj_type = static_cast<zx_obj_type_t>(info.type);
    switch (obj_type) {
    case ZX_OBJ_TYPE_PROCESS:
        // TODO(ZX-1028): Support custom args.
        trace_context_write_process_info_record(context, info.koid, &name_ref);
        break;
    case ZX_OBJ_TYPE_THREAD:
        // TODO(ZX-1028): Support custom args.
        trace_context_write_thread_info_record(context, info.related_koid, info.koid, &name_ref);
        break;
    default:
        trace_context_write_kernel_object_record(context, false, info.koid,
                                                 obj_type, &name_ref,
                                                 args, num_args);
        break;
    }
}

void trace_context_write_process_info_record(
    trace_context_t* context,
    zx_koid_t process_koid,
    const trace_string_ref_t* process_name_ref) {
    trace_context_write_kernel_object_record(context, true,
                                             process_koid, ZX_OBJ_TYPE_PROCESS,
                                             process_name_ref, nullptr, 0u);
}

void trace_context_write_thread_info_record(
    trace_context_t* context,
    zx_koid_t process_koid,
    zx_koid_t thread_koid,
    const trace_string_ref_t* thread_name_ref) {
    // TODO(ZX-1028): We should probably store the related koid in the trace
    // event directly instead of packing it into an argument like this.
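    // For now, the process koid is attached as a koid-typed "process" argument.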
    trace_arg_t arg;
    trace_context_register_string_literal(context, "process", &arg.name_ref);
    arg.value.type = TRACE_ARG_KOID;
    arg.value.koid_value = process_koid;
    trace_context_write_kernel_object_record(context, true,
                                             thread_koid, ZX_OBJ_TYPE_THREAD,
                                             thread_name_ref, &arg, 1u);
}

void trace_context_write_context_switch_record(
    trace_context_t* context,
    trace_ticks_t event_time,
    trace_cpu_number_t cpu_number,
    trace_thread_state_t outgoing_thread_state,
    const trace_thread_ref_t* outgoing_thread_ref,
    const trace_thread_ref_t* incoming_thread_ref,
    trace_thread_priority_t outgoing_thread_priority,
    trace_thread_priority_t incoming_thread_priority) {
    const size_t record_size = sizeof(trace::RecordHeader) +
                               trace::WordsToBytes(1) +
                               trace::SizeOfEncodedThreadRef(outgoing_thread_ref) +
                               trace::SizeOfEncodedThreadRef(incoming_thread_ref);
    trace::Payload payload(context, record_size);
    if (payload) {
        payload
            .WriteUint64(trace::MakeRecordHeader(trace::RecordType::kContextSwitch, record_size) |
                         trace::ContextSwitchRecordFields::CpuNumber::Make(cpu_number) |
                         trace::ContextSwitchRecordFields::OutgoingThreadState::Make(
                             ZX_THREAD_STATE_BASIC(outgoing_thread_state)) |
                         trace::ContextSwitchRecordFields::OutgoingThreadRef::Make(
                             outgoing_thread_ref->encoded_value) |
                         trace::ContextSwitchRecordFields::IncomingThreadRef::Make(
                             incoming_thread_ref->encoded_value) |
                         trace::ContextSwitchRecordFields::OutgoingThreadPriority::Make(
                             outgoing_thread_priority) |
                         trace::ContextSwitchRecordFields::IncomingThreadPriority::Make(
                             incoming_thread_priority))
            .WriteUint64(event_time)
            .WriteThreadRef(outgoing_thread_ref)
            .WriteThreadRef(incoming_thread_ref);
    }
}

void trace_context_write_log_record(
    trace_context_t* context,
    trace_ticks_t event_time,
    const trace_thread_ref_t* thread_ref,
    const char* log_message,
    size_t log_message_length) {
    if (!log_message)
        return;

    log_message_length =
        fbl::min(log_message_length, size_t(trace::LogRecordFields::kMaxMessageLength));
    const size_t record_size = sizeof(trace::RecordHeader) +
                               trace::SizeOfEncodedThreadRef(thread_ref) +
                               trace::WordsToBytes(1) +
                               trace::Pad(log_message_length);
    trace::Payload payload(context, record_size);
    if (payload) {
        payload
            .WriteUint64(trace::MakeRecordHeader(trace::RecordType::kLog, record_size) |
                         trace::LogRecordFields::LogMessageLength::Make(log_message_length) |
                         trace::LogRecordFields::ThreadRef::Make(thread_ref->encoded_value))
            .WriteUint64(event_time)
            .WriteThreadRef(thread_ref)
            .WriteBytes(log_message, log_message_length);
    }
}

void trace_context_write_instant_event_record(
    trace_context_t* context,
    trace_ticks_t event_time,
    const trace_thread_ref_t* thread_ref,
    const trace_string_ref_t* category_ref,
    const trace_string_ref_t* name_ref,
    trace_scope_t scope,
    const trace_arg_t* args, size_t num_args) {
    const size_t content_size = trace::WordsToBytes(1);
    trace::Payload payload = trace::WriteEventRecordBase(
        context, trace::EventType::kInstant, event_time,
        thread_ref, category_ref, name_ref,
        args, num_args, content_size);
    if (payload) {
        payload.WriteUint64(trace::ToUnderlyingType(scope));
    }
}

void trace_context_write_counter_event_record(
    trace_context_t* context,
    trace_ticks_t event_time,
    const trace_thread_ref_t* thread_ref,
    const trace_string_ref_t* category_ref,
    const trace_string_ref_t* name_ref,
    trace_counter_id_t counter_id,
    const trace_arg_t* args, size_t num_args) {
    const size_t content_size = trace::WordsToBytes(1);
    trace::Payload payload = trace::WriteEventRecordBase(
        context, trace::EventType::kCounter, event_time,
        thread_ref, category_ref, name_ref,
        args, num_args, content_size);
    if (payload) {
        payload.WriteUint64(counter_id);
    }
}

void trace_context_write_duration_event_record(
    trace_context_t* context,
    trace_ticks_t start_time,
    trace_ticks_t end_time,
    const trace_thread_ref_t* thread_ref,
    const trace_string_ref_t* category_ref,
    const trace_string_ref_t* name_ref,
    const trace_arg_t* args, size_t num_args) {
    trace_context_write_duration_begin_event_record(
        context, start_time,
        thread_ref, category_ref, name_ref,
        args, num_args);
    trace_context_write_duration_end_event_record(
        context, end_time,
        thread_ref, category_ref, name_ref,
        nullptr, 0u);
}

void trace_context_write_duration_begin_event_record(
    trace_context_t* context,
    trace_ticks_t event_time,
    const trace_thread_ref_t* thread_ref,
    const trace_string_ref_t* category_ref,
    const trace_string_ref_t* name_ref,
    const trace_arg_t* args, size_t num_args) {
    trace::WriteEventRecordBase(
        context, trace::EventType::kDurationBegin, event_time,
        thread_ref, category_ref, name_ref,
        args, num_args, 0u);
}

void trace_context_write_duration_end_event_record(
    trace_context_t* context,
    trace_ticks_t event_time,
    const trace_thread_ref_t* thread_ref,
    const trace_string_ref_t* category_ref,
    const trace_string_ref_t* name_ref,
    const trace_arg_t* args, size_t num_args) {
    trace::WriteEventRecordBase(
        context, trace::EventType::kDurationEnd, event_time,
        thread_ref, category_ref, name_ref,
        args, num_args, 0u);
}

void trace_context_write_async_begin_event_record(
    trace_context_t* context,
    trace_ticks_t event_time,
    const trace_thread_ref_t* thread_ref,
    const trace_string_ref_t* category_ref,
    const trace_string_ref_t* name_ref,
    trace_async_id_t async_id,
    const trace_arg_t* args, size_t num_args) {
    const size_t content_size = trace::WordsToBytes(1);
    trace::Payload payload = trace::WriteEventRecordBase(
        context, trace::EventType::kAsyncBegin, event_time,
        thread_ref, category_ref, name_ref,
        args, num_args, content_size);
    if (payload) {
        payload.WriteUint64(async_id);
    }
}

void trace_context_write_async_instant_event_record(
    trace_context_t* context,
    trace_ticks_t event_time,
    const trace_thread_ref_t* thread_ref,
    const trace_string_ref_t* category_ref,
    const trace_string_ref_t* name_ref,
    trace_async_id_t async_id,
    const trace_arg_t* args, size_t num_args) {
    const size_t content_size = trace::WordsToBytes(1);
    trace::Payload payload = trace::WriteEventRecordBase(
        context, trace::EventType::kAsyncInstant, event_time,
        thread_ref, category_ref, name_ref,
        args, num_args, content_size);
    if (payload) {
        payload.WriteUint64(async_id);
    }
}

void trace_context_write_async_end_event_record(
    trace_context_t* context,
    trace_ticks_t event_time,
    const trace_thread_ref_t* thread_ref,
    const trace_string_ref_t* category_ref,
    const trace_string_ref_t* name_ref,
    trace_async_id_t async_id,
    const trace_arg_t* args, size_t num_args) {
    const size_t content_size = trace::WordsToBytes(1);
    trace::Payload payload = trace::WriteEventRecordBase(
        context, trace::EventType::kAsyncEnd, event_time,
        thread_ref, category_ref, name_ref,
        args, num_args, content_size);
    if (payload) {
        payload.WriteUint64(async_id);
    }
}

void trace_context_write_flow_begin_event_record(
    trace_context_t* context,
    trace_ticks_t event_time,
    const trace_thread_ref_t* thread_ref,
    const trace_string_ref_t* category_ref,
    const trace_string_ref_t* name_ref,
    trace_flow_id_t flow_id,
    const trace_arg_t* args, size_t num_args) {
    const size_t content_size = trace::WordsToBytes(1);
    trace::Payload payload = trace::WriteEventRecordBase(
        context, trace::EventType::kFlowBegin, event_time,
        thread_ref, category_ref, name_ref,
        args, num_args, content_size);
    if (payload) {
        payload.WriteUint64(flow_id);
    }
}

void trace_context_write_flow_step_event_record(
    trace_context_t* context,
    trace_ticks_t event_time,
    const trace_thread_ref_t* thread_ref,
    const trace_string_ref_t* category_ref,
    const trace_string_ref_t* name_ref,
    trace_flow_id_t flow_id,
    const trace_arg_t* args, size_t num_args) {
    const size_t content_size = trace::WordsToBytes(1);
    trace::Payload payload = trace::WriteEventRecordBase(
        context, trace::EventType::kFlowStep, event_time,
        thread_ref, category_ref, name_ref,
        args, num_args, content_size);
    if (payload) {
        payload.WriteUint64(flow_id);
    }
}

void trace_context_write_flow_end_event_record(
    trace_context_t* context,
    trace_ticks_t event_time,
    const trace_thread_ref_t* thread_ref,
    const trace_string_ref_t* category_ref,
    const trace_string_ref_t* name_ref,
    trace_flow_id_t flow_id,
    const trace_arg_t* args, size_t num_args) {
    const size_t content_size = trace::WordsToBytes(1);
    trace::Payload payload = trace::WriteEventRecordBase(
        context, trace::EventType::kFlowEnd, event_time,
        thread_ref, category_ref, name_ref,
        args, num_args, content_size);
    if (payload) {
        payload.WriteUint64(flow_id);
    }
}

// TODO(dje): Move data to header?
void trace_context_write_initialization_record(
    trace_context_t* context,
    zx_ticks_t ticks_per_second) {
    const size_t record_size = sizeof(trace::RecordHeader) +
                               trace::WordsToBytes(1);
    trace::Payload payload(context, true, record_size);
    if (payload) {
        payload
            .WriteUint64(trace::MakeRecordHeader(trace::RecordType::kInitialization, record_size))
            .WriteUint64(ticks_per_second);
    }
}

void trace_context_write_string_record(
    trace_context_t* context,
    trace_string_index_t index, const char* string, size_t length) {
    if (unlikely(!trace::WriteStringRecord(context, false, index,
                                           string, length))) {
        // The write will fail if the buffer is full. Nothing we can do.
    }
}

void trace_context_write_thread_record(
    trace_context_t* context,
    trace_thread_index_t index,
    zx_koid_t process_koid,
    zx_koid_t thread_koid) {
    if (unlikely(!trace::WriteThreadRecord(context, index,
                                           process_koid, thread_koid))) {
        // The write will fail if the buffer is full. Nothing we can do.
    }
}

void* trace_context_alloc_record(trace_context_t* context, size_t num_bytes) {
    return context->AllocRecord(num_bytes);
}

void trace_context_snapshot_buffer_header(
    trace_prolonged_context_t* context,
    ::trace::internal::trace_buffer_header* header) {
    auto ctx = reinterpret_cast<trace_context_t*>(context);
    ctx->UpdateBufferHeaderAfterStopped();
    memcpy(header, ctx->buffer_header(), sizeof(*header));
}