/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add generic function handlers
 * for events. That is, if all events have the same parameters and just
 * have distinct tracepoints. Each tracepoint can then be defined with
 * DEFINE_EVENT, which will map the DECLARE_EVENT_CLASS to the
 * tracepoint.
 *
 * TRACE_EVENT is a one-to-one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	DECLARE_EVENT_CLASS(name,				\
			    PARAMS(proto),			\
			    PARAMS(args),			\
			    PARAMS(tstruct),			\
			    PARAMS(assign),			\
			    PARAMS(print));			\
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));


#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};								\
									\
	static struct ftrace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call	__used		\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; this is
 * used to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the high 16 bits of <item>.
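 *
 * As an illustration (the fields below are hypothetical, not taken
 * from any real trace header), a TP_STRUCT__entry() such as
 *
 *	TP_STRUCT__entry(
 *		__field(	int,	id		)
 *		__dynamic_array(char,	msg,	len	)
 *	)
 *
 * would produce at this stage
 *
 *	struct ftrace_data_offsets_<call> {
 *		u32	msg;
 *	};
 *
 * since __field() expands to nothing here. The low 16 bits of 'msg'
 * will hold the offset of the array data from the start of the event;
 * the high 16 bits will hold its size in bytes.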
 */

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags,
 *			    struct trace_event *trace_event)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field;	<-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p = &iter->tmp_seq;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>->event.type) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, "%s: ", <call>);
 *	if (ret)
 *		ret = trace_seq_printf(s, <TP_printk> "\n");
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef __print_hex
#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *trace_event)		\
{									\
	struct ftrace_event_call *event;				\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
	int ret;							\
									\
	event = container_of(trace_event, struct ftrace_event_call,	\
			     event);					\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event->event.type) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", event->name);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *event)			\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.event.type) {			\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", #call);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item),	       \
				 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int notrace							\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	tstruct;							\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int ftrace_get_offsets_##call(			\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(void *__data, proto)
 * {
 *	struct ftrace_event_call *event_call = __data;
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry;	<-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int __data_size;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	{ <assign>; }	<-- Here we assign the entries by the __field and
 *			    __array macros.
 *
 *	if (!filter_current_check_discard(buffer, event_call, entry, event))
 *		trace_nowake_buffer_unlock_commit(buffer,
 *						  event, irq_flags, pc);
 * }
 *
 * static struct trace_event_functions ftrace_event_type_funcs_<call> = {
 *	.trace			= ftrace_raw_output_<call>,	<-- stage 3
 * };
 *
 * static const char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct ftrace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.define_fields		= ftrace_define_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_<call>.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= ftrace_raw_event_<call>,
 *	.reg			= ftrace_event_reg,
 * };
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.class			= &event_class_<template>,
 *	.event.funcs		= &ftrace_event_type_funcs_<call>,
 *	.print_fmt		= print_fmt_<call>,
 * };
 *
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)					\
	static notrace void						\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)						\
	.perf_probe		= perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
ftrace_raw_event_##call(void *__data, proto)				\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_call->event.type,		\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}
/*
 * The ftrace_test_probe function is compiled out; it is only here as a
 * build-time check to make sure that if the tracepoint handling changes,
 * the ftrace probe will fail to compile unless it too is updated.
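 *
 * A sketch of how the check works (the tracepoint name "foo" below is
 * hypothetical): for a tracepoint foo, __DECLARE_TRACE() in
 * <linux/tracepoint.h> generates an empty static inline, roughly
 *
 *	static inline void
 *	check_trace_callback_type_foo(void (*cb)(void *__data, proto))
 *	{
 *	}
 *
 * so passing ftrace_raw_event_<template> to it only type-checks while
 * the probe's signature still matches the tracepoint's prototype.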
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(ftrace_raw_event_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
_TRACE_PERF_PROTO(call, PARAMS(proto));					\
static const char print_fmt_##call[] = print;				\
static struct ftrace_event_class __used event_class_##call = {		\
	.system			= __stringify(TRACE_SYSTEM),		\
	.define_fields		= ftrace_define_fields_##call,		\
	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init		= trace_event_raw_init,			\
	.probe			= ftrace_raw_event_##call,		\
	.reg			= ftrace_event_reg,			\
	_TRACE_PERF_INIT(call)						\
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##template,	\
	.print_fmt		= print_fmt_##template,			\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static const char print_fmt_##call[] = print;				\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##call,	\
	.print_fmt		= print_fmt_##call,			\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback for perf events.
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert into the ring buffer but into a perf counter.
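 *
 * A worked example of the size computation done below (the numbers are
 * purely illustrative): if sizeof(*entry) were 12 and __data_size were 7,
 * then __data_size + sizeof(*entry) + sizeof(u32) == 23 and
 * ALIGN(23, sizeof(u64)) == 24; subtracting the u32 again leaves
 * __entry_size == 20, so the entry plus the u32 that will later hold
 * the buffer size ends on a u64 boundary.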
 *
 * static void ftrace_perf_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_<call> *entry;
 *	struct perf_trace_buf *trace_buf;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	int __cpu;
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Protect the non-NMI buffer
 *	// This also protects the rcu read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
 *	else
 *		trace_buf = rcu_dereference_sched(perf_trace_buf);
 *
 *	if (!trace_buf)
 *		goto end;
 *
 *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 *	// Avoid recursion from perf that could mess up the buffer
 *	if (trace_buf->recursion++)
 *		goto end_recursion;
 *
 *	raw_data = trace_buf->buf;
 *
 *	// Make recursion update visible before entering perf_tp_event
 *	// so that we protect from perf recursions.
 *
 *	barrier();
 *
 *	// Zero dead bytes from alignment to avoid stack leak to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct>	<- do some jobs with dynamic arrays
 *
 *	<assign>	<- affect our values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		      __entry_size);	<- submit them to perf counter
 *
 * }
 */

#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_raw_##call *entry;				\
	struct pt_regs __regs;						\
	u64 __addr = 0, __count = 1;					\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	perf_fetch_caller_regs(&__regs);				\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
		__entry_size, event_call->event.type, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	head = this_cpu_ptr(event_call->perf_events);			\
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
		__count, &__regs, head);				\
}

/*
 * This part is compiled out; it is only here as a build-time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}


#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */

#undef _TRACE_PERF_PROTO
#undef _TRACE_PERF_INIT
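
/*
 * For reference, a minimal trace header processed by the stages above
 * would look roughly like the following (the "sample" system and its
 * event are hypothetical; see samples/trace_events/ for a real one):
 *
 *	#undef TRACE_SYSTEM
 *	#define TRACE_SYSTEM sample
 *
 *	#if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
 *	#define _TRACE_SAMPLE_H
 *
 *	#include <linux/tracepoint.h>
 *
 *	TRACE_EVENT(sample_event,
 *		TP_PROTO(int id, const char *name),
 *		TP_ARGS(id, name),
 *		TP_STRUCT__entry(
 *			__field(	int,	id	)
 *			__string(	name,	name	)
 *		),
 *		TP_fast_assign(
 *			__entry->id = id;
 *			__assign_str(name, name);
 *		),
 *		TP_printk("id=%d name=%s", __entry->id, __get_str(name))
 *	);
 *
 *	#endif
 *
 *	#define TRACE_INCLUDE_FILE sample
 *	#include <trace/define_trace.h>
 *
 * One .c file must define CREATE_TRACE_POINTS before including the
 * header so that the tracepoints themselves get created.
 */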