Lines matching refs:tr (every reference to the trace_array pointer tr, keyed by source line number):

22 static void tracing_start_function_trace(struct trace_array *tr);
23 static void tracing_stop_function_trace(struct trace_array *tr);
52 int ftrace_allocate_ftrace_ops(struct trace_array *tr)
57 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
68 tr->ops = ops;
69 ops->private = tr;
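
Lines 52-69 outline per-instance ops allocation: every trace_array gets its own ftrace_ops, and ops->private points back at the instance so a callback handed only the ops can recover its trace_array. A minimal reconstruction; the unmatched interior (the allocation and the default callback) is assumed from mainline conventions:

int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top-level instance uses the global ops (line 57) */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);	/* assumed */
	if (!ops)
		return -ENOMEM;

	ops->func = function_trace_call;		/* assumed default */
	tr->ops = ops;
	ops->private = tr;	/* back-pointer used by every callback below */

	return 0;
}
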
74 void ftrace_free_ftrace_ops(struct trace_array *tr)
76 kfree(tr->ops);
77 tr->ops = NULL;
80 int ftrace_create_function_files(struct trace_array *tr,
87 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
90 if (!tr->ops)
93 ftrace_create_filter_files(tr->ops, parent);
98 void ftrace_destroy_function_files(struct trace_array *tr)
100 ftrace_destroy_filter_files(tr->ops);
101 ftrace_free_ftrace_ops(tr);
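
Lines 80-101 pair the tracefs file creation and teardown for an instance. A sketch; only the matched checks and calls are certain, the error returns are assumptions:

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	/* The global instance has its files created at boot */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;	/* assumed */

	ftrace_create_filter_files(tr->ops, parent);
	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);	/* kfree(tr->ops); tr->ops = NULL; */
}
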
120 static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
122 if (!tr->last_func_repeats &&
124 tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
125 if (!tr->last_func_repeats)
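
handle_func_repeats (lines 120-125) allocates the per-cpu repeat-tracking state lazily, only the first time the no-repeats option is enabled, and keeps it for the life of the instance. A sketch; the option-flag name is taken from mainline and should be treated as an assumption here:

static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	/* Allocate once, on first enable; kept while the instance lives */
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}
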
132 static int function_trace_init(struct trace_array *tr)
140 if (!tr->ops)
147 if (!handle_func_repeats(tr, func_flags.val))
150 ftrace_init_array_ops(tr, func);
152 tr->array_buffer.cpu = raw_smp_processor_id();
155 tracing_start_function_trace(tr);
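
function_trace_init (lines 132-155) wires the pieces together: verify the instance ops exist, pick the callback matching the current flags, make sure the repeat state is allocated, then install the callback and start tracing. A reconstruction; select_trace_function() and the cmdline-record call are mainline details not visible in these matches:

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/* Instance ops were allocated at instance creation (line 140) */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);	/* assumed helper */
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();		/* assumed */
	tracing_start_function_trace(tr);
	return 0;
}
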
159 static void function_trace_reset(struct trace_array *tr)
161 tracing_stop_function_trace(tr);
163 ftrace_reset_array_ops(tr);
166 static void function_trace_start(struct trace_array *tr)
168 tracing_reset_online_cpus(&tr->array_buffer);
175 struct trace_array *tr = op->private;
181 if (unlikely(!tr->function_enabled))
191 data = per_cpu_ptr(tr->array_buffer.data, cpu);
193 trace_function(tr, ip, parent_ip, trace_ctx);
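
function_trace_call (lines 175-193) is the default callback: bail out cheaply when the instance is disabled, take the recursion guard, then write one function entry into the instance's per-cpu buffer. A sketch; the recursion-lock calls and the disabled check are assumed from mainline:

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;	/* back-pointer from line 69 */
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit, cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);	/* assumed */
	if (bit < 0)
		return;

	trace_ctx = tracing_gen_ctx();
	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
}
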
220 struct trace_array *tr = op->private;
227 if (unlikely(!tr->function_enabled))
236 data = per_cpu_ptr(tr->array_buffer.data, cpu);
241 trace_function(tr, ip, parent_ip, trace_ctx);
242 __trace_stack(tr, trace_ctx, STACK_SKIP);
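
function_stack_trace_call (lines 220-242) differs in its interior: interrupts stay disabled so the function entry and its stack snapshot land in the buffer together, and a per-cpu disabled counter stands in for the recursion trylock. A sketch under those assumptions:

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;
	long disabled;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	local_irq_save(flags);	/* keep entry + stack dump atomic */
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
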
249 static inline bool is_repeat_check(struct trace_array *tr,
257 ring_buffer_time_stamp(tr->array_buffer.buffer);
265 static inline void process_repeats(struct trace_array *tr,
271 trace_last_func_repeats(tr, last_info, trace_ctx);
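
is_repeat_check and process_repeats (lines 249-271) implement the suppression: consecutive calls with the same ip/parent_ip pair only bump a counter and refresh a timestamp, and when the pattern breaks one summary event is flushed. A sketch; the count cap and field names follow mainline and are assumptions here:

static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {	/* assumed cap */
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;	/* suppress this event */
	}

	return false;
}

static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		/* One summary event stands in for the suppressed run */
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}
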
285 struct trace_array *tr = op->private;
292 if (unlikely(!tr->function_enabled))
300 data = per_cpu_ptr(tr->array_buffer.data, cpu);
311 last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
312 if (is_repeat_check(tr, last_info, ip, parent_ip))
317 process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
319 trace_function(tr, ip, parent_ip, trace_ctx);
331 struct trace_array *tr = op->private;
338 if (unlikely(!tr->function_enabled))
347 data = per_cpu_ptr(tr->array_buffer.data, cpu);
351 last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
352 if (is_repeat_check(tr, last_info, ip, parent_ip))
356 process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
358 trace_function(tr, ip, parent_ip, trace_ctx);
359 __trace_stack(tr, trace_ctx, STACK_SKIP);
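
The two no-repeats callbacks (lines 285-319 and 331-359) are the earlier callbacks with the repeat filter spliced in ahead of trace_function(). A sketch of the non-stack variant, with the recursion guard and goto layout assumed; the stack variant appends __trace_stack() exactly as line 359 shows:

static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit, cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);	/* assumed */
	if (bit < 0)
		return;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (atomic_read(&data->disabled))
		goto out;

	/* Swallow the event if it merely repeats the previous one */
	last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	trace_ctx = tracing_gen_ctx();
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx);

out:
	ftrace_test_recursion_unlock(bit);
}
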
380 static void tracing_start_function_trace(struct trace_array *tr)
382 tr->function_enabled = 0;
383 register_ftrace_function(tr->ops);
384 tr->function_enabled = 1;
387 static void tracing_stop_function_trace(struct trace_array *tr)
389 tr->function_enabled = 0;
390 unregister_ftrace_function(tr->ops);
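
The start/stop pair (lines 380-390) orders the enable flag around registration: function_enabled stays clear while register_ftrace_function() is in flight, so a callback fired mid-registration returns at its first check, and on stop the flag is cleared before unregistering so straggling callbacks bail out early. Reconstructed almost entirely from the matched lines:

static void tracing_start_function_trace(struct trace_array *tr)
{
	/* Keep callbacks quiet until registration completes */
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	/* Late callbacks see the cleared flag and return immediately */
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}
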
396 func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
406 if (tr->current_trace != &function_trace)
415 if (tr->ops->func == func)
418 if (!handle_func_repeats(tr, new_flags))
421 unregister_ftrace_function(tr->ops);
422 tr->ops->func = func;
423 register_ftrace_function(tr->ops);
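
func_set_flag (lines 396-423) handles option flips at runtime. Because a registered ftrace_ops must not have its func pointer swapped in place, the ops is unregistered, retargeted, and re-registered. A sketch; the no-change checks beyond the matched ones are assumptions:

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Only the live function tracer may swap its callback (line 406) */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (old_flags & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);	/* assumed helper */
	if (!func)
		return -EINVAL;

	/* Nothing to do if the callback would not change (line 415) */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

	/* Swap only while unregistered (lines 421-423) */
	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}
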
445 struct trace_array *tr, bool on,
490 if (on == !!tracer_tracing_is_on(tr))
494 tracer_tracing_on(tr);
496 tracer_tracing_off(tr);
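
update_traceon_count (lines 445-496) is the body behind the counted traceon/traceoff probes. The barriers tie the per-ip count to the global tracing state so racing CPUs cannot double-consume a count: read the count before testing the state, publish the state change before charging the count. A sketch; the mapper lookup is assumed from mainline:

static void
update_traceon_count(struct ftrace_probe_ops *ops, unsigned long ip,
		     struct trace_array *tr, bool on, void *data)
{
	struct ftrace_func_mapper *mapper = data;	/* assumed */
	long *count;
	long old_count;

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;
	if (old_count <= 0)
		return;	/* budget exhausted */

	/* See the count before checking the tracing state */
	smp_rmb();

	/* Already in the requested state: consume nothing (line 490) */
	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Publish the new state before charging the count */
	smp_wmb();
	*count = old_count - 1;
}
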
506 struct trace_array *tr, struct ftrace_probe_ops *ops,
509 update_traceon_count(ops, ip, tr, 1, data);
514 struct trace_array *tr, struct ftrace_probe_ops *ops,
517 update_traceon_count(ops, ip, tr, 0, data);
522 struct trace_array *tr, struct ftrace_probe_ops *ops,
525 if (tracer_tracing_is_on(tr))
528 tracer_tracing_on(tr);
533 struct trace_array *tr, struct ftrace_probe_ops *ops,
536 if (!tracer_tracing_is_on(tr))
539 tracer_tracing_off(tr);
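
The uncounted probes (lines 522-539) are idempotent: they test the current state first, so hitting a traceon probe while tracing is already on writes nothing. The counted wrappers at lines 506-517 simply forward to update_traceon_count() with on set to 1 or 0. A sketch of the uncounted pair, reconstructed from the matched lines:

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}
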
564 static __always_inline void trace_stack(struct trace_array *tr)
570 __trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
575 struct trace_array *tr, struct ftrace_probe_ops *ops,
578 trace_stack(tr);
583 struct trace_array *tr, struct ftrace_probe_ops *ops,
596 trace_stack(tr);
615 trace_stack(tr);
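
trace_stack (lines 564-570) wraps __trace_stack() with a freshly generated context, and the stacktrace probes (lines 575-615) call it either unconditionally or, in the counted variant, only while a per-ip budget remains, presumably mirroring update_traceon_count's count handling (an assumption here). A sketch of the helper and the unconditional probe:

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();
	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}
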
643 struct trace_array *tr, struct ftrace_probe_ops *ops,
653 struct trace_array *tr, struct ftrace_probe_ops *ops,
719 ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
735 ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
797 ftrace_trace_probe_callback(struct trace_array *tr,
811 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
830 ret = register_ftrace_function_probe(glob, tr, ops, count);
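
ftrace_trace_probe_callback (lines 797-830) is the shared parser behind the probe commands: a leading '!' unregisters the probe, an optional ':count' suffix bounds how many times it may fire, and everything else registers against the glob. A sketch; the strsep/kstrtoul parsing is assumed from mainline:

static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;	/* -1: fire without limit (assumed) */
	char *number;
	int ret;

	/* '!cmd' means remove the probe (line 811) */
	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");	/* assumed */
	if (!strlen(number))
		goto out_reg;

	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);
	return ret < 0 ? ret : 0;
}
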
836 ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
841 if (!tr)
850 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
855 ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
860 if (!tr)
865 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
870 ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
875 if (!tr)
881 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
886 ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
891 if (!tr)
897 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
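
The four command callbacks (lines 836-897) all funnel into ftrace_trace_probe_callback() after rejecting a NULL instance and selecting the matching probe_ops. A sketch of the traceon/traceoff one; the probe_ops variable names are assumptions:

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* One callback serves both commands; pick the counted ops when a
	 * ':count' parameter was supplied (ops names assumed) */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

In use, these back the set_ftrace_filter command syntax, e.g. echo 'schedule:traceoff:1' > set_ftrace_filter arms a one-shot traceoff probe on schedule().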