Lines matching references to 'tr' (struct trace_array pointers) in kernel/trace/trace.c; the leading number on each line is the source line in that file.

97 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
179 int tracing_set_tracer(struct trace_array *tr, const char *buf);
180 static void ftrace_trace_userstack(struct trace_array *tr,
503 void trace_set_ring_buffer_expanded(struct trace_array *tr)
505 if (!tr)
506 tr = &global_trace;
507 tr->ring_buffer_expanded = true;
514 struct trace_array *tr;
518 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
519 if (tr == this_tr) {
520 tr->ref++;
556 int tracing_check_open_get_tr(struct trace_array *tr)
567 if (tr && trace_array_get(tr) < 0)
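
Every tracefs open handler pins the instance before exposing it: tracing_check_open_get_tr() fails with -ENODEV under lockdown, when tracing is disabled, or when the trace_array is being removed, and trace_array_put() drops the reference on release. A minimal sketch of that pairing, in trace.c context, with my_open/my_release as hypothetical handlers:

    static int my_open(struct inode *inode, struct file *filp)
    {
            struct trace_array *tr = inode->i_private;
            int ret;

            /* bumps tr->ref under trace_types_lock; -ENODEV if tr is dying */
            ret = tracing_check_open_get_tr(tr);
            if (ret)
                    return ret;

            filp->private_data = tr;
            return 0;
    }

    static int my_release(struct inode *inode, struct file *filp)
    {
            trace_array_put(inode->i_private);      /* balance the open-time get */
            return 0;
    }
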
971 static inline void ftrace_trace_stack(struct trace_array *tr,
982 static inline void ftrace_trace_stack(struct trace_array *tr,
1015 void tracer_tracing_on(struct trace_array *tr)
1017 if (tr->array_buffer.buffer)
1018 ring_buffer_record_on(tr->array_buffer.buffer);
1027 tr->buffer_disabled = 0;
1062 int __trace_array_puts(struct trace_array *tr, unsigned long ip,
1071 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1074 if (unlikely(tracing_selftest_running && tr == &global_trace))
1083 buffer = tr->array_buffer.buffer;
1105 ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1168 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1171 struct tracer *tracer = tr->current_trace;
1175 trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1176 trace_array_puts(tr, "*** snapshot is being ignored ***\n");
1180 if (!tr->allocated_snapshot) {
1181 trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
1182 trace_array_puts(tr, "*** stopping trace here! ***\n");
1183 tracer_tracing_off(tr);
1189 trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
1190 trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1195 update_max_tr(tr, current, smp_processor_id(), cond_data);
1199 void tracing_snapshot_instance(struct trace_array *tr)
1201 tracing_snapshot_instance_cond(tr, NULL);
1220 struct trace_array *tr = &global_trace;
1222 tracing_snapshot_instance(tr);
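
tracing_snapshot() is the exported wrapper around tracing_snapshot_instance(&global_trace); as the warning strings above show, calling it without an allocated snapshot buffer stops tracing. A minimal, hedged sketch (my_condition is illustrative):

    int ret = tracing_alloc_snapshot();   /* allocate the snapshot buffer up front */

    if (!ret && my_condition)
            tracing_snapshot();           /* swap the live buffer into the snapshot */
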
1228 * @tr: The tracing instance to snapshot
1239 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1241 tracing_snapshot_instance_cond(tr, cond_data);
1247 * @tr: The tracing instance
1254 * the tr->max_lock lock, which the code calling
1259 void *tracing_cond_snapshot_data(struct trace_array *tr)
1264 arch_spin_lock(&tr->max_lock);
1266 if (tr->cond_snapshot)
1267 cond_data = tr->cond_snapshot->cond_data;
1269 arch_spin_unlock(&tr->max_lock);
1280 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1285 if (!tr->allocated_snapshot) {
1288 order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
1289 ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
1294 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1295 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1299 tr->allocated_snapshot = true;
1305 static void free_snapshot(struct trace_array *tr)
1312 ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
1313 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1314 set_buffer_entries(&tr->max_buffer, 1);
1315 tracing_reset_online_cpus(&tr->max_buffer);
1316 tr->allocated_snapshot = false;
1319 static int tracing_arm_snapshot_locked(struct trace_array *tr)
1325 spin_lock(&tr->snapshot_trigger_lock);
1326 if (tr->snapshot == UINT_MAX) {
1327 spin_unlock(&tr->snapshot_trigger_lock);
1331 tr->snapshot++;
1332 spin_unlock(&tr->snapshot_trigger_lock);
1334 ret = tracing_alloc_snapshot_instance(tr);
1336 spin_lock(&tr->snapshot_trigger_lock);
1337 tr->snapshot--;
1338 spin_unlock(&tr->snapshot_trigger_lock);
1344 int tracing_arm_snapshot(struct trace_array *tr)
1349 ret = tracing_arm_snapshot_locked(tr);
1355 void tracing_disarm_snapshot(struct trace_array *tr)
1357 spin_lock(&tr->snapshot_trigger_lock);
1358 if (!WARN_ON(!tr->snapshot))
1359 tr->snapshot--;
1360 spin_unlock(&tr->snapshot_trigger_lock);
1375 struct trace_array *tr = &global_trace;
1378 ret = tracing_alloc_snapshot_instance(tr);
1410 * @tr: The tracing instance
1421 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1436 if (tr->current_trace->use_max_tr) {
1449 if (tr->cond_snapshot) {
1454 ret = tracing_arm_snapshot_locked(tr);
1459 arch_spin_lock(&tr->max_lock);
1460 tr->cond_snapshot = cond_snapshot;
1461 arch_spin_unlock(&tr->max_lock);
1477 * @tr: The tracing instance
1485 int tracing_snapshot_cond_disable(struct trace_array *tr)
1490 arch_spin_lock(&tr->max_lock);
1492 if (!tr->cond_snapshot)
1495 kfree(tr->cond_snapshot);
1496 tr->cond_snapshot = NULL;
1499 arch_spin_unlock(&tr->max_lock);
1502 tracing_disarm_snapshot(tr);
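
The calls above form the conditional-snapshot lifecycle: tracing_snapshot_cond_enable() arms the instance with caller data and an update callback, tracing_snapshot_cond() requests a snapshot that the callback can veto (it runs under tr->max_lock, so it must not sleep), and tracing_snapshot_cond_disable() tears it down. A hedged sketch, with my_* names as illustrative:

    static u64 my_threshold = 1000;

    /* Veto callback: runs under tr->max_lock inside update_max_tr(). */
    static bool my_update(struct trace_array *tr, void *cond_data)
    {
            u64 *threshold = tracing_cond_snapshot_data(tr); /* enable-time data */

            return (u64)(unsigned long)cond_data > *threshold; /* true => snapshot */
    }

    static int my_arm(struct trace_array *tr)
    {
            return tracing_snapshot_cond_enable(tr, &my_threshold, my_update);
    }

    static void my_check(struct trace_array *tr, u64 measured)
    {
            /* my_update() decides whether this actually snapshots */
            tracing_snapshot_cond(tr, (void *)(unsigned long)measured);
    }

    static void my_teardown(struct trace_array *tr)
    {
            tracing_snapshot_cond_disable(tr);
    }
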
1513 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1530 void *tracing_cond_snapshot_data(struct trace_array *tr)
1535 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1540 int tracing_snapshot_cond_disable(struct trace_array *tr)
1545 #define free_snapshot(tr) do { } while (0)
1546 #define tracing_arm_snapshot_locked(tr) ({ -EBUSY; })
1549 void tracer_tracing_off(struct trace_array *tr)
1551 if (tr->array_buffer.buffer)
1552 ring_buffer_record_off(tr->array_buffer.buffer);
1561 tr->buffer_disabled = 1;
1591 * @tr : the trace array to know if ring buffer is enabled
1595 bool tracer_tracing_is_on(struct trace_array *tr)
1597 if (tr->array_buffer.buffer)
1598 return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
1599 return !tr->buffer_disabled;
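
tracer_tracing_on()/off()/is_on() are the per-instance primitives behind the tracing_on control file; for the top-level buffer the exported tracing_on(), tracing_off() and tracing_is_on() wrap them. A short sketch of the classic use, freezing the buffer when a problem is detected (my_bug_detected is hypothetical):

    if (unlikely(my_bug_detected())) {
            tracing_off();          /* stop recording; the buffer keeps its contents */
            pr_warn("tracing frozen (is_on=%d)\n", tracing_is_on());
    }
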
1680 bool trace_clock_in_ns(struct trace_array *tr)
1682 if (trace_clocks[tr->clock_id].in_ns)
1828 struct trace_array *tr = container_of(work, struct trace_array,
1830 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1835 struct trace_array *tr = container_of(iwork, struct trace_array,
1837 queue_work(fsnotify_wq, &tr->fsnotify_work);
1840 static void trace_create_maxlat_file(struct trace_array *tr,
1843 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1844 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1845 tr->d_max_latency = trace_create_file("tracing_max_latency",
1847 d_tracer, tr,
1864 void latency_fsnotify(struct trace_array *tr)
1869 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1873 irq_work_queue(&tr->fsnotify_irqwork);
1878 #define trace_create_maxlat_file(tr, d_tracer) \
1880 d_tracer, tr, &tracing_max_lat_fops)
1890 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1892 struct array_buffer *trace_buf = &tr->array_buffer;
1893 struct array_buffer *max_buf = &tr->max_buffer;
1900 max_data->saved_latency = tr->max_latency;
1921 latency_fsnotify(tr);
1926 * @tr: tracer
1931 * Flip the buffers between the @tr and the max_tr and record information
1935 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1938 if (tr->stop_count)
1943 if (!tr->allocated_snapshot) {
1945 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1949 arch_spin_lock(&tr->max_lock);
1952 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1953 ring_buffer_record_on(tr->max_buffer.buffer);
1955 ring_buffer_record_off(tr->max_buffer.buffer);
1958 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1959 arch_spin_unlock(&tr->max_lock);
1963 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1965 __update_max_tr(tr, tsk, cpu);
1967 arch_spin_unlock(&tr->max_lock);
1970 ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
1975 * @tr: tracer
1979 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1982 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1986 if (tr->stop_count)
1990 if (!tr->allocated_snapshot) {
1992 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1996 arch_spin_lock(&tr->max_lock);
1998 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
2008 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
2014 __update_max_tr(tr, tsk, cpu);
2015 arch_spin_unlock(&tr->max_lock);
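
update_max_tr() and update_max_tr_single() are what the latency tracers call on a new worst case: the live and max buffers are swapped (whole array or one CPU) and __update_max_tr() records the task, latency and timestamps, then kicks latency_fsnotify(). A hedged sketch in the spirit of the wakeup/irqsoff tracers (the delta computation is illustrative):

    if (delta > tr->max_latency) {
            tr->max_latency = delta;        /* __update_max_tr() saves this into max_data */
            update_max_tr(tr, current, smp_processor_id(), NULL);  /* swap live <-> max */
    }
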
2057 iter->array_buffer = &iter->tr->max_buffer;
2087 struct trace_array *tr = &global_trace;
2088 struct tracer *saved_tracer = tr->current_trace;
2115 tracing_reset_online_cpus(&tr->array_buffer);
2117 tr->current_trace = type;
2122 if (tr->ring_buffer_expanded)
2123 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
2125 tr->allocated_snapshot = true;
2131 ret = type->selftest(type, tr);
2133 tr->current_trace = saved_tracer;
2141 tracing_reset_online_cpus(&tr->array_buffer);
2145 tr->allocated_snapshot = false;
2148 if (tr->ring_buffer_expanded)
2149 ring_buffer_resize(tr->max_buffer.buffer, 1,
2233 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2363 struct trace_array *tr;
2367 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2368 if (!tr->clear_trace)
2370 tr->clear_trace = false;
2371 tracing_reset_online_cpus(&tr->array_buffer);
2373 tracing_reset_online_cpus(&tr->max_buffer);
2390 static void tracing_start_tr(struct trace_array *tr)
2398 raw_spin_lock_irqsave(&tr->start_lock, flags);
2399 if (--tr->stop_count) {
2400 if (WARN_ON_ONCE(tr->stop_count < 0)) {
2402 tr->stop_count = 0;
2408 arch_spin_lock(&tr->max_lock);
2410 buffer = tr->array_buffer.buffer;
2415 buffer = tr->max_buffer.buffer;
2420 arch_spin_unlock(&tr->max_lock);
2423 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2438 static void tracing_stop_tr(struct trace_array *tr)
2443 raw_spin_lock_irqsave(&tr->start_lock, flags);
2444 if (tr->stop_count++)
2448 arch_spin_lock(&tr->max_lock);
2450 buffer = tr->array_buffer.buffer;
2455 buffer = tr->max_buffer.buffer;
2460 arch_spin_unlock(&tr->max_lock);
2463 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2650 struct trace_array *tr = trace_file->tr;
2653 *current_rb = tr->array_buffer.buffer;
2655 if (!tr->no_filter_buffering_ref &&
2812 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2831 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2845 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2846 ftrace_trace_userstack(tr, buffer, trace_ctx);
2860 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2864 struct trace_buffer *buffer = tr->array_buffer.buffer;
2971 static inline void ftrace_trace_stack(struct trace_array *tr,
2976 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2982 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
2985 struct trace_buffer *buffer = tr->array_buffer.buffer;
3031 ftrace_trace_userstack(struct trace_array *tr,
3038 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3077 static void ftrace_trace_userstack(struct trace_array *tr,
3094 void trace_last_func_repeats(struct trace_array *tr,
3098 struct trace_buffer *buffer = tr->array_buffer.buffer;
3241 struct trace_array *tr = &global_trace;
3268 buffer = tr->array_buffer.buffer;
3281 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3354 int trace_array_vprintk(struct trace_array *tr,
3357 if (tracing_selftest_running && tr == &global_trace)
3360 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3365 * @tr: The instance trace_array descriptor
3380 * Note, trace_array_init_printk() must be called on @tr before this
3384 int trace_array_printk(struct trace_array *tr,
3390 if (!tr)
3394 if (tr == &global_trace)
3397 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3401 ret = trace_array_vprintk(tr, ip, fmt, ap);
3409 * @tr: The trace array to initialize the buffers for
3415 int trace_array_init_printk(struct trace_array *tr)
3417 if (!tr)
3421 if (tr == &global_trace)
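
trace_array_printk() only writes to instances created via trace_array_create()/trace_array_get_by_name(), never to global_trace, and trace_array_init_printk() must have been called once on the instance beforehand. A minimal sketch under those constraints (id/ns are illustrative):

    /* one-time setup, e.g. in module init; allocates the printk buffers */
    if (trace_array_init_printk(tr))
            return -ENOMEM;

    /* later, from anywhere: writes into this instance's ring buffer only */
    trace_array_printk(tr, _THIS_IP_, "request %d took %llu ns\n", id, ns);
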
3553 * iter->tr is NULL when used with tp_printk, which makes
3556 if (!iter->tr || iter->fmt == static_fmt_buf)
3806 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3967 struct trace_array *tr = iter->tr;
3974 if (unlikely(tr->current_trace != iter->trace)) {
3978 iter->trace = tr->current_trace;
4075 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4079 if (!tr)
4080 tr = &global_trace;
4082 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4087 unsigned long trace_total_entries(struct trace_array *tr)
4091 if (!tr)
4092 tr = &global_trace;
4094 get_total_entries(&tr->array_buffer, &total, &entries);
4212 struct trace_array *tr = iter->tr;
4214 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4238 struct trace_array *tr = iter->tr;
4240 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4250 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4261 if (tr->trace_flags & TRACE_ITER_FIELDS)
4273 struct trace_array *tr = iter->tr;
4280 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4298 struct trace_array *tr = iter->tr;
4306 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4328 struct trace_array *tr = iter->tr;
4335 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4384 struct trace_array *tr = iter->tr;
4385 unsigned long trace_flags = tr->trace_flags;
4435 struct trace_array *tr = iter->tr;
4444 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4451 struct trace_array *tr = iter->tr;
4452 unsigned long trace_flags = tr->trace_flags;
4512 if (iter->tr->allocated_snapshot)
4534 if (iter->tr) {
4616 struct trace_array *tr = inode->i_private;
4655 iter->trace = tr->current_trace;
4660 iter->tr = tr;
4664 if (tr->current_trace->print_max || snapshot)
4665 iter->array_buffer = &tr->max_buffer;
4668 iter->array_buffer = &tr->array_buffer;
4683 if (trace_clocks[tr->clock_id].in_ns)
4690 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4691 tracing_stop_tr(tr);
4749 struct trace_array *tr = inode->i_private;
4752 ret = tracing_check_open_get_tr(tr);
4763 * Update the tr ref count associated to it.
4770 ret = tracing_check_open_get_tr(file->tr);
4778 trace_array_put(file->tr);
4797 trace_array_put(file->tr);
4817 struct trace_array *tr = inode->i_private;
4823 trace_array_put(tr);
4839 if (!iter->snapshot && tr->stop_count)
4841 tracing_start_tr(tr);
4843 __trace_array_put(tr);
4855 struct trace_array *tr = inode->i_private;
4857 trace_array_put(tr);
4863 struct trace_array *tr = inode->i_private;
4865 trace_array_put(tr);
4872 struct trace_array *tr = inode->i_private;
4876 ret = tracing_check_open_get_tr(tr);
4883 struct array_buffer *trace_buf = &tr->array_buffer;
4886 if (tr->current_trace->print_max)
4887 trace_buf = &tr->max_buffer;
4900 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4905 trace_array_put(tr);
4916 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4918 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4923 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4925 while (t && !trace_ok_for_array(t, tr))
4934 struct trace_array *tr = m->private;
4940 t = get_tracer_for_array(tr, t->next);
4947 struct trace_array *tr = m->private;
4953 t = get_tracer_for_array(tr, trace_types);
4990 struct trace_array *tr = inode->i_private;
4994 ret = tracing_check_open_get_tr(tr);
5000 trace_array_put(tr);
5005 m->private = tr;
5012 struct trace_array *tr = inode->i_private;
5014 trace_array_put(tr);
5058 struct trace_array *tr = file_inode(filp)->i_private;
5063 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5069 cpumask_pr_args(tr->tracing_cpumask));
5082 int tracing_set_cpumask(struct trace_array *tr,
5087 if (!tr)
5091 arch_spin_lock(&tr->max_lock);
5097 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5099 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5100 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5102 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5105 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5107 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5108 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5110 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5114 arch_spin_unlock(&tr->max_lock);
5117 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5126 struct trace_array *tr = file_inode(filp)->i_private;
5137 err = tracing_set_cpumask(tr, tracing_cpumask_new);
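
tracing_set_cpumask() diffs the old and new masks under tr->max_lock, disabling recording on CPUs that leave the mask and re-enabling it on CPUs that join, for both the live and (if present) the max buffer; it backs the tracing_cpumask file. A hedged sketch restricting an instance to CPUs 0-1:

    cpumask_var_t mask;
    int err;

    if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
            return -ENOMEM;

    cpumask_set_cpu(0, mask);
    cpumask_set_cpu(1, mask);
    err = tracing_set_cpumask(tr, mask);    /* 0 on success, -EINVAL if !tr */
    free_cpumask_var(mask);
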
5162 struct trace_array *tr = m->private;
5167 tracer_flags = tr->current_trace->flags->val;
5168 trace_opts = tr->current_trace->flags->opts;
5171 if (tr->trace_flags & (1 << i))
5188 static int __set_tracer_option(struct trace_array *tr,
5195 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5207 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5209 struct tracer *trace = tr->current_trace;
5218 return __set_tracer_option(tr, trace->flags, opts, neg);
5233 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5240 if (!!(tr->trace_flags & mask) == !!enabled)
5244 if (tr->current_trace->flag_changed)
5245 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5249 tr->trace_flags |= mask;
5251 tr->trace_flags &= ~mask;
5259 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5267 trace_event_follow_fork(tr, enabled);
5270 ftrace_pid_follow_fork(tr, enabled);
5273 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5275 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5287 int trace_set_options(struct trace_array *tr, char *option)
5309 ret = set_tracer_option(tr, cmp, neg);
5311 ret = set_tracer_flag(tr, 1 << ret, !neg);
5351 struct trace_array *tr = m->private;
5363 ret = trace_set_options(tr, buf);
5374 struct trace_array *tr = inode->i_private;
5377 ret = tracing_check_open_get_tr(tr);
5383 trace_array_put(tr);
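
trace_set_options() accepts the same strings written to the trace_options file, with a leading "no" clearing the flag; named core flags land in set_tracer_flag(), anything else is tried as a tracer-specific option via set_tracer_option(). Note it strips its argument in place, so it needs a writable buffer. A short sketch:

    char on[]  = "stacktrace";      /* sets TRACE_ITER_STACKTRACE */
    char off[] = "nooverwrite";     /* clears TRACE_ITER_OVERWRITE */

    trace_set_options(tr, on);
    trace_set_options(tr, off);
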
5883 struct trace_array *tr = filp->private_data;
5888 r = sprintf(buf, "%s\n", tr->current_trace->name);
5894 int tracer_init(struct tracer *t, struct trace_array *tr)
5896 tracing_reset_online_cpus(&tr->array_buffer);
5897 return t->init(tr);
5918 /* resize @tr's buffer to the size of @size_tr's entries */
5945 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5955 trace_set_ring_buffer_expanded(tr);
5958 if (!tr->array_buffer.buffer)
5962 tracing_stop_tr(tr);
5964 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
5969 if (!tr->allocated_snapshot)
5972 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5974 int r = resize_buffer_duplicate_size(&tr->array_buffer,
5975 &tr->array_buffer, cpu);
5997 update_buffer_entries(&tr->max_buffer, cpu);
6002 update_buffer_entries(&tr->array_buffer, cpu);
6004 tracing_start_tr(tr);
6008 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6023 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6036 * @tr: The tracing instance
6045 int tracing_update_buffers(struct trace_array *tr)
6050 if (!tr->ring_buffer_expanded)
6051 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
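
tracing_update_buffers() grows the ring buffer from its small boot-time size to trace_buf_size on first real use, and is a no-op once tr->ring_buffer_expanded is set; event-enable paths call it before turning anything on. Sketch:

    int ret = tracing_update_buffers(tr);   /* no-op once expanded */

    if (ret < 0)
            return ret;
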
6061 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6067 static void tracing_set_nop(struct trace_array *tr)
6069 if (tr->current_trace == &nop_trace)
6072 tr->current_trace->enabled--;
6074 if (tr->current_trace->reset)
6075 tr->current_trace->reset(tr);
6077 tr->current_trace = &nop_trace;
6082 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6085 if (!tr->dir)
6092 create_trace_option_files(tr, t);
6095 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6105 if (!tr->ring_buffer_expanded) {
6106 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6121 if (t == tr->current_trace)
6127 arch_spin_lock(&tr->max_lock);
6128 if (tr->cond_snapshot)
6130 arch_spin_unlock(&tr->max_lock);
6144 if (!trace_ok_for_array(t, tr)) {
6150 if (tr->trace_ref) {
6157 tr->current_trace->enabled--;
6159 if (tr->current_trace->reset)
6160 tr->current_trace->reset(tr);
6163 had_max_tr = tr->current_trace->use_max_tr;
6166 tr->current_trace = &nop_trace;
6177 free_snapshot(tr);
6178 tracing_disarm_snapshot(tr);
6182 ret = tracing_arm_snapshot_locked(tr);
6187 tr->current_trace = &nop_trace;
6191 ret = tracer_init(t, tr);
6195 tracing_disarm_snapshot(tr);
6201 tr->current_trace = t;
6202 tr->current_trace->enabled++;
6203 trace_branch_enable(tr);
6214 struct trace_array *tr = filp->private_data;
6232 err = tracing_set_tracer(tr, name);
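
tracing_set_tracer() is the programmatic equivalent of writing to the current_tracer file: it expands the buffer if needed, refuses to switch while readers hold tr->trace_ref or a conditional snapshot is armed, resets the old tracer and inits the new one. A hedged sketch, as used for boot-time default tracer selection:

    if (tracing_set_tracer(tr, "function"))   /* -EINVAL for an unknown tracer */
            pr_warn("could not select the function tracer\n");
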
6282 struct trace_array *tr = filp->private_data;
6290 if (tr->current_trace->update_thresh) {
6291 ret = tr->current_trace->update_thresh(tr);
6309 struct trace_array *tr = filp->private_data;
6311 return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6318 struct trace_array *tr = filp->private_data;
6320 return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6325 static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
6328 if (cpumask_empty(tr->pipe_cpumask)) {
6329 cpumask_setall(tr->pipe_cpumask);
6332 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6333 cpumask_set_cpu(cpu, tr->pipe_cpumask);
6339 static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
6342 WARN_ON(!cpumask_full(tr->pipe_cpumask));
6343 cpumask_clear(tr->pipe_cpumask);
6345 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6346 cpumask_clear_cpu(cpu, tr->pipe_cpumask);
6352 struct trace_array *tr = inode->i_private;
6357 ret = tracing_check_open_get_tr(tr);
6363 ret = open_pipe_on_cpu(tr, cpu);
6375 iter->trace = tr->current_trace;
6385 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6389 if (trace_clocks[tr->clock_id].in_ns)
6392 iter->tr = tr;
6393 iter->array_buffer = &tr->array_buffer;
6403 tr->trace_ref++;
6411 close_pipe_on_cpu(tr, cpu);
6413 __trace_array_put(tr);
6421 struct trace_array *tr = inode->i_private;
6425 tr->trace_ref--;
6429 close_pipe_on_cpu(tr, iter->cpu_file);
6435 trace_array_put(tr);
6443 struct trace_array *tr = iter->tr;
6449 if (tr->trace_flags & TRACE_ITER_BLOCK)
6456 filp, poll_table, iter->tr->buffer_percent);
6488 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6548 /* reset all but tr, trace, and overruns */
6757 struct trace_array *tr = inode->i_private;
6775 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6776 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6783 if (!tr->ring_buffer_expanded)
6792 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6805 struct trace_array *tr = inode->i_private;
6819 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6832 struct trace_array *tr = filp->private_data;
6839 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6840 if (!tr->ring_buffer_expanded)
6843 if (tr->ring_buffer_expanded)
6869 struct trace_array *tr = inode->i_private;
6872 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6873 tracer_tracing_off(tr);
6875 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6877 trace_array_put(tr);
6888 struct trace_array *tr = filp->private_data;
6905 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6922 buffer = tr->array_buffer.buffer;
6956 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6959 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
6973 event_triggers_post_call(tr->trace_marker_file, tt);
6982 struct trace_array *tr = filp->private_data;
6995 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7006 buffer = tr->array_buffer.buffer;
7034 struct trace_array *tr = m->private;
7040 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7041 i == tr->clock_id ? "]" : "");
7047 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7060 tr->clock_id = i;
7062 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7068 tracing_reset_online_cpus(&tr->array_buffer);
7071 if (tr->max_buffer.buffer)
7072 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7073 tracing_reset_online_cpus(&tr->max_buffer);
7085 struct trace_array *tr = m->private;
7100 ret = tracing_set_clock(tr, clockstr);
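
tracing_set_clock() switches the timestamp source for the live buffer (and the max buffer when it exists) and then resets both, since entries stamped by two different clocks cannot be ordered against each other. Sketch:

    /* "global" is monotonic across CPUs, at some cost over the default "local" */
    if (tracing_set_clock(tr, "global") < 0)
            pr_warn("unknown trace clock\n");
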
7111 struct trace_array *tr = inode->i_private;
7114 ret = tracing_check_open_get_tr(tr);
7120 trace_array_put(tr);
7127 struct trace_array *tr = m->private;
7131 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7143 struct trace_array *tr = inode->i_private;
7146 ret = tracing_check_open_get_tr(tr);
7152 trace_array_put(tr);
7168 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7174 if (set && tr->no_filter_buffering_ref++)
7178 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7183 --tr->no_filter_buffering_ref;
7202 struct trace_array *tr = inode->i_private;
7207 ret = tracing_check_open_get_tr(tr);
7228 iter->tr = tr;
7229 iter->array_buffer = &tr->max_buffer;
7236 trace_array_put(tr);
7241 static void tracing_swap_cpu_buffer(void *tr)
7243 update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
7252 struct trace_array *tr = iter->tr;
7256 ret = tracing_update_buffers(tr);
7266 if (tr->current_trace->use_max_tr) {
7272 arch_spin_lock(&tr->max_lock);
7273 if (tr->cond_snapshot)
7275 arch_spin_unlock(&tr->max_lock);
7286 if (tr->allocated_snapshot)
7287 free_snapshot(tr);
7297 if (tr->allocated_snapshot)
7298 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7299 &tr->array_buffer, iter->cpu_file);
7301 ret = tracing_arm_snapshot_locked(tr);
7308 update_max_tr(tr, current, smp_processor_id(), NULL);
7312 (void *)tr, 1);
7314 tracing_disarm_snapshot(tr);
7317 if (tr->allocated_snapshot) {
7319 tracing_reset_online_cpus(&tr->max_buffer);
7321 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7378 info->iter.array_buffer = &info->iter.tr->max_buffer;
7616 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7622 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7625 tr->n_err_log_entries++;
7632 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7668 * @tr: The associated trace array for the error (NULL for top level array)
7693 void tracing_log_err(struct trace_array *tr,
7700 if (!tr)
7701 tr = &global_trace;
7706 err = get_tracing_log_err(tr, len);
7720 list_add_tail(&err->list, &tr->err_log);
7724 static void clear_tracing_err_log(struct trace_array *tr)
7729 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7734 tr->n_err_log_entries = 0;
7740 struct trace_array *tr = m->private;
7744 return seq_list_start(&tr->err_log, *pos);
7749 struct trace_array *tr = m->private;
7751 return seq_list_next(v, &tr->err_log, pos);
7798 struct trace_array *tr = inode->i_private;
7801 ret = tracing_check_open_get_tr(tr);
7807 clear_tracing_err_log(tr);
7813 m->private = tr;
7815 trace_array_put(tr);
7830 struct trace_array *tr = inode->i_private;
7832 trace_array_put(tr);
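
tracing_log_err() appends a structured error to the instance's err_log (surfaced as the tracefs error_log file), recycling the oldest entry once TRACING_LOG_ERRS_MAX is reached. A hedged sketch of reporting a command parse error (the names and index are illustrative):

    static const char *my_errs[] = { "invalid argument" };

    /* loc names the subsystem, cmd is echoed back, type indexes my_errs[],
     * and pos places the caret under the offending character of cmd */
    tracing_log_err(tr, "my_cmd", cmd_str, my_errs, 0, err_pos);
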
7850 struct trace_array *tr = inode->i_private;
7854 ret = tracing_check_open_get_tr(tr);
7860 trace_array_put(tr);
7866 info->iter.tr = tr;
7868 info->iter.trace = tr->current_trace;
7869 info->iter.array_buffer = &tr->array_buffer;
7876 tr->trace_ref++;
7882 trace_array_put(tr);
7911 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8005 iter->tr->trace_ref--;
8007 __trace_array_put(iter->tr);
8097 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8176 ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
8181 if (!tracer_tracing_is_on(iter->tr))
8233 struct trace_array *tr = inode->i_private;
8234 struct array_buffer *trace_buf = &tr->array_buffer;
8259 if (trace_clocks[tr->clock_id].in_ns) {
8334 struct trace_array *tr, struct ftrace_probe_ops *ops,
8337 tracing_snapshot_instance(tr);
8342 struct trace_array *tr, struct ftrace_probe_ops *ops,
8359 tracing_snapshot_instance(tr);
8385 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8401 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8429 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8437 if (!tr)
8447 ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
8449 tracing_disarm_snapshot(tr);
8471 ret = tracing_arm_snapshot(tr);
8475 ret = register_ftrace_function_probe(glob, tr, ops, count);
8477 tracing_disarm_snapshot(tr);
8495 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8497 if (WARN_ON(!tr->dir))
8501 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8505 return tr->dir;
8508 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8512 if (tr->percpu_dir)
8513 return tr->percpu_dir;
8515 d_tracer = tracing_get_dentry(tr);
8519 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8521 MEM_FAIL(!tr->percpu_dir,
8524 return tr->percpu_dir;
8539 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8541 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8557 tr, cpu, &tracing_pipe_fops);
8561 tr, cpu, &tracing_fops);
8564 tr, cpu, &tracing_buffers_fops);
8567 tr, cpu, &tracing_stats_fops);
8570 tr, cpu, &tracing_entries_fops);
8574 tr, cpu, &snapshot_fops);
8577 tr, cpu, &snapshot_raw_fops);
8618 ret = __set_tracer_option(topt->tr, topt->flags,
8635 ret = tracing_check_open_get_tr(topt->tr);
8647 trace_array_put(topt->tr);
8697 struct trace_array *tr;
8701 get_tr_index(tr_index, &tr, &index);
8703 if (tr->trace_flags & (1 << index))
8716 struct trace_array *tr;
8721 get_tr_index(tr_index, &tr, &index);
8732 ret = set_tracer_flag(tr, 1 << index, val);
8767 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8771 if (tr->options)
8772 return tr->options;
8774 d_tracer = tracing_get_dentry(tr);
8778 tr->options = tracefs_create_dir("options", d_tracer);
8779 if (!tr->options) {
8784 return tr->options;
8788 create_trace_option_file(struct trace_array *tr,
8795 t_options = trace_options_init_dentry(tr);
8801 topt->tr = tr;
8809 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8830 if (!trace_ok_for_array(tracer, tr))
8833 for (i = 0; i < tr->nr_topts; i++) {
8835 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8848 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8855 tr->topts = tr_topts;
8856 tr->topts[tr->nr_topts].tracer = tracer;
8857 tr->topts[tr->nr_topts].topts = topts;
8858 tr->nr_topts++;
8861 create_trace_option_file(tr, &topts[cnt], flags,
8870 create_trace_option_core_file(struct trace_array *tr,
8875 t_options = trace_options_init_dentry(tr);
8880 (void *)&tr->trace_flags_index[index],
8884 static void create_trace_options_dir(struct trace_array *tr)
8887 bool top_level = tr == &global_trace;
8890 t_options = trace_options_init_dentry(tr);
8897 create_trace_option_core_file(tr, trace_options[i], i);
8905 struct trace_array *tr = filp->private_data;
8909 r = tracer_tracing_is_on(tr);
8919 struct trace_array *tr = filp->private_data;
8920 struct trace_buffer *buffer = tr->array_buffer.buffer;
8930 if (!!val == tracer_tracing_is_on(tr)) {
8933 tracer_tracing_on(tr);
8934 if (tr->current_trace->start)
8935 tr->current_trace->start(tr);
8937 tracer_tracing_off(tr);
8938 if (tr->current_trace->stop)
8939 tr->current_trace->stop(tr);
8963 struct trace_array *tr = filp->private_data;
8967 r = tr->buffer_percent;
8977 struct trace_array *tr = filp->private_data;
8988 tr->buffer_percent = val;
9006 struct trace_array *tr = filp->private_data;
9012 order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9024 struct trace_array *tr = filp->private_data;
9045 tracing_stop_tr(tr);
9047 old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9051 ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order);
9057 if (!tr->allocated_snapshot)
9060 ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
9063 cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
9088 tracing_start_tr(tr);
9103 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9106 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9110 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9112 buf->tr = tr;
9126 set_buffer_entries(&tr->array_buffer,
9127 ring_buffer_size(tr->array_buffer.buffer, 0));
9142 static int allocate_trace_buffers(struct trace_array *tr, int size)
9146 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9151 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9154 free_trace_buffer(&tr->array_buffer);
9157 tr->allocated_snapshot = allocate_snapshot;
9165 static void free_trace_buffers(struct trace_array *tr)
9167 if (!tr)
9170 free_trace_buffer(&tr->array_buffer);
9173 free_trace_buffer(&tr->max_buffer);
9177 static void init_trace_flags_index(struct trace_array *tr)
9183 tr->trace_flags_index[i] = i;
9186 static void __update_tracer_options(struct trace_array *tr)
9191 add_tracer_options(tr, t);
9194 static void update_tracer_options(struct trace_array *tr)
9198 __update_tracer_options(tr);
9205 struct trace_array *tr, *found = NULL;
9207 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9208 if (tr->name && strcmp(tr->name, instance) == 0) {
9209 found = tr;
9219 struct trace_array *tr;
9222 tr = trace_array_find(instance);
9223 if (tr)
9224 tr->ref++;
9227 return tr;
9230 static int trace_array_create_dir(struct trace_array *tr)
9234 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9235 if (!tr->dir)
9238 ret = event_trace_add_tracer(tr->dir, tr);
9240 tracefs_remove(tr->dir);
9244 init_tracer_tracefs(tr, tr->dir);
9245 __update_tracer_options(tr);
9253 struct trace_array *tr;
9257 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9258 if (!tr)
9261 tr->name = kstrdup(name, GFP_KERNEL);
9262 if (!tr->name)
9265 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9268 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
9272 tr->system_names = kstrdup_const(systems, GFP_KERNEL);
9273 if (!tr->system_names)
9277 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9279 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9281 raw_spin_lock_init(&tr->start_lock);
9283 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9285 spin_lock_init(&tr->snapshot_trigger_lock);
9287 tr->current_trace = &nop_trace;
9289 INIT_LIST_HEAD(&tr->systems);
9290 INIT_LIST_HEAD(&tr->events);
9291 INIT_LIST_HEAD(&tr->hist_vars);
9292 INIT_LIST_HEAD(&tr->err_log);
9294 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9298 trace_set_ring_buffer_expanded(tr);
9300 if (ftrace_allocate_ftrace_ops(tr) < 0)
9303 ftrace_init_trace_array(tr);
9305 init_trace_flags_index(tr);
9308 ret = trace_array_create_dir(tr);
9312 __trace_early_add_events(tr);
9314 list_add(&tr->list, &ftrace_trace_arrays);
9316 tr->ref++;
9318 return tr;
9321 ftrace_free_ftrace_ops(tr);
9322 free_trace_buffers(tr);
9323 free_cpumask_var(tr->pipe_cpumask);
9324 free_cpumask_var(tr->tracing_cpumask);
9325 kfree_const(tr->system_names);
9326 kfree(tr->name);
9327 kfree(tr);
9339 struct trace_array *tr;
9349 tr = trace_array_create(name);
9351 ret = PTR_ERR_OR_ZERO(tr);
9378 struct trace_array *tr;
9383 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9384 if (tr->name && strcmp(tr->name, name) == 0)
9388 tr = trace_array_create_systems(name, systems);
9390 if (IS_ERR(tr))
9391 tr = NULL;
9393 if (tr)
9394 tr->ref++;
9398 return tr;
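
trace_array_get_by_name() either finds an existing named instance or creates it, returning with the refcount held; modules pair it with trace_array_put() and, if they created the instance, trace_array_destroy(). A hedged sketch in the spirit of samples/ftrace/sample-trace-array.c:

    #include <linux/module.h>
    #include <linux/trace.h>

    static struct trace_array *tr;

    static int __init my_init(void)
    {
            /* appears as /sys/kernel/tracing/instances/sample-instance */
            tr = trace_array_get_by_name("sample-instance", NULL);
            if (!tr)
                    return -ENOMEM;

            /* enable one event in this instance only */
            trace_array_set_clr_event(tr, "sched", "sched_switch", true);
            return 0;
    }

    static void __exit my_exit(void)
    {
            trace_array_set_clr_event(tr, "sched", "sched_switch", false);
            trace_array_put(tr);            /* drop our reference ... */
            trace_array_destroy(tr);        /* ... then remove the instance */
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");
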
9402 static int __remove_instance(struct trace_array *tr)
9407 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9410 list_del(&tr->list);
9415 set_tracer_flag(tr, 1 << i, 0);
9418 tracing_set_nop(tr);
9419 clear_ftrace_function_probes(tr);
9420 event_trace_del_tracer(tr);
9421 ftrace_clear_pids(tr);
9422 ftrace_destroy_function_files(tr);
9423 tracefs_remove(tr->dir);
9424 free_percpu(tr->last_func_repeats);
9425 free_trace_buffers(tr);
9426 clear_tracing_err_log(tr);
9428 for (i = 0; i < tr->nr_topts; i++) {
9429 kfree(tr->topts[i].topts);
9431 kfree(tr->topts);
9433 free_cpumask_var(tr->pipe_cpumask);
9434 free_cpumask_var(tr->tracing_cpumask);
9435 kfree_const(tr->system_names);
9436 kfree(tr->name);
9437 kfree(tr);
9444 struct trace_array *tr;
9456 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9457 if (tr == this_tr) {
9458 ret = __remove_instance(tr);
9472 struct trace_array *tr;
9479 tr = trace_array_find(name);
9480 if (tr)
9481 ret = __remove_instance(tr);
9491 struct trace_array *tr;
9502 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9503 if (!tr->name)
9505 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9515 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9520 tr, &show_traces_fops);
9523 tr, &set_tracer_fops);
9526 tr, &tracing_cpumask_fops);
9529 tr, &tracing_iter_fops);
9532 tr, &tracing_fops);
9535 tr, &tracing_pipe_fops);
9538 tr, &tracing_entries_fops);
9541 tr, &tracing_total_entries_fops);
9544 tr, &tracing_free_buffer_fops);
9547 tr, &tracing_mark_fops);
9549 tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");
9552 tr, &tracing_mark_raw_fops);
9554 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9558 tr, &rb_simple_fops);
9560 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9563 tr->buffer_percent = 50;
9566 tr, &buffer_percent_fops);
9569 tr, &buffer_subbuf_size_fops);
9571 create_trace_options_dir(tr);
9574 trace_create_maxlat_file(tr, d_tracer);
9577 if (ftrace_create_function_files(tr, d_tracer))
9582 tr, &snapshot_fops);
9586 tr, &tracing_err_log_fops);
9589 tracing_init_tracefs_percpu(tr, cpu);
9591 ftrace_init_tracefs(tr, d_tracer);
9625 struct trace_array *tr = &global_trace;
9633 if (tr->dir)
9645 tr->dir = debugfs_create_automount("tracing", NULL,
9899 static void trace_init_iter(struct trace_iterator *iter, struct trace_array *tr)
9901 iter->tr = tr;
9902 iter->trace = iter->tr->current_trace;
9904 iter->array_buffer = &tr->array_buffer;
9914 if (trace_clocks[iter->tr->clock_id].in_ns)
9929 static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_mode)
9945 tracer_tracing_off(tr);
9950 trace_init_iter(&iter, tr);
9956 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9959 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9966 if (tr == &global_trace)
9969 printk(KERN_TRACE "Dumping ftrace instance %s buffer:\n", tr->name);
10011 tr->trace_flags |= old_userobj;
10024 struct trace_array *tr;
10046 tr = trace_array_find(inst_name);
10047 if (!tr) {
10054 ftrace_dump_one(tr, DUMP_ORIG);
10056 ftrace_dump_one(tr, DUMP_ALL);
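
ftrace_dump_one() is the worker behind the exported ftrace_dump(); the ftrace_dump_on_oops parsing above routes a dump to every instance, one named instance, or just the buffers of interest, with DUMP_ORIG limiting output to the triggering CPU. A last-resort sketch from an unrecoverable error path (my_fatal_condition is hypothetical):

    if (unlikely(my_fatal_condition)) {
            ftrace_dump(DUMP_ORIG);  /* console-dump the current CPU's buffer */
            BUG();
    }
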
10201 struct trace_array *tr;
10217 tr = trace_array_get_by_name(tok, NULL);
10218 if (!tr) {
10223 trace_array_put(tr);
10226 early_enable_events(tr, tok, true);
10371 struct trace_array *tr;
10376 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
10377 if (!tr->allocated_snapshot)
10380 tracing_snapshot_instance(tr);
10381 trace_array_puts(tr, "** Boot snapshot taken **\n");