// SPDX-License-Identifier: GPL-2.0
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
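
/*
 * Example usage (a sketch; assumes tracefs is mounted at the usual
 * /sys/kernel/tracing):
 *
 *	cd /sys/kernel/tracing
 *	echo 0 > tracing_max_latency
 *	echo wakeup_rt > current_tracer
 *	echo 1 > tracing_on
 *	# ... run an RT workload ...
 *	echo 0 > tracing_on
 *	cat trace
 *
 * tracing_max_latency then holds the worst wakeup latency seen, and
 * "trace" shows the events recorded for that maximum.
 */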
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>
#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static bool			wakeup_rt;
static bool			wakeup_dl;
static bool			tracing_dl;

static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
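
/*
 * wakeup_lock is a raw arch_spinlock_t rather than a normal spinlock:
 * it is taken from scheduler tracepoints and function-tracer callbacks,
 * where a lockdep-tracked lock could recurse back into the tracer.
 * Callers must disable interrupts themselves before taking it.
 */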

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int start_func_tracer(struct trace_array *tr, int graph);
static void stop_func_tracer(struct trace_array *tr, int graph);

static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
# define is_graph(tr) false
#endif

#ifdef CONFIG_FUNCTION_TRACER

static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 *            is disabled and data->disabled is incremented.
 *         0 if the trace is to be ignored, and preemption
 *            is not disabled and data->disabled is
 *            kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    unsigned int *trace_ctx)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*trace_ctx = tracing_gen_ctx();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}
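
/*
 * Typical caller pattern (see wakeup_tracer_call() and the graph
 * entry/return handlers below):
 *
 *	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
 *		return;
 *	... record the event ...
 *	atomic_dec(&data->disabled);
 *	preempt_enable_notrace();
 */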

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static int wakeup_display_graph(struct trace_array *tr, int set)
{
	if (!(is_graph(tr) ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int ret = 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return 0;

	ret = __trace_graph_entry(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;

	ftrace_graph_addr_finish(trace);

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return;

	__trace_graph_return(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
}

static struct fgraph_ops fgraph_wakeup_ops = {
	.entryfunc = &wakeup_graph_entry,
	.retfunc = &wakeup_graph_return,
};

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
	else
		iter->private = NULL;
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_CPU |  \
			    TRACE_GRAPH_PRINT_REL_TIME | \
			    TRACE_GRAPH_PRINT_DURATION | \
			    TRACE_GRAPH_PRINT_OVERHEAD | \
			    TRACE_GRAPH_PRINT_IRQS)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph(wakeup_trace))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, trace_ctx);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&fgraph_wakeup_ops);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph(&fgraph_wakeup_ops);
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_wakeup_function(tr, is_graph(tr), 1);
	else
		unregister_wakeup_function(tr, is_graph(tr));
	return 1;
}
#else /* CONFIG_FUNCTION_TRACER */
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* else CONFIG_FUNCTION_TRACER */

#ifndef CONFIG_FUNCTION_GRAPH_TRACER
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned int trace_ctx)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, trace_ctx);
	else
		trace_function(tr, ip, parent_ip, trace_ctx);
}

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (wakeup_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return wakeup_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}
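
/*
 * For illustration: with CONFIG_FUNCTION_GRAPH_TRACER, the output style
 * can be toggled at runtime through the trace options, e.g.:
 *
 *	echo 1 > /sys/kernel/tracing/options/display-graph
 *
 * which arrives via wakeup_flag_changed() above and, through
 * wakeup_display_graph(), restarts the function tracer in graph mode.
 */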

static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
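
/*
 * For example: with tracing_thresh unset (0), a delta is recorded only
 * when it exceeds the current tr->max_latency; with tracing_thresh set,
 * every delta at or above the threshold is recorded, regardless of the
 * current maximum.
 */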

static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_context_switch;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= prev->pid;
	entry->prev_prio		= prev->prio;
	entry->prev_state		= task_state_index(prev);
	entry->next_pid			= next->pid;
	entry->next_prio		= next->prio;
	entry->next_state		= task_state_index(next);
	entry->next_cpu			= task_cpu(next);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}

static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct trace_buffer *buffer = tr->array_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= curr->pid;
	entry->prev_prio		= curr->prio;
	entry->prev_state		= task_state_index(curr);
	entry->next_pid			= wakee->pid;
	entry->next_prio		= wakee->prio;
	entry->next_state		= task_state_index(wakee);
	entry->next_cpu			= task_cpu(wakee);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}
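
/*
 * Both record functions above follow the usual ring-buffer sequence:
 * reserve an event, fill in the ctx_switch_entry payload, and commit it
 * (unless event filtering discards it). A failed reserve (e.g. a full
 * buffer with overwrite disabled) simply drops the event.
 */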

static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
			  struct task_struct *prev, struct task_struct *next,
			  unsigned int prev_state)
{
	struct trace_array_cpu *data;
	u64 T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see tracer_enabled = 1 paired
	 * with a stale wakeup_task that might actually be the same
	 * as next. This smp_rmb() pairs with the smp_wmb() in
	 * start_wakeup_tracer().
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	local_irq_save(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);

	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, trace_ctx);
	tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx);
	__trace_stack(wakeup_trace, trace_ctx, 0);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}
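
/*
 * To summarize the measurement above: T0 is data->preempt_timestamp,
 * recorded in probe_wakeup() when the candidate task was woken; T1 is
 * taken here, when that task is finally switched in. delta = T1 - T0
 * is the wakeup latency that report_latency() checks.
 */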

static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = false;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->array_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

static void
probe_wakeup(void *ignore, struct task_struct *p)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	long disabled;
	unsigned int trace_ctx;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * The semantics are as follows:
	 *  - the wakeup tracer handles all tasks in the system,
	 *    independently of their scheduling class;
	 *  - the wakeup_rt tracer handles tasks belonging to the sched_dl
	 *    and sched_rt classes;
	 *  - wakeup_dl handles tasks belonging to the sched_dl class only.
	 */
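	/*
	 * For illustration, the composite check below rejects, in order:
	 * any wakeup while a -deadline task is already being traced,
	 * non-deadline tasks when in wakeup_dl mode, non-RT tasks when in
	 * wakeup_rt mode, and tasks that do not have a strictly higher
	 * priority (lower prio value) than both the current candidate
	 * (wakeup_prio) and the waking task (current->prio).
	 */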
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	trace_ctx = tracing_gen_ctx();

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	tracing_dl = dl_task(p);

	wakeup_task = get_task_struct(p);

	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, trace_ctx);
	__trace_stack(wakeup_trace, trace_ctx, 0);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, trace_ctx);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}

static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint probe to kernel_sched_migrate_task\n");
		goto fail_deprobe_sched_switch;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(tr, is_graph(tr)))
		pr_err("failed to start wakeup tracer\n");

	return;
fail_deprobe_sched_switch:
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}
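
/*
 * Note the unwind ladder above: on failure, probes are unregistered in
 * reverse order of registration, so a partial start leaves no stale
 * tracepoint probes behind.
 */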

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(tr, is_graph(tr));
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

static bool wakeup_busy;

static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = tr->trace_flags;

	/* non-overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	wakeup_trace = tr;
	ftrace_init_array_ops(tr, wakeup_tracer_call);
	start_wakeup_tracer(tr);

	wakeup_busy = true;
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = false;
	wakeup_rt = false;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = false;
	wakeup_rt = true;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = true;
	wakeup_rt = false;
	return __wakeup_tracer_init(tr);
}
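
/*
 * All three init variants share __wakeup_tracer_init() and differ only
 * in the wakeup_dl/wakeup_rt flags that probe_wakeup() consults when
 * filtering candidate tasks. Selection from user space is by name, e.g.:
 *
 *	echo wakeup_dl > /sys/kernel/tracing/current_tracer
 */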

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);
	wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name		= "wakeup_dl",
	.init		= wakeup_dl_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);