Patch for bug 211339 (unified diff)

--- a/init/init_task.c
+++ b/init/init_task.c
@@ -199,6 +199,7 @@ struct task_struct init_task
 #endif
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	.ret_stack	= NULL,
+	.tracing_graph_pause	= ATOMIC_INIT(0),
 #endif
 #if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION)
 	.trace_recursion = 0,
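
For readability, here is roughly how the touched block of the init_task initializer reads with the hunk above applied. This is only a reconstruction from the context lines shown in the diff; the surrounding fields of the initializer are omitted.

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.ret_stack		= NULL,
	/* Initialize tracing_graph_pause statically when init_task is
	 * created, instead of re-clearing it from fgraph.c (see the
	 * removals below in kernel/trace/fgraph.c).
	 */
	.tracing_graph_pause	= ATOMIC_INIT(0),
#endif
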
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -394,7 +394,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 		}
 
 		if (t->ret_stack == NULL) {
-			atomic_set(&t->tracing_graph_pause, 0);
 			atomic_set(&t->trace_overrun, 0);
 			t->curr_ret_stack = -1;
 			t->curr_ret_depth = -1;
@@ -489,7 +488,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
 static void
 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
 {
-	atomic_set(&t->tracing_graph_pause, 0);
 	atomic_set(&t->trace_overrun, 0);
 	t->ftrace_timestamp = 0;
 	/* make curr_ret_stack visible before we add the ret_stack */
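
And this is roughly how graph_init_task() begins once both fgraph.c hunks are applied, again reconstructed only from the context lines above; the remainder of the function is unchanged and omitted.

static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	/* tracing_graph_pause is no longer cleared here (nor in
	 * alloc_retstack_tasklist() above); the patch instead sets it
	 * via ATOMIC_INIT(0) in init/init_task.c, see the first hunk.
	 */
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */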
