Lines 394-400
@@ -394,7 +394,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 		}
 
 		if (t->ret_stack == NULL) {
-			atomic_set(&t->tracing_graph_pause, 0);
 			atomic_set(&t->trace_overrun, 0);
 			t->curr_ret_stack = -1;
 			t->curr_ret_depth = -1;
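For readability, this is how the touched block of alloc_retstack_tasklist() reads once the hunk above is applied; only the lines visible in the hunk are reproduced, the rest of the block is elided:

		if (t->ret_stack == NULL) {
			/* tracing_graph_pause is no longer reset here */
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			t->curr_ret_depth = -1;
			/* ... remainder of the block lies outside this hunk ... */
		}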
Lines 489-495
@@ -489,7 +488,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
 static void
 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
 {
-	atomic_set(&t->tracing_graph_pause, 0);
 	atomic_set(&t->trace_overrun, 0);
 	t->ftrace_timestamp = 0;
 	/* make curr_ret_stack visible before we add the ret_stack */
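Taken together, the two hunks drop the per-registration reset of tracing_graph_pause from both ret_stack set-up paths, so the counter must be initialized elsewhere, presumably once when the task itself is created. That counterpart change is not part of this excerpt; the following is only a minimal sketch of one plausible placement, assuming the counter is seeded in the static init_task initializer (the field name comes from the hunks above; the file and placement are assumptions):

	/* init/init_task.c -- hypothetical placement, not shown in this diff */
	#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		.tracing_graph_pause	= ATOMIC_INIT(0),
	#endif

Newly forked tasks would presumably be covered in the task-creation/fork path as well, which is also outside this excerpt.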