Lines Matching refs:ret_stack

77 if (!current->ret_stack) in ftrace_push_return_trace()
96 current->ret_stack[index].ret = ret; in ftrace_push_return_trace()
97 current->ret_stack[index].func = func; in ftrace_push_return_trace()
98 current->ret_stack[index].calltime = calltime; in ftrace_push_return_trace()
100 current->ret_stack[index].fp = frame_pointer; in ftrace_push_return_trace()
103 current->ret_stack[index].retp = retp; in ftrace_push_return_trace()
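The references above (lines 77-103) are the entry-side push path of the function graph tracer; these helpers live in kernel/trace/fgraph.c in recent mainline kernels. One frame is recorded per nested call. A minimal sketch of the record being filled in, with field names taken from the references and types assumed from mainline include/linux/ftrace.h (where fp and retp only exist under HAVE_FUNCTION_GRAPH_FP_TEST and HAVE_FUNCTION_GRAPH_RET_ADDR_PTR):

    /* Per-frame record the push path fills in, one slot per
     * nested call on the task's ret_stack array. */
    struct ftrace_ret_stack {
            unsigned long      ret;      /* original return address */
            unsigned long      func;     /* address of the traced function */
            unsigned long long calltime; /* timestamp taken at entry */
            unsigned long      fp;       /* frame pointer, for the exit-time check */
            unsigned long      retp;     /* stack slot holding the return address */
    };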
188 if (unlikely(current->ret_stack[index].fp != frame_pointer)) { in ftrace_pop_return_trace()
192 current->ret_stack[index].fp, in ftrace_pop_return_trace()
194 (void *)current->ret_stack[index].func, in ftrace_pop_return_trace()
195 current->ret_stack[index].ret); in ftrace_pop_return_trace()
201 *ret = current->ret_stack[index].ret; in ftrace_pop_return_trace()
202 trace->func = current->ret_stack[index].func; in ftrace_pop_return_trace()
203 trace->calltime = current->ret_stack[index].calltime; in ftrace_pop_return_trace()
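Lines 188-203 are the exit-side pop. Before a frame is trusted, the frame pointer saved at entry is compared with the one seen at exit; a mismatch means the return stack is corrupt, and tracing is shut down rather than returning to a bogus address. A condensed sketch of that check (the WARN wording and ftrace_graph_stop() call follow mainline; treat this as an illustration, not the verbatim function body):

    if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
            ftrace_graph_stop();
            WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                 "  from func %ps return to %lx\n",
                 current->ret_stack[index].fp, frame_pointer,
                 (void *)current->ret_stack[index].func,
                 current->ret_stack[index].ret);
            *ret = (unsigned long)panic; /* fail loudly, not silently */
            return;
    }
    *ret = current->ret_stack[index].ret;
    trace->func = current->ret_stack[index].func;
    trace->calltime = current->ret_stack[index].calltime;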
286 return &task->ret_stack[idx]; in ftrace_graph_get_ret_stack()
320 if (task->ret_stack[i].retp == retp) in ftrace_graph_ret_addr()
321 return task->ret_stack[i].ret; in ftrace_graph_ret_addr()
336 if (!task->ret_stack || task_idx < *idx) in ftrace_graph_ret_addr()
342 return task->ret_stack[task_idx].ret; in ftrace_graph_ret_addr()
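Lines 320-342 are ftrace_graph_ret_addr(), which stack unwinders use to translate the return trampoline's address back into the real return address. The references show both lookup strategies: an exact match on the recorded retp (320-321) and an index-based fallback (336-342). Assumed shape of the retp scan, with curr_ret_stack as the task's top-of-stack index per mainline task_struct:

    /* Scan the live frames for the slot whose recorded retp matches
     * the stack location the unwinder is reading, and hand back the
     * original address saved there. Simplified; bounds checks elided. */
    for (i = task->curr_ret_stack; i >= 0; i--)
            if (task->ret_stack[i].retp == retp)
                    return task->ret_stack[i].ret;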
407 if (t->ret_stack == NULL) { in alloc_retstack_tasklist()
413 t->ret_stack = ret_stack_list[start++]; in alloc_retstack_tasklist()
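Lines 407-413 sit in the start-up path: stacks are preallocated into ret_stack_list[] first, then handed out while walking the task list, so no allocation happens under tasklist_lock. An assumed sketch of that walk (do_each_thread/while_each_thread and the write barrier follow mainline of this era):

    do_each_thread(g, t) {
            if (t->ret_stack == NULL) {
                    t->curr_ret_stack = -1;
                    /* make sure the index reset is seen before the stack */
                    smp_wmb();
                    t->ret_stack = ret_stack_list[start++];
            }
    } while_each_thread(g, t);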
456 next->ret_stack[index].calltime += timestamp; in ftrace_graph_probe_sched_switch()
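Line 456 is the scheduler hook: when a traced task is scheduled back in, the time it spent off-CPU is added to every pending frame's calltime, so that sleep time is not billed to the functions still on its return stack. Assumed surrounding loop, with ftrace_timestamp recorded at switch-out as in mainline:

    /* timestamp = now minus the moment the task was switched out */
    timestamp -= next->ftrace_timestamp;
    for (index = next->curr_ret_stack; index >= 0; index--)
            next->ret_stack[index].calltime += timestamp;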
502 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) in graph_init_task() argument
508 t->ret_stack = ret_stack; in graph_init_task()
523 if (t->ret_stack) in ftrace_graph_init_idle_task()
524 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu)); in ftrace_graph_init_idle_task()
527 struct ftrace_ret_stack *ret_stack; in ftrace_graph_init_idle_task() local
529 ret_stack = per_cpu(idle_ret_stack, cpu); in ftrace_graph_init_idle_task()
530 if (!ret_stack) { in ftrace_graph_init_idle_task()
531 ret_stack = in ftrace_graph_init_idle_task()
535 if (!ret_stack) in ftrace_graph_init_idle_task()
537 per_cpu(idle_ret_stack, cpu) = ret_stack; in ftrace_graph_init_idle_task()
539 graph_init_task(t, ret_stack); in ftrace_graph_init_idle_task()
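Lines 523-539 are the idle-task variant. Idle tasks are never freed, so each CPU caches a single stack in the per-CPU idle_ret_stack slot and reuses it across tracer restarts. The allocation wrapped across lines 531-535 is spelled out below; the kmalloc_array arguments are assumed to match the regular per-task path:

    ret_stack = per_cpu(idle_ret_stack, cpu);
    if (!ret_stack) {
            ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
                                      sizeof(struct ftrace_ret_stack),
                                      GFP_KERNEL);
            if (!ret_stack)
                    return;
            per_cpu(idle_ret_stack, cpu) = ret_stack;
    }
    graph_init_task(t, ret_stack);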
547 t->ret_stack = NULL; in ftrace_graph_init_task()
552 struct ftrace_ret_stack *ret_stack; in ftrace_graph_init_task() local
554 ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH, in ftrace_graph_init_task()
557 if (!ret_stack) in ftrace_graph_init_task()
559 graph_init_task(t, ret_stack); in ftrace_graph_init_task()
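Lines 547-559 run at fork: the child's pointer is cleared first so it never aliases the parent's stack, and a fresh fixed-depth array is allocated only while graph tracing is active. A sketch under those assumptions (the ftrace_graph_active guard and FTRACE_RETFUNC_DEPTH, 50 in mainline, are taken from the mainline version):

    void ftrace_graph_init_task(struct task_struct *t)
    {
            t->ret_stack = NULL; /* never inherit the parent's stack */

            if (ftrace_graph_active) {
                    struct ftrace_ret_stack *ret_stack;

                    ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
                                              sizeof(struct ftrace_ret_stack),
                                              GFP_KERNEL);
                    if (!ret_stack)
                            return;
                    graph_init_task(t, ret_stack);
            }
    }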
565 struct ftrace_ret_stack *ret_stack = t->ret_stack; in ftrace_graph_exit_task() local
567 t->ret_stack = NULL; in ftrace_graph_exit_task()
571 kfree(ret_stack); in ftrace_graph_exit_task()
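Lines 565-571 are the teardown. The pointer is detached before the kfree(), and the gap between 567 and 571 in the listing is a compiler barrier in mainline: the NULL store must be visible to interrupts before the memory is freed, or an IRQ could push a frame onto a freed stack. Assumed shape:

    void ftrace_graph_exit_task(struct task_struct *t)
    {
            struct ftrace_ret_stack *ret_stack = t->ret_stack;

            t->ret_stack = NULL;
            /* NULL must be visible to IRQs before the stack is freed */
            barrier();

            kfree(ret_stack);
    }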
589 if (!idle_task(cpu)->ret_stack) in start_graph_tracing()
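Line 589 closes the loop: when graph tracing starts, any online CPU whose idle task still lacks a return stack is routed through the idle-init path shown earlier. Assumed surrounding loop, per mainline:

    for_each_online_cpu(cpu) {
            if (!idle_task(cpu)->ret_stack)
                    ftrace_graph_init_idle_task(idle_task(cpu), cpu);
    }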