ret_stack          88 arch/arm64/kernel/stacktrace.c 	if (tsk->ret_stack &&
ret_stack          90 arch/arm64/kernel/stacktrace.c 		struct ftrace_ret_stack *ret_stack;
ret_stack          97 arch/arm64/kernel/stacktrace.c 		ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
ret_stack          98 arch/arm64/kernel/stacktrace.c 		if (WARN_ON_ONCE(!ret_stack))
ret_stack         100 arch/arm64/kernel/stacktrace.c 		frame->pc = ret_stack->ret;
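
The arm64 hits above show the consumer pattern that every unwinder in this listing repeats: when a stack walk lands on return_to_handler (the trampoline the graph tracer patched into the return slot), the real return address has to be fetched back from the task's shadow stack. A condensed sketch of that fix-up, reconstructed from the fragments above (frame->pc and frame->graph are the arm64 unwind-state fields; surrounding error handling is abbreviated and may differ between kernel versions). The sh and sparc hits that follow apply the same fix-up in their own unwinders and perf callchain code:

        if (tsk->ret_stack &&
            (frame->pc == (unsigned long)return_to_handler)) {
                struct ftrace_ret_stack *ret_stack;

                /*
                 * The graph tracer hijacked this frame's return address;
                 * recover the original, consuming one shadow-stack entry
                 * per trampoline encountered (frame->graph counts them).
                 */
                ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
                if (WARN_ON_ONCE(!ret_stack))
                        return -EINVAL;
                frame->pc = ret_stack->ret;
        }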
ret_stack          59 arch/sh/kernel/dumpstack.c 	struct ftrace_ret_stack *ret_stack;
ret_stack          65 arch/sh/kernel/dumpstack.c 	if (!task->ret_stack)
ret_stack          68 arch/sh/kernel/dumpstack.c 	ret_stack = ftrace_graph_get_ret_stack(task, *graph);
ret_stack          69 arch/sh/kernel/dumpstack.c 	if (!ret_stack)
ret_stack          72 arch/sh/kernel/dumpstack.c 	ret_addr = ret_stack->ret;
ret_stack         608 arch/sh/kernel/dwarf.c 		struct ftrace_ret_stack *ret_stack;
ret_stack         610 arch/sh/kernel/dwarf.c 		ret_stack = ftrace_graph_get_ret_stack(current, 0);
ret_stack         611 arch/sh/kernel/dwarf.c 		if (ret_stack)
ret_stack         612 arch/sh/kernel/dwarf.c 			pc = ret_stack->ret;
ret_stack        1774 arch/sparc/kernel/perf_event.c 			struct ftrace_ret_stack *ret_stack;
ret_stack        1775 arch/sparc/kernel/perf_event.c 			ret_stack = ftrace_graph_get_ret_stack(current,
ret_stack        1777 arch/sparc/kernel/perf_event.c 			if (ret_stack) {
ret_stack        1778 arch/sparc/kernel/perf_event.c 				pc = ret_stack->ret;
ret_stack          61 arch/sparc/kernel/stacktrace.c 				struct ftrace_ret_stack *ret_stack;
ret_stack          62 arch/sparc/kernel/stacktrace.c 				ret_stack = ftrace_graph_get_ret_stack(t,
ret_stack          64 arch/sparc/kernel/stacktrace.c 				if (ret_stack) {
ret_stack          65 arch/sparc/kernel/stacktrace.c 					pc = ret_stack->ret;
ret_stack        2503 arch/sparc/kernel/traps_64.c 			struct ftrace_ret_stack *ret_stack;
ret_stack        2504 arch/sparc/kernel/traps_64.c 			ret_stack = ftrace_graph_get_ret_stack(tsk, graph);
ret_stack        2505 arch/sparc/kernel/traps_64.c 			if (ret_stack) {
ret_stack        2506 arch/sparc/kernel/traps_64.c 				pc = ret_stack->ret;
ret_stack        1184 include/linux/sched.h 	struct ftrace_ret_stack		*ret_stack;
ret_stack         173 init/init_task.c 	.ret_stack	= NULL,
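
These two hits are the storage side: each task_struct carries a ret_stack pointer, NULL until graph tracing allocates it (init_task starts out NULL, as the init_task.c hit shows). The element type lives in include/linux/ftrace.h; it is reconstructed here from the fields the fgraph.c hits below assign, with the config guards as in mainline of this era (consult the header for the authoritative definition):

        struct ftrace_ret_stack {
                unsigned long ret;              /* original return address */
                unsigned long func;             /* traced function */
                unsigned long long calltime;    /* entry timestamp */
        #ifdef CONFIG_FUNCTION_PROFILER
                unsigned long long subtime;     /* children's time to subtract */
        #endif
        #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
                unsigned long fp;               /* frame pointer sanity check */
        #endif
        #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
                unsigned long *retp;            /* stack slot ret was taken from */
        #endif
        };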
ret_stack          68 kernel/trace/fgraph.c 	if (!current->ret_stack)
ret_stack          87 kernel/trace/fgraph.c 	current->ret_stack[index].ret = ret;
ret_stack          88 kernel/trace/fgraph.c 	current->ret_stack[index].func = func;
ret_stack          89 kernel/trace/fgraph.c 	current->ret_stack[index].calltime = calltime;
ret_stack          91 kernel/trace/fgraph.c 	current->ret_stack[index].fp = frame_pointer;
ret_stack          94 kernel/trace/fgraph.c 	current->ret_stack[index].retp = retp;
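
Lines 68-94 of fgraph.c are the push side: ftrace_push_return_trace() records one shadow-stack entry at function entry, just before the arch code overwrites the real return address with return_to_handler. A condensed sketch; the overflow check and barriers the grep skipped are restored from context and may differ in detail between versions:

        static int
        ftrace_push_return_trace(unsigned long ret, unsigned long func,
                                 unsigned long frame_pointer, unsigned long *retp)
        {
                unsigned long long calltime;
                int index;

                if (!current->ret_stack)        /* tracing not set up for task */
                        return -EBUSY;

                /* Make sure ret_stack is tested before anything else is read. */
                smp_rmb();

                /* The return trace stack is full. */
                if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                        atomic_inc(&current->trace_overrun);
                        return -EBUSY;
                }

                calltime = trace_clock_local();

                index = ++current->curr_ret_stack;
                barrier();
                current->ret_stack[index].ret = ret;    /* real return address */
                current->ret_stack[index].func = func;
                current->ret_stack[index].calltime = calltime;
        #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
                current->ret_stack[index].fp = frame_pointer;
        #endif
        #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
                current->ret_stack[index].retp = retp;
        #endif
                return 0;
        }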
ret_stack         154 kernel/trace/fgraph.c 	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
ret_stack         158 kernel/trace/fgraph.c 		     current->ret_stack[index].fp,
ret_stack         160 kernel/trace/fgraph.c 		     (void *)current->ret_stack[index].func,
ret_stack         161 kernel/trace/fgraph.c 		     current->ret_stack[index].ret);
ret_stack         167 kernel/trace/fgraph.c 	*ret = current->ret_stack[index].ret;
ret_stack         168 kernel/trace/fgraph.c 	trace->func = current->ret_stack[index].func;
ret_stack         169 kernel/trace/fgraph.c 	trace->calltime = current->ret_stack[index].calltime;
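
The pop side: ftrace_pop_return_trace() runs from return_to_handler, hands the recorded entry back to the tracer, and on arches with HAVE_FUNCTION_GRAPH_FP_TEST cross-checks the saved frame pointer to catch a shadow stack that no longer pairs with the real one. A condensed sketch (the index bounds check and the overrun/depth fields of the real function are elided):

        static void
        ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
                                unsigned long frame_pointer)
        {
                int index = current->curr_ret_stack;

        #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
                /*
                 * A mismatched frame pointer means the shadow stack is out of
                 * step with the real one; stop tracing rather than jump to a
                 * stale return address.
                 */
                if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
                        ftrace_graph_stop();
                        WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                             "  from func %ps return to %lx\n",
                             current->ret_stack[index].fp,
                             frame_pointer,
                             (void *)current->ret_stack[index].func,
                             current->ret_stack[index].ret);
                        *ret = (unsigned long)panic;    /* nowhere sane to go */
                        return;
                }
        #endif

                *ret = current->ret_stack[index].ret;   /* where to really return */
                trace->func = current->ret_stack[index].func;
                trace->calltime = current->ret_stack[index].calltime;
        }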
ret_stack         252 kernel/trace/fgraph.c 		return &task->ret_stack[idx];
ret_stack         286 kernel/trace/fgraph.c 		if (task->ret_stack[i].retp == retp)
ret_stack         287 kernel/trace/fgraph.c 			return task->ret_stack[i].ret;
ret_stack         302 kernel/trace/fgraph.c 	if (!task->ret_stack || task_idx < *idx)
ret_stack         308 kernel/trace/fgraph.c 	return task->ret_stack[task_idx].ret;
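
Three accessors sit around lines 252-308. ftrace_graph_get_ret_stack(task, idx) returns the idx-th entry from the top of the shadow stack (idx 0 is the current function, 1 its parent); it is what every arch caller above uses. ftrace_graph_ret_addr() comes in two flavors: with HAVE_FUNCTION_GRAPH_RET_ADDR_PTR the arch passes the address of the return slot (retp) and the entry is found by exact match (lines 286-287); without it, the caller-maintained *idx counter walks entries in order (lines 302-308), which is less robust if the stack walk and shadow stack fall out of step. A sketch of the retp flavor, restored from context:

        unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                            unsigned long ret, unsigned long *retp)
        {
                int index = task->curr_ret_stack;
                int i;

                /* *idx is only consumed by the fallback variant. */
                if (ret != (unsigned long)return_to_handler)
                        return ret;     /* not a hijacked return address */

                if (index < 0)
                        return ret;

                /* Match on the stack slot the address was saved from. */
                for (i = 0; i <= index; i++)
                        if (task->ret_stack[i].retp == retp)
                                return task->ret_stack[i].ret;

                return ret;
        }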
ret_stack         369 kernel/trace/fgraph.c 		if (t->ret_stack == NULL) {
ret_stack         376 kernel/trace/fgraph.c 			t->ret_stack = ret_stack_list[start++];
ret_stack         417 kernel/trace/fgraph.c 		next->ret_stack[index].calltime += timestamp;
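
Two maintenance paths. Lines 369-376 are alloc_retstack_tasklist(), which hands preallocated stacks to every already-running task when tracing is switched on. Line 417 is the sched-switch hook: unless the user asked for sleep time to be counted (fgraph_sleep_time), the time a task spent off-CPU is added to the calltime of every open entry so it drops out of the reported durations. A sketch of that adjustment, reconstructed from ftrace_graph_probe_sched_switch() (ftrace_timestamp is the per-task stamp field; details may vary by version):

        static void
        ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
                                        struct task_struct *prev,
                                        struct task_struct *next)
        {
                unsigned long long timestamp;
                int index;

                /* The user may want sleep time counted; leave stamps alone. */
                if (fgraph_sleep_time)
                        return;

                timestamp = trace_clock_local();
                prev->ftrace_timestamp = timestamp;

                /* Only process tasks that were stamped when scheduled out. */
                if (!next->ftrace_timestamp)
                        return;

                /*
                 * Push every open entry's calltime forward by the time 'next'
                 * spent off-CPU, so sleep time is not charged to its functions.
                 */
                timestamp -= next->ftrace_timestamp;
                for (index = next->curr_ret_stack; index >= 0; index--)
                        next->ret_stack[index].calltime += timestamp;
        }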
ret_stack         463 kernel/trace/fgraph.c graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
ret_stack         470 kernel/trace/fgraph.c 	t->ret_stack = ret_stack;
ret_stack         485 kernel/trace/fgraph.c 	if (t->ret_stack)
ret_stack         486 kernel/trace/fgraph.c 		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
ret_stack         489 kernel/trace/fgraph.c 		struct ftrace_ret_stack *ret_stack;
ret_stack         491 kernel/trace/fgraph.c 		ret_stack = per_cpu(idle_ret_stack, cpu);
ret_stack         492 kernel/trace/fgraph.c 		if (!ret_stack) {
ret_stack         493 kernel/trace/fgraph.c 			ret_stack =
ret_stack         497 kernel/trace/fgraph.c 			if (!ret_stack)
ret_stack         499 kernel/trace/fgraph.c 			per_cpu(idle_ret_stack, cpu) = ret_stack;
ret_stack         501 kernel/trace/fgraph.c 		graph_init_task(t, ret_stack);
ret_stack         509 kernel/trace/fgraph.c 	t->ret_stack = NULL;
ret_stack         514 kernel/trace/fgraph.c 		struct ftrace_ret_stack *ret_stack;
ret_stack         516 kernel/trace/fgraph.c 		ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
ret_stack         519 kernel/trace/fgraph.c 		if (!ret_stack)
ret_stack         521 kernel/trace/fgraph.c 		graph_init_task(t, ret_stack);
ret_stack         527 kernel/trace/fgraph.c 	struct ftrace_ret_stack	*ret_stack = t->ret_stack;
ret_stack         529 kernel/trace/fgraph.c 	t->ret_stack = NULL;
ret_stack         533 kernel/trace/fgraph.c 	kfree(ret_stack);
ret_stack         551 kernel/trace/fgraph.c 		if (!idle_task(cpu)->ret_stack)
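
Lines 463-551 are the lifecycle: a fixed array of FTRACE_RETFUNC_DEPTH entries per task, allocated at fork while tracing is active, freed at task exit. Idle tasks instead reuse a per-cpu stack (idle_ret_stack, lines 485-501) that survives cpu hotplug and is never freed, which is what the start-of-tracing check at line 551 relies on. A condensed sketch of the fork and exit paths, restored from context around the hits above:

        /* Allocate a return stack for a newly created task. */
        void ftrace_graph_init_task(struct task_struct *t)
        {
                t->ret_stack = NULL;    /* never inherit the parent's stack */

                if (ftrace_graph_active) {
                        struct ftrace_ret_stack *ret_stack;

                        ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
                                                  sizeof(struct ftrace_ret_stack),
                                                  GFP_KERNEL);
                        if (!ret_stack)
                                return;
                        graph_init_task(t, ret_stack);
                }
        }

        void ftrace_graph_exit_task(struct task_struct *t)
        {
                struct ftrace_ret_stack *ret_stack = t->ret_stack;

                t->ret_stack = NULL;
                /* NULL must be visible to IRQs before the free: */
                barrier();

                kfree(ret_stack);
        }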
ret_stack         798 kernel/trace/ftrace.c 	struct ftrace_ret_stack *ret_stack;
ret_stack         803 kernel/trace/ftrace.c 	if (!current->ret_stack)
ret_stack         806 kernel/trace/ftrace.c 	ret_stack = ftrace_graph_get_ret_stack(current, 0);
ret_stack         807 kernel/trace/ftrace.c 	if (ret_stack)
ret_stack         808 kernel/trace/ftrace.c 		ret_stack->subtime = 0;
ret_stack         815 kernel/trace/ftrace.c 	struct ftrace_ret_stack *ret_stack;
ret_stack         835 kernel/trace/ftrace.c 		ret_stack = ftrace_graph_get_ret_stack(current, 1);
ret_stack         836 kernel/trace/ftrace.c 		if (ret_stack)
ret_stack         837 kernel/trace/ftrace.c 			ret_stack->subtime += calltime;
ret_stack         839 kernel/trace/ftrace.c 		ret_stack = ftrace_graph_get_ret_stack(current, 0);
ret_stack         840 kernel/trace/ftrace.c 		if (ret_stack && ret_stack->subtime < calltime)
ret_stack         841 kernel/trace/ftrace.c 			calltime -= ret_stack->subtime;
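
The last block is the function profiler in ftrace.c. On entry it zeroes the new frame's subtime (lines 798-808); on return, when per-function graph time is disabled (fgraph_graph_time), it reports self time rather than inclusive time: this call's total is handed to the parent's subtime (shadow-stack index 1), and the children's accumulated subtime (index 0) is subtracted from its own total. A sketch of the return side, reconstructed from profile_graph_return() (ret_stack and calltime are locals of that function; the surrounding stat bookkeeping is elided):

        calltime = trace->rettime - trace->calltime;    /* inclusive time */

        if (!fgraph_graph_time) {
                /* Hand this call's total to the parent as time to subtract... */
                ret_stack = ftrace_graph_get_ret_stack(current, 1);
                if (ret_stack)
                        ret_stack->subtime += calltime;

                /* ...and drop what our own children already consumed. */
                ret_stack = ftrace_graph_get_ret_stack(current, 0);
                if (ret_stack && ret_stack->subtime < calltime)
                        calltime -= ret_stack->subtime;
                else
                        calltime = 0;   /* clamp; never report negative time */
        }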