/linux-4.1.27/arch/parisc/include/asm/ |
H A D | rt_sigframe.h | 9 /* XXX: Must match trampoline size in arch/parisc/kernel/signal.c 11 trampoline we left on the stack (we were bad and didn't
|
/linux-4.1.27/arch/hexagon/kernel/ |
H A D | Makefile | 6 obj-y += process.o trampoline.o reset.o ptrace.o vdso.o
|
H A D | vdso.c | 30 /* Create a vDSO page holding the signal trampoline. 46 /* Install the signal trampoline; currently looks like this: vdso_init()
|
H A D | signal.c | 122 /* The on-stack signal trampoline is no longer executed; setup_rt_frame()
|
/linux-4.1.27/arch/powerpc/include/asm/ |
H A D | kdump.h | 9 * be greater than or equal to the trampoline's end address. 16 * On PPC64 translation is disabled during trampoline setup, so we use
|
H A D | module.h | 82 int module_trampoline_target(struct module *mod, u32 *trampoline,
|
H A D | ppc_asm.h | 782 * Create an endian fixup trampoline 791 * trampoline in "reverse endian" if we are running with the 801 b $+36; /* Skip trampoline if endian is good */ \
|
/linux-4.1.27/arch/x86/kernel/ |
H A D | ftrace.c |
        701  * trampoline only services a single ftrace_ops, we can pass in   tramp_free()
        726  void *trampoline;   create_trampoline() local
        747  * the ftrace_ops this trampoline is used for.   create_trampoline()
        749  trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *));   create_trampoline()
        750  if (!trampoline)   create_trampoline()
        755  /* Copy ftrace_caller onto the trampoline memory */   create_trampoline()
        756  ret = probe_kernel_read(trampoline, (void *)start_offset, size);   create_trampoline()
        758  tramp_free(trampoline);   create_trampoline()
        762  ip = (unsigned long)trampoline + size;   create_trampoline()
        764  /* The trampoline ends with a jmp to ftrace_return */   create_trampoline()
        766  memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE);   create_trampoline()
        769  * The address of the ftrace_ops that is used for this trampoline   create_trampoline()
        770  * is stored at the end of the trampoline. This will be used to   create_trampoline()
        772  * location at the end of the trampoline takes the place of   create_trampoline()
        776  ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE);   create_trampoline()
        780  memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);   create_trampoline()
        784  tramp_free(trampoline);   create_trampoline()
        790  offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;   create_trampoline()
        795  memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);   create_trampoline()
        800  return (unsigned long)trampoline;   create_trampoline()
        828  if (ops->trampoline) {   arch_ftrace_update_trampoline()
        830  * The ftrace_ops caller may set up its own trampoline.   arch_ftrace_update_trampoline()
        836  ops->trampoline = create_trampoline(ops, &size);   arch_ftrace_update_trampoline()
        837  if (!ops->trampoline)   arch_ftrace_update_trampoline()
        843  ip = ops->trampoline + offset;   arch_ftrace_update_trampoline()
        847  /* Do a safe modify in case the trampoline is executing */   arch_ftrace_update_trampoline()
        855  /* Return the address of the function the trampoline calls */   addr_from_call()
        878  * If the ops->trampoline was not allocated, then it probably
        879  * has a static trampoline func, or is the ftrace caller itself.
        887  if (ops && ops->trampoline) {   static_tramp_func()
        891  * trampoline.   static_tramp_func()
        893  if (ops->trampoline == FTRACE_GRAPH_ADDR)   static_tramp_func()
        913  /* If we didn't allocate this trampoline, consider it static */   arch_ftrace_trampoline_func()
        918  return addr_from_call((void *)ops->trampoline + offset);   arch_ftrace_trampoline_func()
        926  tramp_free((void *)ops->trampoline);   arch_ftrace_trampoline_free()
        927  ops->trampoline = 0;   arch_ftrace_trampoline_free()
|
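The ftrace.c matches above outline how create_trampoline() lays a dynamic trampoline out in memory: copy the template code, append a jump back, and store the owning ftrace_ops pointer at the very end of the buffer. Below is a minimal user-space sketch of just that buffer layout; the names (fake_ops, build_trampoline) and the three-nop template are made up for illustration, nothing is mapped executable, and this is not the kernel's implementation.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MCOUNT_INSN_SIZE 5   /* size of a rel32 call/jmp on x86, as in the matches above */

struct fake_ops { const char *name; };

/*
 * Lay out a trampoline buffer the way the matches describe:
 * [ copied template code ][ trailing jmp ][ pointer to owning ops ]
 * This sketch only builds the layout; it never executes the bytes.
 */
static unsigned char *build_trampoline(const unsigned char *tmpl, size_t size,
                                       struct fake_ops *ops, size_t *out_size)
{
    size_t total = size + MCOUNT_INSN_SIZE + sizeof(void *);
    unsigned char *tramp = malloc(total);
    unsigned char jmp_back[MCOUNT_INSN_SIZE] = { 0xe9, 0, 0, 0, 0 }; /* rel32 jmp, target left unpatched */

    if (!tramp)
        return NULL;
    memcpy(tramp, tmpl, size);                                  /* copy the template ("ftrace_caller") */
    memcpy(tramp + size, jmp_back, MCOUNT_INSN_SIZE);           /* trampoline ends with a jmp */
    memcpy(tramp + size + MCOUNT_INSN_SIZE, &ops, sizeof(ops)); /* ops pointer stored at the very end */
    *out_size = total;
    return tramp;
}

int main(void)
{
    static const unsigned char tmpl[] = { 0x90, 0x90, 0x90 };   /* stand-in template: three nops */
    struct fake_ops ops = { "demo" };
    size_t size;
    unsigned char *tramp = build_trampoline(tmpl, sizeof(tmpl), &ops, &size);

    if (tramp) {
        struct fake_ops *stored;
        memcpy(&stored, tramp + size - sizeof(void *), sizeof(stored));
        printf("trampoline is %zu bytes, ops stored at its end: %s\n", size, stored->name);
        free(tramp);
    }
    return 0;
}

Keeping the ops pointer at a fixed offset from the end is what lets the matched lines recover the owning ops from nothing but the trampoline address.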
H A D | mcount_64.S |
        62   * %rdi - holds the address that called the trampoline
        73   * Stack traces will stop at the ftrace trampoline if the frame pointer
        171  * The copied trampoline must call ftrace_return as it
        248  * it must not be copied into the trampoline.
        249  * The trampoline will add the code to jump
|
H A D | head.c | 20 * as we have enough memory to install the trampoline. Using
|
H A D | head64.c | 161 /* Kill off the identity-map trampoline */ x86_64_start_kernel()
|
H A D | head_64.S | 173 * or from trampoline.S (using virtual addresses). 175 * Using virtual addresses from trampoline.S removes the need
|
H A D | head_32.S | 296 * Non-boot CPU entry point; entered from trampoline.S 298 * we know the trampoline has already loaded the boot_gdt for us.
|
H A D | smpboot.c | 26 * Michael Chastain : Change trampoline.S to gnu as.
|
/linux-4.1.27/arch/s390/include/asm/ |
H A D | uprobes.h | 40 unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
|
/linux-4.1.27/arch/powerpc/kernel/ |
H A D | ftrace.c |
        130  pr_err("Not a trampoline\n");   __ftrace_make_nop()
        135  pr_err("Failed to get trampoline target\n");   __ftrace_make_nop()
        139  pr_devel("trampoline target %lx", ptr);   __ftrace_make_nop()
        192  * On PPC32 the trampoline looks like:   __ftrace_make_nop()
        201  /* Find where the trampoline jumps to */   __ftrace_make_nop()
        214  pr_err("Not a trampoline\n");   __ftrace_make_nop()
        249  * then we had to use a trampoline to make the call.   ftrace_make_nop()
        314  /* If we never set up a trampoline to ftrace_caller, then bail */   __ftrace_make_call()
        316  pr_err("No ftrace trampoline\n");   __ftrace_make_call()
        350  /* If we never set up a trampoline to ftrace_caller, then bail */   __ftrace_make_call()
        352  pr_err("No ftrace trampoline\n");   __ftrace_make_call()
        356  /* create the branch to the trampoline */   __ftrace_make_call()
        381  * then we had to use a trampoline to make the call.   ftrace_make_call()
|
H A D | crash_dump.c | 42 * instruction's address + (32 MB - 4) bytes. For the trampoline we create_trampoline() 44 * the trampoline address, then the next instruction (+ 4 bytes) create_trampoline()
|
H A D | module_64.c |
        106  * so we don't have to modify the trampoline load instruction. */
        177  int module_trampoline_target(struct module *mod, u32 *trampoline,   module_trampoline_target() argument
        185  if (probe_kernel_read(buf, trampoline, sizeof(buf)))   module_trampoline_target()
        195  * Now get the address this trampoline jumps to. This   module_trampoline_target()
        196  * is always 32 bytes into our trampoline stub.   module_trampoline_target()
        301  /* make the trampoline to the ftrace_caller */   get_stubs_size()
|
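The module_64.c matches describe module_trampoline_target() reading the trampoline stub with probe_kernel_read() and pulling the branch target from a fixed offset (32 bytes into the stub). A host-side sketch of that "target lives at a fixed offset" idea, using a made-up stub layout rather than the real ppc64 one:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Simplified model of a PLT-style trampoline stub: some instruction words,
 * then the branch target stored at a fixed offset (32 bytes here, echoing
 * the module_64.c comment above).
 */
struct demo_stub {
    uint32_t insns[8];   /* 32 bytes of stub code */
    uint64_t target;     /* address the stub eventually branches to */
};

/* Pull the target back out of a raw copy of the stub, the way a
 * module_trampoline_target()-style helper might. */
static uint64_t stub_target(const void *stub_mem)
{
    uint64_t target;
    memcpy(&target, (const char *)stub_mem + offsetof(struct demo_stub, target),
           sizeof(target));
    return target;
}

int main(void)
{
    struct demo_stub stub = { .target = 0xc000000000abcdefULL };
    printf("target offset: %zu bytes, target: %#llx\n",
           offsetof(struct demo_stub, target),
           (unsigned long long)stub_target(&stub));
    return 0;
}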
H A D | kprobes.c | 139 /* Replace the return addr with trampoline addr */ arch_prepare_kretprobe() 276 * Function return probe trampoline: 289 * Called when the probe at kretprobe trampoline is hit
|
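The kprobes.c matches show the kretprobe pattern: the probed function's return address is swapped for the address of a trampoline, which runs the handler and then resumes at the saved address. A portable simulation of that control flow, using a function pointer in place of the real stack slot (illustrative only; the kernel patches the actual return address):

#include <stdio.h>

/* Saved original "return address" (one slot is enough for this demo). */
static void (*saved_ret)(void);

static void real_return_site(void) { puts("back at the real return site"); }

/* The trampoline: run the probe handler, then continue where the
 * caller was originally going to return. */
static void kretprobe_trampoline_demo(void)
{
    puts("kretprobe handler: probed function has returned");
    saved_ret();                            /* hand control back to the real flow */
}

/* A probed function whose "return address" is passed in explicitly so we
 * can hijack it without touching the real stack. */
static void probed(void (**ret_addr)(void))
{
    saved_ret = *ret_addr;                  /* remember where we were going */
    *ret_addr = kretprobe_trampoline_demo;  /* replace it with the trampoline */
    puts("probed function body runs");
}

int main(void)
{
    void (*ret)(void) = real_return_site;
    probed(&ret);
    ret();      /* the "return": enters the trampoline first */
    return 0;
}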
H A D | module_32.c | 181 /* Set up a trampoline in the PLT to bounce us to the distant function */ do_plt_call()
|
H A D | uprobes.c | 203 /* Replace the return addr with trampoline addr */ arch_uretprobe_hijack_return_addr()
|
H A D | entry_64.S | 673 subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */ 675 mr r1,r3 /* Reroute the trampoline frame to r1 */ 677 /* Copy from the original to the trampoline. */ 1113 /* Setup our trampoline return addr in LR */
|
H A D | entry_32.S | 781 subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */ 783 mr r1,r3 /* Reroute the trampoline frame to r1 */ 785 /* Copy from the original to the trampoline. */
|
H A D | signal_32.c | 232 /* We use the mc_pad field for the signal return trampoline. */ 494 /* Set up the sigreturn trampoline: li r0,sigret; sc */ save_user_regs() 646 /* Set up the sigreturn trampoline: li r0,sigret; sc */ save_tm_user_regs()
|
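The signal_32.c matches note that the sigreturn trampoline placed on the user stack is just two instructions, "li r0,sigret; sc". A small sketch that encodes those two words, assuming the standard encodings (0x38000000 | imm for li r0,imm, i.e. addi r0,0,imm, and 0x44000002 for sc); the syscall number used below is only an example value:

#include <stdio.h>
#include <stdint.h>

/* Encode the two-instruction ppc32 sigreturn trampoline described above. */
static void encode_sigret_tramp(uint32_t nr_sigreturn, uint32_t tramp[2])
{
    tramp[0] = 0x38000000u | (nr_sigreturn & 0xffffu);  /* li r0, nr_sigreturn */
    tramp[1] = 0x44000002u;                             /* sc */
}

int main(void)
{
    uint32_t tramp[2];

    encode_sigret_tramp(119, tramp);   /* 119 is an example syscall number only */
    printf("li r0,119 -> %#010x\n", (unsigned)tramp[0]);
    printf("sc        -> %#010x\n", (unsigned)tramp[1]);
    return 0;
}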
H A D | exceptions-64e.S | 1098 * TODO: move some bits like SRR0 read to trampoline, pass PACA 1486 * We enter here from head_64.S, possibly after the prom_init trampoline
|
H A D | signal_64.c | 555 * Setup the trampoline code on the stack
|
H A D | head_32.S | 131 * appropriate trampoline if it's present
|
H A D | exceptions-64s.S | 1263 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
|
/linux-4.1.27/arch/x86/realmode/ |
H A D | init.c | 20 panic("Cannot allocate trampoline\n"); reserve_real_mode() 25 printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n", reserve_real_mode() 96 * function. Also trampoline code will be executed by APs so we
|
/linux-4.1.27/arch/x86/include/asm/ |
H A D | realmode.h | 11 /* SMP trampoline */
|
/linux-4.1.27/arch/arc/include/uapi/asm/ |
H A D | signal.h | 18 * W/o this kernel needs to "synthesize" the sigreturn trampoline on user
|
/linux-4.1.27/arch/arm64/kvm/ |
H A D | hyp-init.S | 114 /* Skip the trampoline dance if we merged the boot and runtime PGDs */ 118 /* MMU is now enabled. Get ready for the trampoline dance */ 124 target: /* We're now in the trampoline code, switch page tables */
|
/linux-4.1.27/arch/arm/kvm/ |
H A D | init.S | 38 * - Jump to a target into the trampoline page (remember, this is the same 138 @ Jump to the trampoline page 144 target: @ We're now in the trampoline code, switch page tables
|
H A D | mmu.c | 1696 /* Map the very same page at the trampoline VA */ kvm_mmu_init() 1702 kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n", kvm_mmu_init() 1713 kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n", kvm_mmu_init()
|
/linux-4.1.27/arch/parisc/kernel/ |
H A D | signal32.h | 61 /* XXX: Must match trampoline size in arch/parisc/kernel/signal.c 63 trampoline we left on the stack (we were bad and didn't
|
H A D | signal.c | 281 save the previous sigrestartblock trampoline that might be setup_rt_frame() 282 on the stack. We start the sigreturn trampoline at setup_rt_frame() 482 /* Setup a trampoline to restart the syscall insert_restart_trampoline()
|
H A D | syscall.S | 234 * trampoline code in signal.c). 353 * trampoline code in signal.c).
|
/linux-4.1.27/arch/um/kernel/ |
H A D | uml.lds.S | 13 /* Static binaries stick stuff here, like the sigreturn trampoline,
|
/linux-4.1.27/arch/powerpc/boot/ |
H A D | ppc_asm.h | 67 b $+36; /* Skip trampoline if endian is good */ \
|
/linux-4.1.27/arch/cris/arch-v10/kernel/ |
H A D | signal.c |
        51   unsigned char retcode[8]; /* trampoline code */
        59   unsigned char retcode[8]; /* trampoline code */
        221  * trampoline which performs the syscall sigreturn, or a provided
        222  * user-mode trampoline.
        253  /* trampoline - the desired return ip is the retcode itself */   setup_frame()
        312  /* trampoline - the desired return ip is the retcode itself */   setup_rt_frame()
|
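The cris signal.c matches show the classic on-stack trampoline arrangement: the signal frame carries a small retcode[] buffer holding the sigreturn code, and the handler's return address points at that buffer. A tiny model of that arrangement (field names are illustrative, not the cris layout):

#include <stdio.h>
#include <string.h>

struct demo_sigframe {
    unsigned char retcode[8];   /* sigreturn trampoline lives inside the frame */
    void *return_ip;            /* where the signal handler will "return" to */
};

int main(void)
{
    struct demo_sigframe frame;
    static const unsigned char sigreturn_code[4] = { 1, 2, 3, 4 }; /* placeholder bytes */

    memcpy(frame.retcode, sigreturn_code, sizeof(sigreturn_code));
    frame.return_ip = frame.retcode;   /* the desired return ip is the retcode itself */
    printf("handler returns into the frame itself: %p (frame at %p)\n",
           frame.return_ip, (void *)&frame);
    return 0;
}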
/linux-4.1.27/arch/x86/realmode/rm/ |
H A D | trampoline_64.S | 10 * trampoline page to make our stack and everything else 98 # Setup trampoline 4 level pagetables
|
H A D | trampoline_32.S | 11 * trampoline page to make our stack and everything else
|
H A D | reboot.S | 24 /* Switch to trampoline GDT as it is guaranteed < 4 GiB */
|
/linux-4.1.27/arch/sparc/kernel/ |
H A D | trampoline_32.S | 2 * trampoline.S: SMP cpu boot-up trampoline code.
|
H A D | hvtramp.S | 1 /* hvtramp.S: Hypervisor start-cpu trampoline code.
|
H A D | sun4m_smp.c | 87 /* See trampoline.S for details... */ smp4m_boot_one_cpu()
|
H A D | kprobes.c | 507 /* Replace the return addr with trampoline addr */ arch_prepare_kretprobe() 513 * Called when the probe at kretprobe trampoline is hit
|
H A D | signal32.c | 504 /* 3. signal handler back-trampoline and parameters */ setup_frame32() 635 /* 3. signal handler back-trampoline and parameters */ setup_rt_frame32()
|
H A D | leon_smp.c | 188 /* See trampoline.S:leon_smp_cpu_startup for details... leon_boot_one_cpu()
|
H A D | trampoline_64.S | 2 * trampoline.S: Jump start slave processors on sparc64.
|
H A D | head_64.S | 722 * secondary processor startup (via trampoline.S). The 737 * preventing trampoline.S from using this code... ho hum. 852 * routine, the other cpus set things up via trampoline.S.
|
H A D | signal_32.c | 280 /* 3. signal handler back-trampoline and parameters */ setup_frame()
|
H A D | signal_64.c | 418 /* 3. signal handler back-trampoline and parameters */ setup_rt_frame()
|
H A D | irq_64.c | 973 * On SMP this gets invoked from the CPU trampoline before
|
/linux-4.1.27/arch/sparc/include/asm/ |
H A D | sigcontext.h | 20 int sigc_o0; /* within the trampoline code. */
|
/linux-4.1.27/arch/tile/include/asm/ |
H A D | syscalls.h | 30 * _sys_xxx() trampoline in intvec*.S just sets up the pointer and
|
H A D | compat.h | 293 /* Assembly trampoline to avoid clobbering r0. */
|
/linux-4.1.27/arch/openrisc/kernel/ |
H A D | signal.c | 40 unsigned char retcode[16]; /* trampoline code */ 152 * trampoline which performs the syscall sigreturn, or a provided 153 * user-mode trampoline. 182 /* trampoline - the desired return ip is the retcode itself */ setup_rt_frame()
|
H A D | head.S | 1216 tophys (r3,r5) // r3 is trampoline (physical) 1249 // r3 is trampoline address (physical) 1380 // set up new EPC to point to our trampoline code
|
/linux-4.1.27/arch/avr32/include/asm/ |
H A D | thread_info.h | 30 trampoline */
|
/linux-4.1.27/arch/avr32/kernel/ |
H A D | ptrace.c | 263 * Explicit breakpoint from trampoline or do_debug() 296 * set up a trampoline just in case. do_debug() 299 * trampoline stuff if it does a full context do_debug() 307 pr_debug("Setting up trampoline...\n"); do_debug()
|
H A D | kprobes.c | 265 /* TODO: Register kretprobe trampoline */ arch_init_kprobes()
|
H A D | entry-avr32b.S | 323 * The debug handler set up a trampoline to make us
|
/linux-4.1.27/arch/arm/mach-rockchip/ |
H A D | platsmp.c | 167 * big enough. After this check, copy the trampoline code that directs the 188 pr_err("%s: reserved block with size 0x%x is too small for trampoline size 0x%x\n", rockchip_smp_prepare_sram() 196 /* copy the trampoline to sram, that runs during startup of the core */ rockchip_smp_prepare_sram()
|
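The platsmp.c matches check that the reserved SRAM block can hold the boot trampoline before copying it in. A host-side sketch of the same guard, with made-up sizes and plain buffers standing in for SRAM:

#include <stdio.h>
#include <string.h>

static int copy_trampoline(void *sram, size_t sram_size,
                           const void *tramp, size_t tramp_size)
{
    if (sram_size < tramp_size) {
        fprintf(stderr, "reserved block of %zu bytes is too small for trampoline of %zu bytes\n",
                sram_size, tramp_size);
        return -1;
    }
    memcpy(sram, tramp, tramp_size);   /* the trampoline runs when the core starts up */
    return 0;
}

int main(void)
{
    static unsigned char sram[64];
    static const unsigned char tramp[16] = { 0 };

    return copy_trampoline(sram, sizeof(sram), tramp, sizeof(tramp)) ? 1 : 0;
}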
/linux-4.1.27/kernel/trace/ |
H A D | ftrace.c |
        281   * then have the mcount trampoline call the function directly.   update_ftrace_function()
        432   /* The control_ops needs the trampoline update */   __register_ftrace_function()
        1159  * address is on a dynamically allocated trampoline that would
        1180  if (op->trampoline && op->trampoline_size)   do_for_each_ftrace_op()
        1181  if (addr >= op->trampoline &&   do_for_each_ftrace_op()
        1182  addr < op->trampoline + op->trampoline_size) {   do_for_each_ftrace_op()
        1717  * function, and the ops has a trampoline registered   do_for_each_ftrace_rec()
        1720  if (ftrace_rec_count(rec) == 1 && ops->trampoline)   do_for_each_ftrace_rec()
        1726  * custom trampoline in use, then we need to go   do_for_each_ftrace_rec()
        1727  * back to the default trampoline.   do_for_each_ftrace_rec()
        1762  * has a trampoline.   do_for_each_ftrace_rec()
        1995  (void *)ops->trampoline);   ftrace_bug()
        2068  * vice versa, or from a trampoline call.   ftrace_check_record()
        2127  if (!op->trampoline)   do_for_each_ftrace_op()
        2155  * Need to find the current trampoline for a rec.   ftrace_find_tramp_ops_curr()
        2156  * Now, a trampoline is only attached to a rec if there   ftrace_find_tramp_ops_curr()
        2168  * a trampoline, it needs to be removed (trampolines are only   ftrace_find_tramp_ops_curr()
        2174  if (!op->trampoline)   do_for_each_ftrace_op()
        2230  * Returns the address of the trampoline to set to
        2239  if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {   ftrace_get_addr_new()
        2240  pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",   ftrace_get_addr_new()
        2245  return ops->trampoline;   ftrace_get_addr_new()
        2262  * Returns the address of the trampoline that is currently being called
        2272  pr_warning("Bad trampoline accounting at: %p (%pS)\n",   ftrace_get_addr_curr()
        2277  return ops->trampoline;   ftrace_get_addr_curr()
        2671  * If the ops uses a trampoline, then it needs to be   ftrace_shutdown()
        2677  /* The trampoline logic checks the old hashes */   ftrace_shutdown()
        3261  (void *)ops->trampoline);   t_show()
        5023  * Currently there's no safe way to free a trampoline when the kernel   ftrace_update_trampoline()
        5025  * when it jumped to the trampoline, it may be preempted for a long time   ftrace_update_trampoline()
        5027  * when it will be off the trampoline. If the trampoline is freed   ftrace_update_trampoline()
        5032  /* Currently, only non dynamic ops can have a trampoline */   ftrace_update_trampoline()
        5209  * recursion, this function will be called by the mcount trampoline.
        5227  * ftrace_ops_get_func - get the function a trampoline should call
        5230  * Normally the mcount trampoline will call the ops->func, but there
        5235  * Returns the function that the trampoline should call for @ops.
        5631  .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
        5882  * Function graph does not allocate the trampoline, but   unregister_ftrace_graph()
        5886  global_ops.trampoline = save_global_trampoline;   unregister_ftrace_graph()
|
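Among the matches above, lines 1180-1182 test whether an address falls inside any dynamically allocated trampoline (addr >= op->trampoline && addr < op->trampoline + op->trampoline_size). A stand-alone sketch of that range check over a made-up ops list:

#include <stdio.h>

struct demo_ops {
    const char   *name;
    unsigned long trampoline;        /* start address of the trampoline */
    unsigned long trampoline_size;   /* 0 means no trampoline allocated */
};

static const struct demo_ops *find_tramp_ops(const struct demo_ops *ops, int n,
                                             unsigned long addr)
{
    for (int i = 0; i < n; i++)
        if (ops[i].trampoline && ops[i].trampoline_size &&
            addr >= ops[i].trampoline &&
            addr <  ops[i].trampoline + ops[i].trampoline_size)
            return &ops[i];
    return NULL;   /* address is not on any registered trampoline */
}

int main(void)
{
    static const struct demo_ops ops[] = {
        { "static_ops",  0,        0   },
        { "dynamic_ops", 0x100000, 256 },
    };
    const struct demo_ops *hit = find_tramp_ops(ops, 2, 0x100080);

    printf("0x100080 is %s\n", hit ? hit->name : "not in any trampoline");
    return 0;
}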
/linux-4.1.27/arch/sh/include/asm/ |
H A D | cacheflush.h | 20 * - flush_cache_sigtramp(vaddr) flushes the signal trampoline
|
/linux-4.1.27/arch/powerpc/kernel/vdso64/ |
H A D | sigtramp.S | 2 * Signal trampoline for 64 bits processes in a ppc64 kernel for 36 trampoline layout. The last magic value is the ucontext pointer,
|
/linux-4.1.27/arch/s390/kernel/ |
H A D | kprobes.c |
        274  /* Replace the return addr with trampoline addr */   arch_prepare_kretprobe()
        378  * Function return probe trampoline:
        390  * Called when the probe at kretprobe trampoline is hit
        719  static struct kprobe trampoline = {   variable in typeref:struct:kprobe
        726  return register_kprobe(&trampoline);   arch_init_kprobes()
|
H A D | uprobes.c | 140 unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline, arch_uretprobe_hijack_return_addr() argument 146 regs->gprs[14] = trampoline; arch_uretprobe_hijack_return_addr()
|
H A D | ftrace.c | 39 * trampoline (ftrace_plt), which clobbers also r1.
|
/linux-4.1.27/tools/testing/selftests/x86/ |
H A D | sigreturn.c |
        27   * user mode to succeed, we return to a short trampoline that generates
        70   * An aligned int3 instruction used as a trampoline. Some of the tests
        71   * want to fish out their ss values, so this trampoline copies ss to eax
        313  * int3 trampoline. Sets SP to a large known value so that we can see
        453  * int3 trampoline was invoked.   test_valid_sigreturn()
|
/linux-4.1.27/arch/tile/kernel/ |
H A D | kprobes.c | 416 * Function return probe trampoline: 438 /* Replace the return addr with trampoline addr */ arch_prepare_kretprobe() 443 * Called when the probe at kretprobe trampoline is hit.
|
H A D | stack.c | 122 /* Is the pc pointing to a sigreturn trampoline? */ is_sigreturn()
|
H A D | single_step.c | 293 * When we arrive at this routine via a trampoline, the single step
|
/linux-4.1.27/arch/mips/kernel/ |
H A D | signal.c | 52 u32 sf_pad[2]; /* Was: signal trampoline */ 59 u32 rs_pad[2]; /* Was: signal trampoline */ 292 * FPU emulator may have its own trampoline active just get_sigframe()
|
H A D | signal_n32.c | 63 u32 rs_pad[2]; /* Was: signal trampoline */
|
H A D | kprobes.c | 566 * Function return probe trampoline: 592 /* Replace the return addr with trampoline addr */ arch_prepare_kretprobe() 597 * Called when the probe at kretprobe trampoline is hit
|
H A D | signal32.c | 65 u32 sf_pad[2]; /* Was: signal trampoline */ 72 u32 rs_pad[2]; /* Was: signal trampoline */
|
H A D | mips-r2-to-r6-emul.c | 71 * for performance instead of the traditional way of using a stack trampoline 280 * For anything else we go back to trampoline emulation. jr_func()
|
/linux-4.1.27/arch/mips/include/asm/ |
H A D | cacheflush.h | 28 * - flush_cache_sigtramp() flush signal trampoline
|
/linux-4.1.27/arch/arm/probes/uprobes/ |
H A D | core.c | 70 /* Replace the return addr with trampoline addr */ arch_uretprobe_hijack_return_addr()
|
/linux-4.1.27/arch/microblaze/kernel/ |
H A D | signal.c | 50 unsigned long tramp[2]; /* signal trampoline */ 56 unsigned long tramp[2]; /* signal trampoline */
|
/linux-4.1.27/arch/cris/arch-v32/kernel/ |
H A D | ptrace.c | 142 /* The signal trampoline page is outside the normal user-addressable arch_ptrace() 147 /* The trampoline page is globally mapped, no page table to traverse.*/ arch_ptrace()
|
H A D | signal.c | 210 * trampoline. 493 in the signal trampoline: keep the Q flag. */ keep_debug_flags()
|
/linux-4.1.27/include/linux/ |
H A D | ftrace.h |
        108  * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.   trace_init()
        110  * trampoline. This lets the arch know that it can update the   trace_init()
        111  * trampoline in case the callback function changes.   trace_init()
        112  * The ftrace_ops trampoline can be set by the ftrace users, and   trace_init()
        171  unsigned long trampoline;   member in struct:ftrace_ops
        478  * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
|
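The ftrace.h matches describe the ops->trampoline member and the ALLOC_TRAMP flag: only a trampoline the core allocated itself may be updated or freed. A small model of that ownership rule (the names below are illustrative, not the kernel definitions):

#include <stdio.h>
#include <stdlib.h>

#define DEMO_FL_ALLOC_TRAMP 0x1   /* set when the core allocated the trampoline */

struct demo_ftrace_ops {
    unsigned int  flags;
    void         *trampoline;
};

static void demo_trampoline_free(struct demo_ftrace_ops *ops)
{
    if (!(ops->flags & DEMO_FL_ALLOC_TRAMP))
        return;                    /* caller-provided trampoline: not ours to free */
    free(ops->trampoline);
    ops->trampoline = NULL;
}

int main(void)
{
    struct demo_ftrace_ops ops = { DEMO_FL_ALLOC_TRAMP, malloc(64) };

    printf("trampoline before: %p\n", ops.trampoline);
    demo_trampoline_free(&ops);
    printf("trampoline after:  %p\n", ops.trampoline);
    return 0;
}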
/linux-4.1.27/arch/arc/kernel/ |
H A D | kprobes.c | 433 /* Replace the return addr with trampoline addr */ arch_prepare_kretprobe() 508 /* Registering the trampoline code for the kret probe */ arch_init_kprobes()
|
/linux-4.1.27/arch/xtensa/include/asm/ |
H A D | processor.h | 33 * 1 GB region. The C compiler places trampoline code on the stack for sources
|
/linux-4.1.27/arch/metag/kernel/ |
H A D | module.c | 161 /* Set up a trampoline in the PLT to bounce us to the distant function */ do_plt_call()
|
/linux-4.1.27/arch/powerpc/platforms/powernv/ |
H A D | opal-wrappers.S | 88 * this by instead converting the below trampoline to a set of
|
/linux-4.1.27/arch/score/kernel/ |
H A D | signal.c | 39 u32 rs_code[2]; /* signal trampoline */
|
/linux-4.1.27/arch/mips/mm/ |
H A D | c-octeon.c | 131 * Flush the icache for a trampoline. These are used for interrupt
|
/linux-4.1.27/arch/nios2/kernel/ |
H A D | signal.c | 202 trampoline on kuser page. */ setup_rt_frame()
|
/linux-4.1.27/arch/powerpc/kvm/ |
H A D | book3s_interrupts.S | 143 * lowmem trampoline code, so it's basically the guest exit code.
|
H A D | powerpc.c | 183 * a bug where they would map their trampoline code KVM_HCALL_TOKEN()
|
/linux-4.1.27/arch/sh/kernel/ |
H A D | signal_64.c | 429 /* Cohere the trampoline with the I-cache. */ setup_frame() 521 /* Cohere the trampoline with the I-cache. */ setup_rt_frame()
|
H A D | kprobes.c | 211 /* Replace the return addr with trampoline addr */ arch_prepare_kretprobe()
|
/linux-4.1.27/arch/ia64/kernel/ |
H A D | signal.c | 222 * trampoline starts. Everything else is done at the user-level. 334 * register stack is switched in the signal trampoline). setup_frame()
|
H A D | kprobes.c | 416 * returning into our trampoline. Lookup the associated instance 507 /* Replace the return addr with trampoline addr */ arch_prepare_kretprobe() 868 * and the kretprobe trampoline pre_kprobes_handler()
|
H A D | gate.S | 3 * region. For now, it contains the signal trampoline code only.
|
/linux-4.1.27/arch/m32r/kernel/ |
H A D | smpboot.c | 32 * Michael Chastain : Change trampoline.S to gnu as.
|
/linux-4.1.27/arch/powerpc/platforms/pseries/ |
H A D | setup.c | 105 * addresses anyway, and use a trampoline to get to the real code. */ fwnmi_init()
|
/linux-4.1.27/arch/arm/probes/kprobes/ |
H A D | core.c | 493 /* Replace the return addr with trampoline addr. */ arch_prepare_kretprobe()
|
/linux-4.1.27/arch/arm/include/asm/ |
H A D | cacheflush.h | 465 * trampoline are inserted by the linker and to keep sp 64-bit aligned.
|
/linux-4.1.27/kernel/ |
H A D | softirq.c | 588 * The trampoline is called when the hrtimer expires. It schedules a tasklet
|
/linux-4.1.27/kernel/events/ |
H A D | uprobes.c | 1500 * Current area->vaddr notion assumes the trampoline address is always 1549 * We don't want to keep trampoline address in stack, rather keep the prepare_uretprobe()
|
/linux-4.1.27/arch/x86/kernel/kprobes/ |
H A D | core.c | 518 /* Replace the return addr with trampoline addr */ arch_prepare_kretprobe()
|
/linux-4.1.27/arch/powerpc/lib/ |
H A D | sstep.c | 1706 * have to provide the exception frame trampoline, which is pushed
|