/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 *
 */

#define pr_fmt(fmt) "ftrace-powerpc: " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>


#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	unsigned int op;

	addr = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);

	return op;
}

static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
	unsigned int replaced;

	/*
	 * Note: Due to modules and __init, code can disappear and change;
	 * we need to protect against faulting as well as code changing.
	 * We do this by using the probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (replaced != old)
		return -EINVAL;

	/* replace the text with the new text */
	if (patch_instruction((unsigned int *)ip, new))
		return -EPERM;

	return 0;
}
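
/*
 * Illustration (editor's sketch, not part of the original source):
 * every state change in this file funnels through the read-verify-patch
 * helper above. Disabling an in-range call site, for instance, is
 *
 *	old = ftrace_call_replace(ip, addr, 1);	<- the 'bl' we expect
 *	new = PPC_INST_NOP;
 *	ret = ftrace_modify_code(ip, old, new);
 *
 * which is exactly the in-range path of ftrace_make_nop() below; if the
 * site no longer holds 'old', the helper bails with -EINVAL.
 */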

/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	addr = ppc_function_entry((void *)addr);

	/* use create_branch() to verify that this offset can be branched */
	return create_branch((unsigned int *)ip, addr, 0);
}

#ifdef CONFIG_MODULES

static int is_bl_op(unsigned int op)
{
	return (op & 0xfc000003) == 0x48000001;
}

static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
	static int offset;

	offset = (op & 0x03fffffc);
	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}
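
/*
 * Worked example (editor's illustration; the values are made up):
 * a "bl" targeting 12 bytes behind the call site encodes as
 * 0x4bfffff5. Then
 *
 *	is_bl_op(0x4bfffff5)		-> (op & 0xfc000003) == 0x48000001
 *	offset = op & 0x03fffffc	-> 0x03fffff4
 *	sign bit 0x02000000 is set	-> offset |= 0xfe000000, i.e. -12
 *	find_bl_target(ip, op)		-> ip - 12
 */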

#ifdef CONFIG_PPC64
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long entry, ptr;
	unsigned long ip = rec->ip;
	void *tramp;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
		return -EFAULT;

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = (void *)find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %p", ip, tramp);

	if (!is_module_trampoline(tramp)) {
		pr_err("Not a trampoline\n");
		return -EINVAL;
	}

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	/*
	 * Our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we can not simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */
	op = 0x48000008;	/* b +8 */

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}

#else /* !PPC64 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[4];
	unsigned long ip = rec->ip;
	unsigned long tramp;

	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC32 the trampoline looks like:
	 *  0x3d, 0x80, 0x00, 0x00  lis r12,sym@ha
	 *  0x39, 0x8c, 0x00, 0x00  addi r12,r12,sym@l
	 *  0x7d, 0x89, 0x03, 0xa6  mtctr r12
	 *  0x4e, 0x80, 0x04, 0x20  bctr
	 */

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		pr_err("Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x ", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0x7d8903a6) ||
	    (jmp[3] != 0x4e800420)) {
		pr_err("Not a trampoline\n");
		return -EINVAL;
	}

	tramp = (jmp[1] & 0xffff) |
		((jmp[0] & 0xffff) << 16);
	if (tramp & 0x8000)
		tramp -= 0x10000;

	pr_devel(" %lx ", tramp);

	if (tramp != addr) {
		pr_err("Trampoline location %08lx does not match addr\n",
		       tramp);
		return -EINVAL;
	}

	op = PPC_INST_NOP;

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* PPC64 */
#endif /* CONFIG_MODULES */
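
/*
 * Worked example for the PPC32 reconstruction above (editor's
 * illustration; the trampoline target 0xbfff8000 is made up):
 *
 *	lis  r12,0xc000		-> jmp[0] = 0x3d80c000
 *	addi r12,r12,-0x8000	-> jmp[1] = 0x398c8000
 *
 *	tramp = 0x8000 | (0xc000 << 16)		= 0xc0008000
 *	low half has bit 0x8000 set		-> tramp -= 0x10000 = 0xbfff8000
 *
 * This matches the lis/addi semantics of sym@ha/sym@l: the @ha half is
 * pre-adjusted so that adding the sign-extended @l half yields sym.
 */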

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = PPC_INST_NOP;
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			pr_err("No module loaded addr=%lx\n", addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			pr_err("Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op[2];
	void *ip = (void *)rec->ip;

	/* read where this goes */
	if (probe_kernel_read(op, ip, sizeof(op)))
		return -EFAULT;

	/*
	 * We expect to see:
	 *
	 * b +8
	 * ld r2,XX(r1)
	 *
	 * The load offset is different depending on the ABI. For simplicity
	 * just mask it out when doing the compare.
	 */
	if ((op[0] != 0x48000008) || ((op[1] & 0xffff0000) != 0xe8410000)) {
		pr_err("Unexpected call sequence: %x %x\n", op[0], op[1]);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* Ensure branch is within 24 bits */
	if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
		pr_err("Branch out of range\n");
		return -EINVAL;
	}

	if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
#else
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (op != PPC_INST_NOP) {
		pr_err("Expected NOP but have %x\n", op);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op = create_branch((unsigned int *)ip,
			   rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */
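
/*
 * Editor's summary of the two PPC64 module call-site states handled
 * above (layout taken from the comments in the helpers themselves):
 *
 *	enabled:			disabled:
 *		bl <tramp>			b +8
 *		ld r2,XX(r1)			ld r2,XX(r1)
 *
 * Only the first instruction is ever rewritten. The TOC reload stays
 * in place, so a task that slept inside the tracer can still return
 * through it and restore r2.
 */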

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = PPC_INST_NOP;
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Since we are converting from a nop, the module
	 * had better already be defined.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned int old, new;
	int ret;

	old = *(unsigned int *)&ftrace_call;
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR;
	int ret;

	ret = ftrace_update_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;
	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);
	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_addr);
	}

	return 0;
}

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	int ret;

	for (iter = ftrace_rec_iter_start(); iter;
	     iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);
		ret = __ftrace_replace_code(rec, enable);
		if (ret) {
			ftrace_bug(ret, rec);
			return;
		}
	}
}

void arch_ftrace_update_code(int command)
{
	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, stub, 0);
	new = ftrace_call_replace(ip, addr, 0);

	return ftrace_modify_code(ip, old, new);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, addr, 0);
	new = ftrace_call_replace(ip, stub, 0);

	return ftrace_modify_code(ip, old, new);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
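
/*
 * Illustration (editor's sketch) of the toggle pair above: the site at
 * ftrace_graph_call holds a plain branch (no link bit), and only its
 * target ever changes:
 *
 *	disabled:	b ftrace_graph_stub
 *	enabled:	b ftrace_graph_caller
 *
 * Because ftrace_modify_code() verifies the old instruction first, a
 * spurious double enable or disable fails with -EINVAL rather than
 * silently repatching the site.
 */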

/*
 * Hook the return address and push it onto the return address stack
 * in the current thread info. Return the address we want to divert to.
 */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
{
	struct ftrace_graph_ent trace;
	unsigned long return_hooker;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	return_hooker = ppc_function_entry(return_to_handler);

	trace.func = ip;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		goto out;

	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;

	parent = return_hooker;
out:
	return parent;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
unsigned long __init arch_syscall_addr(int nr)
{
	return sys_call_table[nr*2];
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
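
/*
 * Note (editor's, on the nr*2 stride above): this assumes the PPC64
 * layout in which sys_call_table carries two adjacent entries per
 * syscall number, the native 64-bit handler followed by its 32-bit
 * compat counterpart, so the native entry for syscall nr sits at
 * index nr * 2.
 */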