This source file includes the following definitions:
- arch_ftrace_update_code
- ftrace_dyn_arch_init_insns
- ftrace_modify_code
- ftrace_modify_code_2
- ftrace_modify_code_2r
- ftrace_make_nop
- ftrace_make_call
- ftrace_update_ftrace_func
- ftrace_dyn_arch_init
- ftrace_enable_ftrace_graph_caller
- ftrace_disable_ftrace_graph_caller
- ftrace_get_parent_ra_addr
- prepare_ftrace_return
- arch_syscall_addr
- arch_syscall_addr

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/uasm.h>
#include <asm/unistd.h>

#include <asm-generic/sections.h>

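/*
 * Length, in instructions, of the _mcount call sequence at a module call
 * site; used as the branch offset when such a site is NOPed out and when
 * backing up to the call site in prepare_ftrace_return().
 */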
#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

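/* Patch all ftrace call sites directly instead of going through stop_machine(). */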
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

#endif

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000		/* jump and link: save return address in ra, jump to target */
#define ADDR_MASK 0x03ffffff	/* low 26 bits: jump target field */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))

static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_la_mcount[2] __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

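/* Pre-assemble, via uasm, the instruction templates patched in at runtime. */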
static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* la v1, _mcount */
	v1 = 3;
	buf = (u32 *)&insn_la_mcount[0];
	UASM_i_LA(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), skipping the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}

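/* Replace the instruction at "ip" with "new_code" and flush the icache. */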
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;
	mm_segment_t old_fs;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}

#ifndef CONFIG_64BIT
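/*
 * On 32-bit kernels a call site spans two instructions.
 * ftrace_modify_code_2() stores the word at "ip" first, while
 * ftrace_modify_code_2r() stores the word at "ip + 4" first.
 */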
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
				unsigned int new_code2)
{
	int faulted;
	mm_segment_t old_fs;

	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}

static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
				 unsigned int new_code2)
{
	int faulted;
	mm_segment_t old_fs;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}
#endif

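/*
 * "b 1f": branch over the remaining instructions of a module's _mcount
 * call sequence, since a single nop cannot disable the whole sequence.
 */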
#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * In kernel text a plain nop is enough; module call sites use a
	 * multi-instruction long call, so branch over it instead.
	 */
	new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	/*
	 * On 32-bit kernels the second instruction of the call sequence
	 * must be nopped out as well.
	 */
	return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}

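/*
 * Re-enable tracing at a call site: kernel text gets a jal to
 * ftrace_caller, module sites get the "la v1, _mcount" sequence back.
 */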
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];

#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
						INSN_NOP : insn_la_mcount[1]);
#endif
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

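/* Point the patchable "ftrace_call" site at the current tracer callback. */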
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void)
{
	/* Encode the patch instructions once at boot */
	ftrace_dyn_arch_init_insns();

	/* NOP out the first instruction of _mcount so the ftrace caller path is reached */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP	((unsigned long)(&ftrace_graph_call))

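/* Flip the ftrace_graph_call site between "j ftrace_graph_caller" and a nop. */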
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack frame offset */

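/*
 * Without KBUILD_MCOUNT_RA_ADDRESS the caller's ra save slot is not handed
 * to us, so scan the prologue backwards for "s{d,w} ra, offset(sp)" and
 * derive the slot from fp plus the encoded offset.
 */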
unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * Step back from the mcount return address to just before the call
	 * sequence: 16 bytes in kernel text, 24 in modules.
	 */
	ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);

	/*
	 * Scan the prologue backwards until either a non-store instruction
	 * or the "s{d,w} ra, offset(sp)" instruction is found.
	 */
	do {
		/* code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * A non-store instruction before the ra store means this is
		 * a leaf function that never saves ra on the stack.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the previous instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */

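/*
 * Function-entry hook: redirect the caller's saved return address to
 * return_to_handler so function graph tracing sees the function return.
 */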
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted, insns;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent_ra_addr" points at a slot holding the return address of
	 * _mcount's caller.  Without KBUILD_MCOUNT_RA_ADDRESS the slot a
	 * non-leaf caller really uses has to be located by scanning its
	 * prologue; see ftrace_get_parent_ra_addr() above.
	 */

	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);
	/*
	 * A zero return means the slot could not be determined; bail out
	 * and stop the function graph tracer.
	 */
	if (parent_ra_addr == NULL)
		goto out;
#endif
	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	/*
	 * Back up from the return address to the start of the call site:
	 * two instructions in kernel text, MCOUNT_OFFSET_INSNS + 1 in
	 * modules.  This is the ip handed to the graph tracer.
	 */
	insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	self_ra -= (MCOUNT_INSN_SIZE * insns);

	if (function_graph_enter(old_parent_ra, self_ra, fp, NULL))
		*parent_ra_addr = old_parent_ra;
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

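/* Resolve a syscall number to the address of its handler for the syscall tracer. */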
#ifdef CONFIG_32BIT
unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
#endif

#ifdef CONFIG_64BIT

unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
	if (nr >= __NR_N32_Linux && nr < __NR_N32_Linux + __NR_N32_Linux_syscalls)
		return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
	if (nr >= __NR_64_Linux && nr < __NR_64_Linux + __NR_64_Linux_syscalls)
		return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
	if (nr >= __NR_O32_Linux && nr < __NR_O32_Linux + __NR_O32_Linux_syscalls)
		return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif

	return (unsigned long) &sys_ni_syscall;
}
#endif

#endif	/* CONFIG_FTRACE_SYSCALLS */