root/include/linux/ftrace.h


DEFINITIONS

This source file includes the following definitions.
  1. trace_init
  2. early_trace_init
  3. ftrace_mod_address_lookup
  4. ftrace_mod_get_kallsym
  5. ftrace_free_init_mem
  6. ftrace_free_mem
  7. ftrace_kill
  8. ftrace_free_init_mem
  9. ftrace_free_mem
  10. stack_tracer_disable
  11. stack_tracer_enable
  12. stack_tracer_disable
  13. stack_tracer_enable
  14. ftrace_enable_ftrace_graph_caller
  15. ftrace_disable_ftrace_graph_caller
  16. ftrace_modify_call
  17. skip_trace
  18. ftrace_force_update
  19. ftrace_disable_daemon
  20. ftrace_enable_daemon
  21. ftrace_module_init
  22. ftrace_module_enable
  23. ftrace_release_mod
  24. ftrace_text_reserved
  25. ftrace_location
  26. ftrace_filter_write
  27. ftrace_notrace_write
  28. ftrace_regex_release
  29. is_ftrace_trampoline
  30. tracer_disable
  31. __ftrace_enabled_save
  32. __ftrace_enabled_restore
  33. get_lock_parent_ip
  34. ftrace_init
  35. pause_graph_tracing
  36. unpause_graph_tracing
  37. ftrace_graph_init_task
  38. ftrace_graph_exit_task
  39. ftrace_graph_init_idle_task
  40. ftrace_graph_ret_addr
  41. pause_graph_tracing
  42. unpause_graph_tracing
  43. set_tsk_trace_trace
  44. clear_tsk_trace_trace
  45. test_tsk_trace_trace
  46. set_tsk_trace_graph
  47. clear_tsk_trace_graph
  48. test_tsk_trace_graph
  49. disable_trace_on_warning

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
        defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
                   unsigned long *off, char **modname, char *sym);
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
                           char *type, char *name,
                           char *module_name, int *exported);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
                   unsigned long *off, char **modname, char *sym)
{
        return NULL;
}
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
                                         char *type, char *name,
                                         char *module_name, int *exported)
{
        return -1;
}
#endif


#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     void __user *buffer, size_t *lenp,
                     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct pt_regs *regs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
 * IPMODIFY are a kind of attribute flags which can be set only before
 * registering the ftrace_ops, and cannot be modified while registered.
 * Changing those attribute flags after registering ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the
 *            ftrace_ops, but the regs field will be NULL if the arch
 *            does not support passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback has its own recursion protection. If it does
 *            not set this, then the ftrace infrastructure will add recursion
 *            protection for the caller.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *            register_ftrace_function() is called on it, the ops is initialized)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *            The arch specific code sets this flag when it allocated a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can be set by the ftrace users, and
 *            in such cases the arch must not modify it. Only the arch ftrace
 *            core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *            SAVE_REGS. If another ops with this flag set is already registered
 *            for any of the functions that this ops will be registered for, then
 *            this ops will fail to register or set_filter_ip.
 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU     - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 */
enum {
        FTRACE_OPS_FL_ENABLED                   = 1 << 0,
        FTRACE_OPS_FL_DYNAMIC                   = 1 << 1,
        FTRACE_OPS_FL_SAVE_REGS                 = 1 << 2,
        FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED    = 1 << 3,
        FTRACE_OPS_FL_RECURSION_SAFE            = 1 << 4,
        FTRACE_OPS_FL_STUB                      = 1 << 5,
        FTRACE_OPS_FL_INITIALIZED               = 1 << 6,
        FTRACE_OPS_FL_DELETED                   = 1 << 7,
        FTRACE_OPS_FL_ADDING                    = 1 << 8,
        FTRACE_OPS_FL_REMOVING                  = 1 << 9,
        FTRACE_OPS_FL_MODIFYING                 = 1 << 10,
        FTRACE_OPS_FL_ALLOC_TRAMP               = 1 << 11,
        FTRACE_OPS_FL_IPMODIFY                  = 1 << 12,
        FTRACE_OPS_FL_PID                       = 1 << 13,
        FTRACE_OPS_FL_RCU                       = 1 << 14,
        FTRACE_OPS_FL_TRACE_ARRAY               = 1 << 15,
};

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
        struct ftrace_hash __rcu        *notrace_hash;
        struct ftrace_hash __rcu        *filter_hash;
        struct mutex                    regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If the ftrace_ops is dynamically allocated and not
 * part of kernel core data, unregistering it will schedule on all CPUs
 * to make sure that there are no more users. Depending on the load of the
 * system, that may take a bit of time.
 *
 * Any private data attached must likewise not be freed while in use; if
 * private data is added to a ftrace_ops that is in core code, the user of
 * the ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
        ftrace_func_t                   func;
        struct ftrace_ops __rcu         *next;
        unsigned long                   flags;
        void                            *private;
        ftrace_func_t                   saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
        struct ftrace_ops_hash          local_hash;
        struct ftrace_ops_hash          *func_hash;
        struct ftrace_ops_hash          old_hash;
        unsigned long                   trampoline;
        unsigned long                   trampoline_size;
#endif
};

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
        FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
        FTRACE_TYPE_RETURN,     /* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly.  These functions do modify read_mostly variables
 * so use them sparingly. Never free a ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);

extern void ftrace_stub(unsigned long a0, unsigned long a1,
                        struct ftrace_ops *op, struct pt_regs *regs);

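/*
 * Editor's note: an illustrative sketch, not part of this header, of
 * registering a minimal function-trace callback from module code. The
 * names my_trace_callback and my_ops are hypothetical; the types, flag
 * and register/unregister calls are the ones declared above.
 *
 *      static void my_trace_callback(unsigned long ip, unsigned long parent_ip,
 *                                    struct ftrace_ops *op, struct pt_regs *regs)
 *      {
 *              trace_printk("%pS called from %pS\n",
 *                           (void *)ip, (void *)parent_ip);
 *      }
 *
 *      static struct ftrace_ops my_ops = {
 *              .func   = my_trace_callback,
 *              .flags  = FTRACE_OPS_FL_RECURSION_SAFE,
 *      };
 *
 * Module init would call register_ftrace_function(&my_ops), and module
 * exit unregister_ftrace_function(&my_ops).
 */
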
#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_TRACER

extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write,
                       void __user *buffer, size_t *lenp,
                       loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
        /* Preemption or interrupts must be disabled */
        if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
        this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
        if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
        this_cpu_dec(disable_stack_tracer);
}
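
/*
 * Editor's note: an illustrative sketch of the required pairing, with a
 * hypothetical critical section. Preemption stays disabled across the
 * whole disable/enable pair, as the comments above require.
 *
 *      preempt_disable();
 *      stack_tracer_disable();
 *      ... code that must not be stack-traced (e.g. fragile RCU paths) ...
 *      stack_tracer_enable();
 *      preempt_enable();
 */
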
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

struct dyn_ftrace;

enum ftrace_bug_type {
        FTRACE_BUG_UNKNOWN,
        FTRACE_BUG_INIT,
        FTRACE_BUG_NOP,
        FTRACE_BUG_CALL,
        FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, ranging from 0 to FTRACE_REF_MAX, is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS bit is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
        FTRACE_FL_ENABLED       = (1UL << 31),
        FTRACE_FL_REGS          = (1UL << 30),
        FTRACE_FL_REGS_EN       = (1UL << 29),
        FTRACE_FL_TRAMP         = (1UL << 28),
        FTRACE_FL_TRAMP_EN      = (1UL << 27),
        FTRACE_FL_IPMODIFY      = (1UL << 26),
        FTRACE_FL_DISABLED      = (1UL << 25),
};

#define FTRACE_REF_MAX_SHIFT    25
#define FTRACE_FL_BITS          7
#define FTRACE_FL_MASKED_BITS   ((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK          (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX          ((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)   ((rec)->flags & ~FTRACE_FL_MASK)

struct dyn_ftrace {
        unsigned long           ip; /* address of mcount call-site */
        unsigned long           flags;
        struct dyn_arch_ftrace  arch;
};

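/*
 * Editor's note: a worked example of the split above, for a hypothetical
 * record "rec". The top FTRACE_FL_BITS (7) bits, 31 down to 25, hold the
 * FTRACE_FL_* mask, and the low FTRACE_REF_MAX_SHIFT (25) bits count the
 * registered callbacks, so FTRACE_REF_MAX is (1UL << 25) - 1:
 *
 *      unsigned long refs = ftrace_rec_count(rec);
 *      bool enabled = rec->flags & FTRACE_FL_ENABLED;
 */
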
int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
                         int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                        int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);

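/*
 * Editor's note: an illustrative sketch, continuing the hypothetical
 * my_ops example above, of narrowing a callback to one function before
 * registering it. The glob "vfs_read" is just an example pattern.
 *
 *      ftrace_set_filter(&my_ops, "vfs_read", strlen("vfs_read"), 1);
 *      register_ftrace_function(&my_ops);
 *      ...
 *      unregister_ftrace_function(&my_ops);
 *      ftrace_free_filter(&my_ops);
 */
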
enum {
        FTRACE_UPDATE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_START_FUNC_RET           = (1 << 3),
        FTRACE_STOP_FUNC_RET            = (1 << 4),
        FTRACE_MAY_SLEEP                = (1 << 5),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE           - The function is already what we want it to be
 *  MAKE_CALL        - Start tracing the function
 *  MODIFY_CALL      - Change the call to a different trampoline
 *                     (e.g. to start or stop saving regs)
 *  MAKE_NOP         - Stop tracing the function
 */
enum {
        FTRACE_UPDATE_IGNORE,
        FTRACE_UPDATE_MAKE_CALL,
        FTRACE_UPDATE_MODIFY_CALL,
        FTRACE_UPDATE_MAKE_NOP,
};

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_NOTRACE     = (1 << 1),
        FTRACE_ITER_PRINTALL    = (1 << 2),
        FTRACE_ITER_DO_PROBES   = (1 << 3),
        FTRACE_ITER_PROBE       = (1 << 4),
        FTRACE_ITER_MOD         = (1 << 5),
        FTRACE_ITER_ENABLED     = (1 << 6),
};

void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)               \
        for (iter = ftrace_rec_iter_start();    \
             iter;                              \
             iter = ftrace_rec_iter_next(iter))

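/*
 * Editor's note: an illustrative sketch of walking every mcount record
 * with the iterator above, as the arch code-patching paths do:
 *
 *      struct ftrace_rec_iter *iter;
 *      struct dyn_ftrace *rec;
 *
 *      for_ftrace_rec_iter(iter) {
 *              rec = ftrace_rec_iter_record(iter);
 *              pr_info("call site at %ps\n", (void *)rec->ip);
 *      }
 */
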
int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
                  struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                            size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                             size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
                           struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

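/*
 * Editor's note: an illustrative, arch-neutral sketch of the careful
 * read/compare/write pattern that the kernel-doc above prescribes. The
 * helper patch_site() and arch_write_insn() are hypothetical;
 * probe_kernel_read(), ftrace_expected and MCOUNT_INSN_SIZE are the
 * real (arch-provided) pieces.
 *
 *      static int patch_site(struct dyn_ftrace *rec,
 *                            const void *old, const void *new)
 *      {
 *              unsigned char cur[MCOUNT_INSN_SIZE];
 *
 *              if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *                      return -EFAULT;         (could not read the site)
 *              if (memcmp(cur, old, MCOUNT_INSN_SIZE) != 0) {
 *                      ftrace_expected = old;
 *                      return -EINVAL;         (unexpected contents)
 *              }
 *              if (arch_write_insn((void *)rec->ip, new, MCOUNT_INSN_SIZE))
 *                      return -EPERM;          (could not write the site)
 *              return 0;
 *      }
 */
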
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the mcount call site record
 * @old_addr: the address the call site is expected to currently call
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                              unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                                     unsigned long addr)
{
        return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
        return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
        return 0;
}

/*
 * As above, code that passes a ftrace_ops to these functions may still
 * be built and called when ftrace is not enabled. Use macros instead of
 * inlines so that the ops argument is never evaluated.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                            size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                             size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
        return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        int saved_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;
        return saved_ftrace_enabled;
#else
        return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = enabled;
#endif
}

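/*
 * Editor's note: an illustrative pairing of the two helpers above,
 * assuming the caller provides its own synchronization around
 * ftrace_enabled:
 *
 *      int saved = __ftrace_enabled_save();
 *      ... run code that must not be traced ...
 *      __ftrace_enabled_restore(saved);
 */
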
/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static inline unsigned long get_lock_parent_ip(void)
{
        unsigned long addr = CALLER_ADDR0;

        if (!in_lock_functions(addr))
                return addr;
        addr = CALLER_ADDR1;
        if (!in_lock_functions(addr))
                return addr;
        return CALLER_ADDR2;
}
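
/*
 * Editor's note: get_lock_parent_ip() skips up to two levels of lock
 * functions to find the real caller. A sketch of how preempt-latency
 * tracing code can use it with trace_preempt_off(), declared below:
 *
 *      trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
 */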

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
        unsigned long func; /* Current function */
        int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
        unsigned long func; /* Current function */
        /* Number of functions that overran the depth limit for current task */
        unsigned long overrun;
        unsigned long long calltime;
        unsigned long long rettime;
        int depth;
} __packed;

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

struct fgraph_ops {
        trace_func_graph_ent_t          entryfunc;
        trace_func_graph_ret_t          retfunc;
};

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
        unsigned long ret;
        unsigned long func;
        unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
        unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
        unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
function_graph_enter(unsigned long ret, unsigned long func,
                     unsigned long frame_pointer, unsigned long *retp);

struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                    unsigned long ret, unsigned long *retp);

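/*
 * Editor's note: an illustrative sketch of how a stack unwinder uses
 * ftrace_graph_ret_addr() to map the return_to_handler trampoline
 * address back to the real return address. "addr", "retp" and the task
 * are hypothetical unwinder state; graph_idx tracks the unwind position:
 *
 *      int graph_idx = 0;
 *      ...
 *      addr = ftrace_graph_ret_addr(task, &graph_idx, addr, retp);
 */
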
/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we still want it traced by the normal function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph             notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);

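/*
 * Editor's note: an illustrative sketch, with hypothetical callbacks, of
 * registering entry/return handlers through the fgraph_ops interface
 * declared above. A nonzero return from the entry handler means "trace
 * this function".
 *
 *      static int my_entry(struct ftrace_graph_ent *ent)
 *      {
 *              return 1;
 *      }
 *
 *      static void my_return(struct ftrace_graph_ret *ret)
 *      {
 *              trace_printk("%ps returned\n", (void *)ret->func);
 *      }
 *
 *      static struct fgraph_ops my_fgraph_ops = {
 *              .entryfunc      = my_entry,
 *              .retfunc        = my_return,
 *      };
 *
 * Paired register_ftrace_graph(&my_fgraph_ops) and
 * unregister_ftrace_graph(&my_fgraph_ops) calls turn it on and off.
 */
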
extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline void pause_graph_tracing(void)
{
        atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
        atomic_dec(&current->tracing_graph_pause);
}
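
/*
 * Editor's note: pause_graph_tracing() and unpause_graph_tracing() nest
 * through the per-task counter, so pairs may be stacked. An illustrative
 * use:
 *
 *      pause_graph_tracing();
 *      ... emit diagnostics without generating graph entries ...
 *      unpause_graph_tracing();
 */
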
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

/* Define as macros as fgraph_ops may not be defined */
#define register_ftrace_graph(ops) ({ -1; })
#define unregister_ftrace_graph(ops) do { } while (0)

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
                      unsigned long *retp)
{
        return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
        TSK_TRACE_FL_TRACE_BIT  = 0,
        TSK_TRACE_FL_GRAPH_BIT  = 1,
};
enum {
        TSK_TRACE_FL_TRACE      = 1 << TSK_TRACE_FL_TRACE_BIT,
        TSK_TRACE_FL_GRAPH      = 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
        set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
        clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
        return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
        set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
        clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
        return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */
