1/* 2 * event tracer 3 * 4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> 5 * 6 * - Added format output of fields of the trace point. 7 * This was based off of work by Tom Zanussi <tzanussi@gmail.com>. 8 * 9 */ 10 11#define pr_fmt(fmt) fmt 12 13#include <linux/workqueue.h> 14#include <linux/spinlock.h> 15#include <linux/kthread.h> 16#include <linux/tracefs.h> 17#include <linux/uaccess.h> 18#include <linux/module.h> 19#include <linux/ctype.h> 20#include <linux/slab.h> 21#include <linux/delay.h> 22 23#include <asm/setup.h> 24 25#include "trace_output.h" 26 27#undef TRACE_SYSTEM 28#define TRACE_SYSTEM "TRACE_SYSTEM" 29 30DEFINE_MUTEX(event_mutex); 31 32LIST_HEAD(ftrace_events); 33static LIST_HEAD(ftrace_common_fields); 34 35#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO) 36 37static struct kmem_cache *field_cachep; 38static struct kmem_cache *file_cachep; 39 40#define SYSTEM_FL_FREE_NAME (1 << 31) 41 42static inline int system_refcount(struct event_subsystem *system) 43{ 44 return system->ref_count & ~SYSTEM_FL_FREE_NAME; 45} 46 47static int system_refcount_inc(struct event_subsystem *system) 48{ 49 return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME; 50} 51 52static int system_refcount_dec(struct event_subsystem *system) 53{ 54 return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME; 55} 56 57/* Double loops, do not use break, only goto's work */ 58#define do_for_each_event_file(tr, file) \ 59 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \ 60 list_for_each_entry(file, &tr->events, list) 61 62#define do_for_each_event_file_safe(tr, file) \ 63 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \ 64 struct ftrace_event_file *___n; \ 65 list_for_each_entry_safe(file, ___n, &tr->events, list) 66 67#define while_for_each_event_file() \ 68 } 69 70static struct list_head * 71trace_get_fields(struct ftrace_event_call *event_call) 72{ 73 if (!event_call->class->get_fields) 74 return &event_call->class->fields; 75 return event_call->class->get_fields(event_call); 76} 77 78static struct ftrace_event_field * 79__find_event_field(struct list_head *head, char *name) 80{ 81 struct ftrace_event_field *field; 82 83 list_for_each_entry(field, head, link) { 84 if (!strcmp(field->name, name)) 85 return field; 86 } 87 88 return NULL; 89} 90 91struct ftrace_event_field * 92trace_find_event_field(struct ftrace_event_call *call, char *name) 93{ 94 struct ftrace_event_field *field; 95 struct list_head *head; 96 97 field = __find_event_field(&ftrace_common_fields, name); 98 if (field) 99 return field; 100 101 head = trace_get_fields(call); 102 return __find_event_field(head, name); 103} 104 105static int __trace_define_field(struct list_head *head, const char *type, 106 const char *name, int offset, int size, 107 int is_signed, int filter_type) 108{ 109 struct ftrace_event_field *field; 110 111 field = kmem_cache_alloc(field_cachep, GFP_TRACE); 112 if (!field) 113 return -ENOMEM; 114 115 field->name = name; 116 field->type = type; 117 118 if (filter_type == FILTER_OTHER) 119 field->filter_type = filter_assign_type(type); 120 else 121 field->filter_type = filter_type; 122 123 field->offset = offset; 124 field->size = size; 125 field->is_signed = is_signed; 126 127 list_add(&field->link, head); 128 129 return 0; 130} 131 132int trace_define_field(struct ftrace_event_call *call, const char *type, 133 const char *name, int offset, int size, int is_signed, 134 int filter_type) 135{ 136 struct list_head *head; 137 138 if (WARN_ON(!call->class)) 139 return 0; 140 141 head = trace_get_fields(call); 142 return __trace_define_field(head, type, name, offset, size, 143 is_signed, filter_type); 144} 145EXPORT_SYMBOL_GPL(trace_define_field); 146 147#define __common_field(type, item) \ 148 ret = __trace_define_field(&ftrace_common_fields, #type, \ 149 "common_" #item, \ 150 offsetof(typeof(ent), item), \ 151 sizeof(ent.item), \ 152 is_signed_type(type), FILTER_OTHER); \ 153 if (ret) \ 154 return ret; 155 156static int trace_define_common_fields(void) 157{ 158 int ret; 159 struct trace_entry ent; 160 161 __common_field(unsigned short, type); 162 __common_field(unsigned char, flags); 163 __common_field(unsigned char, preempt_count); 164 __common_field(int, pid); 165 166 return ret; 167} 168 169static void trace_destroy_fields(struct ftrace_event_call *call) 170{ 171 struct ftrace_event_field *field, *next; 172 struct list_head *head; 173 174 head = trace_get_fields(call); 175 list_for_each_entry_safe(field, next, head, link) { 176 list_del(&field->link); 177 kmem_cache_free(field_cachep, field); 178 } 179} 180 181int trace_event_raw_init(struct ftrace_event_call *call) 182{ 183 int id; 184 185 id = register_ftrace_event(&call->event); 186 if (!id) 187 return -ENODEV; 188 189 return 0; 190} 191EXPORT_SYMBOL_GPL(trace_event_raw_init); 192 193void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer, 194 struct ftrace_event_file *ftrace_file, 195 unsigned long len) 196{ 197 struct ftrace_event_call *event_call = ftrace_file->event_call; 198 199 local_save_flags(fbuffer->flags); 200 fbuffer->pc = preempt_count(); 201 fbuffer->ftrace_file = ftrace_file; 202 203 fbuffer->event = 204 trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file, 205 event_call->event.type, len, 206 fbuffer->flags, fbuffer->pc); 207 if (!fbuffer->event) 208 return NULL; 209 210 fbuffer->entry = ring_buffer_event_data(fbuffer->event); 211 return fbuffer->entry; 212} 213EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve); 214 215static DEFINE_SPINLOCK(tracepoint_iter_lock); 216 217static void output_printk(struct ftrace_event_buffer *fbuffer) 218{ 219 struct ftrace_event_call *event_call; 220 struct trace_event *event; 221 unsigned long flags; 222 struct trace_iterator *iter = tracepoint_print_iter; 223 224 if (!iter) 225 return; 226 227 event_call = fbuffer->ftrace_file->event_call; 228 if (!event_call || !event_call->event.funcs || 229 !event_call->event.funcs->trace) 230 return; 231 232 event = &fbuffer->ftrace_file->event_call->event; 233 234 spin_lock_irqsave(&tracepoint_iter_lock, flags); 235 trace_seq_init(&iter->seq); 236 iter->ent = fbuffer->entry; 237 event_call->event.funcs->trace(iter, 0, event); 238 trace_seq_putc(&iter->seq, 0); 239 printk("%s", iter->seq.buffer); 240 241 spin_unlock_irqrestore(&tracepoint_iter_lock, flags); 242} 243 244void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer) 245{ 246 if (tracepoint_printk) 247 output_printk(fbuffer); 248 249 event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer, 250 fbuffer->event, fbuffer->entry, 251 fbuffer->flags, fbuffer->pc); 252} 253EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit); 254 255int ftrace_event_reg(struct ftrace_event_call *call, 256 enum trace_reg type, void *data) 257{ 258 struct ftrace_event_file *file = data; 259 260 WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT)); 261 switch (type) { 262 case TRACE_REG_REGISTER: 263 return tracepoint_probe_register(call->tp, 264 call->class->probe, 265 file); 266 case TRACE_REG_UNREGISTER: 267 tracepoint_probe_unregister(call->tp, 268 call->class->probe, 269 file); 270 return 0; 271 272#ifdef CONFIG_PERF_EVENTS 273 case TRACE_REG_PERF_REGISTER: 274 return tracepoint_probe_register(call->tp, 275 call->class->perf_probe, 276 call); 277 case TRACE_REG_PERF_UNREGISTER: 278 tracepoint_probe_unregister(call->tp, 279 call->class->perf_probe, 280 call); 281 return 0; 282 case TRACE_REG_PERF_OPEN: 283 case TRACE_REG_PERF_CLOSE: 284 case TRACE_REG_PERF_ADD: 285 case TRACE_REG_PERF_DEL: 286 return 0; 287#endif 288 } 289 return 0; 290} 291EXPORT_SYMBOL_GPL(ftrace_event_reg); 292 293void trace_event_enable_cmd_record(bool enable) 294{ 295 struct ftrace_event_file *file; 296 struct trace_array *tr; 297 298 mutex_lock(&event_mutex); 299 do_for_each_event_file(tr, file) { 300 301 if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) 302 continue; 303 304 if (enable) { 305 tracing_start_cmdline_record(); 306 set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); 307 } else { 308 tracing_stop_cmdline_record(); 309 clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); 310 } 311 } while_for_each_event_file(); 312 mutex_unlock(&event_mutex); 313} 314 315static int __ftrace_event_enable_disable(struct ftrace_event_file *file, 316 int enable, int soft_disable) 317{ 318 struct ftrace_event_call *call = file->event_call; 319 int ret = 0; 320 int disable; 321 322 switch (enable) { 323 case 0: 324 /* 325 * When soft_disable is set and enable is cleared, the sm_ref 326 * reference counter is decremented. If it reaches 0, we want 327 * to clear the SOFT_DISABLED flag but leave the event in the 328 * state that it was. That is, if the event was enabled and 329 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED 330 * is set we do not want the event to be enabled before we 331 * clear the bit. 332 * 333 * When soft_disable is not set but the SOFT_MODE flag is, 334 * we do nothing. Do not disable the tracepoint, otherwise 335 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work. 336 */ 337 if (soft_disable) { 338 if (atomic_dec_return(&file->sm_ref) > 0) 339 break; 340 disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED; 341 clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags); 342 } else 343 disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE); 344 345 if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) { 346 clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); 347 if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) { 348 tracing_stop_cmdline_record(); 349 clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); 350 } 351 call->class->reg(call, TRACE_REG_UNREGISTER, file); 352 } 353 /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */ 354 if (file->flags & FTRACE_EVENT_FL_SOFT_MODE) 355 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); 356 else 357 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); 358 break; 359 case 1: 360 /* 361 * When soft_disable is set and enable is set, we want to 362 * register the tracepoint for the event, but leave the event 363 * as is. That means, if the event was already enabled, we do 364 * nothing (but set SOFT_MODE). If the event is disabled, we 365 * set SOFT_DISABLED before enabling the event tracepoint, so 366 * it still seems to be disabled. 367 */ 368 if (!soft_disable) 369 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); 370 else { 371 if (atomic_inc_return(&file->sm_ref) > 1) 372 break; 373 set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags); 374 } 375 376 if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) { 377 378 /* Keep the event disabled, when going to SOFT_MODE. */ 379 if (soft_disable) 380 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); 381 382 if (trace_flags & TRACE_ITER_RECORD_CMD) { 383 tracing_start_cmdline_record(); 384 set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); 385 } 386 ret = call->class->reg(call, TRACE_REG_REGISTER, file); 387 if (ret) { 388 tracing_stop_cmdline_record(); 389 pr_info("event trace: Could not enable event " 390 "%s\n", ftrace_event_name(call)); 391 break; 392 } 393 set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); 394 395 /* WAS_ENABLED gets set but never cleared. */ 396 call->flags |= TRACE_EVENT_FL_WAS_ENABLED; 397 } 398 break; 399 } 400 401 return ret; 402} 403 404int trace_event_enable_disable(struct ftrace_event_file *file, 405 int enable, int soft_disable) 406{ 407 return __ftrace_event_enable_disable(file, enable, soft_disable); 408} 409 410static int ftrace_event_enable_disable(struct ftrace_event_file *file, 411 int enable) 412{ 413 return __ftrace_event_enable_disable(file, enable, 0); 414} 415 416static void ftrace_clear_events(struct trace_array *tr) 417{ 418 struct ftrace_event_file *file; 419 420 mutex_lock(&event_mutex); 421 list_for_each_entry(file, &tr->events, list) { 422 ftrace_event_enable_disable(file, 0); 423 } 424 mutex_unlock(&event_mutex); 425} 426 427static void __put_system(struct event_subsystem *system) 428{ 429 struct event_filter *filter = system->filter; 430 431 WARN_ON_ONCE(system_refcount(system) == 0); 432 if (system_refcount_dec(system)) 433 return; 434 435 list_del(&system->list); 436 437 if (filter) { 438 kfree(filter->filter_string); 439 kfree(filter); 440 } 441 if (system->ref_count & SYSTEM_FL_FREE_NAME) 442 kfree(system->name); 443 kfree(system); 444} 445 446static void __get_system(struct event_subsystem *system) 447{ 448 WARN_ON_ONCE(system_refcount(system) == 0); 449 system_refcount_inc(system); 450} 451 452static void __get_system_dir(struct ftrace_subsystem_dir *dir) 453{ 454 WARN_ON_ONCE(dir->ref_count == 0); 455 dir->ref_count++; 456 __get_system(dir->subsystem); 457} 458 459static void __put_system_dir(struct ftrace_subsystem_dir *dir) 460{ 461 WARN_ON_ONCE(dir->ref_count == 0); 462 /* If the subsystem is about to be freed, the dir must be too */ 463 WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1); 464 465 __put_system(dir->subsystem); 466 if (!--dir->ref_count) 467 kfree(dir); 468} 469 470static void put_system(struct ftrace_subsystem_dir *dir) 471{ 472 mutex_lock(&event_mutex); 473 __put_system_dir(dir); 474 mutex_unlock(&event_mutex); 475} 476 477static void remove_subsystem(struct ftrace_subsystem_dir *dir) 478{ 479 if (!dir) 480 return; 481 482 if (!--dir->nr_events) { 483 tracefs_remove_recursive(dir->entry); 484 list_del(&dir->list); 485 __put_system_dir(dir); 486 } 487} 488 489static void remove_event_file_dir(struct ftrace_event_file *file) 490{ 491 struct dentry *dir = file->dir; 492 struct dentry *child; 493 494 if (dir) { 495 spin_lock(&dir->d_lock); /* probably unneeded */ 496 list_for_each_entry(child, &dir->d_subdirs, d_child) { 497 if (d_really_is_positive(child)) /* probably unneeded */ 498 d_inode(child)->i_private = NULL; 499 } 500 spin_unlock(&dir->d_lock); 501 502 tracefs_remove_recursive(dir); 503 } 504 505 list_del(&file->list); 506 remove_subsystem(file->system); 507 free_event_filter(file->filter); 508 kmem_cache_free(file_cachep, file); 509} 510 511/* 512 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events. 513 */ 514static int 515__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match, 516 const char *sub, const char *event, int set) 517{ 518 struct ftrace_event_file *file; 519 struct ftrace_event_call *call; 520 const char *name; 521 int ret = -EINVAL; 522 523 list_for_each_entry(file, &tr->events, list) { 524 525 call = file->event_call; 526 name = ftrace_event_name(call); 527 528 if (!name || !call->class || !call->class->reg) 529 continue; 530 531 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) 532 continue; 533 534 if (match && 535 strcmp(match, name) != 0 && 536 strcmp(match, call->class->system) != 0) 537 continue; 538 539 if (sub && strcmp(sub, call->class->system) != 0) 540 continue; 541 542 if (event && strcmp(event, name) != 0) 543 continue; 544 545 ftrace_event_enable_disable(file, set); 546 547 ret = 0; 548 } 549 550 return ret; 551} 552 553static int __ftrace_set_clr_event(struct trace_array *tr, const char *match, 554 const char *sub, const char *event, int set) 555{ 556 int ret; 557 558 mutex_lock(&event_mutex); 559 ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set); 560 mutex_unlock(&event_mutex); 561 562 return ret; 563} 564 565static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) 566{ 567 char *event = NULL, *sub = NULL, *match; 568 int ret; 569 570 /* 571 * The buf format can be <subsystem>:<event-name> 572 * *:<event-name> means any event by that name. 573 * :<event-name> is the same. 574 * 575 * <subsystem>:* means all events in that subsystem 576 * <subsystem>: means the same. 577 * 578 * <name> (no ':') means all events in a subsystem with 579 * the name <name> or any event that matches <name> 580 */ 581 582 match = strsep(&buf, ":"); 583 if (buf) { 584 sub = match; 585 event = buf; 586 match = NULL; 587 588 if (!strlen(sub) || strcmp(sub, "*") == 0) 589 sub = NULL; 590 if (!strlen(event) || strcmp(event, "*") == 0) 591 event = NULL; 592 } 593 594 ret = __ftrace_set_clr_event(tr, match, sub, event, set); 595 596 /* Put back the colon to allow this to be called again */ 597 if (buf) 598 *(buf - 1) = ':'; 599 600 return ret; 601} 602 603/** 604 * trace_set_clr_event - enable or disable an event 605 * @system: system name to match (NULL for any system) 606 * @event: event name to match (NULL for all events, within system) 607 * @set: 1 to enable, 0 to disable 608 * 609 * This is a way for other parts of the kernel to enable or disable 610 * event recording. 611 * 612 * Returns 0 on success, -EINVAL if the parameters do not match any 613 * registered events. 614 */ 615int trace_set_clr_event(const char *system, const char *event, int set) 616{ 617 struct trace_array *tr = top_trace_array(); 618 619 if (!tr) 620 return -ENODEV; 621 622 return __ftrace_set_clr_event(tr, NULL, system, event, set); 623} 624EXPORT_SYMBOL_GPL(trace_set_clr_event); 625 626/* 128 should be much more than enough */ 627#define EVENT_BUF_SIZE 127 628 629static ssize_t 630ftrace_event_write(struct file *file, const char __user *ubuf, 631 size_t cnt, loff_t *ppos) 632{ 633 struct trace_parser parser; 634 struct seq_file *m = file->private_data; 635 struct trace_array *tr = m->private; 636 ssize_t read, ret; 637 638 if (!cnt) 639 return 0; 640 641 ret = tracing_update_buffers(); 642 if (ret < 0) 643 return ret; 644 645 if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1)) 646 return -ENOMEM; 647 648 read = trace_get_user(&parser, ubuf, cnt, ppos); 649 650 if (read >= 0 && trace_parser_loaded((&parser))) { 651 int set = 1; 652 653 if (*parser.buffer == '!') 654 set = 0; 655 656 parser.buffer[parser.idx] = 0; 657 658 ret = ftrace_set_clr_event(tr, parser.buffer + !set, set); 659 if (ret) 660 goto out_put; 661 } 662 663 ret = read; 664 665 out_put: 666 trace_parser_put(&parser); 667 668 return ret; 669} 670 671static void * 672t_next(struct seq_file *m, void *v, loff_t *pos) 673{ 674 struct ftrace_event_file *file = v; 675 struct ftrace_event_call *call; 676 struct trace_array *tr = m->private; 677 678 (*pos)++; 679 680 list_for_each_entry_continue(file, &tr->events, list) { 681 call = file->event_call; 682 /* 683 * The ftrace subsystem is for showing formats only. 684 * They can not be enabled or disabled via the event files. 685 */ 686 if (call->class && call->class->reg && 687 !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) 688 return file; 689 } 690 691 return NULL; 692} 693 694static void *t_start(struct seq_file *m, loff_t *pos) 695{ 696 struct ftrace_event_file *file; 697 struct trace_array *tr = m->private; 698 loff_t l; 699 700 mutex_lock(&event_mutex); 701 702 file = list_entry(&tr->events, struct ftrace_event_file, list); 703 for (l = 0; l <= *pos; ) { 704 file = t_next(m, file, &l); 705 if (!file) 706 break; 707 } 708 return file; 709} 710 711static void * 712s_next(struct seq_file *m, void *v, loff_t *pos) 713{ 714 struct ftrace_event_file *file = v; 715 struct trace_array *tr = m->private; 716 717 (*pos)++; 718 719 list_for_each_entry_continue(file, &tr->events, list) { 720 if (file->flags & FTRACE_EVENT_FL_ENABLED) 721 return file; 722 } 723 724 return NULL; 725} 726 727static void *s_start(struct seq_file *m, loff_t *pos) 728{ 729 struct ftrace_event_file *file; 730 struct trace_array *tr = m->private; 731 loff_t l; 732 733 mutex_lock(&event_mutex); 734 735 file = list_entry(&tr->events, struct ftrace_event_file, list); 736 for (l = 0; l <= *pos; ) { 737 file = s_next(m, file, &l); 738 if (!file) 739 break; 740 } 741 return file; 742} 743 744static int t_show(struct seq_file *m, void *v) 745{ 746 struct ftrace_event_file *file = v; 747 struct ftrace_event_call *call = file->event_call; 748 749 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) 750 seq_printf(m, "%s:", call->class->system); 751 seq_printf(m, "%s\n", ftrace_event_name(call)); 752 753 return 0; 754} 755 756static void t_stop(struct seq_file *m, void *p) 757{ 758 mutex_unlock(&event_mutex); 759} 760 761static ssize_t 762event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, 763 loff_t *ppos) 764{ 765 struct ftrace_event_file *file; 766 unsigned long flags; 767 char buf[4] = "0"; 768 769 mutex_lock(&event_mutex); 770 file = event_file_data(filp); 771 if (likely(file)) 772 flags = file->flags; 773 mutex_unlock(&event_mutex); 774 775 if (!file) 776 return -ENODEV; 777 778 if (flags & FTRACE_EVENT_FL_ENABLED && 779 !(flags & FTRACE_EVENT_FL_SOFT_DISABLED)) 780 strcpy(buf, "1"); 781 782 if (flags & FTRACE_EVENT_FL_SOFT_DISABLED || 783 flags & FTRACE_EVENT_FL_SOFT_MODE) 784 strcat(buf, "*"); 785 786 strcat(buf, "\n"); 787 788 return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf)); 789} 790 791static ssize_t 792event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, 793 loff_t *ppos) 794{ 795 struct ftrace_event_file *file; 796 unsigned long val; 797 int ret; 798 799 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 800 if (ret) 801 return ret; 802 803 ret = tracing_update_buffers(); 804 if (ret < 0) 805 return ret; 806 807 switch (val) { 808 case 0: 809 case 1: 810 ret = -ENODEV; 811 mutex_lock(&event_mutex); 812 file = event_file_data(filp); 813 if (likely(file)) 814 ret = ftrace_event_enable_disable(file, val); 815 mutex_unlock(&event_mutex); 816 break; 817 818 default: 819 return -EINVAL; 820 } 821 822 *ppos += cnt; 823 824 return ret ? ret : cnt; 825} 826 827static ssize_t 828system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, 829 loff_t *ppos) 830{ 831 const char set_to_char[4] = { '?', '0', '1', 'X' }; 832 struct ftrace_subsystem_dir *dir = filp->private_data; 833 struct event_subsystem *system = dir->subsystem; 834 struct ftrace_event_call *call; 835 struct ftrace_event_file *file; 836 struct trace_array *tr = dir->tr; 837 char buf[2]; 838 int set = 0; 839 int ret; 840 841 mutex_lock(&event_mutex); 842 list_for_each_entry(file, &tr->events, list) { 843 call = file->event_call; 844 if (!ftrace_event_name(call) || !call->class || !call->class->reg) 845 continue; 846 847 if (system && strcmp(call->class->system, system->name) != 0) 848 continue; 849 850 /* 851 * We need to find out if all the events are set 852 * or if all events or cleared, or if we have 853 * a mixture. 854 */ 855 set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED)); 856 857 /* 858 * If we have a mixture, no need to look further. 859 */ 860 if (set == 3) 861 break; 862 } 863 mutex_unlock(&event_mutex); 864 865 buf[0] = set_to_char[set]; 866 buf[1] = '\n'; 867 868 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 869 870 return ret; 871} 872 873static ssize_t 874system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, 875 loff_t *ppos) 876{ 877 struct ftrace_subsystem_dir *dir = filp->private_data; 878 struct event_subsystem *system = dir->subsystem; 879 const char *name = NULL; 880 unsigned long val; 881 ssize_t ret; 882 883 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 884 if (ret) 885 return ret; 886 887 ret = tracing_update_buffers(); 888 if (ret < 0) 889 return ret; 890 891 if (val != 0 && val != 1) 892 return -EINVAL; 893 894 /* 895 * Opening of "enable" adds a ref count to system, 896 * so the name is safe to use. 897 */ 898 if (system) 899 name = system->name; 900 901 ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val); 902 if (ret) 903 goto out; 904 905 ret = cnt; 906 907out: 908 *ppos += cnt; 909 910 return ret; 911} 912 913enum { 914 FORMAT_HEADER = 1, 915 FORMAT_FIELD_SEPERATOR = 2, 916 FORMAT_PRINTFMT = 3, 917}; 918 919static void *f_next(struct seq_file *m, void *v, loff_t *pos) 920{ 921 struct ftrace_event_call *call = event_file_data(m->private); 922 struct list_head *common_head = &ftrace_common_fields; 923 struct list_head *head = trace_get_fields(call); 924 struct list_head *node = v; 925 926 (*pos)++; 927 928 switch ((unsigned long)v) { 929 case FORMAT_HEADER: 930 node = common_head; 931 break; 932 933 case FORMAT_FIELD_SEPERATOR: 934 node = head; 935 break; 936 937 case FORMAT_PRINTFMT: 938 /* all done */ 939 return NULL; 940 } 941 942 node = node->prev; 943 if (node == common_head) 944 return (void *)FORMAT_FIELD_SEPERATOR; 945 else if (node == head) 946 return (void *)FORMAT_PRINTFMT; 947 else 948 return node; 949} 950 951static int f_show(struct seq_file *m, void *v) 952{ 953 struct ftrace_event_call *call = event_file_data(m->private); 954 struct ftrace_event_field *field; 955 const char *array_descriptor; 956 957 switch ((unsigned long)v) { 958 case FORMAT_HEADER: 959 seq_printf(m, "name: %s\n", ftrace_event_name(call)); 960 seq_printf(m, "ID: %d\n", call->event.type); 961 seq_puts(m, "format:\n"); 962 return 0; 963 964 case FORMAT_FIELD_SEPERATOR: 965 seq_putc(m, '\n'); 966 return 0; 967 968 case FORMAT_PRINTFMT: 969 seq_printf(m, "\nprint fmt: %s\n", 970 call->print_fmt); 971 return 0; 972 } 973 974 field = list_entry(v, struct ftrace_event_field, link); 975 /* 976 * Smartly shows the array type(except dynamic array). 977 * Normal: 978 * field:TYPE VAR 979 * If TYPE := TYPE[LEN], it is shown: 980 * field:TYPE VAR[LEN] 981 */ 982 array_descriptor = strchr(field->type, '['); 983 984 if (!strncmp(field->type, "__data_loc", 10)) 985 array_descriptor = NULL; 986 987 if (!array_descriptor) 988 seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n", 989 field->type, field->name, field->offset, 990 field->size, !!field->is_signed); 991 else 992 seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n", 993 (int)(array_descriptor - field->type), 994 field->type, field->name, 995 array_descriptor, field->offset, 996 field->size, !!field->is_signed); 997 998 return 0; 999} 1000 1001static void *f_start(struct seq_file *m, loff_t *pos) 1002{ 1003 void *p = (void *)FORMAT_HEADER; 1004 loff_t l = 0; 1005 1006 /* ->stop() is called even if ->start() fails */ 1007 mutex_lock(&event_mutex); 1008 if (!event_file_data(m->private)) 1009 return ERR_PTR(-ENODEV); 1010 1011 while (l < *pos && p) 1012 p = f_next(m, p, &l); 1013 1014 return p; 1015} 1016 1017static void f_stop(struct seq_file *m, void *p) 1018{ 1019 mutex_unlock(&event_mutex); 1020} 1021 1022static const struct seq_operations trace_format_seq_ops = { 1023 .start = f_start, 1024 .next = f_next, 1025 .stop = f_stop, 1026 .show = f_show, 1027}; 1028 1029static int trace_format_open(struct inode *inode, struct file *file) 1030{ 1031 struct seq_file *m; 1032 int ret; 1033 1034 ret = seq_open(file, &trace_format_seq_ops); 1035 if (ret < 0) 1036 return ret; 1037 1038 m = file->private_data; 1039 m->private = file; 1040 1041 return 0; 1042} 1043 1044static ssize_t 1045event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) 1046{ 1047 int id = (long)event_file_data(filp); 1048 char buf[32]; 1049 int len; 1050 1051 if (*ppos) 1052 return 0; 1053 1054 if (unlikely(!id)) 1055 return -ENODEV; 1056 1057 len = sprintf(buf, "%d\n", id); 1058 1059 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); 1060} 1061 1062static ssize_t 1063event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, 1064 loff_t *ppos) 1065{ 1066 struct ftrace_event_file *file; 1067 struct trace_seq *s; 1068 int r = -ENODEV; 1069 1070 if (*ppos) 1071 return 0; 1072 1073 s = kmalloc(sizeof(*s), GFP_KERNEL); 1074 1075 if (!s) 1076 return -ENOMEM; 1077 1078 trace_seq_init(s); 1079 1080 mutex_lock(&event_mutex); 1081 file = event_file_data(filp); 1082 if (file) 1083 print_event_filter(file, s); 1084 mutex_unlock(&event_mutex); 1085 1086 if (file) 1087 r = simple_read_from_buffer(ubuf, cnt, ppos, 1088 s->buffer, trace_seq_used(s)); 1089 1090 kfree(s); 1091 1092 return r; 1093} 1094 1095static ssize_t 1096event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, 1097 loff_t *ppos) 1098{ 1099 struct ftrace_event_file *file; 1100 char *buf; 1101 int err = -ENODEV; 1102 1103 if (cnt >= PAGE_SIZE) 1104 return -EINVAL; 1105 1106 buf = (char *)__get_free_page(GFP_TEMPORARY); 1107 if (!buf) 1108 return -ENOMEM; 1109 1110 if (copy_from_user(buf, ubuf, cnt)) { 1111 free_page((unsigned long) buf); 1112 return -EFAULT; 1113 } 1114 buf[cnt] = '\0'; 1115 1116 mutex_lock(&event_mutex); 1117 file = event_file_data(filp); 1118 if (file) 1119 err = apply_event_filter(file, buf); 1120 mutex_unlock(&event_mutex); 1121 1122 free_page((unsigned long) buf); 1123 if (err < 0) 1124 return err; 1125 1126 *ppos += cnt; 1127 1128 return cnt; 1129} 1130 1131static LIST_HEAD(event_subsystems); 1132 1133static int subsystem_open(struct inode *inode, struct file *filp) 1134{ 1135 struct event_subsystem *system = NULL; 1136 struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */ 1137 struct trace_array *tr; 1138 int ret; 1139 1140 if (tracing_is_disabled()) 1141 return -ENODEV; 1142 1143 /* Make sure the system still exists */ 1144 mutex_lock(&trace_types_lock); 1145 mutex_lock(&event_mutex); 1146 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 1147 list_for_each_entry(dir, &tr->systems, list) { 1148 if (dir == inode->i_private) { 1149 /* Don't open systems with no events */ 1150 if (dir->nr_events) { 1151 __get_system_dir(dir); 1152 system = dir->subsystem; 1153 } 1154 goto exit_loop; 1155 } 1156 } 1157 } 1158 exit_loop: 1159 mutex_unlock(&event_mutex); 1160 mutex_unlock(&trace_types_lock); 1161 1162 if (!system) 1163 return -ENODEV; 1164 1165 /* Some versions of gcc think dir can be uninitialized here */ 1166 WARN_ON(!dir); 1167 1168 /* Still need to increment the ref count of the system */ 1169 if (trace_array_get(tr) < 0) { 1170 put_system(dir); 1171 return -ENODEV; 1172 } 1173 1174 ret = tracing_open_generic(inode, filp); 1175 if (ret < 0) { 1176 trace_array_put(tr); 1177 put_system(dir); 1178 } 1179 1180 return ret; 1181} 1182 1183static int system_tr_open(struct inode *inode, struct file *filp) 1184{ 1185 struct ftrace_subsystem_dir *dir; 1186 struct trace_array *tr = inode->i_private; 1187 int ret; 1188 1189 if (tracing_is_disabled()) 1190 return -ENODEV; 1191 1192 if (trace_array_get(tr) < 0) 1193 return -ENODEV; 1194 1195 /* Make a temporary dir that has no system but points to tr */ 1196 dir = kzalloc(sizeof(*dir), GFP_KERNEL); 1197 if (!dir) { 1198 trace_array_put(tr); 1199 return -ENOMEM; 1200 } 1201 1202 dir->tr = tr; 1203 1204 ret = tracing_open_generic(inode, filp); 1205 if (ret < 0) { 1206 trace_array_put(tr); 1207 kfree(dir); 1208 return ret; 1209 } 1210 1211 filp->private_data = dir; 1212 1213 return 0; 1214} 1215 1216static int subsystem_release(struct inode *inode, struct file *file) 1217{ 1218 struct ftrace_subsystem_dir *dir = file->private_data; 1219 1220 trace_array_put(dir->tr); 1221 1222 /* 1223 * If dir->subsystem is NULL, then this is a temporary 1224 * descriptor that was made for a trace_array to enable 1225 * all subsystems. 1226 */ 1227 if (dir->subsystem) 1228 put_system(dir); 1229 else 1230 kfree(dir); 1231 1232 return 0; 1233} 1234 1235static ssize_t 1236subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt, 1237 loff_t *ppos) 1238{ 1239 struct ftrace_subsystem_dir *dir = filp->private_data; 1240 struct event_subsystem *system = dir->subsystem; 1241 struct trace_seq *s; 1242 int r; 1243 1244 if (*ppos) 1245 return 0; 1246 1247 s = kmalloc(sizeof(*s), GFP_KERNEL); 1248 if (!s) 1249 return -ENOMEM; 1250 1251 trace_seq_init(s); 1252 1253 print_subsystem_event_filter(system, s); 1254 r = simple_read_from_buffer(ubuf, cnt, ppos, 1255 s->buffer, trace_seq_used(s)); 1256 1257 kfree(s); 1258 1259 return r; 1260} 1261 1262static ssize_t 1263subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, 1264 loff_t *ppos) 1265{ 1266 struct ftrace_subsystem_dir *dir = filp->private_data; 1267 char *buf; 1268 int err; 1269 1270 if (cnt >= PAGE_SIZE) 1271 return -EINVAL; 1272 1273 buf = (char *)__get_free_page(GFP_TEMPORARY); 1274 if (!buf) 1275 return -ENOMEM; 1276 1277 if (copy_from_user(buf, ubuf, cnt)) { 1278 free_page((unsigned long) buf); 1279 return -EFAULT; 1280 } 1281 buf[cnt] = '\0'; 1282 1283 err = apply_subsystem_event_filter(dir, buf); 1284 free_page((unsigned long) buf); 1285 if (err < 0) 1286 return err; 1287 1288 *ppos += cnt; 1289 1290 return cnt; 1291} 1292 1293static ssize_t 1294show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) 1295{ 1296 int (*func)(struct trace_seq *s) = filp->private_data; 1297 struct trace_seq *s; 1298 int r; 1299 1300 if (*ppos) 1301 return 0; 1302 1303 s = kmalloc(sizeof(*s), GFP_KERNEL); 1304 if (!s) 1305 return -ENOMEM; 1306 1307 trace_seq_init(s); 1308 1309 func(s); 1310 r = simple_read_from_buffer(ubuf, cnt, ppos, 1311 s->buffer, trace_seq_used(s)); 1312 1313 kfree(s); 1314 1315 return r; 1316} 1317 1318static int ftrace_event_avail_open(struct inode *inode, struct file *file); 1319static int ftrace_event_set_open(struct inode *inode, struct file *file); 1320static int ftrace_event_release(struct inode *inode, struct file *file); 1321 1322static const struct seq_operations show_event_seq_ops = { 1323 .start = t_start, 1324 .next = t_next, 1325 .show = t_show, 1326 .stop = t_stop, 1327}; 1328 1329static const struct seq_operations show_set_event_seq_ops = { 1330 .start = s_start, 1331 .next = s_next, 1332 .show = t_show, 1333 .stop = t_stop, 1334}; 1335 1336static const struct file_operations ftrace_avail_fops = { 1337 .open = ftrace_event_avail_open, 1338 .read = seq_read, 1339 .llseek = seq_lseek, 1340 .release = seq_release, 1341}; 1342 1343static const struct file_operations ftrace_set_event_fops = { 1344 .open = ftrace_event_set_open, 1345 .read = seq_read, 1346 .write = ftrace_event_write, 1347 .llseek = seq_lseek, 1348 .release = ftrace_event_release, 1349}; 1350 1351static const struct file_operations ftrace_enable_fops = { 1352 .open = tracing_open_generic, 1353 .read = event_enable_read, 1354 .write = event_enable_write, 1355 .llseek = default_llseek, 1356}; 1357 1358static const struct file_operations ftrace_event_format_fops = { 1359 .open = trace_format_open, 1360 .read = seq_read, 1361 .llseek = seq_lseek, 1362 .release = seq_release, 1363}; 1364 1365static const struct file_operations ftrace_event_id_fops = { 1366 .read = event_id_read, 1367 .llseek = default_llseek, 1368}; 1369 1370static const struct file_operations ftrace_event_filter_fops = { 1371 .open = tracing_open_generic, 1372 .read = event_filter_read, 1373 .write = event_filter_write, 1374 .llseek = default_llseek, 1375}; 1376 1377static const struct file_operations ftrace_subsystem_filter_fops = { 1378 .open = subsystem_open, 1379 .read = subsystem_filter_read, 1380 .write = subsystem_filter_write, 1381 .llseek = default_llseek, 1382 .release = subsystem_release, 1383}; 1384 1385static const struct file_operations ftrace_system_enable_fops = { 1386 .open = subsystem_open, 1387 .read = system_enable_read, 1388 .write = system_enable_write, 1389 .llseek = default_llseek, 1390 .release = subsystem_release, 1391}; 1392 1393static const struct file_operations ftrace_tr_enable_fops = { 1394 .open = system_tr_open, 1395 .read = system_enable_read, 1396 .write = system_enable_write, 1397 .llseek = default_llseek, 1398 .release = subsystem_release, 1399}; 1400 1401static const struct file_operations ftrace_show_header_fops = { 1402 .open = tracing_open_generic, 1403 .read = show_header, 1404 .llseek = default_llseek, 1405}; 1406 1407static int 1408ftrace_event_open(struct inode *inode, struct file *file, 1409 const struct seq_operations *seq_ops) 1410{ 1411 struct seq_file *m; 1412 int ret; 1413 1414 ret = seq_open(file, seq_ops); 1415 if (ret < 0) 1416 return ret; 1417 m = file->private_data; 1418 /* copy tr over to seq ops */ 1419 m->private = inode->i_private; 1420 1421 return ret; 1422} 1423 1424static int ftrace_event_release(struct inode *inode, struct file *file) 1425{ 1426 struct trace_array *tr = inode->i_private; 1427 1428 trace_array_put(tr); 1429 1430 return seq_release(inode, file); 1431} 1432 1433static int 1434ftrace_event_avail_open(struct inode *inode, struct file *file) 1435{ 1436 const struct seq_operations *seq_ops = &show_event_seq_ops; 1437 1438 return ftrace_event_open(inode, file, seq_ops); 1439} 1440 1441static int 1442ftrace_event_set_open(struct inode *inode, struct file *file) 1443{ 1444 const struct seq_operations *seq_ops = &show_set_event_seq_ops; 1445 struct trace_array *tr = inode->i_private; 1446 int ret; 1447 1448 if (trace_array_get(tr) < 0) 1449 return -ENODEV; 1450 1451 if ((file->f_mode & FMODE_WRITE) && 1452 (file->f_flags & O_TRUNC)) 1453 ftrace_clear_events(tr); 1454 1455 ret = ftrace_event_open(inode, file, seq_ops); 1456 if (ret < 0) 1457 trace_array_put(tr); 1458 return ret; 1459} 1460 1461static struct event_subsystem * 1462create_new_subsystem(const char *name) 1463{ 1464 struct event_subsystem *system; 1465 1466 /* need to create new entry */ 1467 system = kmalloc(sizeof(*system), GFP_KERNEL); 1468 if (!system) 1469 return NULL; 1470 1471 system->ref_count = 1; 1472 1473 /* Only allocate if dynamic (kprobes and modules) */ 1474 if (!core_kernel_data((unsigned long)name)) { 1475 system->ref_count |= SYSTEM_FL_FREE_NAME; 1476 system->name = kstrdup(name, GFP_KERNEL); 1477 if (!system->name) 1478 goto out_free; 1479 } else 1480 system->name = name; 1481 1482 system->filter = NULL; 1483 1484 system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL); 1485 if (!system->filter) 1486 goto out_free; 1487 1488 list_add(&system->list, &event_subsystems); 1489 1490 return system; 1491 1492 out_free: 1493 if (system->ref_count & SYSTEM_FL_FREE_NAME) 1494 kfree(system->name); 1495 kfree(system); 1496 return NULL; 1497} 1498 1499static struct dentry * 1500event_subsystem_dir(struct trace_array *tr, const char *name, 1501 struct ftrace_event_file *file, struct dentry *parent) 1502{ 1503 struct ftrace_subsystem_dir *dir; 1504 struct event_subsystem *system; 1505 struct dentry *entry; 1506 1507 /* First see if we did not already create this dir */ 1508 list_for_each_entry(dir, &tr->systems, list) { 1509 system = dir->subsystem; 1510 if (strcmp(system->name, name) == 0) { 1511 dir->nr_events++; 1512 file->system = dir; 1513 return dir->entry; 1514 } 1515 } 1516 1517 /* Now see if the system itself exists. */ 1518 list_for_each_entry(system, &event_subsystems, list) { 1519 if (strcmp(system->name, name) == 0) 1520 break; 1521 } 1522 /* Reset system variable when not found */ 1523 if (&system->list == &event_subsystems) 1524 system = NULL; 1525 1526 dir = kmalloc(sizeof(*dir), GFP_KERNEL); 1527 if (!dir) 1528 goto out_fail; 1529 1530 if (!system) { 1531 system = create_new_subsystem(name); 1532 if (!system) 1533 goto out_free; 1534 } else 1535 __get_system(system); 1536 1537 dir->entry = tracefs_create_dir(name, parent); 1538 if (!dir->entry) { 1539 pr_warn("Failed to create system directory %s\n", name); 1540 __put_system(system); 1541 goto out_free; 1542 } 1543 1544 dir->tr = tr; 1545 dir->ref_count = 1; 1546 dir->nr_events = 1; 1547 dir->subsystem = system; 1548 file->system = dir; 1549 1550 entry = tracefs_create_file("filter", 0644, dir->entry, dir, 1551 &ftrace_subsystem_filter_fops); 1552 if (!entry) { 1553 kfree(system->filter); 1554 system->filter = NULL; 1555 pr_warn("Could not create tracefs '%s/filter' entry\n", name); 1556 } 1557 1558 trace_create_file("enable", 0644, dir->entry, dir, 1559 &ftrace_system_enable_fops); 1560 1561 list_add(&dir->list, &tr->systems); 1562 1563 return dir->entry; 1564 1565 out_free: 1566 kfree(dir); 1567 out_fail: 1568 /* Only print this message if failed on memory allocation */ 1569 if (!dir || !system) 1570 pr_warn("No memory to create event subsystem %s\n", name); 1571 return NULL; 1572} 1573 1574static int 1575event_create_dir(struct dentry *parent, struct ftrace_event_file *file) 1576{ 1577 struct ftrace_event_call *call = file->event_call; 1578 struct trace_array *tr = file->tr; 1579 struct list_head *head; 1580 struct dentry *d_events; 1581 const char *name; 1582 int ret; 1583 1584 /* 1585 * If the trace point header did not define TRACE_SYSTEM 1586 * then the system would be called "TRACE_SYSTEM". 1587 */ 1588 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) { 1589 d_events = event_subsystem_dir(tr, call->class->system, file, parent); 1590 if (!d_events) 1591 return -ENOMEM; 1592 } else 1593 d_events = parent; 1594 1595 name = ftrace_event_name(call); 1596 file->dir = tracefs_create_dir(name, d_events); 1597 if (!file->dir) { 1598 pr_warn("Could not create tracefs '%s' directory\n", name); 1599 return -1; 1600 } 1601 1602 if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) 1603 trace_create_file("enable", 0644, file->dir, file, 1604 &ftrace_enable_fops); 1605 1606#ifdef CONFIG_PERF_EVENTS 1607 if (call->event.type && call->class->reg) 1608 trace_create_file("id", 0444, file->dir, 1609 (void *)(long)call->event.type, 1610 &ftrace_event_id_fops); 1611#endif 1612 1613 /* 1614 * Other events may have the same class. Only update 1615 * the fields if they are not already defined. 1616 */ 1617 head = trace_get_fields(call); 1618 if (list_empty(head)) { 1619 ret = call->class->define_fields(call); 1620 if (ret < 0) { 1621 pr_warn("Could not initialize trace point events/%s\n", 1622 name); 1623 return -1; 1624 } 1625 } 1626 trace_create_file("filter", 0644, file->dir, file, 1627 &ftrace_event_filter_fops); 1628 1629 /* 1630 * Only event directories that can be enabled should have 1631 * triggers. 1632 */ 1633 if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) 1634 trace_create_file("trigger", 0644, file->dir, file, 1635 &event_trigger_fops); 1636 1637 trace_create_file("format", 0444, file->dir, call, 1638 &ftrace_event_format_fops); 1639 1640 return 0; 1641} 1642 1643static void remove_event_from_tracers(struct ftrace_event_call *call) 1644{ 1645 struct ftrace_event_file *file; 1646 struct trace_array *tr; 1647 1648 do_for_each_event_file_safe(tr, file) { 1649 if (file->event_call != call) 1650 continue; 1651 1652 remove_event_file_dir(file); 1653 /* 1654 * The do_for_each_event_file_safe() is 1655 * a double loop. After finding the call for this 1656 * trace_array, we use break to jump to the next 1657 * trace_array. 1658 */ 1659 break; 1660 } while_for_each_event_file(); 1661} 1662 1663static void event_remove(struct ftrace_event_call *call) 1664{ 1665 struct trace_array *tr; 1666 struct ftrace_event_file *file; 1667 1668 do_for_each_event_file(tr, file) { 1669 if (file->event_call != call) 1670 continue; 1671 ftrace_event_enable_disable(file, 0); 1672 /* 1673 * The do_for_each_event_file() is 1674 * a double loop. After finding the call for this 1675 * trace_array, we use break to jump to the next 1676 * trace_array. 1677 */ 1678 break; 1679 } while_for_each_event_file(); 1680 1681 if (call->event.funcs) 1682 __unregister_ftrace_event(&call->event); 1683 remove_event_from_tracers(call); 1684 list_del(&call->list); 1685} 1686 1687static int event_init(struct ftrace_event_call *call) 1688{ 1689 int ret = 0; 1690 const char *name; 1691 1692 name = ftrace_event_name(call); 1693 if (WARN_ON(!name)) 1694 return -EINVAL; 1695 1696 if (call->class->raw_init) { 1697 ret = call->class->raw_init(call); 1698 if (ret < 0 && ret != -ENOSYS) 1699 pr_warn("Could not initialize trace events/%s\n", name); 1700 } 1701 1702 return ret; 1703} 1704 1705static int 1706__register_event(struct ftrace_event_call *call, struct module *mod) 1707{ 1708 int ret; 1709 1710 ret = event_init(call); 1711 if (ret < 0) 1712 return ret; 1713 1714 list_add(&call->list, &ftrace_events); 1715 call->mod = mod; 1716 1717 return 0; 1718} 1719 1720static char *enum_replace(char *ptr, struct trace_enum_map *map, int len) 1721{ 1722 int rlen; 1723 int elen; 1724 1725 /* Find the length of the enum value as a string */ 1726 elen = snprintf(ptr, 0, "%ld", map->enum_value); 1727 /* Make sure there's enough room to replace the string with the value */ 1728 if (len < elen) 1729 return NULL; 1730 1731 snprintf(ptr, elen + 1, "%ld", map->enum_value); 1732 1733 /* Get the rest of the string of ptr */ 1734 rlen = strlen(ptr + len); 1735 memmove(ptr + elen, ptr + len, rlen); 1736 /* Make sure we end the new string */ 1737 ptr[elen + rlen] = 0; 1738 1739 return ptr + elen; 1740} 1741 1742static void update_event_printk(struct ftrace_event_call *call, 1743 struct trace_enum_map *map) 1744{ 1745 char *ptr; 1746 int quote = 0; 1747 int len = strlen(map->enum_string); 1748 1749 for (ptr = call->print_fmt; *ptr; ptr++) { 1750 if (*ptr == '\\') { 1751 ptr++; 1752 /* paranoid */ 1753 if (!*ptr) 1754 break; 1755 continue; 1756 } 1757 if (*ptr == '"') { 1758 quote ^= 1; 1759 continue; 1760 } 1761 if (quote) 1762 continue; 1763 if (isdigit(*ptr)) { 1764 /* skip numbers */ 1765 do { 1766 ptr++; 1767 /* Check for alpha chars like ULL */ 1768 } while (isalnum(*ptr)); 1769 if (!*ptr) 1770 break; 1771 /* 1772 * A number must have some kind of delimiter after 1773 * it, and we can ignore that too. 1774 */ 1775 continue; 1776 } 1777 if (isalpha(*ptr) || *ptr == '_') { 1778 if (strncmp(map->enum_string, ptr, len) == 0 && 1779 !isalnum(ptr[len]) && ptr[len] != '_') { 1780 ptr = enum_replace(ptr, map, len); 1781 /* Hmm, enum string smaller than value */ 1782 if (WARN_ON_ONCE(!ptr)) 1783 return; 1784 /* 1785 * No need to decrement here, as enum_replace() 1786 * returns the pointer to the character passed 1787 * the enum, and two enums can not be placed 1788 * back to back without something in between. 1789 * We can skip that something in between. 1790 */ 1791 continue; 1792 } 1793 skip_more: 1794 do { 1795 ptr++; 1796 } while (isalnum(*ptr) || *ptr == '_'); 1797 if (!*ptr) 1798 break; 1799 /* 1800 * If what comes after this variable is a '.' or 1801 * '->' then we can continue to ignore that string. 1802 */ 1803 if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) { 1804 ptr += *ptr == '.' ? 1 : 2; 1805 if (!*ptr) 1806 break; 1807 goto skip_more; 1808 } 1809 /* 1810 * Once again, we can skip the delimiter that came 1811 * after the string. 1812 */ 1813 continue; 1814 } 1815 } 1816} 1817 1818void trace_event_enum_update(struct trace_enum_map **map, int len) 1819{ 1820 struct ftrace_event_call *call, *p; 1821 const char *last_system = NULL; 1822 int last_i; 1823 int i; 1824 1825 down_write(&trace_event_sem); 1826 list_for_each_entry_safe(call, p, &ftrace_events, list) { 1827 /* events are usually grouped together with systems */ 1828 if (!last_system || call->class->system != last_system) { 1829 last_i = 0; 1830 last_system = call->class->system; 1831 } 1832 1833 for (i = last_i; i < len; i++) { 1834 if (call->class->system == map[i]->system) { 1835 /* Save the first system if need be */ 1836 if (!last_i) 1837 last_i = i; 1838 update_event_printk(call, map[i]); 1839 } 1840 } 1841 } 1842 up_write(&trace_event_sem); 1843} 1844 1845static struct ftrace_event_file * 1846trace_create_new_event(struct ftrace_event_call *call, 1847 struct trace_array *tr) 1848{ 1849 struct ftrace_event_file *file; 1850 1851 file = kmem_cache_alloc(file_cachep, GFP_TRACE); 1852 if (!file) 1853 return NULL; 1854 1855 file->event_call = call; 1856 file->tr = tr; 1857 atomic_set(&file->sm_ref, 0); 1858 atomic_set(&file->tm_ref, 0); 1859 INIT_LIST_HEAD(&file->triggers); 1860 list_add(&file->list, &tr->events); 1861 1862 return file; 1863} 1864 1865/* Add an event to a trace directory */ 1866static int 1867__trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr) 1868{ 1869 struct ftrace_event_file *file; 1870 1871 file = trace_create_new_event(call, tr); 1872 if (!file) 1873 return -ENOMEM; 1874 1875 return event_create_dir(tr->event_dir, file); 1876} 1877 1878/* 1879 * Just create a decriptor for early init. A descriptor is required 1880 * for enabling events at boot. We want to enable events before 1881 * the filesystem is initialized. 1882 */ 1883static __init int 1884__trace_early_add_new_event(struct ftrace_event_call *call, 1885 struct trace_array *tr) 1886{ 1887 struct ftrace_event_file *file; 1888 1889 file = trace_create_new_event(call, tr); 1890 if (!file) 1891 return -ENOMEM; 1892 1893 return 0; 1894} 1895 1896struct ftrace_module_file_ops; 1897static void __add_event_to_tracers(struct ftrace_event_call *call); 1898 1899/* Add an additional event_call dynamically */ 1900int trace_add_event_call(struct ftrace_event_call *call) 1901{ 1902 int ret; 1903 mutex_lock(&trace_types_lock); 1904 mutex_lock(&event_mutex); 1905 1906 ret = __register_event(call, NULL); 1907 if (ret >= 0) 1908 __add_event_to_tracers(call); 1909 1910 mutex_unlock(&event_mutex); 1911 mutex_unlock(&trace_types_lock); 1912 return ret; 1913} 1914 1915/* 1916 * Must be called under locking of trace_types_lock, event_mutex and 1917 * trace_event_sem. 1918 */ 1919static void __trace_remove_event_call(struct ftrace_event_call *call) 1920{ 1921 event_remove(call); 1922 trace_destroy_fields(call); 1923 free_event_filter(call->filter); 1924 call->filter = NULL; 1925} 1926 1927static int probe_remove_event_call(struct ftrace_event_call *call) 1928{ 1929 struct trace_array *tr; 1930 struct ftrace_event_file *file; 1931 1932#ifdef CONFIG_PERF_EVENTS 1933 if (call->perf_refcount) 1934 return -EBUSY; 1935#endif 1936 do_for_each_event_file(tr, file) { 1937 if (file->event_call != call) 1938 continue; 1939 /* 1940 * We can't rely on ftrace_event_enable_disable(enable => 0) 1941 * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress 1942 * TRACE_REG_UNREGISTER. 1943 */ 1944 if (file->flags & FTRACE_EVENT_FL_ENABLED) 1945 return -EBUSY; 1946 /* 1947 * The do_for_each_event_file_safe() is 1948 * a double loop. After finding the call for this 1949 * trace_array, we use break to jump to the next 1950 * trace_array. 1951 */ 1952 break; 1953 } while_for_each_event_file(); 1954 1955 __trace_remove_event_call(call); 1956 1957 return 0; 1958} 1959 1960/* Remove an event_call */ 1961int trace_remove_event_call(struct ftrace_event_call *call) 1962{ 1963 int ret; 1964 1965 mutex_lock(&trace_types_lock); 1966 mutex_lock(&event_mutex); 1967 down_write(&trace_event_sem); 1968 ret = probe_remove_event_call(call); 1969 up_write(&trace_event_sem); 1970 mutex_unlock(&event_mutex); 1971 mutex_unlock(&trace_types_lock); 1972 1973 return ret; 1974} 1975 1976#define for_each_event(event, start, end) \ 1977 for (event = start; \ 1978 (unsigned long)event < (unsigned long)end; \ 1979 event++) 1980 1981#ifdef CONFIG_MODULES 1982 1983static void trace_module_add_events(struct module *mod) 1984{ 1985 struct ftrace_event_call **call, **start, **end; 1986 1987 if (!mod->num_trace_events) 1988 return; 1989 1990 /* Don't add infrastructure for mods without tracepoints */ 1991 if (trace_module_has_bad_taint(mod)) { 1992 pr_err("%s: module has bad taint, not creating trace events\n", 1993 mod->name); 1994 return; 1995 } 1996 1997 start = mod->trace_events; 1998 end = mod->trace_events + mod->num_trace_events; 1999 2000 for_each_event(call, start, end) { 2001 __register_event(*call, mod); 2002 __add_event_to_tracers(*call); 2003 } 2004} 2005 2006static void trace_module_remove_events(struct module *mod) 2007{ 2008 struct ftrace_event_call *call, *p; 2009 bool clear_trace = false; 2010 2011 down_write(&trace_event_sem); 2012 list_for_each_entry_safe(call, p, &ftrace_events, list) { 2013 if (call->mod == mod) { 2014 if (call->flags & TRACE_EVENT_FL_WAS_ENABLED) 2015 clear_trace = true; 2016 __trace_remove_event_call(call); 2017 } 2018 } 2019 up_write(&trace_event_sem); 2020 2021 /* 2022 * It is safest to reset the ring buffer if the module being unloaded 2023 * registered any events that were used. The only worry is if 2024 * a new module gets loaded, and takes on the same id as the events 2025 * of this module. When printing out the buffer, traced events left 2026 * over from this module may be passed to the new module events and 2027 * unexpected results may occur. 2028 */ 2029 if (clear_trace) 2030 tracing_reset_all_online_cpus(); 2031} 2032 2033static int trace_module_notify(struct notifier_block *self, 2034 unsigned long val, void *data) 2035{ 2036 struct module *mod = data; 2037 2038 mutex_lock(&trace_types_lock); 2039 mutex_lock(&event_mutex); 2040 switch (val) { 2041 case MODULE_STATE_COMING: 2042 trace_module_add_events(mod); 2043 break; 2044 case MODULE_STATE_GOING: 2045 trace_module_remove_events(mod); 2046 break; 2047 } 2048 mutex_unlock(&event_mutex); 2049 mutex_unlock(&trace_types_lock); 2050 2051 return 0; 2052} 2053 2054static struct notifier_block trace_module_nb = { 2055 .notifier_call = trace_module_notify, 2056 .priority = 1, /* higher than trace.c module notify */ 2057}; 2058#endif /* CONFIG_MODULES */ 2059 2060/* Create a new event directory structure for a trace directory. */ 2061static void 2062__trace_add_event_dirs(struct trace_array *tr) 2063{ 2064 struct ftrace_event_call *call; 2065 int ret; 2066 2067 list_for_each_entry(call, &ftrace_events, list) { 2068 ret = __trace_add_new_event(call, tr); 2069 if (ret < 0) 2070 pr_warn("Could not create directory for event %s\n", 2071 ftrace_event_name(call)); 2072 } 2073} 2074 2075struct ftrace_event_file * 2076find_event_file(struct trace_array *tr, const char *system, const char *event) 2077{ 2078 struct ftrace_event_file *file; 2079 struct ftrace_event_call *call; 2080 const char *name; 2081 2082 list_for_each_entry(file, &tr->events, list) { 2083 2084 call = file->event_call; 2085 name = ftrace_event_name(call); 2086 2087 if (!name || !call->class || !call->class->reg) 2088 continue; 2089 2090 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) 2091 continue; 2092 2093 if (strcmp(event, name) == 0 && 2094 strcmp(system, call->class->system) == 0) 2095 return file; 2096 } 2097 return NULL; 2098} 2099 2100#ifdef CONFIG_DYNAMIC_FTRACE 2101 2102/* Avoid typos */ 2103#define ENABLE_EVENT_STR "enable_event" 2104#define DISABLE_EVENT_STR "disable_event" 2105 2106struct event_probe_data { 2107 struct ftrace_event_file *file; 2108 unsigned long count; 2109 int ref; 2110 bool enable; 2111}; 2112 2113static void 2114event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data) 2115{ 2116 struct event_probe_data **pdata = (struct event_probe_data **)_data; 2117 struct event_probe_data *data = *pdata; 2118 2119 if (!data) 2120 return; 2121 2122 if (data->enable) 2123 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags); 2124 else 2125 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags); 2126} 2127 2128static void 2129event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data) 2130{ 2131 struct event_probe_data **pdata = (struct event_probe_data **)_data; 2132 struct event_probe_data *data = *pdata; 2133 2134 if (!data) 2135 return; 2136 2137 if (!data->count) 2138 return; 2139 2140 /* Skip if the event is in a state we want to switch to */ 2141 if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)) 2142 return; 2143 2144 if (data->count != -1) 2145 (data->count)--; 2146 2147 event_enable_probe(ip, parent_ip, _data); 2148} 2149 2150static int 2151event_enable_print(struct seq_file *m, unsigned long ip, 2152 struct ftrace_probe_ops *ops, void *_data) 2153{ 2154 struct event_probe_data *data = _data; 2155 2156 seq_printf(m, "%ps:", (void *)ip); 2157 2158 seq_printf(m, "%s:%s:%s", 2159 data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, 2160 data->file->event_call->class->system, 2161 ftrace_event_name(data->file->event_call)); 2162 2163 if (data->count == -1) 2164 seq_puts(m, ":unlimited\n"); 2165 else 2166 seq_printf(m, ":count=%ld\n", data->count); 2167 2168 return 0; 2169} 2170 2171static int 2172event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip, 2173 void **_data) 2174{ 2175 struct event_probe_data **pdata = (struct event_probe_data **)_data; 2176 struct event_probe_data *data = *pdata; 2177 2178 data->ref++; 2179 return 0; 2180} 2181 2182static void 2183event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip, 2184 void **_data) 2185{ 2186 struct event_probe_data **pdata = (struct event_probe_data **)_data; 2187 struct event_probe_data *data = *pdata; 2188 2189 if (WARN_ON_ONCE(data->ref <= 0)) 2190 return; 2191 2192 data->ref--; 2193 if (!data->ref) { 2194 /* Remove the SOFT_MODE flag */ 2195 __ftrace_event_enable_disable(data->file, 0, 1); 2196 module_put(data->file->event_call->mod); 2197 kfree(data); 2198 } 2199 *pdata = NULL; 2200} 2201 2202static struct ftrace_probe_ops event_enable_probe_ops = { 2203 .func = event_enable_probe, 2204 .print = event_enable_print, 2205 .init = event_enable_init, 2206 .free = event_enable_free, 2207}; 2208 2209static struct ftrace_probe_ops event_enable_count_probe_ops = { 2210 .func = event_enable_count_probe, 2211 .print = event_enable_print, 2212 .init = event_enable_init, 2213 .free = event_enable_free, 2214}; 2215 2216static struct ftrace_probe_ops event_disable_probe_ops = { 2217 .func = event_enable_probe, 2218 .print = event_enable_print, 2219 .init = event_enable_init, 2220 .free = event_enable_free, 2221}; 2222 2223static struct ftrace_probe_ops event_disable_count_probe_ops = { 2224 .func = event_enable_count_probe, 2225 .print = event_enable_print, 2226 .init = event_enable_init, 2227 .free = event_enable_free, 2228}; 2229 2230static int 2231event_enable_func(struct ftrace_hash *hash, 2232 char *glob, char *cmd, char *param, int enabled) 2233{ 2234 struct trace_array *tr = top_trace_array(); 2235 struct ftrace_event_file *file; 2236 struct ftrace_probe_ops *ops; 2237 struct event_probe_data *data; 2238 const char *system; 2239 const char *event; 2240 char *number; 2241 bool enable; 2242 int ret; 2243 2244 if (!tr) 2245 return -ENODEV; 2246 2247 /* hash funcs only work with set_ftrace_filter */ 2248 if (!enabled || !param) 2249 return -EINVAL; 2250 2251 system = strsep(¶m, ":"); 2252 if (!param) 2253 return -EINVAL; 2254 2255 event = strsep(¶m, ":"); 2256 2257 mutex_lock(&event_mutex); 2258 2259 ret = -EINVAL; 2260 file = find_event_file(tr, system, event); 2261 if (!file) 2262 goto out; 2263 2264 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0; 2265 2266 if (enable) 2267 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops; 2268 else 2269 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops; 2270 2271 if (glob[0] == '!') { 2272 unregister_ftrace_function_probe_func(glob+1, ops); 2273 ret = 0; 2274 goto out; 2275 } 2276 2277 ret = -ENOMEM; 2278 data = kzalloc(sizeof(*data), GFP_KERNEL); 2279 if (!data) 2280 goto out; 2281 2282 data->enable = enable; 2283 data->count = -1; 2284 data->file = file; 2285 2286 if (!param) 2287 goto out_reg; 2288 2289 number = strsep(¶m, ":"); 2290 2291 ret = -EINVAL; 2292 if (!strlen(number)) 2293 goto out_free; 2294 2295 /* 2296 * We use the callback data field (which is a pointer) 2297 * as our counter. 2298 */ 2299 ret = kstrtoul(number, 0, &data->count); 2300 if (ret) 2301 goto out_free; 2302 2303 out_reg: 2304 /* Don't let event modules unload while probe registered */ 2305 ret = try_module_get(file->event_call->mod); 2306 if (!ret) { 2307 ret = -EBUSY; 2308 goto out_free; 2309 } 2310 2311 ret = __ftrace_event_enable_disable(file, 1, 1); 2312 if (ret < 0) 2313 goto out_put; 2314 ret = register_ftrace_function_probe(glob, ops, data); 2315 /* 2316 * The above returns on success the # of functions enabled, 2317 * but if it didn't find any functions it returns zero. 2318 * Consider no functions a failure too. 2319 */ 2320 if (!ret) { 2321 ret = -ENOENT; 2322 goto out_disable; 2323 } else if (ret < 0) 2324 goto out_disable; 2325 /* Just return zero, not the number of enabled functions */ 2326 ret = 0; 2327 out: 2328 mutex_unlock(&event_mutex); 2329 return ret; 2330 2331 out_disable: 2332 __ftrace_event_enable_disable(file, 0, 1); 2333 out_put: 2334 module_put(file->event_call->mod); 2335 out_free: 2336 kfree(data); 2337 goto out; 2338} 2339 2340static struct ftrace_func_command event_enable_cmd = { 2341 .name = ENABLE_EVENT_STR, 2342 .func = event_enable_func, 2343}; 2344 2345static struct ftrace_func_command event_disable_cmd = { 2346 .name = DISABLE_EVENT_STR, 2347 .func = event_enable_func, 2348}; 2349 2350static __init int register_event_cmds(void) 2351{ 2352 int ret; 2353 2354 ret = register_ftrace_command(&event_enable_cmd); 2355 if (WARN_ON(ret < 0)) 2356 return ret; 2357 ret = register_ftrace_command(&event_disable_cmd); 2358 if (WARN_ON(ret < 0)) 2359 unregister_ftrace_command(&event_enable_cmd); 2360 return ret; 2361} 2362#else 2363static inline int register_event_cmds(void) { return 0; } 2364#endif /* CONFIG_DYNAMIC_FTRACE */ 2365 2366/* 2367 * The top level array has already had its ftrace_event_file 2368 * descriptors created in order to allow for early events to 2369 * be recorded. This function is called after the tracefs has been 2370 * initialized, and we now have to create the files associated 2371 * to the events. 2372 */ 2373static __init void 2374__trace_early_add_event_dirs(struct trace_array *tr) 2375{ 2376 struct ftrace_event_file *file; 2377 int ret; 2378 2379 2380 list_for_each_entry(file, &tr->events, list) { 2381 ret = event_create_dir(tr->event_dir, file); 2382 if (ret < 0) 2383 pr_warn("Could not create directory for event %s\n", 2384 ftrace_event_name(file->event_call)); 2385 } 2386} 2387 2388/* 2389 * For early boot up, the top trace array requires to have 2390 * a list of events that can be enabled. This must be done before 2391 * the filesystem is set up in order to allow events to be traced 2392 * early. 2393 */ 2394static __init void 2395__trace_early_add_events(struct trace_array *tr) 2396{ 2397 struct ftrace_event_call *call; 2398 int ret; 2399 2400 list_for_each_entry(call, &ftrace_events, list) { 2401 /* Early boot up should not have any modules loaded */ 2402 if (WARN_ON_ONCE(call->mod)) 2403 continue; 2404 2405 ret = __trace_early_add_new_event(call, tr); 2406 if (ret < 0) 2407 pr_warn("Could not create early event %s\n", 2408 ftrace_event_name(call)); 2409 } 2410} 2411 2412/* Remove the event directory structure for a trace directory. */ 2413static void 2414__trace_remove_event_dirs(struct trace_array *tr) 2415{ 2416 struct ftrace_event_file *file, *next; 2417 2418 list_for_each_entry_safe(file, next, &tr->events, list) 2419 remove_event_file_dir(file); 2420} 2421 2422static void __add_event_to_tracers(struct ftrace_event_call *call) 2423{ 2424 struct trace_array *tr; 2425 2426 list_for_each_entry(tr, &ftrace_trace_arrays, list) 2427 __trace_add_new_event(call, tr); 2428} 2429 2430extern struct ftrace_event_call *__start_ftrace_events[]; 2431extern struct ftrace_event_call *__stop_ftrace_events[]; 2432 2433static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata; 2434 2435static __init int setup_trace_event(char *str) 2436{ 2437 strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE); 2438 ring_buffer_expanded = true; 2439 tracing_selftest_disabled = true; 2440 2441 return 1; 2442} 2443__setup("trace_event=", setup_trace_event); 2444 2445/* Expects to have event_mutex held when called */ 2446static int 2447create_event_toplevel_files(struct dentry *parent, struct trace_array *tr) 2448{ 2449 struct dentry *d_events; 2450 struct dentry *entry; 2451 2452 entry = tracefs_create_file("set_event", 0644, parent, 2453 tr, &ftrace_set_event_fops); 2454 if (!entry) { 2455 pr_warn("Could not create tracefs 'set_event' entry\n"); 2456 return -ENOMEM; 2457 } 2458 2459 d_events = tracefs_create_dir("events", parent); 2460 if (!d_events) { 2461 pr_warn("Could not create tracefs 'events' directory\n"); 2462 return -ENOMEM; 2463 } 2464 2465 /* ring buffer internal formats */ 2466 trace_create_file("header_page", 0444, d_events, 2467 ring_buffer_print_page_header, 2468 &ftrace_show_header_fops); 2469 2470 trace_create_file("header_event", 0444, d_events, 2471 ring_buffer_print_entry_header, 2472 &ftrace_show_header_fops); 2473 2474 trace_create_file("enable", 0644, d_events, 2475 tr, &ftrace_tr_enable_fops); 2476 2477 tr->event_dir = d_events; 2478 2479 return 0; 2480} 2481 2482/** 2483 * event_trace_add_tracer - add a instance of a trace_array to events 2484 * @parent: The parent dentry to place the files/directories for events in 2485 * @tr: The trace array associated with these events 2486 * 2487 * When a new instance is created, it needs to set up its events 2488 * directory, as well as other files associated with events. It also 2489 * creates the event hierachry in the @parent/events directory. 2490 * 2491 * Returns 0 on success. 2492 */ 2493int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr) 2494{ 2495 int ret; 2496 2497 mutex_lock(&event_mutex); 2498 2499 ret = create_event_toplevel_files(parent, tr); 2500 if (ret) 2501 goto out_unlock; 2502 2503 down_write(&trace_event_sem); 2504 __trace_add_event_dirs(tr); 2505 up_write(&trace_event_sem); 2506 2507 out_unlock: 2508 mutex_unlock(&event_mutex); 2509 2510 return ret; 2511} 2512 2513/* 2514 * The top trace array already had its file descriptors created. 2515 * Now the files themselves need to be created. 2516 */ 2517static __init int 2518early_event_add_tracer(struct dentry *parent, struct trace_array *tr) 2519{ 2520 int ret; 2521 2522 mutex_lock(&event_mutex); 2523 2524 ret = create_event_toplevel_files(parent, tr); 2525 if (ret) 2526 goto out_unlock; 2527 2528 down_write(&trace_event_sem); 2529 __trace_early_add_event_dirs(tr); 2530 up_write(&trace_event_sem); 2531 2532 out_unlock: 2533 mutex_unlock(&event_mutex); 2534 2535 return ret; 2536} 2537 2538int event_trace_del_tracer(struct trace_array *tr) 2539{ 2540 mutex_lock(&event_mutex); 2541 2542 /* Disable any event triggers and associated soft-disabled events */ 2543 clear_event_triggers(tr); 2544 2545 /* Disable any running events */ 2546 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0); 2547 2548 /* Access to events are within rcu_read_lock_sched() */ 2549 synchronize_sched(); 2550 2551 down_write(&trace_event_sem); 2552 __trace_remove_event_dirs(tr); 2553 tracefs_remove_recursive(tr->event_dir); 2554 up_write(&trace_event_sem); 2555 2556 tr->event_dir = NULL; 2557 2558 mutex_unlock(&event_mutex); 2559 2560 return 0; 2561} 2562 2563static __init int event_trace_memsetup(void) 2564{ 2565 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC); 2566 file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC); 2567 return 0; 2568} 2569 2570static __init void 2571early_enable_events(struct trace_array *tr, bool disable_first) 2572{ 2573 char *buf = bootup_event_buf; 2574 char *token; 2575 int ret; 2576 2577 while (true) { 2578 token = strsep(&buf, ","); 2579 2580 if (!token) 2581 break; 2582 if (!*token) 2583 continue; 2584 2585 /* Restarting syscalls requires that we stop them first */ 2586 if (disable_first) 2587 ftrace_set_clr_event(tr, token, 0); 2588 2589 ret = ftrace_set_clr_event(tr, token, 1); 2590 if (ret) 2591 pr_warn("Failed to enable trace event: %s\n", token); 2592 2593 /* Put back the comma to allow this to be called again */ 2594 if (buf) 2595 *(buf - 1) = ','; 2596 } 2597} 2598 2599static __init int event_trace_enable(void) 2600{ 2601 struct trace_array *tr = top_trace_array(); 2602 struct ftrace_event_call **iter, *call; 2603 int ret; 2604 2605 if (!tr) 2606 return -ENODEV; 2607 2608 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) { 2609 2610 call = *iter; 2611 ret = event_init(call); 2612 if (!ret) 2613 list_add(&call->list, &ftrace_events); 2614 } 2615 2616 /* 2617 * We need the top trace array to have a working set of trace 2618 * points at early init, before the debug files and directories 2619 * are created. Create the file entries now, and attach them 2620 * to the actual file dentries later. 2621 */ 2622 __trace_early_add_events(tr); 2623 2624 early_enable_events(tr, false); 2625 2626 trace_printk_start_comm(); 2627 2628 register_event_cmds(); 2629 2630 register_trigger_cmds(); 2631 2632 return 0; 2633} 2634 2635/* 2636 * event_trace_enable() is called from trace_event_init() first to 2637 * initialize events and perhaps start any events that are on the 2638 * command line. Unfortunately, there are some events that will not 2639 * start this early, like the system call tracepoints that need 2640 * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable() 2641 * is called before pid 1 starts, and this flag is never set, making 2642 * the syscall tracepoint never get reached, but the event is enabled 2643 * regardless (and not doing anything). 2644 */ 2645static __init int event_trace_enable_again(void) 2646{ 2647 struct trace_array *tr; 2648 2649 tr = top_trace_array(); 2650 if (!tr) 2651 return -ENODEV; 2652 2653 early_enable_events(tr, true); 2654 2655 return 0; 2656} 2657 2658early_initcall(event_trace_enable_again); 2659 2660static __init int event_trace_init(void) 2661{ 2662 struct trace_array *tr; 2663 struct dentry *d_tracer; 2664 struct dentry *entry; 2665 int ret; 2666 2667 tr = top_trace_array(); 2668 if (!tr) 2669 return -ENODEV; 2670 2671 d_tracer = tracing_init_dentry(); 2672 if (IS_ERR(d_tracer)) 2673 return 0; 2674 2675 entry = tracefs_create_file("available_events", 0444, d_tracer, 2676 tr, &ftrace_avail_fops); 2677 if (!entry) 2678 pr_warn("Could not create tracefs 'available_events' entry\n"); 2679 2680 if (trace_define_common_fields()) 2681 pr_warn("tracing: Failed to allocate common fields"); 2682 2683 ret = early_event_add_tracer(d_tracer, tr); 2684 if (ret) 2685 return ret; 2686 2687#ifdef CONFIG_MODULES 2688 ret = register_module_notifier(&trace_module_nb); 2689 if (ret) 2690 pr_warn("Failed to register trace events module notifier\n"); 2691#endif 2692 return 0; 2693} 2694 2695void __init trace_event_init(void) 2696{ 2697 event_trace_memsetup(); 2698 init_ftrace_syscalls(); 2699 event_trace_enable(); 2700} 2701 2702fs_initcall(event_trace_init); 2703 2704#ifdef CONFIG_FTRACE_STARTUP_TEST 2705 2706static DEFINE_SPINLOCK(test_spinlock); 2707static DEFINE_SPINLOCK(test_spinlock_irq); 2708static DEFINE_MUTEX(test_mutex); 2709 2710static __init void test_work(struct work_struct *dummy) 2711{ 2712 spin_lock(&test_spinlock); 2713 spin_lock_irq(&test_spinlock_irq); 2714 udelay(1); 2715 spin_unlock_irq(&test_spinlock_irq); 2716 spin_unlock(&test_spinlock); 2717 2718 mutex_lock(&test_mutex); 2719 msleep(1); 2720 mutex_unlock(&test_mutex); 2721} 2722 2723static __init int event_test_thread(void *unused) 2724{ 2725 void *test_malloc; 2726 2727 test_malloc = kmalloc(1234, GFP_KERNEL); 2728 if (!test_malloc) 2729 pr_info("failed to kmalloc\n"); 2730 2731 schedule_on_each_cpu(test_work); 2732 2733 kfree(test_malloc); 2734 2735 set_current_state(TASK_INTERRUPTIBLE); 2736 while (!kthread_should_stop()) { 2737 schedule(); 2738 set_current_state(TASK_INTERRUPTIBLE); 2739 } 2740 __set_current_state(TASK_RUNNING); 2741 2742 return 0; 2743} 2744 2745/* 2746 * Do various things that may trigger events. 2747 */ 2748static __init void event_test_stuff(void) 2749{ 2750 struct task_struct *test_thread; 2751 2752 test_thread = kthread_run(event_test_thread, NULL, "test-events"); 2753 msleep(1); 2754 kthread_stop(test_thread); 2755} 2756 2757/* 2758 * For every trace event defined, we will test each trace point separately, 2759 * and then by groups, and finally all trace points. 2760 */ 2761static __init void event_trace_self_tests(void) 2762{ 2763 struct ftrace_subsystem_dir *dir; 2764 struct ftrace_event_file *file; 2765 struct ftrace_event_call *call; 2766 struct event_subsystem *system; 2767 struct trace_array *tr; 2768 int ret; 2769 2770 tr = top_trace_array(); 2771 if (!tr) 2772 return; 2773 2774 pr_info("Running tests on trace events:\n"); 2775 2776 list_for_each_entry(file, &tr->events, list) { 2777 2778 call = file->event_call; 2779 2780 /* Only test those that have a probe */ 2781 if (!call->class || !call->class->probe) 2782 continue; 2783 2784/* 2785 * Testing syscall events here is pretty useless, but 2786 * we still do it if configured. But this is time consuming. 2787 * What we really need is a user thread to perform the 2788 * syscalls as we test. 2789 */ 2790#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS 2791 if (call->class->system && 2792 strcmp(call->class->system, "syscalls") == 0) 2793 continue; 2794#endif 2795 2796 pr_info("Testing event %s: ", ftrace_event_name(call)); 2797 2798 /* 2799 * If an event is already enabled, someone is using 2800 * it and the self test should not be on. 2801 */ 2802 if (file->flags & FTRACE_EVENT_FL_ENABLED) { 2803 pr_warn("Enabled event during self test!\n"); 2804 WARN_ON_ONCE(1); 2805 continue; 2806 } 2807 2808 ftrace_event_enable_disable(file, 1); 2809 event_test_stuff(); 2810 ftrace_event_enable_disable(file, 0); 2811 2812 pr_cont("OK\n"); 2813 } 2814 2815 /* Now test at the sub system level */ 2816 2817 pr_info("Running tests on trace event systems:\n"); 2818 2819 list_for_each_entry(dir, &tr->systems, list) { 2820 2821 system = dir->subsystem; 2822 2823 /* the ftrace system is special, skip it */ 2824 if (strcmp(system->name, "ftrace") == 0) 2825 continue; 2826 2827 pr_info("Testing event system %s: ", system->name); 2828 2829 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1); 2830 if (WARN_ON_ONCE(ret)) { 2831 pr_warn("error enabling system %s\n", 2832 system->name); 2833 continue; 2834 } 2835 2836 event_test_stuff(); 2837 2838 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0); 2839 if (WARN_ON_ONCE(ret)) { 2840 pr_warn("error disabling system %s\n", 2841 system->name); 2842 continue; 2843 } 2844 2845 pr_cont("OK\n"); 2846 } 2847 2848 /* Test with all events enabled */ 2849 2850 pr_info("Running tests on all trace events:\n"); 2851 pr_info("Testing all events: "); 2852 2853 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1); 2854 if (WARN_ON_ONCE(ret)) { 2855 pr_warn("error enabling all events\n"); 2856 return; 2857 } 2858 2859 event_test_stuff(); 2860 2861 /* reset sysname */ 2862 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0); 2863 if (WARN_ON_ONCE(ret)) { 2864 pr_warn("error disabling all events\n"); 2865 return; 2866 } 2867 2868 pr_cont("OK\n"); 2869} 2870 2871#ifdef CONFIG_FUNCTION_TRACER 2872 2873static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); 2874 2875static void 2876function_test_events_call(unsigned long ip, unsigned long parent_ip, 2877 struct ftrace_ops *op, struct pt_regs *pt_regs) 2878{ 2879 struct ring_buffer_event *event; 2880 struct ring_buffer *buffer; 2881 struct ftrace_entry *entry; 2882 unsigned long flags; 2883 long disabled; 2884 int cpu; 2885 int pc; 2886 2887 pc = preempt_count(); 2888 preempt_disable_notrace(); 2889 cpu = raw_smp_processor_id(); 2890 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); 2891 2892 if (disabled != 1) 2893 goto out; 2894 2895 local_save_flags(flags); 2896 2897 event = trace_current_buffer_lock_reserve(&buffer, 2898 TRACE_FN, sizeof(*entry), 2899 flags, pc); 2900 if (!event) 2901 goto out; 2902 entry = ring_buffer_event_data(event); 2903 entry->ip = ip; 2904 entry->parent_ip = parent_ip; 2905 2906 trace_buffer_unlock_commit(buffer, event, flags, pc); 2907 2908 out: 2909 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); 2910 preempt_enable_notrace(); 2911} 2912 2913static struct ftrace_ops trace_ops __initdata = 2914{ 2915 .func = function_test_events_call, 2916 .flags = FTRACE_OPS_FL_RECURSION_SAFE, 2917}; 2918 2919static __init void event_trace_self_test_with_function(void) 2920{ 2921 int ret; 2922 ret = register_ftrace_function(&trace_ops); 2923 if (WARN_ON(ret < 0)) { 2924 pr_info("Failed to enable function tracer for event tests\n"); 2925 return; 2926 } 2927 pr_info("Running tests again, along with the function tracer\n"); 2928 event_trace_self_tests(); 2929 unregister_ftrace_function(&trace_ops); 2930} 2931#else 2932static __init void event_trace_self_test_with_function(void) 2933{ 2934} 2935#endif 2936 2937static __init int event_trace_self_tests_init(void) 2938{ 2939 if (!tracing_selftest_disabled) { 2940 event_trace_self_tests(); 2941 event_trace_self_test_with_function(); 2942 } 2943 2944 return 0; 2945} 2946 2947late_initcall(event_trace_self_tests_init); 2948 2949#endif 2950