Source: include/trace/events/bcache.h — bcache block-cache tracepoint definitions
(cross-reference listing; the leading numbers on each line are listing artifacts,
not part of the original source).


   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 #undef TRACE_SYSTEM
   3 #define TRACE_SYSTEM bcache
   4 
   5 #if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
   6 #define _TRACE_BCACHE_H
   7 
   8 #include <linux/tracepoint.h>
   9 
      /*
       * Event class shared by the bcache request tracepoints.
       *
       * Records the device the bio targets (major:minor), the backing
       * device's original major/minor, the bio's start sector and length in
       * 512-byte sectors, and the decoded op string (rwbs).
       */
   10 DECLARE_EVENT_CLASS(bcache_request,
   11         TP_PROTO(struct bcache_device *d, struct bio *bio),
   12         TP_ARGS(d, bio),
   13 
   14         TP_STRUCT__entry(
   15                 __field(dev_t,          dev                     )
   16                 __field(unsigned int,   orig_major              )
   17                 __field(unsigned int,   orig_minor              )
   18                 __field(sector_t,       sector                  )
                      /* NOTE(review): declared dev_t but assigned a sector
                       * value below — looks like it should be sector_t.
                       * Left as-is: the field layout is exported to
                       * userspace via the trace event format. */
   19                 __field(dev_t,          orig_sector             )
   20                 __field(unsigned int,   nr_sector               )
   21                 __array(char,           rwbs,   6               )
   22         ),
   23 
   24         TP_fast_assign(
   25                 __entry->dev            = bio_dev(bio);
   26                 __entry->orig_major     = d->disk->major;
   27                 __entry->orig_minor     = d->disk->first_minor;
   28                 __entry->sector         = bio->bi_iter.bi_sector;
                      /* -16: presumably accounts for a 16-sector offset
                       * (superblock) on the original device — TODO confirm */
   29                 __entry->orig_sector    = bio->bi_iter.bi_sector - 16;
   30                 __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
   31                 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
   32         ),
   33 
   34         TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
   35                   MAJOR(__entry->dev), MINOR(__entry->dev),
   36                   __entry->rwbs, (unsigned long long)__entry->sector,
   37                   __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
   38                   (unsigned long long)__entry->orig_sector)
   39 );
  40 
      /*
       * Event class for tracepoints that log a single bkey: its inode,
       * offset, size, and dirty flag, printed as "inode:offset len N dirty D".
       */
   41 DECLARE_EVENT_CLASS(bkey,
   42         TP_PROTO(struct bkey *k),
   43         TP_ARGS(k),
   44 
   45         TP_STRUCT__entry(
   46                 __field(u32,    size                            )
   47                 __field(u32,    inode                           )
   48                 __field(u64,    offset                          )
   49                 __field(bool,   dirty                           )
   50         ),
   51 
   52         TP_fast_assign(
   53                 __entry->inode  = KEY_INODE(k);
   54                 __entry->offset = KEY_OFFSET(k);
   55                 __entry->size   = KEY_SIZE(k);
   56                 __entry->dirty  = KEY_DIRTY(k);
   57         ),
   58 
   59         TP_printk("%u:%llu len %u dirty %u", __entry->inode,
   60                   __entry->offset, __entry->size, __entry->dirty)
   61 );
  62 
      /*
       * Event class for tracepoints identified only by a btree node's
       * bucket number (bucket of the node key's first pointer).
       */
   63 DECLARE_EVENT_CLASS(btree_node,
   64         TP_PROTO(struct btree *b),
   65         TP_ARGS(b),
   66 
   67         TP_STRUCT__entry(
   68                 __field(size_t,         bucket                  )
   69         ),
   70 
   71         TP_fast_assign(
   72                 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
   73         ),
   74 
   75         TP_printk("bucket %zu", __entry->bucket)
   76 );
  77 
   78 /* request.c */
   79 
      /* Fired when a request enters the bcache request path. */
   80 DEFINE_EVENT(bcache_request, bcache_request_start,
   81         TP_PROTO(struct bcache_device *d, struct bio *bio),
   82         TP_ARGS(d, bio)
   83 );
   84 
      /* Fired when a bcache request completes. */
   85 DEFINE_EVENT(bcache_request, bcache_request_end,
   86         TP_PROTO(struct bcache_device *d, struct bio *bio),
   87         TP_ARGS(d, bio)
   88 );
  89 
      /*
       * Event class for tracepoints that log a bare bio: target device,
       * start sector, length in 512-byte sectors, and the rwbs op string.
       */
   90 DECLARE_EVENT_CLASS(bcache_bio,
   91         TP_PROTO(struct bio *bio),
   92         TP_ARGS(bio),
   93 
   94         TP_STRUCT__entry(
   95                 __field(dev_t,          dev                     )
   96                 __field(sector_t,       sector                  )
   97                 __field(unsigned int,   nr_sector               )
   98                 __array(char,           rwbs,   6               )
   99         ),
  100 
  101         TP_fast_assign(
  102                 __entry->dev            = bio_dev(bio);
  103                 __entry->sector         = bio->bi_iter.bi_sector;
  104                 __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
  105                 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
  106         ),
  107 
  108         TP_printk("%d,%d  %s %llu + %u",
  109                   MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
  110                   (unsigned long long)__entry->sector, __entry->nr_sector)
  111 );
 112 
      /* Bio bypassed the cache; classified as sequential (per event name). */
  113 DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
  114         TP_PROTO(struct bio *bio),
  115         TP_ARGS(bio)
  116 );
  117 
      /* Bio bypassed the cache due to congestion (per event name). */
  118 DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
  119         TP_PROTO(struct bio *bio),
  120         TP_ARGS(bio)
  121 );
 122 
      /*
       * bcache_read - fired on a read request.
       * Logs the bio (device, sector, length, rwbs) plus whether the read
       * was a cache hit and whether it bypassed the cache.
       */
  123 TRACE_EVENT(bcache_read,
  124         TP_PROTO(struct bio *bio, bool hit, bool bypass),
  125         TP_ARGS(bio, hit, bypass),
  126 
  127         TP_STRUCT__entry(
  128                 __field(dev_t,          dev                     )
  129                 __field(sector_t,       sector                  )
  130                 __field(unsigned int,   nr_sector               )
  131                 __array(char,           rwbs,   6               )
  132                 __field(bool,           cache_hit               )
  133                 __field(bool,           bypass                  )
  134         ),
  135 
  136         TP_fast_assign(
  137                 __entry->dev            = bio_dev(bio);
  138                 __entry->sector         = bio->bi_iter.bi_sector;
  139                 __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
  140                 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
  141                 __entry->cache_hit = hit;
  142                 __entry->bypass = bypass;
  143         ),
  144 
  145         TP_printk("%d,%d  %s %llu + %u hit %u bypass %u",
  146                   MAJOR(__entry->dev), MINOR(__entry->dev),
  147                   __entry->rwbs, (unsigned long long)__entry->sector,
  148                   __entry->nr_sector, __entry->cache_hit, __entry->bypass)
  149 );
 150 
 151 TRACE_EVENT(bcache_write,
 152         TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
 153                 bool writeback, bool bypass),
 154         TP_ARGS(c, inode, bio, writeback, bypass),
 155 
 156         TP_STRUCT__entry(
 157                 __array(char,           uuid,   16              )
 158                 __field(u64,            inode                   )
 159                 __field(sector_t,       sector                  )
 160                 __field(unsigned int,   nr_sector               )
 161                 __array(char,           rwbs,   6               )
 162                 __field(bool,           writeback               )
 163                 __field(bool,           bypass                  )
 164         ),
 165 
 166         TP_fast_assign(
 167                 memcpy(__entry->uuid, c->sb.set_uuid, 16);
 168                 __entry->inode          = inode;
 169                 __entry->sector         = bio->bi_iter.bi_sector;
 170                 __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
 171                 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
 172                 __entry->writeback = writeback;
 173                 __entry->bypass = bypass;
 174         ),
 175 
 176         TP_printk("%pU inode %llu  %s %llu + %u hit %u bypass %u",
 177                   __entry->uuid, __entry->inode,
 178                   __entry->rwbs, (unsigned long long)__entry->sector,
 179                   __entry->nr_sector, __entry->writeback, __entry->bypass)
 180 );
 181 
      /* A read is being retried (bio logged via the bcache_bio class). */
  182 DEFINE_EVENT(bcache_bio, bcache_read_retry,
  183         TP_PROTO(struct bio *bio),
  184         TP_ARGS(bio)
  185 );
  186 
      /* A key is being inserted into the cache (key logged via bkey class). */
  187 DEFINE_EVENT(bkey, bcache_cache_insert,
  188         TP_PROTO(struct bkey *k),
  189         TP_ARGS(k)
  190 );
 191 
  192 /* Journal */
  193 
      /*
       * Event class for tracepoints identified only by the cache set's
       * 16-byte UUID (copied from c->sb.set_uuid).
       */
  194 DECLARE_EVENT_CLASS(cache_set,
  195         TP_PROTO(struct cache_set *c),
  196         TP_ARGS(c),
  197 
  198         TP_STRUCT__entry(
  199                 __array(char,           uuid,   16 )
  200         ),
  201 
  202         TP_fast_assign(
  203                 memcpy(__entry->uuid, c->sb.set_uuid, 16);
  204         ),
  205 
  206         TP_printk("%pU", __entry->uuid)
  207 );
 208 
      /* A key is being replayed from the journal during recovery. */
  209 DEFINE_EVENT(bkey, bcache_journal_replay_key,
  210         TP_PROTO(struct bkey *k),
  211         TP_ARGS(k)
  212 );
  213 
      /* The journal is full (per event name); logs the cache-set UUID. */
  214 DEFINE_EVENT(cache_set, bcache_journal_full,
  215         TP_PROTO(struct cache_set *c),
  216         TP_ARGS(c)
  217 );
  218 
      /* The current journal entry is full; logs the cache-set UUID. */
  219 DEFINE_EVENT(cache_set, bcache_journal_entry_full,
  220         TP_PROTO(struct cache_set *c),
  221         TP_ARGS(c)
  222 );
 223 
      /*
       * bcache_journal_write - fired when a journal entry is written.
       * Logs the bio (device, sector, length, rwbs) plus the number of
       * keys contained in the journal write.
       */
  224 TRACE_EVENT(bcache_journal_write,
  225         TP_PROTO(struct bio *bio, u32 keys),
  226         TP_ARGS(bio, keys),
  227 
  228         TP_STRUCT__entry(
  229                 __field(dev_t,          dev                     )
  230                 __field(sector_t,       sector                  )
  231                 __field(unsigned int,   nr_sector               )
  232                 __array(char,           rwbs,   6               )
  233                 __field(u32,            nr_keys                 )
  234         ),
  235 
  236         TP_fast_assign(
  237                 __entry->dev            = bio_dev(bio);
  238                 __entry->sector         = bio->bi_iter.bi_sector;
  239                 __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
  240                 __entry->nr_keys        = keys;
  241                 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
  242         ),
  243 
  244         TP_printk("%d,%d  %s %llu + %u keys %u",
  245                   MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
  246                   (unsigned long long)__entry->sector, __entry->nr_sector,
  247                   __entry->nr_keys)
  248 );
 249 
  250 /* Btree */
  251 
      /* Btree node cache is being cannibalized (per event name);
       * logs the cache-set UUID. */
  252 DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
  253         TP_PROTO(struct cache_set *c),
  254         TP_ARGS(c)
  255 );
  256 
      /* A btree node is being read; logs its bucket number. */
  257 DEFINE_EVENT(btree_node, bcache_btree_read,
  258         TP_PROTO(struct btree *b),
  259         TP_ARGS(b)
  260 );
 261 
 262 TRACE_EVENT(bcache_btree_write,
 263         TP_PROTO(struct btree *b),
 264         TP_ARGS(b),
 265 
 266         TP_STRUCT__entry(
 267                 __field(size_t,         bucket                  )
 268                 __field(unsigned,       block                   )
 269                 __field(unsigned,       keys                    )
 270         ),
 271 
 272         TP_fast_assign(
 273                 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
 274                 __entry->block  = b->written;
 275                 __entry->keys   = b->keys.set[b->keys.nsets].data->keys;
 276         ),
 277 
 278         TP_printk("bucket %zu", __entry->bucket)
 279 );
 280 
      /* A btree node was allocated; logs its bucket number. */
  281 DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
  282         TP_PROTO(struct btree *b),
  283         TP_ARGS(b)
  284 );
  285 
      /* Btree node allocation failed; logs the cache-set UUID. */
  286 DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
  287         TP_PROTO(struct cache_set *c),
  288         TP_ARGS(c)
  289 );
  290 
      /* A btree node was freed; logs its bucket number. */
  291 DEFINE_EVENT(btree_node, bcache_btree_node_free,
  292         TP_PROTO(struct btree *b),
  293         TP_ARGS(b)
  294 );
 295 
      /*
       * bcache_btree_gc_coalesce - fired when GC coalesces btree nodes;
       * logs how many nodes were merged.
       */
  296 TRACE_EVENT(bcache_btree_gc_coalesce,
  297         TP_PROTO(unsigned nodes),
  298         TP_ARGS(nodes),
  299 
  300         TP_STRUCT__entry(
  301                 __field(unsigned,       nodes                   )
  302         ),
  303 
  304         TP_fast_assign(
  305                 __entry->nodes  = nodes;
  306         ),
  307 
  308         TP_printk("coalesced %u nodes", __entry->nodes)
  309 );
 310 
      /* Garbage collection started; logs the cache-set UUID. */
  311 DEFINE_EVENT(cache_set, bcache_gc_start,
  312         TP_PROTO(struct cache_set *c),
  313         TP_ARGS(c)
  314 );
  315 
      /* Garbage collection finished; logs the cache-set UUID. */
  316 DEFINE_EVENT(cache_set, bcache_gc_end,
  317         TP_PROTO(struct cache_set *c),
  318         TP_ARGS(c)
  319 );
  320 
      /* A key's data is being copied (moved) by GC. */
  321 DEFINE_EVENT(bkey, bcache_gc_copy,
  322         TP_PROTO(struct bkey *k),
  323         TP_ARGS(k)
  324 );
  325 
      /* GC copy raced with a concurrent update of the same key. */
  326 DEFINE_EVENT(bkey, bcache_gc_copy_collision,
  327         TP_PROTO(struct bkey *k),
  328         TP_ARGS(k)
  329 );
 330 
      /*
       * bcache_btree_insert_key - fired when a key is inserted into a
       * btree node.  Logs the target node (bucket number and level), the
       * key's inode/offset/size/dirty bits, and the caller-supplied op and
       * status codes.  Printed as "status for op at node(level): key".
       */
  331 TRACE_EVENT(bcache_btree_insert_key,
  332         TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
  333         TP_ARGS(b, k, op, status),
  334 
  335         TP_STRUCT__entry(
  336                 __field(u64,    btree_node                      )
  337                 __field(u32,    btree_level                     )
  338                 __field(u32,    inode                           )
  339                 __field(u64,    offset                          )
  340                 __field(u32,    size                            )
  341                 __field(u8,     dirty                           )
  342                 __field(u8,     op                              )
  343                 __field(u8,     status                          )
  344         ),
  345 
  346         TP_fast_assign(
  347                 __entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
  348                 __entry->btree_level = b->level;
  349                 __entry->inode  = KEY_INODE(k);
  350                 __entry->offset = KEY_OFFSET(k);
  351                 __entry->size   = KEY_SIZE(k);
  352                 __entry->dirty  = KEY_DIRTY(k);
  353                 __entry->op = op;
  354                 __entry->status = status;
  355         ),
  356 
  357         TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
  358                   __entry->status, __entry->op,
  359                   __entry->btree_node, __entry->btree_level,
  360                   __entry->inode, __entry->offset,
  361                   __entry->size, __entry->dirty)
  362 );
 363 
      /*
       * Event class for btree split/compact tracepoints: logs the node's
       * bucket number and the key count supplied by the caller.
       */
  364 DECLARE_EVENT_CLASS(btree_split,
  365         TP_PROTO(struct btree *b, unsigned keys),
  366         TP_ARGS(b, keys),
  367 
  368         TP_STRUCT__entry(
  369                 __field(size_t,         bucket                  )
  370                 __field(unsigned,       keys                    )
  371         ),
  372 
  373         TP_fast_assign(
  374                 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
  375                 __entry->keys   = keys;
  376         ),
  377 
  378         TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
  379 );
 380 
      /* A btree node was split; logs bucket and key count. */
  381 DEFINE_EVENT(btree_split, bcache_btree_node_split,
  382         TP_PROTO(struct btree *b, unsigned keys),
  383         TP_ARGS(b, keys)
  384 );
  385 
      /* A btree node was compacted; logs bucket and key count. */
  386 DEFINE_EVENT(btree_split, bcache_btree_node_compact,
  387         TP_PROTO(struct btree *b, unsigned keys),
  388         TP_ARGS(b, keys)
  389 );
  390 
      /* A node became the btree root; logs its bucket number. */
  391 DEFINE_EVENT(btree_node, bcache_btree_set_root,
  392         TP_PROTO(struct btree *b),
  393         TP_ARGS(b)
  394 );
 395 
      /*
       * bcache_keyscan - fired after scanning a range of keys.
       * Logs the number of keys found and the scanned range as
       * start inode:offset through end inode:offset.
       */
  396 TRACE_EVENT(bcache_keyscan,
  397         TP_PROTO(unsigned nr_found,
  398                  unsigned start_inode, uint64_t start_offset,
  399                  unsigned end_inode, uint64_t end_offset),
  400         TP_ARGS(nr_found,
  401                 start_inode, start_offset,
  402                 end_inode, end_offset),
  403 
  404         TP_STRUCT__entry(
  405                 __field(__u32,  nr_found                        )
  406                 __field(__u32,  start_inode                     )
  407                 __field(__u64,  start_offset                    )
  408                 __field(__u32,  end_inode                       )
  409                 __field(__u64,  end_offset                      )
  410         ),
  411 
  412         TP_fast_assign(
  413                 __entry->nr_found       = nr_found;
  414                 __entry->start_inode    = start_inode;
  415                 __entry->start_offset   = start_offset;
  416                 __entry->end_inode      = end_inode;
  417                 __entry->end_offset     = end_offset;
  418         ),
  419 
  420         TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
  421                   __entry->start_inode, __entry->start_offset,
  422                   __entry->end_inode, __entry->end_offset)
  423 );
 424 
  425 /* Allocator */
  426 
      /*
       * bcache_invalidate - fired when a bucket is invalidated for reuse.
       * Logs the cache device, the bucket's offset (bucket number shifted
       * by bucket_bits — presumably a sector offset; TODO confirm), and
       * the sectors-used count GC recorded for that bucket.
       */
  427 TRACE_EVENT(bcache_invalidate,
  428         TP_PROTO(struct cache *ca, size_t bucket),
  429         TP_ARGS(ca, bucket),
  430 
  431         TP_STRUCT__entry(
  432                 __field(unsigned,       sectors                 )
  433                 __field(dev_t,          dev                     )
  434                 __field(__u64,          offset                  )
  435         ),
  436 
  437         TP_fast_assign(
  438                 __entry->dev            = ca->bdev->bd_dev;
  439                 __entry->offset         = bucket << ca->set->bucket_bits;
  440                 __entry->sectors        = GC_SECTORS_USED(&ca->buckets[bucket]);
  441         ),
  442 
  443         TP_printk("invalidated %u sectors at %d,%d sector=%llu",
  444                   __entry->sectors, MAJOR(__entry->dev),
  445                   MINOR(__entry->dev), __entry->offset)
  446 );
 447 
      /*
       * bcache_alloc - fired when a bucket is allocated.
       * Logs the cache device and the bucket's offset (bucket number
       * shifted by bucket_bits).
       */
  448 TRACE_EVENT(bcache_alloc,
  449         TP_PROTO(struct cache *ca, size_t bucket),
  450         TP_ARGS(ca, bucket),
  451 
  452         TP_STRUCT__entry(
  453                 __field(dev_t,          dev                     )
  454                 __field(__u64,          offset                  )
  455         ),
  456 
  457         TP_fast_assign(
  458                 __entry->dev            = ca->bdev->bd_dev;
  459                 __entry->offset         = bucket << ca->set->bucket_bits;
  460         ),
  461 
  462         TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
  463                   MINOR(__entry->dev), __entry->offset)
  464 );
 465 
      /*
       * bcache_alloc_fail - fired when bucket allocation fails.
       * Logs the cache device and allocator state: occupancy of the
       * per-reserve free fifo and the free_inc fifo, plus the cache set's
       * prio_blocked counter.
       */
  466 TRACE_EVENT(bcache_alloc_fail,
  467         TP_PROTO(struct cache *ca, unsigned reserve),
  468         TP_ARGS(ca, reserve),
  469 
  470         TP_STRUCT__entry(
  471                 __field(dev_t,          dev                     )
  472                 __field(unsigned,       free                    )
  473                 __field(unsigned,       free_inc                )
  474                 __field(unsigned,       blocked                 )
  475         ),
  476 
  477         TP_fast_assign(
  478                 __entry->dev            = ca->bdev->bd_dev;
  479                 __entry->free           = fifo_used(&ca->free[reserve]);
  480                 __entry->free_inc       = fifo_used(&ca->free_inc);
  481                 __entry->blocked        = atomic_read(&ca->set->prio_blocked);
  482         ),
  483 
  484         TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
  485                   MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
  486                   __entry->free_inc, __entry->blocked)
  487 );
 488 
  489 /* Background writeback */
  490 
      /* A dirty key is being written back to the backing device. */
  491 DEFINE_EVENT(bkey, bcache_writeback,
  492         TP_PROTO(struct bkey *k),
  493         TP_ARGS(k)
  494 );
  495 
      /* Writeback raced with a concurrent update of the same key. */
  496 DEFINE_EVENT(bkey, bcache_writeback_collision,
  497         TP_PROTO(struct bkey *k),
  498         TP_ARGS(k)
  499 );
 500 
 501 #endif /* _TRACE_BCACHE_H */
 502 
 503 /* This part must be outside protection */
 504 #include <trace/define_trace.h>

/* [<][>][^][v][top][bottom][index][help] */