root/arch/powerpc/lib/rheap.c


DEFINITIONS

This source file includes the following definitions.
  1. fixup
  2. grow
  3. assure_empty
  4. get_slot
  5. release_slot
  6. attach_free_block
  7. attach_taken_block
  8. rh_create
  9. rh_destroy
  10. rh_init
  11. rh_attach_region
  12. rh_detach_region
  13. rh_alloc_align
  14. rh_alloc
  15. rh_alloc_fixed
  16. rh_free
  17. rh_get_stats
  18. rh_set_owner
  19. rh_dump
  20. rh_dump_blk

/*
 * A Remote Heap.  Remote means that we don't touch the memory that the
 * heap points to. Normal heap implementations use the memory they manage
 * to place their lists. We cannot do that because the memory we manage may
 * have special properties, for example it is uncacheable or of a different
 * endianness.
 *
 * Author: Pantelis Antoniou <panto@intracom.gr>
 *
 * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/rheap.h>

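/*
 * Typical lifecycle, as an illustrative sketch (hypothetical caller; the
 * region offsets and sizes below are made up).  Note that the heap hands
 * out offsets into the attached region, never pointers:
 */
#if 0
static int rheap_usage_example(void)
{
        rh_info_t *h;
        unsigned long off;

        h = rh_create(16);              /* 16-byte alignment */
        if (IS_ERR(h))
                return PTR_ERR(h);

        /* Donate a stretch of "remote" space, e.g. offsets into MURAM */
        if (rh_attach_region(h, 0, 0x2000) < 0) {
                rh_destroy(h);
                return -ENOMEM;
        }

        off = rh_alloc(h, 256, "example");
        if (IS_ERR_VALUE(off)) {
                rh_destroy(h);
                return (int)off;
        }

        rh_free(h, off);
        rh_destroy(h);
        return 0;
}
#endif
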
/*
 * Fixup a list_head, needed when copying lists.  If the pointers fall
 * between s and e, apply the delta.  This assumes that
 * sizeof(struct list_head *) == sizeof(unsigned long *).
 */
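/*
 * For example (illustrative numbers, not from the original source): if
 * grow() copies the block array from address 0x1000 to 0x3000, then
 * s = 0x1000, e = 0x1000 + the old array size, and d = 0x2000.  Every
 * next/prev pointer that still points into the old array has 0x2000
 * added so it points into the copy; pointers that target the list heads
 * in rh_info_t fall outside [s, e) and are left untouched.
 */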
static inline void fixup(unsigned long s, unsigned long e, int d,
                         struct list_head *l)
{
        unsigned long *pp;

        pp = (unsigned long *)&l->next;
        if (*pp >= s && *pp < e)
                *pp += d;

        pp = (unsigned long *)&l->prev;
        if (*pp >= s && *pp < e)
                *pp += d;
}

/* Grow the block-tracking array */
static int grow(rh_info_t * info, int max_blocks)
{
        rh_block_t *block, *blk;
        int i, new_blocks;
        int delta;
        unsigned long blks, blke;

        if (max_blocks <= info->max_blocks)
                return -EINVAL;

        new_blocks = max_blocks - info->max_blocks;

        block = kmalloc_array(max_blocks, sizeof(rh_block_t), GFP_ATOMIC);
        if (block == NULL)
                return -ENOMEM;

        if (info->max_blocks > 0) {

                /* copy old block area */
                memcpy(block, info->block,
                       sizeof(rh_block_t) * info->max_blocks);

                delta = (char *)block - (char *)info->block;

                /* and fixup list pointers */
                blks = (unsigned long)info->block;
                blke = (unsigned long)(info->block + info->max_blocks);

                for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
                        fixup(blks, blke, delta, &blk->list);

                fixup(blks, blke, delta, &info->empty_list);
                fixup(blks, blke, delta, &info->free_list);
                fixup(blks, blke, delta, &info->taken_list);

                /* free the old allocated memory */
                if ((info->flags & RHIF_STATIC_BLOCK) == 0)
                        kfree(info->block);
        }

        info->block = block;
        info->empty_slots += new_blocks;
        info->max_blocks = max_blocks;
        info->flags &= ~RHIF_STATIC_BLOCK;

        /* add all new blocks to the empty list */
        blk = block + info->max_blocks - new_blocks;
        for (i = 0; i < new_blocks; i++, blk++)
                list_add(&blk->list, &info->empty_list);

        return 0;
}

/*
 * Ensure at least the required number of empty slots.  If this function
 * causes the block area to grow, then all pointers kept into the block
 * area become invalid!
 */
static int assure_empty(rh_info_t * info, int slots)
{
        int max_blocks;

        /* This function is not meant to be used to grow uncontrollably */
        if (slots >= 4)
                return -EINVAL;

        /* Enough space */
        if (info->empty_slots >= slots)
                return 0;

        /* Round up to the next multiple of 16 blocks */
        max_blocks = ((info->max_blocks + slots) + 15) & ~15;

        return grow(info, max_blocks);
}
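
/*
 * The pattern used throughout this file (sketched here for clarity) is to
 * reserve slots first and take them afterwards, since a grow may move the
 * whole block array and invalidate any rh_block_t pointers held across it:
 *
 *      if (assure_empty(info, 2) < 0)
 *              return (unsigned long) -ENOMEM;
 *      blk = get_slot(info);   <- cannot fail, two slots are reserved
 */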

static rh_block_t *get_slot(rh_info_t * info)
{
        rh_block_t *blk;

        /* No empty slots left and no way to extend here; the caller
         * should have called assure_empty() beforehand. */
        if (info->empty_slots == 0) {
                printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
                return NULL;
        }

        /* Get empty slot to use */
        blk = list_entry(info->empty_list.next, rh_block_t, list);
        list_del_init(&blk->list);
        info->empty_slots--;

        /* Initialize */
        blk->start = 0;
        blk->size = 0;
        blk->owner = NULL;

        return blk;
}

static inline void release_slot(rh_info_t * info, rh_block_t * blk)
{
        list_add(&blk->list, &info->empty_list);
        info->empty_slots++;
}

static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
{
        rh_block_t *blk;
        rh_block_t *before;
        rh_block_t *after;
        rh_block_t *next;
        int size;
        unsigned long s, e, bs, be;
        struct list_head *l;

        /* We assume that they are aligned properly */
        size = blkn->size;
        s = blkn->start;
        e = s + size;

        /* Find the blocks immediately before and after the given one
         * (if any) */
        before = NULL;
        after = NULL;
        next = NULL;

        list_for_each(l, &info->free_list) {
                blk = list_entry(l, rh_block_t, list);

                bs = blk->start;
                be = bs + blk->size;

                if (next == NULL && s >= bs)
                        next = blk;

                if (be == s)
                        before = blk;

                if (e == bs)
                        after = blk;

                /* If both are not null, break now */
                if (before != NULL && after != NULL)
                        break;
        }

        /* Now check if they are really adjacent */
        if (before && s != (before->start + before->size))
                before = NULL;

        if (after && e != after->start)
                after = NULL;

        /* No coalescing; list insert and return */
        if (before == NULL && after == NULL) {

                if (next != NULL)
                        list_add(&blkn->list, &next->list);
                else
                        list_add(&blkn->list, &info->free_list);

                return;
        }

        /* We don't need it anymore */
        release_slot(info, blkn);

        /* Grow the before block */
        if (before != NULL && after == NULL) {
                before->size += size;
                return;
        }

        /* Grow the after block backwards */
        if (before == NULL && after != NULL) {
                after->start -= size;
                after->size += size;
                return;
        }

        /* Grow the before block, and release the after block */
        before->size += size + after->size;
        list_del(&after->list);
        release_slot(info, after);
}
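
/*
 * Worked coalescing example (illustrative numbers): with free blocks
 * [0x100, 0x200) and [0x300, 0x400), attaching the block [0x200, 0x300)
 * matches both the "before" and the "after" case, so the first block
 * grows to [0x100, 0x400), the second block's slot is released, and the
 * incoming block's own slot goes back to the empty list.
 */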

static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
{
        rh_block_t *blk;
        struct list_head *l;

        /* Insert before the first block that starts after the given one,
         * keeping the taken list sorted by start address */
        list_for_each(l, &info->taken_list) {
                blk = list_entry(l, rh_block_t, list);
                if (blk->start > blkn->start) {
                        list_add_tail(&blkn->list, &blk->list);
                        return;
                }
        }

        list_add_tail(&blkn->list, &info->taken_list);
}

/*
 * Create a remote heap dynamically.  Note that no memory for the blocks
 * is allocated here; that happens upon the first allocation that needs it.
 */
rh_info_t *rh_create(unsigned int alignment)
{
        rh_info_t *info;

        /* Alignment must be a power of two */
        if ((alignment & (alignment - 1)) != 0)
                return ERR_PTR(-EINVAL);

        info = kmalloc(sizeof(*info), GFP_ATOMIC);
        if (info == NULL)
                return ERR_PTR(-ENOMEM);

        info->alignment = alignment;

        /* Initially everything is empty */
        info->block = NULL;
        info->max_blocks = 0;
        info->empty_slots = 0;
        info->flags = 0;

        INIT_LIST_HEAD(&info->empty_list);
        INIT_LIST_HEAD(&info->free_list);
        INIT_LIST_HEAD(&info->taken_list);

        return info;
}
EXPORT_SYMBOL_GPL(rh_create);
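
/*
 * Note that failure is reported with ERR_PTR() values, not NULL, so a
 * caller checks the result with IS_ERR().  A minimal sketch (hypothetical
 * caller, not from the original source):
 */
#if 0
static rh_info_t *example_heap_setup(void)
{
        rh_info_t *h = rh_create(32);

        if (IS_ERR(h)) {
                pr_err("rheap: create failed: %ld\n", PTR_ERR(h));
                return NULL;
        }
        return h;
}
#endif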

/*
 * Destroy a dynamically created remote heap.  Deallocate only if the areas
 * are not static.
 */
void rh_destroy(rh_info_t * info)
{
        if ((info->flags & RHIF_STATIC_BLOCK) == 0)
                kfree(info->block);

        if ((info->flags & RHIF_STATIC_INFO) == 0)
                kfree(info);
}
EXPORT_SYMBOL_GPL(rh_destroy);

/*
 * Initialize in place a remote heap info block.  This is needed to support
 * operation very early in the startup of the kernel, when it is not yet safe
 * to call kmalloc.
 */
void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
             rh_block_t * block)
{
        int i;
        rh_block_t *blk;

        /* Alignment must be a power of two */
        if ((alignment & (alignment - 1)) != 0)
                return;

        info->alignment = alignment;

        /* Initially everything is empty */
        info->block = block;
        info->max_blocks = max_blocks;
        info->empty_slots = max_blocks;
        info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;

        INIT_LIST_HEAD(&info->empty_list);
        INIT_LIST_HEAD(&info->free_list);
        INIT_LIST_HEAD(&info->taken_list);

        /* Add all new blocks to the empty list */
        for (i = 0, blk = block; i < max_blocks; i++, blk++)
                list_add(&blk->list, &info->empty_list);
}
EXPORT_SYMBOL_GPL(rh_init);
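
/*
 * Early-boot sketch (the array size here is arbitrary): because rh_init()
 * sets RHIF_STATIC_INFO | RHIF_STATIC_BLOCK, a later rh_destroy() will not
 * kfree() either structure, so static storage is safe before kmalloc works.
 */
#if 0
static rh_block_t early_blocks[16];
static rh_info_t early_heap;

static void early_heap_setup(void)
{
        rh_init(&early_heap, 8, ARRAY_SIZE(early_blocks), early_blocks);
        rh_attach_region(&early_heap, 0, 0x1000);
}
#endif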

/* Attach a free memory region; coalesces regions if adjacent */
int rh_attach_region(rh_info_t * info, unsigned long start, int size)
{
        rh_block_t *blk;
        unsigned long s, e, m;
        int r;

        /* Force the region to the heap's alignment */
        s = start;
        e = s + size;
        m = info->alignment - 1;

        /* Round start up */
        s = (s + m) & ~m;

        /* Round end down */
        e = e & ~m;

        if (IS_ERR_VALUE(e) || (e < s))
                return -ERANGE;

        /* Take final values */
        start = s;
        size = e - s;

        /* Grow the blocks, if needed */
        r = assure_empty(info, 1);
        if (r < 0)
                return r;

        blk = get_slot(info);
        blk->start = start;
        blk->size = size;
        blk->owner = NULL;

        attach_free_block(info, blk);

        return 0;
}
EXPORT_SYMBOL_GPL(rh_attach_region);
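
/*
 * Rounding example (illustrative numbers): with info->alignment == 16,
 * attaching start = 0x1003, size = 0x100 rounds the start up to 0x1010
 * and the end down to 0x1100, so the region actually placed on the free
 * list is [0x1010, 0x1100).
 */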

/* Detach the given address range; splits a free block if needed. */
unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size)
{
        struct list_head *l;
        rh_block_t *blk, *newblk;
        unsigned long s, e, m, bs, be;

        /* Validate size */
        if (size <= 0)
                return (unsigned long) -EINVAL;

        /* Force the region to the heap's alignment */
        s = start;
        e = s + size;
        m = info->alignment - 1;

        /* Round start up */
        s = (s + m) & ~m;

        /* Round end down */
        e = e & ~m;

        if (assure_empty(info, 1) < 0)
                return (unsigned long) -ENOMEM;

        blk = NULL;
        list_for_each(l, &info->free_list) {
                blk = list_entry(l, rh_block_t, list);
                /* The range must lie entirely inside one free block */
                bs = blk->start;
                be = blk->start + blk->size;
                if (s >= bs && e <= be)
                        break;
                blk = NULL;
        }

        if (blk == NULL)
                return (unsigned long) -ENOMEM;

        /* Perfect fit */
        if (bs == s && be == e) {
                /* Delete from free list, release slot */
                list_del(&blk->list);
                release_slot(info, blk);
                return s;
        }

        /* blk still in free list, with updated start and/or size */
        if (bs == s || be == e) {
                if (bs == s)
                        blk->start += size;
                blk->size -= size;

        } else {
                /* The front free fragment */
                blk->size = s - bs;

                /* The back free fragment */
                newblk = get_slot(info);
                newblk->start = e;
                newblk->size = be - e;

                list_add(&newblk->list, &blk->list);
        }

        return s;
}
EXPORT_SYMBOL_GPL(rh_detach_region);
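
/*
 * Usage sketch (hypothetical offsets): carving a fixed window out of the
 * free space, e.g. an area the hardware insists on owning.  The window is
 * simply forgotten by the heap; it does not land on the taken list.
 */
#if 0
static int example_reserve_window(rh_info_t *h)
{
        /* Remove [0x800, 0xa00) from the free list; the surrounding free
         * block is split into front and back fragments if necessary. */
        unsigned long r = rh_detach_region(h, 0x800, 0x200);

        return IS_ERR_VALUE(r) ? (int)r : 0;
}
#endif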

/* Allocate a block of memory at the specified alignment.  The value returned
 * is an offset into the buffer initialized by rh_init(), or a negative number
 * if there is an error.
 */
unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owner)
{
        struct list_head *l;
        rh_block_t *blk;
        rh_block_t *newblk;
        unsigned long start, sp_size;

        /* Validate size; alignment must be a power of two */
        if (size <= 0 || (alignment & (alignment - 1)) != 0)
                return (unsigned long) -EINVAL;

        /* Align to configured alignment */
        size = (size + (info->alignment - 1)) & ~(info->alignment - 1);

        if (assure_empty(info, 2) < 0)
                return (unsigned long) -ENOMEM;

        blk = NULL;
        list_for_each(l, &info->free_list) {
                blk = list_entry(l, rh_block_t, list);
                if (size <= blk->size) {
                        start = (blk->start + alignment - 1) & ~(alignment - 1);
                        if (start + size <= blk->start + blk->size)
                                break;
                }
                blk = NULL;
        }

        if (blk == NULL)
                return (unsigned long) -ENOMEM;

        /* Just fits */
        if (blk->size == size) {
                /* Move from free list to taken list */
                list_del(&blk->list);
                newblk = blk;
        } else {
                /* Fragment caused, split if needed */
                /* Create a block for the fragment at the beginning */
                sp_size = start - blk->start;
                if (sp_size) {
                        rh_block_t *spblk;

                        spblk = get_slot(info);
                        spblk->start = blk->start;
                        spblk->size = sp_size;
                        /* add before the blk */
                        list_add(&spblk->list, blk->list.prev);
                }
                newblk = get_slot(info);
                newblk->start = start;
                newblk->size = size;

                /* blk stays in the free list, with updated start and size
                 * for the fragment at the end */
                blk->start = start + size;
                blk->size -= sp_size + size;
                /* No fragment at the end, remove blk */
                if (blk->size == 0) {
                        list_del(&blk->list);
                        release_slot(info, blk);
                }
        }

        newblk->owner = owner;
        attach_taken_block(info, newblk);

        return start;
}
EXPORT_SYMBOL_GPL(rh_alloc_align);
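
/*
 * Usage sketch (hypothetical base and sizes): the return value is an
 * offset, so a caller that wants a real address adds it to the base of
 * the attached region itself; the heap never touches that memory.
 */
#if 0
static void __iomem *example_alloc64(rh_info_t *h, void __iomem *base)
{
        unsigned long off = rh_alloc_align(h, 128, 64, "example");

        if (IS_ERR_VALUE(off))
                return NULL;
        return base + off;
}
#endif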

/* Allocate a block of memory at the default alignment.  The value returned is
 * an offset into the buffer initialized by rh_init(), or a negative number if
 * there is an error.
 */
unsigned long rh_alloc(rh_info_t * info, int size, const char *owner)
{
        return rh_alloc_align(info, size, info->alignment, owner);
}
EXPORT_SYMBOL_GPL(rh_alloc);

/* Allocate a block of memory at the given offset, rounded up to the default
 * alignment.  The value returned is an offset into the buffer initialized by
 * rh_init(), or a negative number if there is an error.
 */
unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner)
{
        struct list_head *l;
        rh_block_t *blk, *newblk1, *newblk2;
        unsigned long s, e, m, bs = 0, be = 0;

        /* Validate size */
        if (size <= 0)
                return (unsigned long) -EINVAL;

        /* Force the region to the heap's alignment */
        s = start;
        e = s + size;
        m = info->alignment - 1;

        /* Round start up */
        s = (s + m) & ~m;

        /* Round end down */
        e = e & ~m;

        if (assure_empty(info, 2) < 0)
                return (unsigned long) -ENOMEM;

        blk = NULL;
        list_for_each(l, &info->free_list) {
                blk = list_entry(l, rh_block_t, list);
                /* The range must lie entirely inside one free block */
                bs = blk->start;
                be = blk->start + blk->size;
                if (s >= bs && e <= be)
                        break;
                blk = NULL;
        }

        if (blk == NULL)
                return (unsigned long) -ENOMEM;

        /* Perfect fit */
        if (bs == s && be == e) {
                /* Move from free list to taken list */
                list_del(&blk->list);
                blk->owner = owner;

                start = blk->start;
                attach_taken_block(info, blk);

                return start;
        }

        /* blk still in free list, with updated start and/or size */
        if (bs == s || be == e) {
                if (bs == s)
                        blk->start += size;
                blk->size -= size;

        } else {
                /* The front free fragment */
                blk->size = s - bs;

                /* The back free fragment */
                newblk2 = get_slot(info);
                newblk2->start = e;
                newblk2->size = be - e;

                list_add(&newblk2->list, &blk->list);
        }

        newblk1 = get_slot(info);
        newblk1->start = s;
        newblk1->size = e - s;
        newblk1->owner = owner;

        start = newblk1->start;
        attach_taken_block(info, newblk1);

        return start;
}
EXPORT_SYMBOL_GPL(rh_alloc_fixed);
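
/*
 * Worked example (illustrative numbers): with one free block
 * [0x000, 0x1000) and alignment 16, rh_alloc_fixed(h, 0x100, 0x80, "x")
 * takes [0x100, 0x180) as the new taken block and leaves the free
 * fragments [0x000, 0x100) and [0x180, 0x1000) on either side.
 */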

/* Deallocate the memory previously allocated by one of the rh_alloc functions.
 * The return value is the size of the deallocated block, or a negative number
 * if there is an error.
 */
int rh_free(rh_info_t * info, unsigned long start)
{
        rh_block_t *blk, *blk2;
        struct list_head *l;
        int size;

        /* Linear search for the taken block containing start */
        blk = NULL;
        list_for_each(l, &info->taken_list) {
                blk2 = list_entry(l, rh_block_t, list);
                if (start < blk2->start)
                        break;
                blk = blk2;
        }

        if (blk == NULL || start > (blk->start + blk->size))
                return -EINVAL;

        /* Remove from taken list */
        list_del(&blk->list);

        /* Get size of freed block */
        size = blk->size;
        attach_free_block(info, blk);

        return size;
}
EXPORT_SYMBOL_GPL(rh_free);

int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
{
        rh_block_t *blk;
        struct list_head *l;
        struct list_head *h;
        int nr;

        switch (what) {

        case RHGS_FREE:
                h = &info->free_list;
                break;

        case RHGS_TAKEN:
                h = &info->taken_list;
                break;

        default:
                return -EINVAL;
        }

        /* Walk the list, filling in at most max_stats entries */
        nr = 0;
        list_for_each(l, h) {
                blk = list_entry(l, rh_block_t, list);
                if (stats != NULL && nr < max_stats) {
                        stats->start = blk->start;
                        stats->size = blk->size;
                        stats->owner = blk->owner;
                        stats++;
                }
                nr++;
        }

        return nr;
}
EXPORT_SYMBOL_GPL(rh_get_stats);
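
/*
 * Usage sketch (hypothetical buffer size): the return value is the total
 * number of blocks on the chosen list, which may exceed max_stats; only
 * the first max_stats entries of the array are filled in.
 */
#if 0
static void example_print_free(rh_info_t *h)
{
        rh_stats_t st[8];
        int i, n, nr;

        nr = rh_get_stats(h, RHGS_FREE, ARRAY_SIZE(st), st);
        n = min_t(int, nr, ARRAY_SIZE(st));
        for (i = 0; i < n; i++)
                pr_info("free: 0x%lx (%d)\n", st[i].start, st[i].size);
}
#endif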

int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner)
{
        rh_block_t *blk, *blk2;
        struct list_head *l;
        int size;

        /* Linear search for the taken block containing start */
        blk = NULL;
        list_for_each(l, &info->taken_list) {
                blk2 = list_entry(l, rh_block_t, list);
                if (start < blk2->start)
                        break;
                blk = blk2;
        }

        if (blk == NULL || start > (blk->start + blk->size))
                return -EINVAL;

        blk->owner = owner;
        size = blk->size;

        return size;
}
EXPORT_SYMBOL_GPL(rh_set_owner);

void rh_dump(rh_info_t * info)
{
        static rh_stats_t st[32];       /* XXX maximum 32 blocks */
        int maxnr;
        int i, nr;

        maxnr = ARRAY_SIZE(st);

        printk(KERN_INFO
               "info @0x%p (%d slots empty / %d max)\n",
               info, info->empty_slots, info->max_blocks);

        printk(KERN_INFO "  Free:\n");
        nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
        if (nr > maxnr)
                nr = maxnr;
        for (i = 0; i < nr; i++)
                printk(KERN_INFO
                       "    0x%lx-0x%lx (%u)\n",
                       st[i].start, st[i].start + st[i].size,
                       st[i].size);
        printk(KERN_INFO "\n");

        printk(KERN_INFO "  Taken:\n");
        nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
        if (nr > maxnr)
                nr = maxnr;
        for (i = 0; i < nr; i++)
                printk(KERN_INFO
                       "    0x%lx-0x%lx (%u) %s\n",
                       st[i].start, st[i].start + st[i].size,
                       st[i].size, st[i].owner != NULL ? st[i].owner : "");
        printk(KERN_INFO "\n");
}
EXPORT_SYMBOL_GPL(rh_dump);

void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
{
        printk(KERN_INFO
               "blk @0x%p: 0x%lx-0x%lx (%u)\n",
               blk, blk->start, blk->start + blk->size, blk->size);
}
EXPORT_SYMBOL_GPL(rh_dump_blk);
