root/fs/hfsplus/bnode.c

DEFINITIONS

This source file includes the following definitions:
  1. hfs_bnode_read
  2. hfs_bnode_read_u16
  3. hfs_bnode_read_u8
  4. hfs_bnode_read_key
  5. hfs_bnode_write
  6. hfs_bnode_write_u16
  7. hfs_bnode_clear
  8. hfs_bnode_copy
  9. hfs_bnode_move
  10. hfs_bnode_dump
  11. hfs_bnode_unlink
  12. hfs_bnode_hash
  13. hfs_bnode_findhash
  14. __hfs_bnode_create
  15. hfs_bnode_unhash
  16. hfs_bnode_find
  17. hfs_bnode_free
  18. hfs_bnode_create
  19. hfs_bnode_get
  20. hfs_bnode_put
  21. hfs_bnode_need_zeroout

// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/swap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Copy a specified range of bytes from the raw data of a node */
void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
        struct page **pagep;
        int l;

        off += node->page_offset;
        pagep = node->page + (off >> PAGE_SHIFT);
        off &= ~PAGE_MASK;

        l = min_t(int, len, PAGE_SIZE - off);
        memcpy(buf, kmap(*pagep) + off, l);
        kunmap(*pagep);

        while ((len -= l) != 0) {
                buf += l;
                l = min_t(int, len, PAGE_SIZE);
                memcpy(buf, kmap(*++pagep), l);
                kunmap(*pagep);
        }
}

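/* Read a big-endian u16 at @off and return it in CPU byte order */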
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
        __be16 data;
        /* TODO: optimize later... */
        hfs_bnode_read(node, &data, off, 2);
        return be16_to_cpu(data);
}

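/* Read a single byte at @off */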
u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
        u8 data;
        /* TODO: optimize later... */
        hfs_bnode_read(node, &data, off, 1);
        return data;
}

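/*
 * Read the key of the record at @off, including its 2-byte length
 * field.  Leaf records, trees with variable-sized index keys and the
 * attributes tree store the key length on disk; all other index keys
 * occupy the fixed maximum key length.
 */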
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
        struct hfs_btree *tree;
        int key_len;

        tree = node->tree;
        if (node->type == HFS_NODE_LEAF ||
            tree->attributes & HFS_TREE_VARIDXKEYS ||
            node->tree->cnid == HFSPLUS_ATTR_CNID)
                key_len = hfs_bnode_read_u16(node, off) + 2;
        else
                key_len = tree->max_key_len + 2;

        hfs_bnode_read(node, key, off, key_len);
}

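/* Copy a range of bytes into the node and mark the pages dirty */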
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
        struct page **pagep;
        int l;

        off += node->page_offset;
        pagep = node->page + (off >> PAGE_SHIFT);
        off &= ~PAGE_MASK;

        l = min_t(int, len, PAGE_SIZE - off);
        memcpy(kmap(*pagep) + off, buf, l);
        set_page_dirty(*pagep);
        kunmap(*pagep);

        while ((len -= l) != 0) {
                buf += l;
                l = min_t(int, len, PAGE_SIZE);
                memcpy(kmap(*++pagep), buf, l);
                set_page_dirty(*pagep);
                kunmap(*pagep);
        }
}

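/* Write a u16 at @off in big-endian (on-disk) byte order */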
void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
        __be16 v = cpu_to_be16(data);
        /* TODO: optimize later... */
        hfs_bnode_write(node, &v, off, 2);
}

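/* Zero a range of bytes in the node and mark the pages dirty */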
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
        struct page **pagep;
        int l;

        off += node->page_offset;
        pagep = node->page + (off >> PAGE_SHIFT);
        off &= ~PAGE_MASK;

        l = min_t(int, len, PAGE_SIZE - off);
        memset(kmap(*pagep) + off, 0, l);
        set_page_dirty(*pagep);
        kunmap(*pagep);

        while ((len -= l) != 0) {
                l = min_t(int, len, PAGE_SIZE);
                memset(kmap(*++pagep), 0, l);
                set_page_dirty(*pagep);
                kunmap(*pagep);
        }
}

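/*
 * Copy bytes between two (possibly different) nodes.  The fast path
 * applies when source and destination are congruent modulo PAGE_SIZE;
 * otherwise the copy walks both page arrays with independent offsets.
 */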
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
                    struct hfs_bnode *src_node, int src, int len)
{
        struct page **src_page, **dst_page;
        int l;

        hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
        if (!len)
                return;
        src += src_node->page_offset;
        dst += dst_node->page_offset;
        src_page = src_node->page + (src >> PAGE_SHIFT);
        src &= ~PAGE_MASK;
        dst_page = dst_node->page + (dst >> PAGE_SHIFT);
        dst &= ~PAGE_MASK;

        if (src == dst) {
                l = min_t(int, len, PAGE_SIZE - src);
                memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
                kunmap(*src_page);
                set_page_dirty(*dst_page);
                kunmap(*dst_page);

                while ((len -= l) != 0) {
                        l = min_t(int, len, PAGE_SIZE);
                        memcpy(kmap(*++dst_page), kmap(*++src_page), l);
                        kunmap(*src_page);
                        set_page_dirty(*dst_page);
                        kunmap(*dst_page);
                }
        } else {
                void *src_ptr, *dst_ptr;

                do {
                        src_ptr = kmap(*src_page) + src;
                        dst_ptr = kmap(*dst_page) + dst;
                        if (PAGE_SIZE - src < PAGE_SIZE - dst) {
                                l = PAGE_SIZE - src;
                                src = 0;
                                dst += l;
                        } else {
                                l = PAGE_SIZE - dst;
                                src += l;
                                dst = 0;
                        }
                        l = min(len, l);
                        memcpy(dst_ptr, src_ptr, l);
                        kunmap(*src_page);
                        set_page_dirty(*dst_page);
                        kunmap(*dst_page);
                        if (!dst)
                                dst_page++;
                        else
                                src_page++;
                } while ((len -= l));
        }
}

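/*
 * Move bytes within a node, memmove-style: the ranges may overlap.
 * When the destination lies above the source, the copy runs backwards
 * from the end of the range so data is not overwritten before it has
 * been moved.
 */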
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
        struct page **src_page, **dst_page;
        int l;

        hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
        if (!len)
                return;
        src += node->page_offset;
        dst += node->page_offset;
        if (dst > src) {
                src += len - 1;
                src_page = node->page + (src >> PAGE_SHIFT);
                src = (src & ~PAGE_MASK) + 1;
                dst += len - 1;
                dst_page = node->page + (dst >> PAGE_SHIFT);
                dst = (dst & ~PAGE_MASK) + 1;

                if (src == dst) {
                        while (src < len) {
                                memmove(kmap(*dst_page), kmap(*src_page), src);
                                kunmap(*src_page);
                                set_page_dirty(*dst_page);
                                kunmap(*dst_page);
                                len -= src;
                                src = PAGE_SIZE;
                                src_page--;
                                dst_page--;
                        }
                        src -= len;
                        memmove(kmap(*dst_page) + src,
                                kmap(*src_page) + src, len);
                        kunmap(*src_page);
                        set_page_dirty(*dst_page);
                        kunmap(*dst_page);
                } else {
                        void *src_ptr, *dst_ptr;

                        do {
                                src_ptr = kmap(*src_page) + src;
                                dst_ptr = kmap(*dst_page) + dst;
                                if (src < dst) {
                                        l = src;
                                        src = PAGE_SIZE;
                                        dst -= l;
                                } else {
                                        l = dst;
                                        src -= l;
                                        dst = PAGE_SIZE;
                                }
                                l = min(len, l);
                                memmove(dst_ptr - l, src_ptr - l, l);
                                kunmap(*src_page);
                                set_page_dirty(*dst_page);
                                kunmap(*dst_page);
                                if (dst == PAGE_SIZE)
                                        dst_page--;
                                else
                                        src_page--;
                        } while ((len -= l));
                }
        } else {
                src_page = node->page + (src >> PAGE_SHIFT);
                src &= ~PAGE_MASK;
                dst_page = node->page + (dst >> PAGE_SHIFT);
                dst &= ~PAGE_MASK;

                if (src == dst) {
                        l = min_t(int, len, PAGE_SIZE - src);
                        memmove(kmap(*dst_page) + src,
                                kmap(*src_page) + src, l);
                        kunmap(*src_page);
                        set_page_dirty(*dst_page);
                        kunmap(*dst_page);

                        while ((len -= l) != 0) {
                                l = min_t(int, len, PAGE_SIZE);
                                memmove(kmap(*++dst_page),
                                        kmap(*++src_page), l);
                                kunmap(*src_page);
                                set_page_dirty(*dst_page);
                                kunmap(*dst_page);
                        }
                } else {
                        void *src_ptr, *dst_ptr;

                        do {
                                src_ptr = kmap(*src_page) + src;
                                dst_ptr = kmap(*dst_page) + dst;
                                if (PAGE_SIZE - src <
                                                PAGE_SIZE - dst) {
                                        l = PAGE_SIZE - src;
                                        src = 0;
                                        dst += l;
                                } else {
                                        l = PAGE_SIZE - dst;
                                        src += l;
                                        dst = 0;
                                }
                                l = min(len, l);
                                memmove(dst_ptr, src_ptr, l);
                                kunmap(*src_page);
                                set_page_dirty(*dst_page);
                                kunmap(*dst_page);
                                if (!dst)
                                        dst_page++;
                                else
                                        src_page++;
                        } while ((len -= l));
                }
        }
}

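/* Dump the node descriptor and record offset table for debugging */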
void hfs_bnode_dump(struct hfs_bnode *node)
{
        struct hfs_bnode_desc desc;
        __be32 cnid;
        int i, off, key_off;

        hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
        hfs_bnode_read(node, &desc, 0, sizeof(desc));
        hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
                be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
                desc.type, desc.height, be16_to_cpu(desc.num_recs));

        off = node->tree->node_size - 2;
        for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
                key_off = hfs_bnode_read_u16(node, off);
                hfs_dbg(BNODE_MOD, " %d", key_off);
                if (i && node->type == HFS_NODE_INDEX) {
                        int tmp;

                        if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
                                        node->tree->cnid == HFSPLUS_ATTR_CNID)
                                tmp = hfs_bnode_read_u16(node, key_off) + 2;
                        else
                                tmp = node->tree->max_key_len + 2;
                        hfs_dbg_cont(BNODE_MOD, " (%d", tmp);
                        hfs_bnode_read(node, &cnid, key_off + tmp, 4);
                        hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
                } else if (i && node->type == HFS_NODE_LEAF) {
                        int tmp;

                        tmp = hfs_bnode_read_u16(node, key_off);
                        hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
                }
        }
        hfs_dbg_cont(BNODE_MOD, "\n");
}

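/*
 * Unlink a node from its sibling chain: update the on-disk prev/next
 * pointers of its neighbours (or the tree's leaf head/tail for leaf
 * nodes) and mark the node deleted.
 */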
void hfs_bnode_unlink(struct hfs_bnode *node)
{
        struct hfs_btree *tree;
        struct hfs_bnode *tmp;
        __be32 cnid;

        tree = node->tree;
        if (node->prev) {
                tmp = hfs_bnode_find(tree, node->prev);
                if (IS_ERR(tmp))
                        return;
                tmp->next = node->next;
                cnid = cpu_to_be32(tmp->next);
                hfs_bnode_write(tmp, &cnid,
                        offsetof(struct hfs_bnode_desc, next), 4);
                hfs_bnode_put(tmp);
        } else if (node->type == HFS_NODE_LEAF)
                tree->leaf_head = node->next;

        if (node->next) {
                tmp = hfs_bnode_find(tree, node->next);
                if (IS_ERR(tmp))
                        return;
                tmp->prev = node->prev;
                cnid = cpu_to_be32(tmp->prev);
                hfs_bnode_write(tmp, &cnid,
                        offsetof(struct hfs_bnode_desc, prev), 4);
                hfs_bnode_put(tmp);
        } else if (node->type == HFS_NODE_LEAF)
                tree->leaf_tail = node->prev;

        /* move down? */
        if (!node->prev && !node->next)
                hfs_dbg(BNODE_MOD, "hfs_btree_del_level\n");
        if (!node->parent) {
                tree->root = 0;
                tree->depth = 0;
        }
        set_bit(HFS_BNODE_DELETED, &node->flags);
}

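/* Fold a node number into an index into the node hash table */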
static inline int hfs_bnode_hash(u32 num)
{
        num = (num >> 16) + num;
        num += num >> 8;
        return num & (NODE_HASH_SIZE - 1);
}

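/* Look up a node in the cache; the caller must hold tree->hash_lock */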
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
        struct hfs_bnode *node;

        if (cnid >= tree->node_count) {
                pr_err("request for non-existent node %u in B*Tree\n",
                       cnid);
                return NULL;
        }

        for (node = tree->node_hash[hfs_bnode_hash(cnid)];
                        node; node = node->next_hash)
                if (node->this == cnid)
                        return node;
        return NULL;
}

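/*
 * Allocate a node, insert it into the hash table (or return the node
 * that won a concurrent insertion race) and read in its backing pages.
 * On a page read error the node is returned with HFS_BNODE_ERROR set
 * rather than freed, so the caller can still put it.
 */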
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
        struct hfs_bnode *node, *node2;
        struct address_space *mapping;
        struct page *page;
        int size, block, i, hash;
        loff_t off;

        if (cnid >= tree->node_count) {
                pr_err("request for non-existent node %u in B*Tree\n",
                       cnid);
                return NULL;
        }

        size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
                sizeof(struct page *);
        node = kzalloc(size, GFP_KERNEL);
        if (!node)
                return NULL;
        node->tree = tree;
        node->this = cnid;
        set_bit(HFS_BNODE_NEW, &node->flags);
        atomic_set(&node->refcnt, 1);
        hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
                node->tree->cnid, node->this);
        init_waitqueue_head(&node->lock_wq);
        spin_lock(&tree->hash_lock);
        node2 = hfs_bnode_findhash(tree, cnid);
        if (!node2) {
                hash = hfs_bnode_hash(cnid);
                node->next_hash = tree->node_hash[hash];
                tree->node_hash[hash] = node;
                tree->node_hash_cnt++;
        } else {
                spin_unlock(&tree->hash_lock);
                kfree(node);
                wait_event(node2->lock_wq,
                        !test_bit(HFS_BNODE_NEW, &node2->flags));
                return node2;
        }
        spin_unlock(&tree->hash_lock);

        mapping = tree->inode->i_mapping;
        off = (loff_t)cnid << tree->node_size_shift;
        block = off >> PAGE_SHIFT;
        node->page_offset = off & ~PAGE_MASK;
        for (i = 0; i < tree->pages_per_bnode; block++, i++) {
                page = read_mapping_page(mapping, block, NULL);
                if (IS_ERR(page))
                        goto fail;
                if (PageError(page)) {
                        put_page(page);
                        goto fail;
                }
                node->page[i] = page;
        }

        return node;
fail:
        set_bit(HFS_BNODE_ERROR, &node->flags);
        return node;
}

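/* Remove a node from the hash table; the caller must hold tree->hash_lock */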
void hfs_bnode_unhash(struct hfs_bnode *node)
{
        struct hfs_bnode **p;

        hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
                node->tree->cnid, node->this, atomic_read(&node->refcnt));
        for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
             *p && *p != node; p = &(*p)->next_hash)
                ;
        BUG_ON(!*p);
        *p = node->next_hash;
        node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
        struct hfs_bnode *node;
        struct hfs_bnode_desc *desc;
        int i, rec_off, off, next_off;
        int entry_size, key_size;

        spin_lock(&tree->hash_lock);
        node = hfs_bnode_findhash(tree, num);
        if (node) {
                hfs_bnode_get(node);
                spin_unlock(&tree->hash_lock);
                wait_event(node->lock_wq,
                        !test_bit(HFS_BNODE_NEW, &node->flags));
                if (test_bit(HFS_BNODE_ERROR, &node->flags))
                        goto node_error;
                return node;
        }
        spin_unlock(&tree->hash_lock);
        node = __hfs_bnode_create(tree, num);
        if (!node)
                return ERR_PTR(-ENOMEM);
        if (test_bit(HFS_BNODE_ERROR, &node->flags))
                goto node_error;
        if (!test_bit(HFS_BNODE_NEW, &node->flags))
                return node;

        desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
                        node->page_offset);
        node->prev = be32_to_cpu(desc->prev);
        node->next = be32_to_cpu(desc->next);
        node->num_recs = be16_to_cpu(desc->num_recs);
        node->type = desc->type;
        node->height = desc->height;
        kunmap(node->page[0]);

        switch (node->type) {
        case HFS_NODE_HEADER:
        case HFS_NODE_MAP:
                if (node->height != 0)
                        goto node_error;
                break;
        case HFS_NODE_LEAF:
                if (node->height != 1)
                        goto node_error;
                break;
        case HFS_NODE_INDEX:
                if (node->height <= 1 || node->height > tree->depth)
                        goto node_error;
                break;
        default:
                goto node_error;
        }

        rec_off = tree->node_size - 2;
        off = hfs_bnode_read_u16(node, rec_off);
        if (off != sizeof(struct hfs_bnode_desc))
                goto node_error;
        for (i = 1; i <= node->num_recs; off = next_off, i++) {
                rec_off -= 2;
                next_off = hfs_bnode_read_u16(node, rec_off);
                if (next_off <= off ||
                    next_off > tree->node_size ||
                    next_off & 1)
                        goto node_error;
                entry_size = next_off - off;
                if (node->type != HFS_NODE_INDEX &&
                    node->type != HFS_NODE_LEAF)
                        continue;
                key_size = hfs_bnode_read_u16(node, off) + 2;
                if (key_size >= entry_size || key_size & 1)
                        goto node_error;
        }
        clear_bit(HFS_BNODE_NEW, &node->flags);
        wake_up(&node->lock_wq);
        return node;

node_error:
        set_bit(HFS_BNODE_ERROR, &node->flags);
        clear_bit(HFS_BNODE_NEW, &node->flags);
        wake_up(&node->lock_wq);
        hfs_bnode_put(node);
        return ERR_PTR(-EIO);
}

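/*
 * Typical caller pattern (a sketch, not taken from this file): look up
 * a node, read from it and drop the reference.  hfs_bnode_find()
 * returns an ERR_PTR() on failure, never NULL:
 *
 *        node = hfs_bnode_find(tree, cnid);
 *        if (IS_ERR(node))
 *                return PTR_ERR(node);
 *        hfs_bnode_read(node, buf, off, len);
 *        hfs_bnode_put(node);
 */
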
/* Release a node's backing pages and the in-memory node itself */
void hfs_bnode_free(struct hfs_bnode *node)
{
        int i;

        for (i = 0; i < node->tree->pages_per_bnode; i++)
                if (node->page[i])
                        put_page(node->page[i]);
        kfree(node);
}

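/*
 * Create a brand-new node: it must not already be in the cache, and
 * its contents are zeroed before it is returned.
 */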
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
        struct hfs_bnode *node;
        struct page **pagep;
        int i;

        spin_lock(&tree->hash_lock);
        node = hfs_bnode_findhash(tree, num);
        spin_unlock(&tree->hash_lock);
        if (node) {
                pr_crit("new node %u already hashed?\n", num);
                WARN_ON(1);
                return node;
        }
        node = __hfs_bnode_create(tree, num);
        if (!node)
                return ERR_PTR(-ENOMEM);
        if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
                hfs_bnode_put(node);
                return ERR_PTR(-EIO);
        }

        pagep = node->page;
        memset(kmap(*pagep) + node->page_offset, 0,
               min_t(int, PAGE_SIZE, tree->node_size));
        set_page_dirty(*pagep);
        kunmap(*pagep);
        for (i = 1; i < tree->pages_per_bnode; i++) {
                memset(kmap(*++pagep), 0, PAGE_SIZE);
                set_page_dirty(*pagep);
                kunmap(*pagep);
        }
        clear_bit(HFS_BNODE_NEW, &node->flags);
        wake_up(&node->lock_wq);

        return node;
}

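/* Take an extra reference on a node */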
void hfs_bnode_get(struct hfs_bnode *node)
{
        if (node) {
                atomic_inc(&node->refcnt);
                hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
                        node->tree->cnid, node->this,
                        atomic_read(&node->refcnt));
        }
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
        if (node) {
                struct hfs_btree *tree = node->tree;
                int i;

                hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
                        node->tree->cnid, node->this,
                        atomic_read(&node->refcnt));
                BUG_ON(!atomic_read(&node->refcnt));
                if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
                        return;
                for (i = 0; i < tree->pages_per_bnode; i++) {
                        if (!node->page[i])
                                continue;
                        mark_page_accessed(node->page[i]);
                }

                if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
                        hfs_bnode_unhash(node);
                        spin_unlock(&tree->hash_lock);
                        if (hfs_bnode_need_zeroout(tree))
                                hfs_bnode_clear(node, 0, tree->node_size);
                        hfs_bmap_free(node);
                        hfs_bnode_free(node);
                        return;
                }
                spin_unlock(&tree->hash_lock);
        }
}

/*
 * Unused nodes have to be zeroed if this is the catalog tree and
 * a corresponding flag in the volume header is set.
 */
bool hfs_bnode_need_zeroout(struct hfs_btree *tree)
{
        struct super_block *sb = tree->inode->i_sb;
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes);

        return tree->cnid == HFSPLUS_CAT_CNID &&
                volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX;
}
