fs/jfs/inode.c


DEFINITIONS

This source file includes the following definitions.
  1. jfs_iget
  2. jfs_commit_inode
  3. jfs_write_inode
  4. jfs_evict_inode
  5. jfs_dirty_inode
  6. jfs_get_block
  7. jfs_writepage
  8. jfs_writepages
  9. jfs_readpage
  10. jfs_readpages
  11. jfs_write_failed
  12. jfs_write_begin
  13. jfs_bmap
  14. jfs_direct_IO
  15. jfs_truncate_nolock
  16. jfs_truncate

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  *   Copyright (C) International Business Machines Corp., 2000-2004
   4  *   Portions Copyright (C) Christoph Hellwig, 2001-2002
   5  */
   6 
   7 #include <linux/fs.h>
   8 #include <linux/mpage.h>
   9 #include <linux/buffer_head.h>
  10 #include <linux/pagemap.h>
  11 #include <linux/quotaops.h>
  12 #include <linux/uio.h>
  13 #include <linux/writeback.h>
  14 #include "jfs_incore.h"
  15 #include "jfs_inode.h"
  16 #include "jfs_filsys.h"
  17 #include "jfs_imap.h"
  18 #include "jfs_extent.h"
  19 #include "jfs_unicode.h"
  20 #include "jfs_debug.h"
  21 #include "jfs_dmap.h"
  22 
  23 
  24 struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
  25 {
  26         struct inode *inode;
  27         int ret;
  28 
  29         inode = iget_locked(sb, ino);
  30         if (!inode)
  31                 return ERR_PTR(-ENOMEM);
  32         if (!(inode->i_state & I_NEW))
  33                 return inode;
  34 
  35         ret = diRead(inode);
  36         if (ret < 0) {
  37                 iget_failed(inode);
  38                 return ERR_PTR(ret);
  39         }
  40 
  41         if (S_ISREG(inode->i_mode)) {
  42                 inode->i_op = &jfs_file_inode_operations;
  43                 inode->i_fop = &jfs_file_operations;
  44                 inode->i_mapping->a_ops = &jfs_aops;
  45         } else if (S_ISDIR(inode->i_mode)) {
  46                 inode->i_op = &jfs_dir_inode_operations;
  47                 inode->i_fop = &jfs_dir_operations;
  48         } else if (S_ISLNK(inode->i_mode)) {
  49                 if (inode->i_size >= IDATASIZE) {
  50                         inode->i_op = &page_symlink_inode_operations;
  51                         inode_nohighmem(inode);
  52                         inode->i_mapping->a_ops = &jfs_aops;
  53                 } else {
  54                         inode->i_op = &jfs_fast_symlink_inode_operations;
  55                         inode->i_link = JFS_IP(inode)->i_inline;
  56                         /*
  57                          * The inline data should be null-terminated, but
  58                          * don't let on-disk corruption crash the kernel
  59                          */
  60                         inode->i_link[inode->i_size] = '\0';
  61                 }
  62         } else {
  63                 inode->i_op = &jfs_file_inode_operations;
  64                 init_special_inode(inode, inode->i_mode, inode->i_rdev);
  65         }
  66         unlock_new_inode(inode);
  67         return inode;
  68 }
  69 
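A trimmed sketch of how the lookup path in fs/jfs/namei.c typically consumes jfs_iget(): the inode number comes from a directory search, and d_splice_alias() accepts an ERR_PTR and propagates the error. The helper name and reduced signature below are made up for illustration; the real caller is jfs_lookup().

static struct dentry *example_lookup(struct inode *dip, struct dentry *dentry,
                                     ino_t inum)
{
        /* inum would normally come from a dtSearch() of the parent directory */
        struct inode *ip = jfs_iget(dip->i_sb, inum);

        if (IS_ERR(ip))
                jfs_err("example_lookup: iget failed on inum %d", (uint) inum);

        /* d_splice_alias() tolerates the ERR_PTR and attaches the dentry */
        return d_splice_alias(ip, dentry);
}
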
  70 /*
  71  * Workhorse of both fsync & write_inode
  72  */
  73 int jfs_commit_inode(struct inode *inode, int wait)
  74 {
  75         int rc = 0;
  76         tid_t tid;
  77         static int noisy = 5;
  78 
  79         jfs_info("In jfs_commit_inode, inode = 0x%p", inode);
  80 
  81         /*
  82          * Don't commit if inode has been committed since last being
  83          * marked dirty, or if it has been deleted.
  84          */
  85         if (inode->i_nlink == 0 || !test_cflag(COMMIT_Dirty, inode))
  86                 return 0;
  87 
  88         if (isReadOnly(inode)) {
  89                 /* kernel allows writes to devices on read-only
  90                  * partitions and may think inode is dirty
  91                  */
  92                 if (!special_file(inode->i_mode) && noisy) {
  93                         jfs_err("jfs_commit_inode(0x%p) called on read-only volume",
  94                                 inode);
  95                         jfs_err("Is remount racy?");
  96                         noisy--;
  97                 }
  98                 return 0;
  99         }
 100 
 101         tid = txBegin(inode->i_sb, COMMIT_INODE);
 102         mutex_lock(&JFS_IP(inode)->commit_mutex);
 103 
 104         /*
 105          * Retest inode state after taking commit_mutex
 106          */
 107         if (inode->i_nlink && test_cflag(COMMIT_Dirty, inode))
 108                 rc = txCommit(tid, 1, &inode, wait ? COMMIT_SYNC : 0);
 109 
 110         txEnd(tid);
 111         mutex_unlock(&JFS_IP(inode)->commit_mutex);
 112         return rc;
 113 }
 114 
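The fsync side of this workhorse lives in fs/jfs/file.c. A condensed sketch of jfs_fsync(), assuming the mainline shape in which data pages are written back first and the transaction commit is skipped when the VFS no longer considers the inode dirty:

int jfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file->f_mapping->host;
        int rc;

        /* Push dirty data pages before committing metadata */
        rc = file_write_and_wait_range(file, start, end);
        if (rc)
                return rc;

        inode_lock(inode);
        if (!(inode->i_state & I_DIRTY_ALL) ||
            (datasync && !(inode->i_state & I_DIRTY_DATASYNC))) {
                /* Nothing to commit; just make sure the journal is on disk */
                jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1);
                inode_unlock(inode);
                return rc;
        }

        rc |= jfs_commit_inode(inode, 1);       /* wait == 1: synchronous commit */
        inode_unlock(inode);

        return rc ? -EIO : 0;
}
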
 115 int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
 116 {
 117         int wait = wbc->sync_mode == WB_SYNC_ALL;
 118 
 119         if (inode->i_nlink == 0)
 120                 return 0;
 121         /*
 122          * If COMMIT_DIRTY is not set, the inode isn't really dirty.
 123          * It has been committed since the last change, but was still
 124          * on the dirty inode list.
 125          */
 126         if (!test_cflag(COMMIT_Dirty, inode)) {
 127                 /* Make sure committed changes hit the disk */
 128                 jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait);
 129                 return 0;
 130         }
 131 
 132         if (jfs_commit_inode(inode, wait)) {
 133                 jfs_err("jfs_write_inode: jfs_commit_inode failed!");
 134                 return -EIO;
 135         } else
 136                 return 0;
 137 }
 138 
 139 void jfs_evict_inode(struct inode *inode)
 140 {
 141         struct jfs_inode_info *ji = JFS_IP(inode);
 142 
 143         jfs_info("In jfs_evict_inode, inode = 0x%p", inode);
 144 
 145         if (!inode->i_nlink && !is_bad_inode(inode)) {
 146                 dquot_initialize(inode);
 147 
 148                 if (JFS_IP(inode)->fileset == FILESYSTEM_I) {
 149                         truncate_inode_pages_final(&inode->i_data);
 150 
 151                         if (test_cflag(COMMIT_Freewmap, inode))
 152                                 jfs_free_zero_link(inode);
 153 
 154                         diFree(inode);
 155 
 156                         /*
 157                          * Free the inode from the quota allocation.
 158                          */
 159                         dquot_free_inode(inode);
 160                 }
 161         } else {
 162                 truncate_inode_pages_final(&inode->i_data);
 163         }
 164         clear_inode(inode);
 165         dquot_drop(inode);
 166 
 167         BUG_ON(!list_empty(&ji->anon_inode_list));
 168 
 169         spin_lock_irq(&ji->ag_lock);
 170         if (ji->active_ag != -1) {
 171                 struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
 172                 atomic_dec(&bmap->db_active[ji->active_ag]);
 173                 ji->active_ag = -1;
 174         }
 175         spin_unlock_irq(&ji->ag_lock);
 176 }
 177 
 178 void jfs_dirty_inode(struct inode *inode, int flags)
 179 {
 180         static int noisy = 5;
 181 
 182         if (isReadOnly(inode)) {
 183                 if (!special_file(inode->i_mode) && noisy) {
 184                         /* kernel allows writes to devices on read-only
 185                          * partitions and may try to mark inode dirty
 186                          */
 187                         jfs_err("jfs_dirty_inode called on read-only volume");
 188                         jfs_err("Is remount racy?");
 189                         noisy--;
 190                 }
 191                 return;
 192         }
 193 
 194         set_cflag(COMMIT_Dirty, inode);
 195 }
 196 
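jfs_write_inode(), jfs_evict_inode() and jfs_dirty_inode() are not called from this file; the VFS reaches them through the super_operations table in fs/jfs/super.c. Abridged to the entries relevant here (the exact set of callbacks varies with kernel version), it looks roughly like:

static const struct super_operations jfs_super_operations = {
        .alloc_inode    = jfs_alloc_inode,
        .dirty_inode    = jfs_dirty_inode,      /* defined above */
        .write_inode    = jfs_write_inode,      /* defined above */
        .evict_inode    = jfs_evict_inode,      /* defined above */
        .put_super      = jfs_put_super,
        .sync_fs        = jfs_sync_fs,
        .statfs         = jfs_statfs,
        /* remaining callbacks (remount, freeze, show_options) omitted */
};
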
 197 int jfs_get_block(struct inode *ip, sector_t lblock,
 198                   struct buffer_head *bh_result, int create)
 199 {
 200         s64 lblock64 = lblock;
 201         int rc = 0;
 202         xad_t xad;
 203         s64 xaddr;
 204         int xflag;
 205         s32 xlen = bh_result->b_size >> ip->i_blkbits;
 206 
 207         /*
 208          * Take appropriate lock on inode
 209          */
 210         if (create)
 211                 IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
 212         else
 213                 IREAD_LOCK(ip, RDWRLOCK_NORMAL);
 214 
 215         if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
 216             (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) &&
 217             xaddr) {
 218                 if (xflag & XAD_NOTRECORDED) {
 219                         if (!create)
 220                                 /*
 221                                  * Allocated but not recorded, read treats
 222                                  * this as a hole
 223                                  */
 224                                 goto unlock;
 225 #ifdef _JFS_4K
 226                         XADoffset(&xad, lblock64);
 227                         XADlength(&xad, xlen);
 228                         XADaddress(&xad, xaddr);
 229 #else                           /* _JFS_4K */
 230                         /*
 231                          * As long as block size = 4K, this isn't a problem.
 232                          * We should mark the whole page not ABNR, but how
 233                          * will we know to mark the other blocks BH_New?
 234                          */
 235                         BUG();
 236 #endif                          /* _JFS_4K */
 237                         rc = extRecord(ip, &xad);
 238                         if (rc)
 239                                 goto unlock;
 240                         set_buffer_new(bh_result);
 241                 }
 242 
 243                 map_bh(bh_result, ip->i_sb, xaddr);
 244                 bh_result->b_size = xlen << ip->i_blkbits;
 245                 goto unlock;
 246         }
 247         if (!create)
 248                 goto unlock;
 249 
 250         /*
 251          * Allocate a new block
 252          */
 253 #ifdef _JFS_4K
 254         if ((rc = extHint(ip, lblock64 << ip->i_sb->s_blocksize_bits, &xad)))
 255                 goto unlock;
 256         rc = extAlloc(ip, xlen, lblock64, &xad, false);
 257         if (rc)
 258                 goto unlock;
 259 
 260         set_buffer_new(bh_result);
 261         map_bh(bh_result, ip->i_sb, addressXAD(&xad));
 262         bh_result->b_size = lengthXAD(&xad) << ip->i_blkbits;
 263 
 264 #else                           /* _JFS_4K */
 265         /*
 266          * We need to do whatever it takes to keep all but the last buffers
 267          * in 4K pages - see jfs_write.c
 268          */
 269         BUG();
 270 #endif                          /* _JFS_4K */
 271 
 272       unlock:
 273         /*
 274          * Release lock on inode
 275          */
 276         if (create)
 277                 IWRITE_UNLOCK(ip);
 278         else
 279                 IREAD_UNLOCK(ip);
 280         return rc;
 281 }
 282 
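jfs_get_block() implements the kernel's get_block_t contract: the caller hands in a buffer_head whose b_size bounds the mapping length, and the create flag chooses between lookup and allocation. A minimal read-only use of that contract, patterned after generic_block_bmap() below (the helper name is made up for illustration):

static sector_t jfs_example_block_lookup(struct inode *inode, sector_t lblock)
{
        struct buffer_head tmp = {
                .b_size = i_blocksize(inode),   /* ask for at most one block */
        };

        /* create == 0: pure lookup, never allocates; holes stay unmapped */
        if (jfs_get_block(inode, lblock, &tmp, 0))
                return 0;

        return buffer_mapped(&tmp) ? tmp.b_blocknr : 0;
}
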
 283 static int jfs_writepage(struct page *page, struct writeback_control *wbc)
 284 {
 285         return block_write_full_page(page, jfs_get_block, wbc);
 286 }
 287 
 288 static int jfs_writepages(struct address_space *mapping,
 289                         struct writeback_control *wbc)
 290 {
 291         return mpage_writepages(mapping, wbc, jfs_get_block);
 292 }
 293 
 294 static int jfs_readpage(struct file *file, struct page *page)
 295 {
 296         return mpage_readpage(page, jfs_get_block);
 297 }
 298 
 299 static int jfs_readpages(struct file *file, struct address_space *mapping,
 300                 struct list_head *pages, unsigned nr_pages)
 301 {
 302         return mpage_readpages(mapping, pages, nr_pages, jfs_get_block);
 303 }
 304 
 305 static void jfs_write_failed(struct address_space *mapping, loff_t to)
 306 {
 307         struct inode *inode = mapping->host;
 308 
 309         if (to > inode->i_size) {
 310                 truncate_pagecache(inode, inode->i_size);
 311                 jfs_truncate(inode);
 312         }
 313 }
 314 
 315 static int jfs_write_begin(struct file *file, struct address_space *mapping,
 316                                 loff_t pos, unsigned len, unsigned flags,
 317                                 struct page **pagep, void **fsdata)
 318 {
 319         int ret;
 320 
 321         ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
 322                                 jfs_get_block);
 323         if (unlikely(ret))
 324                 jfs_write_failed(mapping, pos + len);
 325 
 326         return ret;
 327 }
 328 
 329 static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
 330 {
 331         return generic_block_bmap(mapping, block, jfs_get_block);
 332 }
 333 
 334 static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 335 {
 336         struct file *file = iocb->ki_filp;
 337         struct address_space *mapping = file->f_mapping;
 338         struct inode *inode = file->f_mapping->host;
 339         size_t count = iov_iter_count(iter);
 340         ssize_t ret;
 341 
 342         ret = blockdev_direct_IO(iocb, inode, iter, jfs_get_block);
 343 
 344         /*
 345          * In case of error extending write may have instantiated a few
 346          * blocks outside i_size. Trim these off again.
 347          */
 348         if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
 349                 loff_t isize = i_size_read(inode);
 350                 loff_t end = iocb->ki_pos + count;
 351 
 352                 if (end > isize)
 353                         jfs_write_failed(mapping, end);
 354         }
 355 
 356         return ret;
 357 }
 358 
 359 const struct address_space_operations jfs_aops = {
 360         .readpage       = jfs_readpage,
 361         .readpages      = jfs_readpages,
 362         .writepage      = jfs_writepage,
 363         .writepages     = jfs_writepages,
 364         .write_begin    = jfs_write_begin,
 365         .write_end      = nobh_write_end,
 366         .bmap           = jfs_bmap,
 367         .direct_IO      = jfs_direct_IO,
 368 };
 369 
 370 /*
 371  * Guts of jfs_truncate.  Called with locks already held.  Can be called
 372  * with directory for truncating directory index table.
 373  */
 374 void jfs_truncate_nolock(struct inode *ip, loff_t length)
 375 {
 376         loff_t newsize;
 377         tid_t tid;
 378 
 379         ASSERT(length >= 0);
 380 
 381         if (test_cflag(COMMIT_Nolink, ip)) {
 382                 xtTruncate(0, ip, length, COMMIT_WMAP);
 383                 return;
 384         }
 385 
 386         do {
 387                 tid = txBegin(ip->i_sb, 0);
 388 
 389                 /*
 390                  * The commit_mutex cannot be taken before txBegin.
 391                  * txBegin may block and there is a chance the inode
 392                  * could be marked dirty and need to be committed
 393                  * before txBegin unblocks
 394                  */
 395                 mutex_lock(&JFS_IP(ip)->commit_mutex);
 396 
 397                 newsize = xtTruncate(tid, ip, length,
 398                                      COMMIT_TRUNCATE | COMMIT_PWMAP);
 399                 if (newsize < 0) {
 400                         txEnd(tid);
 401                         mutex_unlock(&JFS_IP(ip)->commit_mutex);
 402                         break;
 403                 }
 404 
 405                 ip->i_mtime = ip->i_ctime = current_time(ip);
 406                 mark_inode_dirty(ip);
 407 
 408                 txCommit(tid, 1, &ip, 0);
 409                 txEnd(tid);
 410                 mutex_unlock(&JFS_IP(ip)->commit_mutex);
 411         } while (newsize > length);     /* Truncate isn't always atomic */
 412 }
 413 
 414 void jfs_truncate(struct inode *ip)
 415 {
 416         jfs_info("jfs_truncate: size = 0x%lx", (ulong) ip->i_size);
 417 
 418         nobh_truncate_page(ip->i_mapping, ip->i_size, jfs_get_block);
 419 
 420         IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
 421         jfs_truncate_nolock(ip, ip->i_size);
 422         IWRITE_UNLOCK(ip);
 423 }
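
The usual way into jfs_truncate() is the size-changing branch of jfs_setattr() in fs/jfs/file.c, which shrinks the page cache before releasing the on-disk extents. Roughly, with error handling and the non-size attributes trimmed:

        if ((iattr->ia_valid & ATTR_SIZE) &&
            iattr->ia_size != i_size_read(inode)) {
                inode_dio_wait(inode);          /* let in-flight direct I/O drain */

                rc = inode_newsize_ok(inode, iattr->ia_size);
                if (rc)
                        return rc;

                truncate_setsize(inode, iattr->ia_size);
                jfs_truncate(inode);            /* trims extents past i_size */
        }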
