Lines Matching defs:inode (fs/namei.c)

56 * With the new dcache, the pathname is stored at each inode, at least as
57 * long as the refcount of the inode is positive. As a side effect, the
58 * size of the dcache depends on the inode cache and thus is dynamic.
258 static int check_acl(struct inode *inode, int mask)
264 acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
270 return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK);
273 acl = get_acl(inode, ACL_TYPE_ACCESS);
277 int error = posix_acl_permission(inode, acl, mask);
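
Note: check_acl (lines 258-277) consults the POSIX access ACL (ACL_TYPE_ACCESS) cached on the inode. From userspace the same per-inode access ACL can be inspected through the libacl API; a minimal sketch, assuming libacl is installed, the program is linked with -lacl, and "/tmp/somefile" is only a placeholder path:

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/acl.h>        /* libacl: acl_get_file(), acl_to_text() */

    int main(int argc, char **argv)
    {
        const char *path = argc > 1 ? argv[1] : "/tmp/somefile";  /* placeholder */
        acl_t acl = acl_get_file(path, ACL_TYPE_ACCESS);
        if (!acl) {
            perror("acl_get_file");
            return 1;
        }
        char *text = acl_to_text(acl, NULL);   /* human-readable ACL entries */
        if (text) {
            printf("%s", text);
            acl_free(text);
        }
        acl_free(acl);
        return 0;
    }
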
289 static int acl_permission_check(struct inode *inode, int mask)
291 unsigned int mode = inode->i_mode;
293 if (likely(uid_eq(current_fsuid(), inode->i_uid)))
296 if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
297 int error = check_acl(inode, mask);
302 if (in_group_p(inode->i_gid))
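
Note: acl_permission_check (lines 289-302) does the classic owner/group/other selection: if the caller's fsuid owns the inode the "user" bits apply, otherwise the ACL or the "group" bits, otherwise the "other" bits. A simplified standalone model of that selection (plain uid/gid comparison, no ACLs, supplementary groups, or capabilities; the MAY_* values line up with the rwx bits, as in the kernel):

    #include <stdbool.h>
    #include <sys/types.h>

    #define MAY_EXEC  0x01
    #define MAY_WRITE 0x02
    #define MAY_READ  0x04

    /* Simplified model: pick the owner, group or other rwx bits, then test
     * whether every requested MAY_* bit is present. */
    static bool may_access(mode_t mode, uid_t i_uid, gid_t i_gid,
                           uid_t fsuid, gid_t fsgid, int mask)
    {
        if (fsuid == i_uid)
            mode >>= 6;        /* owner class */
        else if (fsgid == i_gid)
            mode >>= 3;        /* group class (the real code also checks
                                  supplementary groups and POSIX ACLs) */

        return (mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0;
    }
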
316 * @inode: inode to check access rights for
328 int generic_permission(struct inode *inode, int mask)
335 ret = acl_permission_check(inode, mask);
339 if (S_ISDIR(inode->i_mode)) {
341 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
344 if (capable_wrt_inode_uidgid(inode,
354 if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
355 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
363 if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
372 * even looking at the inode->i_op values. So we keep a cache
373 * flag in inode->i_opflags, that says "this has no special
376 static inline int do_inode_permission(struct inode *inode, int mask)
378 if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
379 if (likely(inode->i_op->permission))
380 return inode->i_op->permission(inode, mask);
382 /* This gets set once for the inode lifetime */
383 spin_lock(&inode->i_lock);
384 inode->i_opflags |= IOP_FASTPERM;
385 spin_unlock(&inode->i_lock);
387 return generic_permission(inode, mask);
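
Note: do_inode_permission (lines 376-387) caches, in inode->i_opflags, the fact that an inode has no ->permission hook, so later checks can skip the indirect call. A generic standalone sketch of that "probe once, set a flag, take the fast path afterwards" pattern (all names here are invented for the illustration):

    #include <pthread.h>
    #include <stddef.h>

    #define OP_FASTPERM 0x1   /* invented flag: "no custom permission hook" */

    struct obj_ops { int (*permission)(void *obj, int mask); };

    struct obj {
        unsigned int          opflags;
        pthread_mutex_t       lock;
        const struct obj_ops *ops;
    };

    static int default_permission(void *obj, int mask)
    {
        (void)obj; (void)mask;
        return 0;                       /* stand-in for generic_permission() */
    }

    static int obj_permission(struct obj *o, int mask)
    {
        if (!(o->opflags & OP_FASTPERM)) {
            if (o->ops->permission)             /* slow path: custom hook */
                return o->ops->permission(o, mask);
            /* No hook: remember that for the lifetime of the object. */
            pthread_mutex_lock(&o->lock);
            o->opflags |= OP_FASTPERM;
            pthread_mutex_unlock(&o->lock);
        }
        return default_permission(o, mask);
    }
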
391 * __inode_permission - Check for access rights to a given inode
392 * @inode: Inode to check permission on
395 * Check for read/write/execute permissions on an inode.
402 int __inode_permission(struct inode *inode, int mask)
410 if (IS_IMMUTABLE(inode))
414 retval = do_inode_permission(inode, mask);
418 retval = devcgroup_inode_permission(inode, mask);
422 return security_inode_permission(inode, mask);
428 * @sb: Superblock of inode to check permission on
429 * @inode: Inode to check permission on
432 * Separate out file-system wide checks from inode-specific permission checks.
434 static int sb_permission(struct super_block *sb, struct inode *inode, int mask)
437 umode_t mode = inode->i_mode;
448 * inode_permission - Check for access rights to a given inode
449 * @inode: Inode to check permission on
452 * Check for read/write/execute permissions on an inode. We use fs[ug]id for
458 int inode_permission(struct inode *inode, int mask)
462 retval = sb_permission(inode->i_sb, inode, mask);
465 return __inode_permission(inode, mask);
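
Note: inode_permission (lines 448-465) is what path-based syscalls eventually hit, and per the comment at line 452 it checks against fs[ug]id. A trivial userspace way to exercise it is opening a file the caller cannot read; "/etc/shadow" is used only as a conveniently root-only path:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* Opening for reading goes through inode_permission(inode, MAY_READ);
         * for an unprivileged caller this typically fails with EACCES. */
        int fd = open("/etc/shadow", O_RDONLY);
        if (fd < 0)
            perror("open /etc/shadow");
        else {
            printf("opened, fd=%d\n", fd);
            close(fd);
        }
        return 0;
    }
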
499 struct inode *inode; /* path.dentry.d_inode */
585 BUG_ON(nd->inode != parent->d_inode);
730 nd->inode = nd->path.dentry->d_inode;
748 struct inode *inode = link->dentry->d_inode;
749 if (inode->i_op->put_link)
750 inode->i_op->put_link(link->dentry, nd, cookie);
775 const struct inode *inode;
776 const struct inode *parent;
782 inode = link->dentry->d_inode;
783 if (uid_eq(current_cred()->fsuid, inode->i_uid))
792 if (uid_eq(parent->i_uid, inode->i_uid))
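
Note: lines 775-792 belong to the fs.protected_symlinks policy: inside a sticky, world-writable directory a symlink is only followed if the follower's fsuid matches the link's owner, or the directory's owner matches the link's owner. A standalone model of just that decision (the sysctl and the sticky/world-writable pre-conditions are assumed to have been checked already):

    #include <stdbool.h>
    #include <sys/types.h>

    /* Simplified model of "may this process follow the symlink?" as applied
     * inside sticky, world-writable directories when fs.protected_symlinks
     * is enabled. */
    static bool may_follow_link(uid_t fsuid, uid_t link_uid, uid_t dir_uid)
    {
        if (fsuid == link_uid)      /* follower owns the symlink */
            return true;
        if (dir_uid == link_uid)    /* directory owner also owns the symlink */
            return true;
        return false;               /* otherwise the walk fails with EACCES */
    }
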
803 * @inode: the source inode to hardlink from
806 * - inode is not a regular file
807 * - inode is setuid
808 * - inode is setgid and group-exec
813 static bool safe_hardlink_source(struct inode *inode)
815 umode_t mode = inode->i_mode;
830 if (inode_permission(inode, MAY_READ | MAY_WRITE))
842 * - fsuid does not match inode
851 struct inode *inode;
857 inode = link->dentry->d_inode;
859 /* Source inode owner (or CAP_FOWNER) can hardlink all they like,
862 if (uid_eq(cred->fsuid, inode->i_uid) || safe_hardlink_source(inode) ||
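
Note: lines 803-862 are the fs.protected_hardlinks check: an unprivileged user may hardlink a file only if they own it, or if it is a "safe" source (a regular file, not setuid, not setgid+group-executable, and readable and writable to them). A quick userspace probe, assuming fs.protected_hardlinks=1; "/etc/passwd" and the link name are placeholders:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        /* With fs.protected_hardlinks=1 an unprivileged caller cannot
         * hardlink a file it does not own unless the source is "safe".
         * Expect EPERM here; EXDEV instead means the two paths are on
         * different filesystems. */
        if (link("/etc/passwd", "/tmp/passwd.hardlink") == 0)
            printf("link created (source considered safe)\n");
        else
            printf("link failed: %s\n", strerror(errno));
        return 0;
    }
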
918 nd->inode = nd->path.dentry->d_inode;
1154 struct inode **inode)
1183 * Update the inode too. We don't need to re-check the
1187 *inode = path->dentry->d_inode;
1195 struct inode *inode = nd->inode;
1209 inode = parent->d_inode;
1221 inode = nd->path.dentry->d_inode;
1231 inode = nd->path.dentry->d_inode;
1236 nd->inode = inode;
1339 nd->inode = nd->path.dentry->d_inode;
1390 static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
1428 struct path *path, struct inode **inode)
1449 * This sequence count validates that the inode matches
1452 *inode = dentry->d_inode;
1484 if (likely(__follow_mount_rcu(nd, path, inode)))
1521 *inode = path->dentry->d_inode;
1535 BUG_ON(nd->inode != parent->d_inode);
1557 int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
1563 return inode_permission(nd->inode, MAY_EXEC);
1592 * to do this check without having to look at inode->i_op,
1604 struct inode *inode;
1613 err = lookup_fast(nd, path, &inode);
1625 inode = path->dentry->d_inode;
1636 BUG_ON(inode != path->dentry->d_inode);
1640 nd->inode = inode;
1903 struct inode *inode = root->d_inode;
1907 retval = inode_permission(inode, MAY_EXEC);
1912 nd->inode = inode;
1979 nd->inode = nd->path.dentry->d_inode;
2434 * in the inode in this situation, and ESTALE errors can be a problem. We
2455 int __check_sticky(struct inode *dir, struct inode *inode)
2459 if (uid_eq(inode->i_uid, fsuid))
2463 return !capable_wrt_inode_uidgid(inode, CAP_FOWNER);
2486 static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
2488 struct inode *inode = victim->d_inode;
2493 BUG_ON(!inode);
2504 if (check_sticky(dir, inode) || IS_APPEND(inode) ||
2505 IS_IMMUTABLE(inode) || IS_SWAPFILE(inode))
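
Note: __check_sticky (lines 2455-2463) and may_delete encode the sticky-directory rule familiar from /tmp: an entry may be deleted or renamed only by the file's owner, the directory's owner, or a caller with CAP_FOWNER. A standalone model of the ownership part of that check:

    #include <stdbool.h>
    #include <sys/types.h>

    /* Model of the sticky-bit deletion rule: in a directory with S_ISVTX
     * set, deletion is refused unless the caller owns the victim or the
     * directory (capability overrides such as CAP_FOWNER are ignored in
     * this sketch). */
    static bool sticky_allows_delete(uid_t fsuid, uid_t dir_uid, uid_t victim_uid)
    {
        return fsuid == victim_uid || fsuid == dir_uid;
    }
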
2529 static inline int may_create(struct inode *dir, struct dentry *child)
2583 int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2607 struct inode *inode = dentry->d_inode;
2614 if (!inode)
2617 switch (inode->i_mode & S_IFMT) {
2635 error = inode_permission(inode, acc_mode);
2642 if (IS_APPEND(inode)) {
2650 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
2659 struct inode *inode = path->dentry->d_inode;
2660 int error = get_write_access(inode);
2674 put_write_access(inode);
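
Note: line 2650 shows the O_NOATIME restriction in may_open: only the inode owner (or a caller capable of CAP_FOWNER) may open with O_NOATIME. A small demonstration, using "/etc/hosts" only as an example of a file the caller typically does not own:

    #define _GNU_SOURCE        /* O_NOATIME is a Linux extension */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* may_open() rejects O_NOATIME unless the caller owns the inode or
         * is suitably capable, so this usually fails with EPERM for
         * non-root users. */
        int fd = open("/etc/hosts", O_RDONLY | O_NOATIME);
        if (fd < 0)
            perror("open O_NOATIME");
        else {
            printf("opened with O_NOATIME (owner or privileged)\n");
            close(fd);
        }
        return 0;
    }
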
2717 struct inode *dir = nd->path.dentry->d_inode;
2820 * We didn't have the inode before the open, so check open permission
2877 struct inode *dir_inode = dir->d_inode;
2951 struct inode *inode;
2973 error = lookup_fast(nd, path, &inode);
2980 BUG_ON(nd->inode != dir->d_inode);
3063 inode = path->dentry->d_inode;
3069 inode = path->dentry->d_inode;
3080 BUG_ON(inode != path->dentry->d_inode);
3092 nd->inode = inode;
3169 nd->inode = dir->d_inode;
3187 struct inode *dir;
3196 error = inode_permission(nd->inode, MAY_WRITE | MAY_EXEC);
3218 /* Don't check for other permissions, the inode was just created */
3230 struct inode *inode = file_inode(file);
3231 spin_lock(&inode->i_lock);
3232 inode->i_state |= I_LINKABLE;
3233 spin_unlock(&inode->i_lock);
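
Note: lines 3230-3233 mark an O_TMPFILE inode as I_LINKABLE so that, despite i_nlink == 0, it can later be given a name with linkat(). The user-visible pattern is: create the unnamed file, write it, then link it into place via /proc/self/fd. The paths below are placeholders:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* Create an unnamed file in /tmp; the inode starts with
         * i_nlink == 0 but is flagged I_LINKABLE by the kernel. */
        int fd = open("/tmp", O_TMPFILE | O_WRONLY, 0600);
        if (fd < 0) { perror("open O_TMPFILE"); return 1; }

        if (write(fd, "hello\n", 6) != 6) { perror("write"); return 1; }

        /* Give the finished file a name atomically.  Linking the magic
         * /proc/self/fd/N symlink avoids needing CAP_DAC_READ_SEARCH,
         * which AT_EMPTY_PATH would require. */
        char proc_path[64];
        snprintf(proc_path, sizeof(proc_path), "/proc/self/fd/%d", fd);
        if (linkat(AT_FDCWD, proc_path, AT_FDCWD, "/tmp/now-visible",
                   AT_SYMLINK_FOLLOW) < 0)
            perror("linkat");

        close(fd);
        return 0;
    }
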
3453 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
3545 int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
3626 int vfs_rmdir(struct inode *dir, struct dentry *dentry)
3731 * @delegated_inode: returns victim inode, if the inode is delegated.
3736 * return a reference to the inode in delegated_inode. The caller
3737 * should then break the delegation on that inode and retry. Because
3745 int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegated_inode)
3747 struct inode *target = dentry->d_inode;
3797 struct inode *inode = NULL;
3798 struct inode *delegated_inode = NULL;
3821 inode = dentry->d_inode;
3824 ihold(inode);
3833 if (inode)
3834 iput(inode); /* truncate the inode here */
3835 inode = NULL;
3847 inode = NULL;
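
Note: the do_unlinkat fragment (line 3834, "truncate the inode here") reflects the usual Unix rule: unlink removes the name, but the inode and its data survive until the last reference is dropped. A short demonstration with a placeholder path under /tmp:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/tmp/unlink-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
        if (fd < 0) { perror("open"); return 1; }

        /* Drop the name.  i_nlink goes to zero, but the inode stays alive
         * because this process still holds it open. */
        if (unlink("/tmp/unlink-demo") < 0) { perror("unlink"); return 1; }

        if (write(fd, "still here\n", 11) != 11)
            perror("write");

        /* Only the final close (the last iput in the kernel) frees the
         * inode and its data. */
        close(fd);
        return 0;
    }
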
3878 int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
3940 * @delegated_inode: returns inode needing a delegation break
3946 * inode in delegated_inode. The caller should then break the delegation
3954 int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode)
3956 struct inode *inode = old_dentry->d_inode;
3960 if (!inode)
3967 if (dir->i_sb != inode->i_sb)
3973 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
3977 if (S_ISDIR(inode->i_mode))
3984 mutex_lock(&inode->i_mutex);
3986 if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE))
3988 else if (max_links && inode->i_nlink >= max_links)
3991 error = try_break_deleg(inode, delegated_inode);
3996 if (!error && (inode->i_state & I_LINKABLE)) {
3997 spin_lock(&inode->i_lock);
3998 inode->i_state &= ~I_LINKABLE;
3999 spin_unlock(&inode->i_lock);
4001 mutex_unlock(&inode->i_mutex);
4003 fsnotify_link(dir, inode, new_dentry);
4022 struct inode *delegated_inode = NULL;
4093 * @delegated_inode: returns an inode needing a delegation break
4100 * reference to the inode in delegated_inode. The caller should then
4136 int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
4137 struct inode *new_dir, struct dentry *new_dentry,
4138 struct inode **delegated_inode, unsigned int flags)
4143 struct inode *source = old_dentry->d_inode;
4144 struct inode *target = new_dentry->d_inode;
4277 struct inode *delegated_inode = NULL;
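
Note: vfs_rename (lines 4136-4138) takes a flags argument; from userspace those flags are reachable through renameat2(2) (RENAME_NOREPLACE, RENAME_EXCHANGE, RENAME_WHITEOUT). A sketch using the raw syscall so it builds with older glibc, assuming headers that define SYS_renameat2; the two paths are placeholders:

    #define _GNU_SOURCE
    #include <fcntl.h>          /* AT_FDCWD */
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef RENAME_NOREPLACE
    #define RENAME_NOREPLACE (1 << 0)   /* fail if the destination exists */
    #endif

    int main(void)
    {
        /* renameat2() exposes the vfs_rename() flags; RENAME_NOREPLACE
         * makes the rename fail with EEXIST instead of replacing the
         * destination. */
        if (syscall(SYS_renameat2, AT_FDCWD, "/tmp/src",
                    AT_FDCWD, "/tmp/dst", RENAME_NOREPLACE) < 0)
            perror("renameat2");
        return 0;
    }
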
4425 int vfs_whiteout(struct inode *dir, struct dentry *dentry)
4458 * using) it for any given inode is up to filesystem.
4527 int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
4529 struct address_space *mapping = inode->i_mapping;
4555 mark_inode_dirty(inode);
4562 int page_symlink(struct inode *inode, const char *symname, int len)
4564 return __page_symlink(inode, symname, len,
4565 !(mapping_gfp_mask(inode->i_mapping) & __GFP_FS));
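
Note: __page_symlink/page_symlink (lines 4527-4565) store the target string in the symlink inode's page cache; filesystems such as ext2 call page_symlink() from their ->symlink operation. From userspace the stored string is simply what readlink() returns; a minimal round trip with placeholder paths:

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[256];

        /* The target string passed here is what page_symlink() (or the
         * filesystem's own mechanism) records for the new symlink inode. */
        if (symlink("/tmp/target", "/tmp/demo-symlink") < 0)
            perror("symlink");

        ssize_t n = readlink("/tmp/demo-symlink", buf, sizeof(buf) - 1);
        if (n < 0) {
            perror("readlink");
            return 1;
        }
        buf[n] = '\0';
        printf("symlink points to: %s\n", buf);
        return 0;
    }
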