/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <asm/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/delay.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (whence) {
	case SEEK_END: /* These reference inode->i_size */
	case SEEK_DATA:
	case SEEK_HOLE:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek(file, offset, whence);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;
	case SEEK_CUR:
	case SEEK_SET:
		error = generic_file_llseek(file, offset, whence);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}

/**
 * gfs2_readdir - Iterator for a directory
 * @file: The directory to read from
 * @ctx: What to feed directory entries to
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return error;

	error = gfs2_dir_read(dir, ctx, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	return error;
}
/**
 * fsflags_cvt - convert between fsflags and GFS2 flag values
 * @table: A table of 32 u32 flags
 * @val: a 32-bit value to convert
 *
 * This function can be used to convert between fsflags values and
 * GFS2's own flags values.
 *
 * Returns: the converted flags
 */
static u32 fsflags_cvt(const u32 *table, u32 val)
{
	u32 res = 0;
	while (val) {
		if (val & 1)
			res |= *table;
		table++;
		val >>= 1;
	}
	return res;
}

static const u32 fsflags_to_gfs2[32] = {
	[3] = GFS2_DIF_SYNC,
	[4] = GFS2_DIF_IMMUTABLE,
	[5] = GFS2_DIF_APPENDONLY,
	[7] = GFS2_DIF_NOATIME,
	[12] = GFS2_DIF_EXHASH,
	[14] = GFS2_DIF_INHERIT_JDATA,
	[17] = GFS2_DIF_TOPDIR,
};

static const u32 gfs2_to_fsflags[32] = {
	[gfs2fl_Sync] = FS_SYNC_FL,
	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
	[gfs2fl_AppendOnly] = FS_APPEND_FL,
	[gfs2fl_NoAtime] = FS_NOATIME_FL,
	[gfs2fl_ExHash] = FS_INDEX_FL,
	[gfs2fl_TopLevel] = FS_TOPDIR_FL,
	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};
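
/*
 * Note on the tables above: fsflags_to_gfs2[] is indexed by the bit position
 * of the corresponding FS_*_FL flag (e.g. entry [3] matches FS_SYNC_FL, which
 * is bit 3), while gfs2_to_fsflags[] is indexed by the GFS2 on-disk flag bit
 * numbers (the gfs2fl_* constants). fsflags_cvt() simply walks whichever
 * table it is given, so the same helper translates in either direction.
 */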

static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		return error;

	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
		fsflags |= FS_JOURNAL_DATA_FL;
	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
	return error;
}

void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		inode->i_flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_SYSTEM|			\
			     GFS2_DIF_TOPDIR|			\
			     GFS2_DIF_INHERIT_JDATA)
/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 * Returns: errno
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
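	/*
	 * When the journaled-data flag is changing, flush the log (if the
	 * inode is currently jdata) and then write back and wait on any
	 * cached pages, so that no dirty data is still in flight under the
	 * old journaling mode when the flag is switched.
	 */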
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}

static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	u32 fsflags, gfsflags;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
	if (!S_ISDIR(inode->i_mode)) {
		gfsflags &= ~GFS2_DIF_TOPDIR;
		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
		return do_gfs2_set_flags(filp, gfsflags, ~0);
	}
	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
}

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
	}
	return -ENOTTY;
}

/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */

static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
	struct inode *inode = file_inode(filep);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

	if (hint > atomic_read(&ip->i_res->rs_sizehint))
		atomic_set(&ip->i_res->rs_sizehint, hint);
}
/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's OK too.
 *
 * Returns: errno
 */

static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
	unsigned long size = PAGE_CACHE_SIZE;
	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

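	/*
	 * gfs2_block_map() may map fewer bytes than requested (it trims
	 * bh.b_size to the extent it actually mapped or allocated), so loop,
	 * advancing lblock by the mapped extent, until the whole page is
	 * backed by allocated blocks.
	 */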
	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(inode, lblock, &bh, 1);
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while (size > 0);
	return 0;
}

/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned long last_index;
	u64 pos = page->index << PAGE_CACHE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	loff_t size;
	int ret;

	sb_start_pagefault(inode->i_sb);

	/* Update file times before taking page lock */
	file_update_time(vma->vm_file);

	ret = get_write_access(inode);
	if (ret)
		goto out;

	ret = gfs2_rs_alloc(ip);
	if (ret)
		goto out_write_access;

	gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

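	/*
	 * Fast path: if the page is already fully backed by allocated blocks,
	 * no allocation or transaction is needed. Just check that the page is
	 * still valid and fall through to dirty it below.
	 */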
	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
		goto out_unlock;
	}

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_unlock;

	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
	ap.target = data_blocks + ind_blocks;
	ret = gfs2_quota_lock_check(ip, &ap);
	if (ret)
		goto out_unlock;
	ret = gfs2_inplace_reserve(ip, &ap);
	if (ret)
		goto out_quota_unlock;

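	/*
	 * Size the transaction reservation: the dinode and any new indirect
	 * blocks, the data blocks themselves when journaling data, plus the
	 * statfs/quota changes and resource group bitmap blocks whenever new
	 * blocks are being allocated.
	 */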
	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	size = i_size_read(inode);
	last_index = (size - 1) >> PAGE_CACHE_SHIFT;
	/* Check page index against inode size */
	if (size == 0 || (page->index > last_index))
		goto out_trans_end;

	ret = -EAGAIN;
	/* If truncated, we must retry the operation, as we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
	ret = 0;
	if (gfs2_is_stuffed(ip))
		ret = gfs2_unstuff_dinode(ip, page);
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page);

out_trans_end:
	if (ret)
		unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (ret == 0) {
		set_page_dirty(page);
		wait_for_stable_page(page);
	}
out_write_access:
	put_write_access(inode);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}

static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = gfs2_page_mkwrite,
};

/**
 * gfs2_mmap - set up a memory mapping of a file
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}

/**
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
 *
 * This may be called with or without the glock held, depending on how
 * it was invoked. For regular files, however, it must always be called
 * under a glock. For other file types, it does not matter whether we
 * hold the glock or not.
 *
 * Returns: Error code or 0 for success
 */

int gfs2_open_common(struct inode *inode, struct file *file)
{
	struct gfs2_file *fp;
	int ret;

	if (S_ISREG(inode->i_mode)) {
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;
	}

	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;
	return 0;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. For regular files we must still take the
 * glock, so that the file size is up to date for the large-file check
 * in the common code; other file types are not affected.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	int error;
	bool need_unlock = false;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		need_unlock = true;
	}

	error = gfs2_open_common(inode, file);

	if (need_unlock)
		gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if (!(file->f_mode & FMODE_WRITE))
		return 0;

	gfs2_rs_delete(ip, &inode->i_writecount);
	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & I_DIRTY_ALL;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (!gfs2_is_jdata(ip))
		sync_state &= ~I_DIRTY_PAGES;
	if (datasync)
		sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME);

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			filemap_write_and_wait(mapping);
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = filemap_fdatawait_range(mapping, start, end);

	return ret ? ret : ret1;
}

/**
 * gfs2_file_write_iter - Perform a write to a file
 * @iocb: The io context
 * @from: The data to write
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can end up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 * Returns: The number of bytes written, or errno
 */

static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	int ret;

	ret = gfs2_rs_alloc(ip);
	if (ret)
		return ret;

	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));

	if (iocb->ki_flags & IOCB_APPEND) {
		struct gfs2_holder gh;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	return generic_file_write_iter(iocb, from);
}

static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;
	unsigned int nr_blks;
	sector_t lblock = offset >> inode->i_blkbits;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

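	/*
	 * Map/allocate blocks an extent at a time; gfs2_block_map() trims
	 * bh_map.b_size to whatever it actually mapped. The zeronew flag is
	 * set on each request so that newly allocated blocks can be zeroed;
	 * if it comes back cleared on a new extent, treat that as an I/O
	 * error.
	 */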
	while (len) {
		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
		bh_map.b_size = len;
		set_buffer_zeronew(&bh_map);

		error = gfs2_block_map(inode, lblock, &bh_map, 1);
		if (unlikely(error))
			goto out;
		len -= bh_map.b_size;
		nr_blks = bh_map.b_size >> inode->i_blkbits;
		lblock += nr_blks;
		if (!buffer_new(&bh_map))
			continue;
		if (unlikely(!buffer_zeronew(&bh_map))) {
			error = -EIO;
			goto out;
		}
	}
out:
	brelse(dibh);
	return error;
}

/**
 * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
 *                     blocks, determine how many bytes can be written.
 * @ip:          The inode in question.
 * @len:         Max cap of bytes. What we return in *len must be <= this.
 * @data_blocks: Compute and return the number of data blocks needed
 * @ind_blocks:  Compute and return the number of indirect blocks needed
 * @max_blocks:  The total blocks available to work with.
 *
 * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
 */
static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks,
			    unsigned int max_blocks)
{
	loff_t max = *len;
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

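	/*
	 * Starting from the block budget (less a small allowance per level of
	 * the metadata tree), repeatedly subtract the number of indirect
	 * pointer blocks needed to address max_data blocks at each level,
	 * until the remaining count fits within the dinode's direct pointers.
	 */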
	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}

	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}

static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes, max_blks = UINT_MAX;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;

	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_size_hint(file, offset, len);

	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.min_target = data_blocks + ind_blocks;

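	/*
	 * Allocate the range a chunk at a time: each pass reserves quota and
	 * resource group space, opens a transaction sized for that chunk, and
	 * lets fallocate_chunk() map the blocks, shrinking the chunk to
	 * whatever quota and the chosen resource group actually allow.
	 */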
	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}

		/* We need to determine how many bytes we can actually
		 * fallocate without exceeding quota or going over the
		 * end of the fs. We start off optimistically by assuming
		 * we can write max_bytes */
		max_bytes = (len > max_chunk_size) ? max_chunk_size : len;

		/* Since max_bytes is most likely a theoretical max, we
		 * calculate a more realistic 'bytes' to serve as a good
		 * starting point for the number of bytes we may be able
		 * to write */
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
		ap.target = data_blocks + ind_blocks;

		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			return error;
		/* ap.allowed tells us how many blocks quota will allow
		 * us to write. Check if this reduces max_blks */
		if (ap.allowed && ap.allowed < max_blks)
			max_blks = ap.allowed;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;

		/* check if the selected rgrp limits our max_blks further */
		if (ap.allowed && ap.allowed < max_blks)
			max_blks = ap.allowed;

		/* Almost done. Calculate bytes that can be written using
		 * max_blks. We also recompute max_bytes, data_blocks and
		 * ind_blocks */
		calc_max_reserv(ip, &max_bytes, &data_blocks,
				&ind_blocks, max_blks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size) {
		i_size_write(inode, pos + count);
		/* Marks the inode as dirty */
		file_update_time(file);
	}

	return generic_write_sync(file, pos, count);

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
	return error;
}

static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	mutex_lock(&inode->i_mutex);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len) > inode->i_size) {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			goto out_unlock;
	}

	ret = get_write_access(inode);
	if (ret)
		goto out_unlock;

	ret = gfs2_rs_alloc(ip);
	if (ret)
		goto out_putw;

	ret = __gfs2_fallocate(file, mode, offset, len);
	if (ret)
		gfs2_rs_deltree(ip->i_res);
out_putw:
	put_write_access(inode);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	mutex_unlock(&inode->i_mutex);
	return ret;
}

static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *out, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	int error;
	struct gfs2_inode *ip = GFS2_I(out->f_mapping->host);

	error = gfs2_rs_alloc(ip);
	if (error)
		return (ssize_t)error;

	gfs2_size_hint(out, *ppos, len);

	return iter_file_splice_write(pipe, out, ppos, len, flags);
}

#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		if (fl->fl_type == F_UNLCK)
			posix_lock_file_wait(file, fl);
		return -EIO;
	}
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}

static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	struct gfs2_glock *gl;
	unsigned int state;
	int flags;
	int error = 0;
	int sleeptime;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;

	mutex_lock(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		if (fl_gh->gh_state == state)
			goto out;
		flock_lock_file_wait(file,
				     &(struct file_lock){.fl_type = F_UNLCK});
		gfs2_glock_dq(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
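	/*
	 * Try to acquire the flock glock. For a non-blocking request the
	 * first attempt uses LM_FLAG_TRY_1CB; if that returns GLR_TRYFAILED,
	 * retry with LM_FLAG_TRY, sleeping 1, 2 and 4 ms between attempts,
	 * before giving up and reporting -EAGAIN below.
	 */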
	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
		error = gfs2_glock_nq(fl_gh);
		if (error != GLR_TRYFAILED)
			break;
		fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
		fl_gh->gh_error = 0;
		msleep(sleeptime);
	}
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = flock_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}

static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	flock_lock_file_wait(file, fl);
	if (fl_gh->gh_gl) {
		gfs2_glock_dq(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}

const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= simple_nosetlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
	.iterate	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
	.iterate	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};