/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"

/*
 * Lock order:
 *
 * ip->i_lock
 *   qi->qi_tree_lock
 *     dquot->q_qlock (xfs_dqlock() and friends)
 *       dquot->q_flush (xfs_dqflock() and friends)
 *       qi->qi_lru_lock
 *
 * If two dquots need to be locked the order is user before group/project,
 * otherwise by the lowest id first; see xfs_dqlock2.
 */

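/*
 * Debug-only error injection knobs: when xfs_do_dqerror is set and the
 * dquot's device matches xfs_dqerror_target, every xfs_dqerror_mod-th
 * dqget request fails with -EIO (see the DEBUG block in xfs_qm_dqget()).
 */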
#ifdef DEBUG
xfs_buftarg_t *xfs_dqerror_target;
int xfs_do_dqerror;
int xfs_dqreq_num;
int xfs_dqerror_mod = 33;
#endif

struct kmem_zone		*xfs_qm_dqtrxzone;
static struct kmem_zone		*xfs_qm_dqzone;

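/*
 * Per-type lockdep class keys so that lockdep can tell user, group and
 * project dquot q_qlocks apart when more than one is held at a time (see
 * the lockdep_set_class() calls in xfs_qm_dqread()).
 */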
static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;

/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
	xfs_dquot_t	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	mutex_destroy(&dqp->q_qlock);
	kmem_zone_free(xfs_qm_dqzone, dqp);

	XFS_STATS_DEC(xs_qm_dquot);
}

/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dq)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_disk_dquot	*d = &dq->q_core;
	int			prealloc = 0;

	ASSERT(d->d_id);

	if (q->qi_bsoftlimit && !d->d_blk_softlimit) {
		d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
		prealloc = 1;
	}
	if (q->qi_bhardlimit && !d->d_blk_hardlimit) {
		d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
		prealloc = 1;
	}
	if (q->qi_isoftlimit && !d->d_ino_softlimit)
		d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
	if (q->qi_ihardlimit && !d->d_ino_hardlimit)
		d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
	if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
		d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
	if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
		d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);

	if (prealloc)
		xfs_dquot_set_prealloc_limits(dq);
}

/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated. (We just don't reject any quota
 * reservations in that case, when enforcement is off.)
 * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
 * enforcement's off.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded.  They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
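/*
 * For example: the first time d_bcount exceeds d_blk_softlimit (or the
 * hard limit), d_btimer is armed to get_seconds() + qi_btimelimit; once
 * d_bcount drops back to or below both limits, the timer is cleared.
 */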
void
xfs_qm_adjust_dqtimers(
	xfs_mount_t		*mp,
	xfs_disk_dquot_t	*d)
{
	ASSERT(d->d_id);

#ifdef DEBUG
	if (d->d_blk_hardlimit)
		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
		       be64_to_cpu(d->d_blk_hardlimit));
	if (d->d_ino_hardlimit)
		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
		       be64_to_cpu(d->d_ino_hardlimit));
	if (d->d_rtb_hardlimit)
		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
		       be64_to_cpu(d->d_rtb_hardlimit));
#endif

	if (!d->d_btimer) {
		if ((d->d_blk_softlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_softlimit))) ||
		    (d->d_blk_hardlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_btimelimit);
		} else {
			d->d_bwarns = 0;
		}
	} else {
		if ((!d->d_blk_softlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_softlimit))) &&
		    (!d->d_blk_hardlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = 0;
		}
	}

	if (!d->d_itimer) {
		if ((d->d_ino_softlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_softlimit))) ||
		    (d->d_ino_hardlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_itimelimit);
		} else {
			d->d_iwarns = 0;
		}
	} else {
		if ((!d->d_ino_softlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_softlimit))) &&
		    (!d->d_ino_hardlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = 0;
		}
	}

	if (!d->d_rtbtimer) {
		if ((d->d_rtb_softlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_softlimit))) ||
		    (d->d_rtb_hardlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_rtbtimelimit);
		} else {
			d->d_rtbwarns = 0;
		}
	} else {
		if ((!d->d_rtb_softlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_softlimit))) &&
		    (!d->d_rtb_hardlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = 0;
		}
	}
}

/*
 * Initialize a buffer full of dquots and log the whole thing.
 */
STATIC void
xfs_qm_init_dquot_blk(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_dqid_t	id,
	uint		type,
	xfs_buf_t	*bp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_dqblk_t	*d;
	int		curid, i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));

	d = bp->b_addr;

	/*
	 * ID of the first dquot in the block - IDs are zero based.
	 */
	curid = id - (id % q->qi_dqperchunk);
	ASSERT(curid >= 0);
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
		d->dd_diskdq.d_id = cpu_to_be32(curid);
		d->dd_diskdq.d_flags = type;
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}

	xfs_trans_dquot_buf(tp, bp,
			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
			     XFS_BLF_GDQUOT_BUF)));
	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}

/*
 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 * watermarks correspond to the soft and hard limits by default. If a soft limit
 * is not specified, we use 95% of the hard limit.
 */
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
	__uint64_t space;

	dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
	dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
	if (!dqp->q_prealloc_lo_wmark) {
		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
		do_div(dqp->q_prealloc_lo_wmark, 100);
		dqp->q_prealloc_lo_wmark *= 95;
	}

	space = dqp->q_prealloc_hi_wmark;

	do_div(space, 100);
	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}
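/*
 * Worked example: with a 1000 block hard limit and no soft limit, the
 * watermarks come out as hi = 1000 and lo = 950 (95% of hi), and the
 * 1/3/5 percent low-space thresholds are 10, 30 and 50 blocks.
 */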

/*
 * Allocate a block and fill it with dquots.
 * This is called when the bmapi finds a hole.
 */
STATIC int
xfs_qm_dqalloc(
	xfs_trans_t	**tpp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	xfs_inode_t	*quotip,
	xfs_fileoff_t	offset_fsb,
	xfs_buf_t	**O_bpp)
{
	xfs_fsblock_t	firstblock;
	xfs_bmap_free_t flist;
	xfs_bmbt_irec_t map;
	int		nmaps, error, committed;
	xfs_buf_t	*bp;
	xfs_trans_t	*tp = *tpp;

	ASSERT(tp != NULL);

	trace_xfs_dqalloc(dqp);

	/*
	 * Initialize the bmap freelist prior to calling bmapi code.
	 */
	xfs_bmap_init(&flist, &firstblock);
	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	/*
	 * Return if this type of quota was turned off while we didn't
	 * hold the inode lock.
	 */
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return -ESRCH;
	}

	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
	nmaps = 1;
	error = xfs_bmapi_write(tp, quotip, offset_fsb,
				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
				&map, &nmaps, &flist);
	if (error)
		goto error0;
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			       dqp->q_blkno,
			       mp->m_quotainfo->qi_dqchunklen,
			       0);
	if (!bp) {
		error = -ENOMEM;
		goto error1;
	}
	bp->b_ops = &xfs_dquot_buf_ops;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);

	/*
	 * xfs_bmap_finish() may commit the current transaction and
	 * start a second transaction if the freelist is not empty.
	 *
	 * Since we still want to modify this buffer, we need to
	 * ensure that the buffer is not released on commit of
	 * the first transaction and ensure the buffer is added to the
	 * second transaction.
	 *
	 * If there is only one transaction then don't stop the buffer
	 * from being released when it commits later on.
	 */

	xfs_trans_bhold(tp, bp);

	error = xfs_bmap_finish(tpp, &flist, &committed);
	if (error)
		goto error1;

	if (committed) {
		tp = *tpp;
		xfs_trans_bjoin(tp, bp);
	} else {
		xfs_trans_bhold_release(tp, bp);
	}

	*O_bpp = bp;
	return 0;

error1:
	xfs_bmap_cancel(&flist);
error0:
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);

	return error;
}

STATIC int
xfs_qm_dqrepair(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	xfs_dqid_t		firstid,
	struct xfs_buf		**bpp)
{
	int			error;
	struct xfs_disk_dquot	*ddq;
	struct xfs_dqblk	*d;
	int			i;

	/*
	 * Read the buffer without verification so we get the corrupted
	 * buffer returned to us. Make sure we verify it on write, though.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen,
				   0, bpp, NULL);

	if (error) {
		ASSERT(*bpp == NULL);
		return error;
	}
	(*bpp)->b_ops = &xfs_dquot_buf_ops;

	ASSERT(xfs_buf_islocked(*bpp));
	d = (struct xfs_dqblk *)(*bpp)->b_addr;

	/* Do the actual repair of dquots in this buffer */
	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
		ddq = &d[i].dd_diskdq;
		error = xfs_dqcheck(mp, ddq, firstid + i,
				       dqp->dq_flags & XFS_DQ_ALLTYPES,
				       XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
		if (error) {
			/* repair failed, we're screwed */
			xfs_trans_brelse(tp, *bpp);
			return -EIO;
		}
	}

	return 0;
}

/*
 * Maps a dquot to the buffer containing its on-disk version.
 * This returns a ptr to the buffer containing the on-disk dquot
 * in the bpp param, and a ptr to the on-disk dquot within that buffer.
 */
STATIC int
xfs_qm_dqtobp(
	xfs_trans_t		**tpp,
	xfs_dquot_t		*dqp,
	xfs_disk_dquot_t	**O_ddpp,
	xfs_buf_t		**O_bpp,
	uint			flags)
{
	struct xfs_bmbt_irec	map;
	int			nmaps = 1, error;
	struct xfs_buf		*bp;
	struct xfs_inode	*quotip = xfs_dq_to_quota_inode(dqp);
	struct xfs_mount	*mp = dqp->q_mount;
	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
	uint			lock_mode;

	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;

	lock_mode = xfs_ilock_data_map_shared(quotip);
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		/*
		 * Return if this type of quota was turned off while we
		 * didn't hold the quota inode lock.
		 */
		xfs_iunlock(quotip, lock_mode);
		return -ESRCH;
	}

	/*
	 * Find the block map; no allocations yet
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);

	xfs_iunlock(quotip, lock_mode);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount == 1);

	/*
	 * Offset of dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
		sizeof(xfs_dqblk_t);

	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK) {
		/*
		 * We don't allocate unless we're asked to
		 */
		if (!(flags & XFS_QMOPT_DQALLOC))
			return -ENOENT;

		ASSERT(tp);
		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
					dqp->q_fileoffset, &bp);
		if (error)
			return error;
		tp = *tpp;
	} else {
		trace_xfs_dqtobp_read(dqp);

		/*
		 * store the blkno etc so that we don't have to do the
		 * mapping all the time
		 */
		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
					   dqp->q_blkno,
					   mp->m_quotainfo->qi_dqchunklen,
					   0, &bp, &xfs_dquot_buf_ops);

		if (error == -EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
			xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
						mp->m_quotainfo->qi_dqperchunk;
			ASSERT(bp == NULL);
			error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
		}

		if (error) {
			ASSERT(bp == NULL);
			return error;
		}
	}

	ASSERT(xfs_buf_islocked(bp));
	*O_bpp = bp;
	*O_ddpp = bp->b_addr + dqp->q_bufoffset;

	return 0;
}


/*
 * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
 * and release the buffer immediately.
 *
 * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
 */
int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	uint			flags,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_disk_dquot	*ddqp;
	struct xfs_buf		*bp;
	struct xfs_trans	*tp = NULL;
	int			error;
	int			cancelflags = 0;

	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);

	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);

	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas.
	 */
	switch (type) {
	case XFS_DQ_USER:
		/* uses the default lock class */
		break;
	case XFS_DQ_GROUP:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
		break;
	case XFS_DQ_PROJ:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
		break;
	default:
		ASSERT(0);
		break;
	}

	XFS_STATS_INC(xs_qm_dquot);

	trace_xfs_dqread(dqp);

	if (flags & XFS_QMOPT_DQALLOC) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_dqalloc,
					  XFS_QM_DQALLOC_SPACE_RES(mp), 0);
		if (error)
			goto error1;
		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
	}

	/*
	 * Get a pointer to the on-disk dquot and the buffer containing it;
	 * dqp already knows its own type (GROUP/USER).
	 */
	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
	if (error) {
		/*
		 * This can happen if quotas got turned off (ESRCH),
		 * or if the dquot didn't exist on disk and we ask to
		 * allocate (ENOENT).
		 */
		trace_xfs_dqread_fail(dqp);
		cancelflags |= XFS_TRANS_ABORT;
		goto error1;
	}

	/* copy everything from disk dquot to the incore dquot */
	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
	xfs_qm_dquot_logitem_init(dqp);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);

	/* initialize the dquot speculative prealloc thresholds */
	xfs_dquot_set_prealloc_limits(dqp);

	/* Mark the buf so that this will stay incore a little longer */
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * We got the buffer with a xfs_trans_read_buf() (in dqtobp()),
	 * so we need to release it with xfs_trans_brelse().
	 * The strategy here is identical to that of inodes; we lock
	 * the dquot in xfs_qm_dqget() before making it accessible to
	 * others. This is because dquots, like inodes, need a good level of
	 * concurrency, and we don't want to take locks on the entire buffers
	 * for dquot accesses.
	 * Note also that the dquot buffer may even be dirty at this point, if
	 * this particular dquot was repaired. We still aren't afraid to
	 * brelse it because we have the changes incore.
	 */
	ASSERT(xfs_buf_islocked(bp));
	xfs_trans_brelse(tp, bp);

	if (tp) {
		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		if (error)
			goto error0;
	}

	*O_dqpp = dqp;
	return error;

error1:
	if (tp)
		xfs_trans_cancel(tp, cancelflags);
error0:
	xfs_qm_dqdestroy(dqp);
	*O_dqpp = NULL;
	return error;
}

/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
 * locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
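/*
 * A minimal usage sketch (locals here are illustrative, not taken from
 * this file): look up the user dquot for "id", allocating it on disk if
 * necessary, then unlock and drop the reference when done:
 *
 *	error = xfs_qm_dqget(mp, NULL, id, XFS_DQ_USER,
 *			     XFS_QMOPT_DQALLOC, &dqp);
 *	if (!error) {
 *		... use the locked dquot ...
 *		xfs_qm_dqput(dqp);
 *	}
 */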
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((!XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (!XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (!XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return -ESRCH;
	}

#ifdef DEBUG
	if (xfs_do_dqerror) {
		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
			xfs_debug(mp, "Returning error in dqget");
			return -EIO;
		}
	}

	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(xfs_inode_dquot(ip, type) == NULL);
	}
#endif

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (dqp) {
		xfs_dqlock(dqp);
		if (dqp->dq_flags & XFS_DQ_FREEING) {
			xfs_dqunlock(dqp);
			mutex_unlock(&qi->qi_tree_lock);
			trace_xfs_dqget_freeing(dqp);
			delay(1);
			goto restart;
		}

		dqp->q_nrefs++;
		mutex_unlock(&qi->qi_tree_lock);

		trace_xfs_dqget_hit(dqp);
		XFS_STATS_INC(xs_qm_dqcachehits);
		*O_dqpp = dqp;
		return 0;
	}
	mutex_unlock(&qi->qi_tree_lock);
	XFS_STATS_INC(xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqread(mp, id, type, flags, &dqp);

	if (ip)
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (error)
		return error;

	if (ip) {
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (xfs_this_quota_on(mp, type)) {
			struct xfs_dquot	*dqp1;

			dqp1 = xfs_inode_dquot(ip, type);
			if (dqp1) {
				xfs_qm_dqdestroy(dqp);
				dqp = dqp1;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			/* inode stays locked on return */
			xfs_qm_dqdestroy(dqp);
			return -ESRCH;
		}
	}

	mutex_lock(&qi->qi_tree_lock);
	error = radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);

		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(xs_qm_dquot_dups);
		goto restart;
	}

	/*
	 * We return a locked dquot to the caller, with a reference taken.
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

 dqret:
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}

/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 * On the last reference the dquot is added to the LRU for later reclaim.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs == 0) {
		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
		trace_xfs_dqput_free(dqp);

		if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
			XFS_STATS_INC(xs_qm_dquot_unused);
	}
	xfs_dqunlock(dqp);
}

/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 */
void
xfs_qm_dqrele(
	xfs_dquot_t	*dqp)
{
	if (!dqp)
		return;

	trace_xfs_dqrele(dqp);

	xfs_dqlock(dqp);
	/*
	 * We don't care to flush it if the dquot is dirty here.
	 * That will create stutters that we want to avoid.
	 * Instead we do a delayed write when we try to reclaim
	 * a dirty dquot. Also xfs_sync will take part of the burden...
	 */
	xfs_qm_dqput(dqp);
}

/*
 * This is the dquot flushing I/O completion routine.  It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk.  It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes.
 */
STATIC void
xfs_qm_dqflush_done(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
	xfs_dquot_t		*dqp = qip->qli_dquot;
	struct xfs_ail		*ailp = lip->li_ailp;

	/*
	 * We only want to pull the item from the AIL if its
	 * location in the log has not changed since we started the flush.
	 * Thus, we only bother if the dquot's lsn has
	 * not changed. First we check the lsn outside the lock
	 * since it's cheaper, and then we recheck while
	 * holding the lock before removing the dquot from the AIL.
	 */
	if ((lip->li_flags & XFS_LI_IN_AIL) &&
	    lip->li_lsn == qip->qli_flush_lsn) {

		/* xfs_trans_ail_delete() drops the AIL lock. */
		spin_lock(&ailp->xa_lock);
		if (lip->li_lsn == qip->qli_flush_lsn)
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
		else
			spin_unlock(&ailp->xa_lock);
	}

	/*
	 * Release the dq's flush lock since we're done with it.
	 */
	xfs_dqfunlock(dqp);
}

/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock held by the caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. Dquot is still locked on return. This behavior is
 * identical to that of inodes.
 */
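/*
 * A sketch of the usual calling pattern (illustrative, inferred from how
 * the flush lock is handled here): the caller takes the flush lock, calls
 * xfs_qm_dqflush(), and on success queues the returned locked buffer for
 * writeback and releases it; xfs_qm_dqflush_done() then drops the flush
 * lock once the buffer has made it to disk.
 */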
int
xfs_qm_dqflush(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_buf		*bp;
	struct xfs_disk_dquot	*ddqp;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	*bpp = NULL;

	xfs_qm_dqunpin_wait(dqp);

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this dquot
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
		dqp->dq_flags &= ~XFS_DQ_DIRTY;

		spin_lock(&mp->m_ail->xa_lock);
		if (lip->li_flags & XFS_LI_IN_AIL)
			xfs_trans_ail_delete(mp->m_ail, lip,
					     SHUTDOWN_CORRUPT_INCORE);
		else
			spin_unlock(&mp->m_ail->xa_lock);
		error = -EIO;
		goto out_unlock;
	}

	/*
	 * Get the buffer containing the on-disk dquot
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				   &xfs_dquot_buf_ops);
	if (error)
		goto out_unlock;

	/*
	 * Calculate the location of the dquot inside the buffer.
	 */
	ddqp = bp->b_addr + dqp->q_bufoffset;

	/*
	 * A simple sanity check in case we got a corrupted dquot.
	 */
	error = xfs_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
			   XFS_QMOPT_DOWARN, "dqflush (incore copy)");
	if (error) {
		xfs_buf_relse(bp);
		xfs_dqfunlock(dqp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return -EIO;
	}

	/* This is the only portion of data that needs to persist */
	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->dq_flags &= ~XFS_DQ_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
					&dqp->q_logitem.qli_item.li_lsn);

	/*
	 * copy the lsn into the on-disk dquot now while we have the in memory
	 * dquot here. This can't be done later in the write verifier as we
	 * can't get access to the log item at that point in time.
	 *
	 * We also calculate the CRC here so that the on-disk dquot in the
	 * buffer always has a valid CRC. This ensures there is no possibility
	 * of a dquot without an up-to-date CRC getting to disk.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;

		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	/*
	 * Attach an iodone routine so that we can remove this dquot from the
	 * AIL and release the flush lock once the dquot is synced to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
				  &dqp->q_logitem.qli_item);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	trace_xfs_dqflush_done(dqp);
	*bpp = bp;
	return 0;

out_unlock:
	xfs_dqfunlock(dqp);
	return -EIO;
}

/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the quota structure with
 * the lower id first.
 */
void
xfs_dqlock2(
	xfs_dquot_t	*d1,
	xfs_dquot_t	*d2)
{
	if (d1 && d2) {
		ASSERT(d1 != d2);
		if (be32_to_cpu(d1->q_core.d_id) >
		    be32_to_cpu(d2->q_core.d_id)) {
			mutex_lock(&d2->q_qlock);
			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
		} else {
			mutex_lock(&d1->q_qlock);
			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
		}
	} else if (d1) {
		mutex_lock(&d1->q_qlock);
	} else if (d2) {
		mutex_lock(&d2->q_qlock);
	}
}
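/*
 * Example (sketch; udq/gdq are illustrative names, and either pointer may
 * be NULL): lock a user and a group dquot together in a deadlock-safe
 * order, then unlock in any order:
 *
 *	xfs_dqlock2(udq, gdq);
 *	... adjust both dquots ...
 *	xfs_dqunlock(gdq);
 *	xfs_dqunlock(udq);
 */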

int __init
xfs_qm_init(void)
{
	xfs_qm_dqzone =
		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
	if (!xfs_qm_dqzone)
		goto out;

	xfs_qm_dqtrxzone =
		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
	if (!xfs_qm_dqtrxzone)
		goto out_free_dqzone;

	return 0;

out_free_dqzone:
	kmem_zone_destroy(xfs_qm_dqzone);
out:
	return -ENOMEM;
}

void
xfs_qm_exit(void)
{
	kmem_zone_destroy(xfs_qm_dqtrxzone);
	kmem_zone_destroy(xfs_qm_dqzone);
}