/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_trace.h"
#include "xfs_error.h"

kmem_zone_t	*xfs_trans_zone;
kmem_zone_t	*xfs_log_item_desc_zone;

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
}

/*
 * This routine is called to allocate a transaction structure.
 * The type parameter indicates the type of the transaction.  These
 * are enumerated in xfs_trans.h.
 *
 * Dynamically allocate the transaction structure from the transaction
 * zone, initialize it, and return it to the caller.
 */
xfs_trans_t *
xfs_trans_alloc(
	xfs_mount_t	*mp,
	uint		type)
{
	xfs_trans_t	*tp;

	sb_start_intwrite(mp->m_super);
	tp = _xfs_trans_alloc(mp, type, KM_SLEEP);
	tp->t_flags |= XFS_TRANS_FREEZE_PROT;
	return tp;
}
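
/*
 * Typical caller pattern, shown here as an illustrative sketch rather than
 * compiled code (the transaction type and the tr_itruncate reservation are
 * just examples of values defined in xfs_trans.h and precomputed by
 * xfs_trans_init() above):
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
 *	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
 *	if (error) {
 *		xfs_trans_cancel(tp, 0);
 *		return error;
 *	}
 *	... join locked items and make the modifications ...
 *	return xfs_trans_commit(tp, 0);
 */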

xfs_trans_t *
_xfs_trans_alloc(
	xfs_mount_t	*mp,
	uint		type,
	xfs_km_flags_t	memflags)
{
	xfs_trans_t	*tp;

	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	atomic_inc(&mp->m_active_trans);

	tp = kmem_zone_zalloc(xfs_trans_zone, memflags);
	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_type = type;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);
	return tp;
}

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	atomic_dec(&tp->t_mountp->m_active_trans);
	if (tp->t_flags & XFS_TRANS_FREEZE_PROT)
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_zone_free(xfs_trans_zone, tp);
}

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
xfs_trans_t *
xfs_trans_dup(
	xfs_trans_t	*tp)
{
	xfs_trans_t	*ntp;

	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_type = tp->t_type;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_FREEZE_PROT);
	/* We gave our writer reference to the new transaction */
	tp->t_flags &= ~XFS_TRANS_FREEZE_PROT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;
	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;
	ntp->t_pflags = tp->t_pflags;

	xfs_trans_dup_dqinfo(tp, ntp);

	atomic_inc(&tp->t_mountp->m_active_trans);
	return ntp;
}
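
/*
 * A minimal sketch of the dup-and-commit pattern (xfs_trans_roll() below is
 * the canonical user; tres here stands for a struct xfs_trans_res preloaded
 * with the original tr_logres/tr_logcount, as xfs_trans_roll() does): the
 * old transaction is committed while the new one carries the permanent log
 * reservation forward, after which the extra ticket reference taken above
 * is dropped and the reservation regranted.
 *
 *	ntp = xfs_trans_dup(tp);
 *	error = xfs_trans_commit(tp, 0);
 *	tp = ntp;
 *	if (!error) {
 *		xfs_log_ticket_put(tp->t_ticket);
 *		error = xfs_trans_reserve(tp, &tres, 0, 0);
 *	}
 */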

/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only flag honoured in resp->tr_logflags is XFS_TRANS_PERM_LOG_RES,
 * which is used by long running transactions.  If any one of the
 * reservations fails then they will all be backed out.
 *
 * This does not do quota reservations. That typically is done by the
 * caller afterwards.
 */
int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	int		error = 0;
	bool		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/* Mark this thread as being in a transaction */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
		if (error != 0) {
			current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
			return -ENOSPC;
		}
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool	permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(tp->t_mountp,
						resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, XFS_TRANSACTION,
						permanent, tp->t_type);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		int		log_flags;

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			log_flags = XFS_LOG_REL_PERM_RESERV;
		} else {
			log_flags = 0;
		}
		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		/* return the blocks taken above; the delta must be positive */
		xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}

	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	return error;
}
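
/*
 * For example (a sketch, with resblks standing in for a caller-computed
 * block count): tr_write is one of the precomputed reservations and carries
 * XFS_TRANS_PERM_LOG_RES in its tr_logflags, so the first call takes a new
 * permanent log reservation plus resblks disk blocks, while a later call on
 * a transaction that still owns a ticket (as happens after xfs_trans_dup())
 * regrants the same reservation instead.
 *
 *	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);
 */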

/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of realtime extents allocated in
		 * the transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		ASSERT(delta > 0);
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
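
/*
 * For example, when the allocator takes len blocks out of free space it
 * charges them against this transaction's block reservation with a call
 * like the following (a representative sketch; len is whatever the caller
 * allocated).  The negative delta is what drives the t_blk_res_used
 * accounting above.
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)len);
 */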

/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	xfs_dsb_t	*sbp;
	xfs_buf_t	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
	sbp = XFS_BUF_TO_SBP(bp);

	/*
	 * Check that superblock mods match the mods made to AGF counters.
	 */
	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
		tp->t_ag_btree_delta));

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	if (tp->t_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
	if (tp->t_res_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
				  offsetof(xfs_dsb_t, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}

STATIC int
xfs_sb_mod8(
	uint8_t			*field,
	int8_t			delta)
{
	int8_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}

STATIC int
xfs_sb_mod32(
	uint32_t		*field,
	int32_t			delta)
{
	int32_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}

STATIC int
xfs_sb_mod64(
	uint64_t		*field,
	int64_t			delta)
{
	int64_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}
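
/*
 * These helpers only guard against the counter underflowing; serialising
 * against concurrent updates is the caller's job.  A sketch of the expected
 * calling convention (delta is a caller-supplied signed value), matching
 * what xfs_trans_unreserve_and_mod_sb() does below:
 *
 *	spin_lock(&mp->m_sb_lock);
 *	error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, delta);
 *	spin_unlock(&mp->m_sb_lock);
 */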

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock.  The idea is that this has already been
 * done.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 */
void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
	int64_t			blkdelta = 0;
	int64_t			rtxdelta = 0;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;
	int			error;

	/* calculate deltas */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
		blkdelta += tp->t_fdblocks_delta;

	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	    (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
		if (error)
			goto out;
	}

	if (idelta) {
		error = xfs_mod_icount(mp, idelta);
		if (error)
			goto out_undo_fdblocks;
	}

	if (ifreedelta) {
		error = xfs_mod_ifree(mp, ifreedelta);
		if (error)
			goto out_undo_icount;
	}

	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	if (rtxdelta) {
		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
		if (error)
			goto out_undo_ifree;
	}

	if (tp->t_dblocks_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
		if (error)
			goto out_undo_frextents;
	}
	if (tp->t_agcount_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
		if (error)
			goto out_undo_dblocks;
	}
	if (tp->t_imaxpct_delta != 0) {
		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
		if (error)
			goto out_undo_agcount;
	}
	if (tp->t_rextsize_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
				     tp->t_rextsize_delta);
		if (error)
			goto out_undo_imaxpct;
	}
	if (tp->t_rbmblocks_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
				     tp->t_rbmblocks_delta);
		if (error)
			goto out_undo_rextsize;
	}
	if (tp->t_rblocks_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
		if (error)
			goto out_undo_rbmblocks;
	}
	if (tp->t_rextents_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
				     tp->t_rextents_delta);
		if (error)
			goto out_undo_rblocks;
	}
	if (tp->t_rextslog_delta != 0) {
		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
				    tp->t_rextslog_delta);
		if (error)
			goto out_undo_rextents;
	}
	spin_unlock(&mp->m_sb_lock);
	return;

out_undo_rextents:
	if (tp->t_rextents_delta)
		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
out_undo_rblocks:
	if (tp->t_rblocks_delta)
		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
out_undo_rbmblocks:
	if (tp->t_rbmblocks_delta)
		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
out_undo_rextsize:
	if (tp->t_rextsize_delta)
		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
out_undo_imaxpct:
	if (tp->t_imaxpct_delta)
		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
out_undo_agcount:
	if (tp->t_agcount_delta)
		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
out_undo_dblocks:
	if (tp->t_dblocks_delta)
		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
out_undo_frextents:
	if (rtxdelta)
		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
out_undo_ifree:
	spin_unlock(&mp->m_sb_lock);
	if (ifreedelta)
		xfs_mod_ifree(mp, -ifreedelta);
out_undo_icount:
	if (idelta)
		xfs_mod_icount(mp, -idelta);
out_undo_fdblocks:
	if (blkdelta)
		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
out:
	ASSERT(error == 0);
	return;
}

/*
 * Add the given log item to the transaction's list of log items.
 *
 * The log item will point to its new descriptor through its li_desc field.
 */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	struct xfs_log_item_desc *lidp;

	ASSERT(lip->li_mountp == tp->t_mountp);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);

	lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);

	lidp->lid_item = lip;
	lidp->lid_flags = 0;
	list_add_tail(&lidp->lid_trans, &tp->t_items);

	lip->li_desc = lidp;
}
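
/*
 * The per-item-type "join" helpers are thin wrappers around this.  A rough
 * sketch of how an inode's log item ends up here (the real xfs_trans_ijoin()
 * also initialises the inode log item and records the held lock flags):
 *
 *	xfs_trans_add_item(tp, &ip->i_itemp->ili_item);
 */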

STATIC void
xfs_trans_free_item_desc(
	struct xfs_log_item_desc *lidp)
{
	list_del_init(&lidp->lid_trans);
	kmem_zone_free(xfs_log_item_desc_zone, lidp);
}

/*
 * Unlink and free the given descriptor.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	xfs_trans_free_item_desc(lip->li_desc);
	lip->li_desc = NULL;
}

/*
 * Unlock all of the items of a transaction and free all the descriptors
 * of that transaction.
 */
void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	xfs_lsn_t		commit_lsn,
	int			flags)
{
	struct xfs_log_item_desc *lidp, *next;

	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
		struct xfs_log_item	*lip = lidp->lid_item;

		lip->li_desc = NULL;

		if (commit_lsn != NULLCOMMITLSN)
			lip->li_ops->iop_committing(lip, commit_lsn);
		if (flags & XFS_TRANS_ABORT)
			lip->li_flags |= XFS_LI_ABORTED;
		lip->li_ops->iop_unlock(lip);

		xfs_trans_free_item_desc(lidp);
	}
}

static inline void
xfs_log_item_batch_insert(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int	i;

	spin_lock(&ailp->xa_lock);
	/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		lip->li_ops->iop_unpin(lip, 0);
	}
}

/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_unlock, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		commit_lsn,
	int			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->xa_lock);

	/* unpin all the log items */
	for (lv = log_vector; lv; lv = lv->lv_next) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			lip->li_flags |= XFS_LI_ABORTED;
		item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount));
			lip->li_ops->iop_unpin(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {
			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->xa_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->xa_lock);
			lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert.  */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->xa_lock);
}

/*
 * Commit the given transaction to the log.
 *
 * The XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - i.e. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
int
xfs_trans_commit(
	struct xfs_trans	*tp,
	uint			flags)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_lsn_t		commit_lsn = -1;
	int			error = 0;
	int			log_flags = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	/*
	 * Determine whether this commit is releasing a permanent
	 * log reservation or not.
	 */
	if (flags & XFS_TRANS_RELEASE_LOG_RES) {
		ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
		log_flags = XFS_LOG_REL_PERM_RESERV;
	}

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	xfs_log_commit_cil(mp, tp, &commit_lsn, flags);

	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = _xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(xs_trans_sync);
	} else {
		XFS_STATS_INC(xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be.  All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
		if (commit_lsn == -1 && !error)
			error = -EIO;
	}
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	xfs_trans_free_items(tp, NULLCOMMITLSN, error ? XFS_TRANS_ABORT : 0);
	xfs_trans_free(tp);

	XFS_STATS_INC(xs_trans_empty);
	return error;
}
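
/*
 * Caller-side error handling implied by the comment above, as a sketch: on
 * failure the items are already unlocked and the transaction freed, so the
 * caller just propagates the error and must not touch tp again.
 *
 *	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 *	if (error)
 *		return error;
 */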

/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	xfs_trans_t		*tp,
	int			flags)
{
	int			log_flags;
	xfs_mount_t		*mp = tp->t_mountp;

	/*
	 * See if the caller is being too lazy to figure out if
	 * the transaction really needs an abort.
	 */
	if ((flags & XFS_TRANS_ABORT) && !(tp->t_flags & XFS_TRANS_DIRTY))
		flags &= ~XFS_TRANS_ABORT;
	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem.  This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if ((tp->t_flags & XFS_TRANS_DIRTY) && !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	if (!(flags & XFS_TRANS_ABORT) && !XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item_desc *lidp;

		list_for_each_entry(lidp, &tp->t_items, lid_trans)
			ASSERT(lidp->lid_item->li_type != XFS_LI_EFD);
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket) {
		if (flags & XFS_TRANS_RELEASE_LOG_RES) {
			ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
			log_flags = XFS_LOG_REL_PERM_RESERV;
		} else {
			log_flags = 0;
		}
		xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
	}

	/* mark this thread as no longer being in a transaction */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	xfs_trans_free_items(tp, NULLCOMMITLSN, flags);
	xfs_trans_free(tp);
}

/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with XFS_TRANS_RELEASE_LOG_RES, but we still want to let
 * chunks of it go to the log as soon as possible. So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
	struct xfs_trans	**tpp,
	struct xfs_inode	*dp)
{
	struct xfs_trans	*trans;
	struct xfs_trans_res	tres;
	int			error;

	/*
	 * Ensure that the inode is always logged.
	 */
	trans = *tpp;
	xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;
	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = xfs_trans_commit(trans, 0);
	if (error)
		return error;

	trans = *tpp;

	/*
	 * Transaction commit worked ok, so we can drop the extra ticket
	 * reference that we gained in xfs_trans_dup().
	 */
	xfs_log_ticket_put(trans->t_ticket);

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use.  This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	error = xfs_trans_reserve(trans, &tres, 0, 0);
	/*
	 * Ensure that the inode is in the new transaction and locked.
	 */
	if (error)
		return error;

	xfs_trans_ijoin(trans, dp, 0);
	return 0;
}
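
/*
 * A sketch of how callers use the roll: do a bounded amount of work per
 * transaction and roll between chunks so the log tail can keep moving
 * (more_work is a hypothetical condition used for illustration):
 *
 *	while (more_work) {
 *		... modify and log a chunk ...
 *		error = xfs_trans_roll(&tp, ip);
 *		if (error)
 *			break;
 *	}
 */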