// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_discard.h"

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, 0);

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	tic->t_iclog_hdrs = 0;
	return tic;
}

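/*
 * Worked example (purely illustrative - the limits and the iclog geometry
 * vary with log size and mount options): with a blocking space limit of
 * 16 MiB, 32 KiB iclogs and a 512 byte iclog header, the count below
 * evaluates to 16777216 / (32768 - 512) = 520 iclog headers per
 * checkpoint context.
 */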
static inline void
xlog_cil_set_iclog_hdr_count(struct xfs_cil *cil)
{
	struct xlog	*log = cil->xc_log;

	atomic_set(&cil->xc_iclog_hdrs,
		   (XLOG_CIL_BLOCKING_SPACE_LIMIT(log) /
			(log->l_iclog_size - log->l_iclog_hsize)));
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
static bool
xlog_item_in_current_chkpt(
	struct xfs_cil		*cil,
	struct xfs_log_item	*lip)
{
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		return false;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	return lip->li_seq == READ_ONCE(cil->xc_current_sequence);
}

bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item *lip)
{
	return xlog_item_in_current_chkpt(lip->li_log->l_cilp, lip);
}

/*
 * Unavoidable forward declaration - xlog_cil_push_work() calls
 * xlog_cil_ctx_alloc() itself.
 */
static void xlog_cil_push_work(struct work_struct *work);

static struct xfs_cil_ctx *
xlog_cil_ctx_alloc(void)
{
	struct xfs_cil_ctx	*ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents.extent_list);
	INIT_LIST_HEAD(&ctx->log_items);
	INIT_LIST_HEAD(&ctx->lv_chain);
	INIT_WORK(&ctx->push_work, xlog_cil_push_work);
	return ctx;
}

/*
 * Aggregate the CIL per cpu structures into global counts, lists, etc and
 * clear the percpu state ready for the next context to use. This is called
 * from the push code with the context lock held exclusively, hence nothing else
 * will be accessing or modifying the per-cpu counters.
 */
static void
xlog_cil_push_pcp_aggregate(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	struct xlog_cil_pcp	*cilpcp;
	int			cpu;

	for_each_cpu(cpu, &ctx->cil_pcpmask) {
		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);

		ctx->ticket->t_curr_res += cilpcp->space_reserved;
		cilpcp->space_reserved = 0;

		if (!list_empty(&cilpcp->busy_extents)) {
			list_splice_init(&cilpcp->busy_extents,
					&ctx->busy_extents.extent_list);
		}
		if (!list_empty(&cilpcp->log_items))
			list_splice_init(&cilpcp->log_items, &ctx->log_items);

		/*
		 * We're in the middle of switching cil contexts.  Reset the
		 * counter we use to detect when the current context is nearing
		 * full.
		 */
		cilpcp->space_used = 0;
	}
}

/*
 * Aggregate the CIL per-cpu space used counters into the global atomic value.
 * This is called when the per-cpu counter aggregation will first pass the soft
 * limit threshold so we can switch to atomic counter aggregation for accurate
 * detection of hard limit traversal.
 */
static void
xlog_cil_insert_pcp_aggregate(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	struct xlog_cil_pcp	*cilpcp;
	int			cpu;
	int			count = 0;

	/* Trigger atomic updates then aggregate only for the first caller */
	if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags))
		return;

	/*
	 * We can race with other cpus setting cil_pcpmask.  However, we've
	 * atomically cleared PCP_SPACE which forces other threads to add to
	 * the global space used count.  cil_pcpmask is a superset of cilpcp
	 * structures that could have a nonzero space_used.
	 */
	for_each_cpu(cpu, &ctx->cil_pcpmask) {
		int	old, prev;

		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
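		/*
		 * Zero the counter without losing concurrent remote
		 * additions: retry the cmpxchg() until it observes the
		 * value we just sampled, then fold that value into the
		 * global count.
		 */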
		do {
			old = cilpcp->space_used;
			prev = cmpxchg(&cilpcp->space_used, old, 0);
		} while (old != prev);
		count += old;
	}
	atomic_add(count, &ctx->space_used);
}

static void
xlog_cil_ctx_switch(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	xlog_cil_set_iclog_hdr_count(cil);
	set_bit(XLOG_CIL_EMPTY, &cil->xc_flags);
	set_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags);
	ctx->sequence = ++cil->xc_current_sequence;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push.  This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
	xlog_cil_set_iclog_hdr_count(log->l_cilp);
}

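/*
 * Sizing example (illustrative only - struct sizes depend on the kernel
 * version and architecture): assuming a 56 byte struct xfs_log_vec and a
 * 16 byte struct xfs_log_iovec, niovecs = 3 gives 56 + 3 * 16 = 104 bytes,
 * already a multiple of sizeof(uint64_t), so round_up() leaves it unchanged.
 */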
static inline int
xlog_cil_iovec_space(
	uint	niovecs)
{
	return round_up((sizeof(struct xfs_log_vec) +
					niovecs * sizeof(struct xfs_log_iovec)),
			sizeof(uint64_t));
}

/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *	a) it does not exist; or
 *	b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * it is needed, and this is the buffer that is reallocated to match the size of
 * the incoming modification. Then during the formatting of the item we can swap
 * the active buffer with the new one if we can't reuse the existing buffer. We
 * don't free the old buffer as it may be reused on the next modification if
 * its size is right, otherwise we'll free and reallocate it at that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/*
		 * We 64-bit align the length of each iovec so that the start of
		 * the next one is naturally aligned.  We'll need to account for
		 * that slack space here.
		 *
		 * We also add the xlog_op_header to each region when
		 * formatting, but that's not accounted to the size of the item
		 * at this point. Hence we'll need an additional number of bytes
		 * for each vector to hold an opheader.
		 *
		 * Then round nbytes up to 64-bit alignment so that the initial
		 * buffer alignment is easy to calculate and verify.
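		 *
		 * Illustration (the opheader size is format dependent, roughly
		 * 12 bytes on the current log format): niovecs = 2 and
		 * nbytes = 100 becomes 100 + 2 * (8 + 12) = 140, which is then
		 * rounded up to 144.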
		 */
		nbytes += niovecs *
			(sizeof(uint64_t) + sizeof(struct xlog_op_header));
		nbytes = round_up(nbytes, sizeof(uint64_t));

		/*
		 * The data buffer needs to start 64-bit aligned, so round up
		 * that space to ensure we can align it appropriately and not
		 * overrun the buffer.
		 */
		buf_size = nbytes + xlog_cil_iovec_space(niovecs);

		/*
		 * if we have no shadow buffer, or it is too small, we need to
		 * reallocate it.
		 */
		if (!lip->li_lv_shadow ||
		    buf_size > lip->li_lv_shadow->lv_size) {
			/*
			 * We free and allocate here as a realloc would copy
			 * unnecessary data. We don't use kvzalloc() for the
			 * same reason - we don't need to zero the data area in
			 * the buffer, only the log vector header and the iovec
			 * storage.
			 */
			kvfree(lip->li_lv_shadow);
			lv = xlog_kvmalloc(buf_size);

			memset(lv, 0, xlog_cil_iovec_space(niovecs));

			INIT_LIST_HEAD(&lv->lv_list);
			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
			lip->li_lv_shadow = lv;
		} else {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv_shadow;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
	}
}

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space it will consume, and if it is a new item pin it as well.
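 *
 * For illustration: replacing a 256 byte old_lv with a 512 byte new lv ends
 * up adding 512 - 256 = 256 bytes to *diff_len for this checkpoint.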
 */
STATIC void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
		*diff_len += lv->lv_bytes;

	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and make it the shadow
	 * buffer for later freeing. In both cases we are now switching to the
	 * shadow buffer, so update the pointer to it appropriately.
	 */
	if (!old_lv) {
		if (lv->lv_item->li_ops->iop_pin)
			lv->lv_item->li_ops->iop_pin(lv->lv_item);
		lv->lv_item->li_lv_shadow = NULL;
	} else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_bytes;
		lv->lv_item->li_lv_shadow = old_lv;
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}

/*
 * Format log item into a flat buffer
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer.  Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of the
 * item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point
 * to the copied region inside the buffer we just allocated. This allows us to
 * format the regions into the iclog as though they are being formatted
 * directly out of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len)
{
	struct xfs_log_item	*lip;

	/* Bail out if we didn't find a log item.  */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv = NULL;
		struct xfs_log_vec *shadow;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * The formatting size information is already attached to
		 * the shadow lv on the log item.
		 */
		shadow = lip->li_lv_shadow;
		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
			ordered = true;

		/* Skip items that do not have any vectors for writing */
		if (!shadow->lv_niovecs && !ordered)
			continue;

		/* compare to existing item size */
		old_lv = lip->li_lv;
		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_len -= lv->lv_bytes;

			/* Ensure the lv is set up according to ->iop_size */
			lv->lv_niovecs = shadow->lv_niovecs;

			/* reset the lv buffer information for new formatting */
			lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_buf = (char *)lv +
					xlog_cil_iovec_space(lv->lv_niovecs);
		} else {
			/* switch to shadow buffer! */
			lv = shadow;
			lv->lv_item = lip;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				goto insert;
			}
		}

		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
		lip->li_ops->iop_format(lip, lv);
insert:
		xfs_cil_prepare_item(log, lv, old_lv, diff_len);
	}
}

/*
 * The use of lockless waitqueue_active() requires that the caller has
 * serialised itself against the wakeup call in xlog_cil_push_work(). That
 * can be done by either holding the push lock or the context lock.
 */
static inline bool
xlog_cil_over_hard_limit(
	struct xlog	*log,
	int32_t		space_used)
{
	if (waitqueue_active(&log->l_cilp->xc_push_wait))
		return true;
	if (space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
		return true;
	return false;
}

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	uint32_t		released_space)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item	*lip;
	int			len = 0;
	int			iovhdr_res = 0, split_res = 0, ctx_res = 0;
	int			space_used;
	int			order;
	unsigned int		cpu_nr;
	struct xlog_cil_pcp	*cilpcp;

	ASSERT(tp);

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len);

	/*
	 * Subtract the space released by intent cancellation from the space we
	 * consumed so that we remove it from the CIL space and add it back to
	 * the current transaction reservation context.
	 */
	len -= released_space;

	/*
	 * Grab the per-cpu pointer for the CIL before we start any accounting.
	 * That ensures that we are running with pre-emption disabled and so we
	 * can't be scheduled away between split sample/update operations that
	 * are done without outside locking to serialise them.
	 */
	cpu_nr = get_cpu();
	cilpcp = this_cpu_ptr(cil->xc_pcp);

	/* Tell the future push that there was work added by this CPU. */
	if (!cpumask_test_cpu(cpu_nr, &ctx->cil_pcpmask))
		cpumask_test_and_set_cpu(cpu_nr, &ctx->cil_pcpmask);

	/*
	 * We need to take the CIL checkpoint unit reservation on the first
	 * commit into the CIL. Test the XLOG_CIL_EMPTY bit first so we don't
	 * unnecessarily do an atomic op in the fast path here. We can clear the
	 * XLOG_CIL_EMPTY bit as we are under the xc_ctx_lock here and that
	 * needs to be held exclusively to reset the XLOG_CIL_EMPTY bit.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) &&
	    test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		ctx_res = ctx->ticket->t_unit_res;

	/*
	 * Check if we need to steal iclog headers. atomic_read() is not a
	 * locked atomic operation, so we can check the value before we do any
	 * real atomic ops in the fast path. If we've already taken the CIL unit
	 * reservation from this commit, we've already got one iclog header
	 * space reserved so we have to account for that otherwise we risk
	 * overrunning the reservation on this ticket.
	 *
	 * If the CIL is already at the hard limit, we might need more header
	 * space than originally reserved. So steal more header space from every
	 * commit that occurs once we are over the hard limit to ensure the CIL
	 * push won't run out of reservation space.
	 *
	 * This can steal more than we need, but that's OK.
	 *
	 * The cil->xc_ctx_lock provides the serialisation necessary for safely
	 * calling xlog_cil_over_hard_limit() in this context.
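	 *
	 * Illustration (header sizes depend on the log format): with a
	 * 512 byte iclog header and a 12 byte opheader, split_res is
	 * 524 bytes, so a ticket with t_iclog_hdrs = 8 steals up to
	 * 8 * 524 bytes of header space here.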
	 */
	space_used = atomic_read(&ctx->space_used) + cilpcp->space_used + len;
	if (atomic_read(&cil->xc_iclog_hdrs) > 0 ||
	    xlog_cil_over_hard_limit(log, space_used)) {
		split_res = log->l_iclog_hsize +
					sizeof(struct xlog_op_header);
		if (ctx_res)
			ctx_res += split_res * (tp->t_ticket->t_iclog_hdrs - 1);
		else
			ctx_res = split_res * tp->t_ticket->t_iclog_hdrs;
		atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs);
	}
	cilpcp->space_reserved += ctx_res;

	/*
	 * Accurately account when over the soft limit, otherwise fold the
	 * percpu count into the global count if over the per-cpu threshold.
	 */
	if (!test_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) {
		atomic_add(len, &ctx->space_used);
	} else if (cilpcp->space_used + len >
			(XLOG_CIL_SPACE_LIMIT(log) / num_online_cpus())) {
		space_used = atomic_add_return(cilpcp->space_used + len,
						&ctx->space_used);
		cilpcp->space_used = 0;

		/*
		 * If we just transitioned over the soft limit, we need to
		 * transition to the global atomic counter.
		 */
		if (space_used >= XLOG_CIL_SPACE_LIMIT(log))
			xlog_cil_insert_pcp_aggregate(cil, ctx);
	} else {
		cilpcp->space_used += len;
	}
	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &cilpcp->busy_extents);

	/*
	 * Now update the order of everything modified in the transaction
	 * and insert items into the CIL if they aren't already there.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	order = atomic_inc_return(&ctx->order_id);
	list_for_each_entry(lip, &tp->t_items, li_trans) {
		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		lip->li_order_id = order;
		if (!list_empty(&lip->li_cil))
			continue;
		list_add_tail(&lip->li_cil, &cilpcp->log_items);
	}
	put_cpu();

	/*
	 * If we've overrun the reservation, dump the tx details before we move
	 * the log items. Shutdown is imminent...
	 */
	tp->t_ticket->t_curr_res -= ctx_res + len;
	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
		xfs_warn(log->l_mp,
			 "  log items: %d bytes (iov hdrs: %d bytes)",
			 len, iovhdr_res);
		xfs_warn(log->l_mp, "  split region headers: %d bytes",
			 split_res);
		xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
		xlog_print_trans(tp);
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
	}
}

static void
xlog_cil_free_logvec(
	struct list_head	*lv_chain)
{
	struct xfs_log_vec	*lv;

	while (!list_empty(lv_chain)) {
		lv = list_first_entry(lv_chain, struct xfs_log_vec, lv_list);
		list_del_init(&lv->lv_list);
		kvfree(lv);
	}
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	struct xfs_cil_ctx	*ctx)
{
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
	bool			abort = xlog_is_shutdown(ctx->cil->xc_log);

	/*
	 * If the I/O failed, we're aborting the commit and already shutdown.
	 * Wake any commit waiters before aborting the log items so we don't
	 * block async log pushers on callbacks. Async log pushers explicitly do
	 * not wait on log force completion because they may be holding locks
	 * required to unpin items.
	 */
	if (abort) {
		spin_lock(&ctx->cil->xc_push_lock);
		wake_up_all(&ctx->cil->xc_start_wait);
		wake_up_all(&ctx->cil->xc_commit_wait);
		spin_unlock(&ctx->cil->xc_push_lock);
	}

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, &ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_extent_busy_sort(&ctx->busy_extents.extent_list);
	xfs_extent_busy_clear(mp, &ctx->busy_extents.extent_list,
			      xfs_has_discard(mp) && !abort);

	spin_lock(&ctx->cil->xc_push_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_push_lock);

	xlog_cil_free_logvec(&ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents.extent_list)) {
		ctx->busy_extents.mount = mp;
		ctx->busy_extents.owner = ctx;
		xfs_discard_extents(mp, &ctx->busy_extents);
		return;
	}

	kfree(ctx);
}

void
xlog_cil_process_committed(
	struct list_head	*list)
{
	struct xfs_cil_ctx	*ctx;

	while ((ctx = list_first_entry_or_null(list,
			struct xfs_cil_ctx, iclog_entry))) {
		list_del(&ctx->iclog_entry);
		xlog_cil_committed(ctx);
	}
}

/*
 * Record the LSN of the iclog we were just granted space to start writing into.
 * If the context doesn't have a start_lsn recorded, then this iclog will
 * contain the start record for the checkpoint. Otherwise this write contains
 * the commit record for the checkpoint.
 */
void
xlog_cil_set_ctx_write_state(
	struct xfs_cil_ctx	*ctx,
	struct xlog_in_core	*iclog)
{
	struct xfs_cil		*cil = ctx->cil;
	xfs_lsn_t		lsn = be64_to_cpu(iclog->ic_header.h_lsn);

	ASSERT(!ctx->commit_lsn);
	if (!ctx->start_lsn) {
		spin_lock(&cil->xc_push_lock);
		/*
		 * The LSN we need to pass to the log items on transaction
		 * commit is the LSN reported by the first log vector write, not
		 * the commit lsn. If we use the commit record lsn then we can
		 * move the grant write head beyond the tail LSN and overwrite
		 * it.
		 */
		ctx->start_lsn = lsn;
		wake_up_all(&cil->xc_start_wait);
		spin_unlock(&cil->xc_push_lock);

		/*
		 * Make sure the metadata we are about to overwrite in the log
		 * has been flushed to stable storage before this iclog is
		 * issued.
		 */
		spin_lock(&cil->xc_log->l_icloglock);
		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
		spin_unlock(&cil->xc_log->l_icloglock);
		return;
	}

	/*
	 * Take a reference to the iclog for the context so that we still hold
	 * it when xlog_write is done and has released it. This means the
	 * context controls when the iclog is released for IO.
	 */
	atomic_inc(&iclog->ic_refcnt);

	/*
	 * xlog_state_get_iclog_space() guarantees there is enough space in the
	 * iclog for an entire commit record, so we can attach the context
	 * callbacks now.  This needs to be done before we make the commit_lsn
	 * visible to waiters so that checkpoints with commit records in the
	 * same iclog order their IO completion callbacks in the same order that
	 * the commit records appear in the iclog.
	 */
	spin_lock(&cil->xc_log->l_icloglock);
	list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks);
	spin_unlock(&cil->xc_log->l_icloglock);

	/*
	 * Now we can record the commit LSN and wake anyone waiting for this
	 * sequence to have the ordered commit record assigned to a physical
	 * location in the log.
	 */
	spin_lock(&cil->xc_push_lock);
	ctx->commit_iclog = iclog;
	ctx->commit_lsn = lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_push_lock);
}

/*
 * Ensure that the order of log writes follows checkpoint sequence order. This
 * relies on the context LSN being zero until the log write has guaranteed the
 * LSN that the log write will start at via xlog_state_get_iclog_space().
 */
enum _record_type {
	_START_RECORD,
	_COMMIT_RECORD,
};

static int
xlog_cil_order_write(
	struct xfs_cil		*cil,
	xfs_csn_t		sequence,
	enum _record_type	record)
{
	struct xfs_cil_ctx	*ctx;

restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (xlog_is_shutdown(cil->xc_log)) {
			spin_unlock(&cil->xc_push_lock);
			return -EIO;
		}

		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (ctx->sequence >= sequence)
			continue;

		/* Wait until the LSN for the record has been recorded. */
		switch (record) {
		case _START_RECORD:
			if (!ctx->start_lsn) {
				xlog_wait(&cil->xc_start_wait, &cil->xc_push_lock);
				goto restart;
			}
			break;
		case _COMMIT_RECORD:
			if (!ctx->commit_lsn) {
				xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
				goto restart;
			}
			break;
		}
	}
	spin_unlock(&cil->xc_push_lock);
	return 0;
}

/*
 * Write out the log vector change now attached to the CIL context. This will
 * write a start record that needs to be strictly ordered in ascending CIL
 * sequence order so that log recovery will always use in-order start LSNs when
 * replaying checkpoints.
 */
static int
xlog_cil_write_chain(
	struct xfs_cil_ctx	*ctx,
	uint32_t		chain_len)
{
	struct xlog		*log = ctx->cil->xc_log;
	int			error;

	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD);
	if (error)
		return error;
	return xlog_write(log, ctx, &ctx->lv_chain, ctx->ticket, chain_len);
}

/*
 * Write out the commit record of a checkpoint transaction to close off a
 * running log write. These commit records are strictly ordered in ascending CIL
 * sequence order so that log recovery will always replay the checkpoints in the
 * correct order.
 */
static int
xlog_cil_write_commit_record(
	struct xfs_cil_ctx	*ctx)
{
	struct xlog		*log = ctx->cil->xc_log;
	struct xlog_op_header	ophdr = {
		.oh_clientid = XFS_TRANSACTION,
		.oh_tid = cpu_to_be32(ctx->ticket->t_tid),
		.oh_flags = XLOG_COMMIT_TRANS,
	};
	struct xfs_log_iovec	reg = {
		.i_addr = &ophdr,
		.i_len = sizeof(struct xlog_op_header),
		.i_type = XLOG_REG_TYPE_COMMIT,
	};
	struct xfs_log_vec	vec = {
		.lv_niovecs = 1,
		.lv_iovecp = &reg,
	};
	int			error;
	LIST_HEAD(lv_chain);
	list_add(&vec.lv_list, &lv_chain);

	if (xlog_is_shutdown(log))
		return -EIO;

	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD);
	if (error)
		return error;

	/* account for space used by record data */
	ctx->ticket->t_curr_res -= reg.i_len;
	error = xlog_write(log, ctx, &lv_chain, ctx->ticket, reg.i_len);
	if (error)
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
	return error;
}

struct xlog_cil_trans_hdr {
	struct xlog_op_header	oph[2];
	struct xfs_trans_header	thdr;
	struct xfs_log_iovec	lhdr[2];
};

/*
 * Build a checkpoint transaction header to begin the journal transaction.  We
 * need to account for the space used by the transaction header here as it is
 * not accounted for in xlog_write().
 *
 * This is the only place we write a transaction header, so we also build the
 * log opheaders that indicate the start of a log transaction and wrap the
 * transaction header. We keep the start record in its own log vector rather
 * than compacting them into a single region as this ends up making the logic
 * in xlog_write() for handling empty opheaders for start, commit and unmount
 * records much simpler.
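 *
 * Sizing note (illustrative, format dependent): with a 12 byte opheader and
 * a 16 byte transaction header, lvhdr->lv_bytes below works out to
 * 12 + (12 + 16) = 40 bytes, all of which is stolen from the ticket.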
 */
static void
xlog_cil_build_trans_hdr(
	struct xfs_cil_ctx	*ctx,
	struct xlog_cil_trans_hdr *hdr,
	struct xfs_log_vec	*lvhdr,
	int			num_iovecs)
{
	struct xlog_ticket	*tic = ctx->ticket;
	__be32			tid = cpu_to_be32(tic->t_tid);

	memset(hdr, 0, sizeof(*hdr));

	/* Log start record */
	hdr->oph[0].oh_tid = tid;
	hdr->oph[0].oh_clientid = XFS_TRANSACTION;
	hdr->oph[0].oh_flags = XLOG_START_TRANS;

	/* log iovec region pointer */
	hdr->lhdr[0].i_addr = &hdr->oph[0];
	hdr->lhdr[0].i_len = sizeof(struct xlog_op_header);
	hdr->lhdr[0].i_type = XLOG_REG_TYPE_LRHEADER;

	/* log opheader */
	hdr->oph[1].oh_tid = tid;
	hdr->oph[1].oh_clientid = XFS_TRANSACTION;
	hdr->oph[1].oh_len = cpu_to_be32(sizeof(struct xfs_trans_header));

	/* transaction header in host byte order format */
	hdr->thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	hdr->thdr.th_type = XFS_TRANS_CHECKPOINT;
	hdr->thdr.th_tid = tic->t_tid;
	hdr->thdr.th_num_items = num_iovecs;

	/* log iovec region pointer */
	hdr->lhdr[1].i_addr = &hdr->oph[1];
	hdr->lhdr[1].i_len = sizeof(struct xlog_op_header) +
				sizeof(struct xfs_trans_header);
	hdr->lhdr[1].i_type = XLOG_REG_TYPE_TRANSHDR;

	lvhdr->lv_niovecs = 2;
	lvhdr->lv_iovecp = &hdr->lhdr[0];
	lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len;

	tic->t_curr_res -= lvhdr->lv_bytes;
}

/*
 * CIL item reordering compare function. We want to order in ascending ID order,
 * but we want to leave items with the same ID in the order they were added to
 * the list. This is important for operations like reflink where we log 4
 * order-dependent intents in a single transaction when we overwrite an existing
 * shared extent with a new shared extent. i.e. BUI(unmap), CUI(drop),
 * CUI(inc), BUI(remap)...
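 *
 * Note: list_sort() is stable, so returning a simple greater-than comparison
 * below is enough to keep equal order ids in their insertion order.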
 */
static int
xlog_cil_order_cmp(
	void			*priv,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_log_vec	*l1 = container_of(a, struct xfs_log_vec, lv_list);
	struct xfs_log_vec	*l2 = container_of(b, struct xfs_log_vec, lv_list);

	return l1->lv_order_id > l2->lv_order_id;
}

/*
 * Pull all the log vectors off the items in the CIL, and remove the items from
 * the CIL. We don't need the CIL lock here because it's only needed on the
 * transaction commit side which is currently locked out by the flush lock.
 *
 * If a log item is marked with a whiteout, we do not need to write it to the
 * journal and so we just move them to the whiteout list for the caller to
 * dispose of appropriately.
 */
static void
xlog_cil_build_lv_chain(
	struct xfs_cil_ctx	*ctx,
	struct list_head	*whiteouts,
	uint32_t		*num_iovecs,
	uint32_t		*num_bytes)
{
	while (!list_empty(&ctx->log_items)) {
		struct xfs_log_item	*item;
		struct xfs_log_vec	*lv;

		item = list_first_entry(&ctx->log_items,
					struct xfs_log_item, li_cil);

		if (test_bit(XFS_LI_WHITEOUT, &item->li_flags)) {
			list_move(&item->li_cil, whiteouts);
			trace_xfs_cil_whiteout_skip(item);
			continue;
		}

		lv = item->li_lv;
		lv->lv_order_id = item->li_order_id;

		/* we don't write ordered log vectors */
		if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
			*num_bytes += lv->lv_bytes;
		*num_iovecs += lv->lv_niovecs;
		list_add_tail(&lv->lv_list, &ctx->lv_chain);

		list_del_init(&item->li_cil);
		item->li_order_id = 0;
		item->li_lv = NULL;
	}
}

static void
xlog_cil_cleanup_whiteouts(
	struct list_head	*whiteouts)
{
	while (!list_empty(whiteouts)) {
		struct xfs_log_item *item = list_first_entry(whiteouts,
						struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		trace_xfs_cil_whiteout_unpin(item);
		item->li_ops->iop_unpin(item, 1);
	}
}

/*
 * Push the Committed Item List to the log.
 *
 * If the current sequence is the same as xc_push_seq we need to do a flush. If
 * xc_push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * xc_push_seq is checked unlocked against the sequence number for a match.
 * Hence we can allow log forces to run racily and not issue pushes for the
 * same sequence twice.  If we get a race between multiple pushes for the same
 * sequence they will block on the first one and then abort, hence avoiding
 * needless pushes.
 *
 * This runs from a workqueue so it does not inherit any specific memory
 * allocation context. However, we do not want to block on memory reclaim
 * recursing back into the filesystem because this push may have been triggered
 * by memory reclaim itself. Hence we really need to run under full GFP_NOFS
 * constraints here.
 */
static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	unsigned int		nofs_flags = memalloc_nofs_save();
	struct xfs_cil_ctx	*ctx =
		container_of(work, struct xfs_cil_ctx, push_work);
	struct xfs_cil		*cil = ctx->cil;
	struct xlog		*log = cil->xc_log;
	struct xfs_cil_ctx	*new_ctx;
	int			num_iovecs = 0;
	int			num_bytes = 0;
	int			error = 0;
	struct xlog_cil_trans_hdr thdr;
	struct xfs_log_vec	lvhdr = {};
	xfs_csn_t		push_seq;
	bool			push_commit_stable;
	LIST_HEAD		(whiteouts);
	struct xlog_ticket	*ticket;

	new_ctx = xlog_cil_ctx_alloc();
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);

	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);
	push_commit_stable = cil->xc_push_commit_stable;
	cil->xc_push_commit_stable = false;

	/*
	 * As we are about to switch to a new, empty CIL context, we no longer
	 * need to throttle tasks on CIL space overruns. Wake any waiters that
	 * the hard push throttle may have caught so they can start committing
	 * to the new context. The ctx->xc_push_lock provides the serialisation
	 * necessary for safely using the lockless waitqueue_active() check in
	 * this context.
	 */
	if (waitqueue_active(&cil->xc_push_wait))
		wake_up_all(&cil->xc_push_wait);

	xlog_cil_push_pcp_aggregate(cil, ctx);

	/*
	 * Check if we've anything to push. If there is nothing, then we don't
	 * move on to a new sequence number and so we have to be able to push
	 * this sequence again later.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/* check for a previously pushed sequence */
	if (push_seq < ctx->sequence) {
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/*
	 * We are now going to push this context, so add it to the committing
	 * list before we do anything else. This ensures that anyone waiting on
	 * this push can easily detect the difference between a "push in
	 * progress" and "CIL is empty, nothing to do".
	 *
	 * IOWs, a wait loop can now check for:
	 *	the current sequence not being found on the committing list;
	 *	an empty CIL; and
	 *	an unchanged sequence number
	 * to detect a push that had nothing to do and therefore does not need
	 * waiting on. If the CIL is not empty, we get put on the committing
	 * list before emptying the CIL and bumping the sequence number. Hence
	 * an empty CIL and an unchanged sequence number means we jumped out
	 * above after doing nothing.
	 *
	 * Hence the waiter will either find the commit sequence on the
	 * committing list or the sequence number will be unchanged and the CIL
	 * still dirty. In that latter case, the push has not yet started, and
	 * so the waiter will have to continue trying to check the CIL
	 * committing list until it is found. In extreme cases of delay, the
	 * sequence may fully commit between the attempts the waiter makes to
	 * wait on the commit sequence.
	 */
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);

	xlog_cil_build_lv_chain(ctx, &whiteouts, &num_iovecs, &num_bytes);

	/*
	 * Switch the contexts so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI.  Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 *
	 * xfs_log_force_seq requires us to mirror the new sequence into the cil
	 * structure atomically with the addition of this sequence to the
	 * committing list. This also ensures that we can do unlocked checks
	 * against the current sequence in log forces without risking
	 * dereferencing a freed context pointer.
	 */
	spin_lock(&cil->xc_push_lock);
	xlog_cil_ctx_switch(cil, new_ctx);
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Sort the log vector chain before we add the transaction headers.
	 * This ensures we always have the transaction headers at the start
	 * of the chain.
	 */
	list_sort(NULL, &ctx->lv_chain, xlog_cil_order_cmp);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 * Add the lvhdr to the head of the lv chain we pass to xlog_write() so
	 * it gets written into the iclog first.
	 */
	xlog_cil_build_trans_hdr(ctx, &thdr, &lvhdr, num_iovecs);
	num_bytes += lvhdr.lv_bytes;
	list_add(&lvhdr.lv_list, &ctx->lv_chain);

	/*
	 * Take the lvhdr back off the lv_chain immediately after calling
	 * xlog_cil_write_chain() as it should not be passed to log IO
	 * completion.
	 */
	error = xlog_cil_write_chain(ctx, num_bytes);
	list_del(&lvhdr.lv_list);
	if (error)
		goto out_abort_free_ticket;

	error = xlog_cil_write_commit_record(ctx);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * Grab the ticket from the ctx so we can ungrant it after releasing the
	 * commit_iclog. The ctx may be freed by the time we return from
	 * releasing the commit_iclog (i.e. checkpoint has been completed and
	 * callback run) so we can't reference the ctx after the call to
	 * xlog_state_release_iclog().
	 */
	ticket = ctx->ticket;

	/*
	 * If the checkpoint spans multiple iclogs, wait for all previous iclogs
	 * to complete before we submit the commit_iclog. We can't use state
	 * checks for this - ACTIVE can be either a past completed iclog or a
	 * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
	 * past or future iclog awaiting IO or ordered IO completion to be run.
	 * In the latter case, if it's a future iclog and we wait on it, then we
	 * will hang because it won't get processed through to ic_force_wait
	 * wakeup until this commit_iclog is written to disk.  Hence we use the
	 * iclog header lsn and compare it to the commit lsn to determine if we
	 * need to wait on iclogs or not.
	 */
	spin_lock(&log->l_icloglock);
	if (ctx->start_lsn != ctx->commit_lsn) {
		xfs_lsn_t	plsn;

		plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
		if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
			/*
			 * Waiting on ic_force_wait orders the completion of
			 * iclogs older than ic_prev. Hence we only need to wait
			 * on the most recent older iclog here.
			 */
			xlog_wait_on_iclog(ctx->commit_iclog->ic_prev);
			spin_lock(&log->l_icloglock);
		}

		/*
		 * We need to issue a pre-flush so that the ordering for this
		 * checkpoint is correctly preserved down to stable storage.
		 */
		ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
	}

	/*
	 * The commit iclog must be written to stable storage to guarantee
	 * journal IO vs metadata writeback IO is correctly ordered on stable
	 * storage.
	 *
	 * If the push caller needs the commit to be immediately stable and the
	 * commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to indicate it
	 * will be written when released, switch its state to WANT_SYNC right
	 * now.
	 */
	ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
	if (push_commit_stable &&
	    ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
		xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
	xlog_state_release_iclog(log, ctx->commit_iclog, ticket);

	/* Not safe to reference ctx now! */

	spin_unlock(&log->l_icloglock);
	xlog_cil_cleanup_whiteouts(&whiteouts);
	xfs_log_ticket_ungrant(log, ticket);
	memalloc_nofs_restore(nofs_flags);
	return;

out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kfree(new_ctx);
	memalloc_nofs_restore(nofs_flags);
	return;

out_abort_free_ticket:
	ASSERT(xlog_is_shutdown(log));
	xlog_cil_cleanup_whiteouts(&whiteouts);
	if (!ctx->commit_iclog) {
		xfs_log_ticket_ungrant(log, ctx->ticket);
		xlog_cil_committed(ctx);
		memalloc_nofs_restore(nofs_flags);
		return;
	}
	spin_lock(&log->l_icloglock);
	ticket = ctx->ticket;
	xlog_state_release_iclog(log, ctx->commit_iclog, ticket);
	/* Not safe to reference ctx now! */
	spin_unlock(&log->l_icloglock);
	xfs_log_ticket_ungrant(log, ticket);
	memalloc_nofs_restore(nofs_flags);
}

/*
 * We need to push CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
	struct xlog	*log) __releases(cil->xc_ctx_lock)
{
	struct xfs_cil	*cil = log->l_cilp;
	int		space_used = atomic_read(&cil->xc_ctx->space_used);

	/*
	 * The cil won't be empty because we are called while holding the
	 * context lock so whatever we added to the CIL will still be there.
	 */
	ASSERT(!test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));

	/*
	 * We are done if:
	 * - we haven't used up all the space available yet; or
	 * - we've already queued up a push; and
	 * - we're not over the hard limit; and
	 * - nothing has been over the hard limit.
	 *
	 * If so, we don't need to take the push lock as there's nothing to do.
	 */
	if (space_used < XLOG_CIL_SPACE_LIMIT(log) ||
	    (cil->xc_push_seq == cil->xc_current_sequence &&
	     space_used < XLOG_CIL_BLOCKING_SPACE_LIMIT(log) &&
	     !waitqueue_active(&cil->xc_push_wait))) {
		up_read(&cil->xc_ctx_lock);
		return;
	}

	spin_lock(&cil->xc_push_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
	}

	/*
	 * Drop the context lock now, we can't hold that if we need to sleep
	 * because we are over the blocking threshold. The push_lock is still
	 * held, so blocking threshold sleep/wakeup is still correctly
	 * serialised here.
	 */
	up_read(&cil->xc_ctx_lock);

	/*
	 * If we are well over the space limit, throttle the work that is being
	 * done until the push work on this context has begun. Enforce the hard
	 * throttle on all transaction commits once it has been activated, even
	 * if the committing transactions have resulted in the space usage
	 * dipping back down under the hard limit.
	 *
	 * The ctx->xc_push_lock provides the serialisation necessary for safely
	 * calling xlog_cil_over_hard_limit() in this context.
	 */
	if (xlog_cil_over_hard_limit(log, space_used)) {
		trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
		ASSERT(space_used < log->l_logsize);
		xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
		return;
	}

	spin_unlock(&cil->xc_push_lock);
}

/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed.
 *
 * If the caller is performing a synchronous force, we will flush the workqueue
 * to get previously queued work moving to minimise the wait time they will
 * undergo waiting for all outstanding pushes to complete. The caller is
 * expected to do the required waiting for push_seq to complete.
 *
 * If the caller is performing an async push, we need to ensure that the
 * checkpoint is fully flushed out of the iclogs when we finish the push. If we
 * don't do this, then the commit record may remain sitting in memory in an
 * ACTIVE iclog. This then requires another full log force to push to disk,
 * which defeats the purpose of having an async, non-blocking CIL force
 * mechanism. Hence in this case we need to pass a flag to the push work to
 * indicate it needs to flush the commit record itself.
 */
static void
xlog_cil_push_now(
	struct xlog	*log,
	xfs_lsn_t	push_seq,
	bool		async)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	if (!async)
		flush_workqueue(cil->xc_push_wq);

	spin_lock(&cil->xc_push_lock);

	/*
	 * If this is an async flush request, we always need to set the
	 * xc_push_commit_stable flag even if something else has already queued
	 * a push. The flush caller is asking for the CIL to be on stable
	 * storage when the next push completes, so regardless of who has queued
	 * the push, the flush requires stable semantics from it.
	 */
	cil->xc_push_commit_stable = async;

	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no more work that we need to do.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) ||
	    push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
	spin_unlock(&cil->xc_push_lock);
}

bool
xlog_cil_empty(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;
	bool		empty = false;

	spin_lock(&cil->xc_push_lock);
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		empty = true;
	spin_unlock(&cil->xc_push_lock);
	return empty;
}

/*
 * If there are intent done items in this transaction and the related intent was
 * committed in the current (same) CIL checkpoint, we don't need to write either
 * the intent or intent done item to the journal as the change will be
 * journalled atomically within this checkpoint. As we cannot remove items from
 * the CIL here, mark the related intent with a whiteout so that the CIL push
 * can remove it rather than writing it to the journal. Then remove the intent
 * done item from the current transaction and release it so it doesn't get put
 * into the CIL at all.
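 *
 * For example (illustrative): an extent free intent (EFI) logged earlier in
 * this checkpoint whose done item (EFD) arrives in this transaction cancels
 * out; the EFI gets a whiteout and the EFD is released, so neither is
 * written to the journal.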
1528 */
1529static uint32_t
1530xlog_cil_process_intents(
1531	struct xfs_cil		*cil,
1532	struct xfs_trans	*tp)
1533{
1534	struct xfs_log_item	*lip, *ilip, *next;
1535	uint32_t		len = 0;
1536
1537	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
1538		if (!(lip->li_ops->flags & XFS_ITEM_INTENT_DONE))
1539			continue;
1540
1541		ilip = lip->li_ops->iop_intent(lip);
1542		if (!ilip || !xlog_item_in_current_chkpt(cil, ilip))
1543			continue;
1544		set_bit(XFS_LI_WHITEOUT, &ilip->li_flags);
1545		trace_xfs_cil_whiteout_mark(ilip);
1546		len += ilip->li_lv->lv_bytes;
1547		kvfree(ilip->li_lv);
1548		ilip->li_lv = NULL;
1549
1550		xfs_trans_del_item(lip);
1551		lip->li_ops->iop_release(lip);
1552	}
1553	return len;
1554}

/*
 * Commit a transaction to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * We take the context lock in read mode to lock out background commit and
 * return without it held once background commits are allowed again -
 * xlog_cil_push_background() drops it for us.
 */
void
xlog_cil_commit(
	struct xlog		*log,
	struct xfs_trans	*tp,
	xfs_csn_t		*commit_seq,
	bool			regrant)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_item	*lip, *next;
	uint32_t		released_space = 0;

	/*
	 * Do all necessary memory allocation before we lock the CIL.
	 * This ensures the allocation does not deadlock with a CIL
	 * push in memory reclaim (e.g. from kswapd).
	 */
	xlog_cil_alloc_shadow_bufs(log, tp);

	/* lock out background commit */
	down_read(&cil->xc_ctx_lock);

	if (tp->t_flags & XFS_TRANS_HAS_INTENT_DONE)
		released_space = xlog_cil_process_intents(cil, tp);

	xlog_cil_insert_items(log, tp, released_space);

	if (regrant && !xlog_is_shutdown(log))
		xfs_log_ticket_regrant(log, tp->t_ticket);
	else
		xfs_log_ticket_ungrant(log, tp->t_ticket);
	tp->t_ticket = NULL;
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and possibly freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	trace_xfs_trans_commit_items(tp, _RET_IP_);
	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (lip->li_ops->iop_committing)
			lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
	}
	if (commit_seq)
		*commit_seq = cil->xc_ctx->sequence;

	/* xlog_cil_push_background() releases cil->xc_ctx_lock */
	xlog_cil_push_background(log);
}
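
/*
 * Illustrative sketch only, not part of this file's logic: the ordering
 * rule xlog_cil_commit() follows above - perform all allocations *before*
 * taking the lock that a reclaim-driven background push also needs, so an
 * allocation that recurses into reclaim can never wait on a lock we
 * already hold. Userspace analogue with hypothetical names; compiled out.
 */
#if 0
#include <pthread.h>
#include <stdlib.h>

static pthread_rwlock_t ctx_lock = PTHREAD_RWLOCK_INITIALIZER;

static int commit(size_t shadow_size)
{
	/* 1. Allocate everything up front, while holding no locks. */
	void *shadow = malloc(shadow_size);

	if (!shadow)
		return -1;

	/* 2. Only now lock out the background push... */
	pthread_rwlock_rdlock(&ctx_lock);
	/* ...insert the pre-allocated buffers; no allocation in here. */
	pthread_rwlock_unlock(&ctx_lock);

	free(shadow);
	return 0;
}
#endif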

/*
 * Flush the CIL to stable storage but don't wait for it to complete. This
 * requires the CIL push to ensure the commit record for the push hits the disk,
 * but otherwise is no different to a push done from a log force.
 */
void
xlog_cil_flush(
	struct xlog	*log)
{
	xfs_csn_t	seq = log->l_cilp->xc_current_sequence;

	trace_xfs_log_force(log->l_mp, seq, _RET_IP_);
	xlog_cil_push_now(log, seq, true);

	/*
	 * If the CIL is empty, make sure that any previous checkpoint that may
	 * still be in an active iclog is pushed to stable storage.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &log->l_cilp->xc_flags))
		xfs_log_force(log->l_mp, 0);
}
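
/*
 * Illustrative sketch only, not part of this file's logic: the
 * fire-and-forget flush semantics above as a self-contained userspace
 * analogue. Pending work is kicked off asynchronously; if nothing is
 * pending, we still make sure the previous batch reached stable storage.
 * Hypothetical names; compiled out.
 */
#if 0
#include <stdbool.h>

static int pending;		/* items in the current batch */

static void push_async(void)	{ /* kick a worker; no-op if empty */ }
static void force_sync(void)	{ /* wait for prior batches on disk */ }

static void flush(void)
{
	push_async();
	if (!pending)		/* nothing queued: cover batches in flight */
		force_sync();
}
#endif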

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence number given.
 * Hence the only time we will trigger a push here is if the push sequence is
 * the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_seq(
	struct xlog	*log,
	xfs_csn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	if (!sequence)
		sequence = cil->xc_current_sequence;
	trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);

	/*
	 * Check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
restart:
	xlog_cil_push_now(log, sequence, false);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (xlog_is_shutdown(log))
			goto out_shutdown;
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}

	/*
	 * The call to xlog_cil_push_now() executes the push in the background.
	 * Hence by the time we have got here our sequence may not have been
	 * pushed yet. This is true if the current sequence still matches the
	 * push sequence after the above wait loop and the CIL still contains
	 * dirty objects. This is guaranteed by the push code first adding the
	 * context to the committing list before emptying the CIL.
	 *
	 * Hence if we don't find the context in the committing list and the
	 * current sequence number is unchanged then the CIL contents are
	 * significant. If the CIL is empty, it means there was nothing to push
	 * and that means there is nothing to wait for. If the CIL is not empty,
	 * it means we haven't yet started the push, because if it had started
	 * we would have found the context on the committing list.
	 */
	if (sequence == cil->xc_current_sequence &&
	    !test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
		spin_unlock(&cil->xc_push_lock);
		goto restart;
	}

	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;

	/*
	 * We detected a shutdown in progress. We need to trigger the log force
	 * to pass through its iclog state machine error handling, even though
	 * we are already in a shutdown state. Hence we can't return
	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
	 * LSN is already stable), so we return a zero LSN instead.
	 */
out_shutdown:
	spin_unlock(&cil->xc_push_lock);
	return 0;
}
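
/*
 * Illustrative sketch only, not part of this file's logic: the
 * wait-and-restart pattern in xlog_cil_force_seq() above, as a
 * self-contained userspace analogue. We scan a list of in-flight contexts
 * under a lock; if the one we care about has no commit LSN yet, we sleep
 * on a condition variable (which drops the lock, as xlog_wait() does) and
 * rescan from scratch on wakeup, because the list may have changed.
 * Hypothetical names; compiled out.
 */
#if 0
#include <pthread.h>
#include <stdint.h>

struct ctx {
	struct ctx	*next;
	uint64_t	sequence;
	uint64_t	commit_lsn;	/* 0 while the push is in flight */
};

static pthread_mutex_t push_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t commit_wait = PTHREAD_COND_INITIALIZER;
static struct ctx *committing;		/* in-flight checkpoints */

static uint64_t force_seq(uint64_t sequence)
{
	uint64_t	commit_lsn = 0;
	struct ctx	*ctx;

restart:
	pthread_mutex_lock(&push_lock);
	for (ctx = committing; ctx; ctx = ctx->next) {
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/* still being pushed: wait, then rescan */
			pthread_cond_wait(&commit_wait, &push_lock);
			pthread_mutex_unlock(&push_lock);
			goto restart;
		}
		if (ctx->sequence == sequence)
			commit_lsn = ctx->commit_lsn;
	}
	pthread_mutex_unlock(&push_lock);
	return commit_lsn;
}
#endif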

/*
 * Initialise the CIL and its structures for this log.
 */
int
xlog_cil_init(
	struct xlog		*log)
{
	struct xfs_cil		*cil;
	struct xfs_cil_ctx	*ctx;
	struct xlog_cil_pcp	*cilpcp;
	int			cpu;

	cil = kzalloc(sizeof(*cil), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!cil)
		return -ENOMEM;
	/*
	 * Limit the CIL pipeline depth to 4 concurrent works to bound the
	 * concurrency the log spinlocks will be exposed to.
	 */
	cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
			4, log->l_mp->m_super->s_id);
	if (!cil->xc_push_wq)
		goto out_destroy_cil;

	cil->xc_log = log;
	cil->xc_pcp = alloc_percpu(struct xlog_cil_pcp);
	if (!cil->xc_pcp)
		goto out_destroy_wq;

	for_each_possible_cpu(cpu) {
		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
		INIT_LIST_HEAD(&cilpcp->busy_extents);
		INIT_LIST_HEAD(&cilpcp->log_items);
	}

	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_push_lock);
	init_waitqueue_head(&cil->xc_push_wait);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_start_wait);
	init_waitqueue_head(&cil->xc_commit_wait);
	log->l_cilp = cil;

	ctx = xlog_cil_ctx_alloc();
	xlog_cil_ctx_switch(cil, ctx);
	return 0;

out_destroy_wq:
	destroy_workqueue(cil->xc_push_wq);
out_destroy_cil:
	kfree(cil);
	return -ENOMEM;
}
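
/*
 * Illustrative sketch only, not part of this file's logic: the goto-based
 * unwind pattern xlog_cil_init() uses above, reduced to a self-contained
 * userspace analogue. Each failure point jumps to a label that tears down
 * exactly the resources acquired so far, in reverse order. Hypothetical
 * names; compiled out.
 */
#if 0
#include <stdlib.h>

struct thing {
	void	*a;
	void	*b;
};

static struct thing *thing_init(void)
{
	struct thing *t = calloc(1, sizeof(*t));

	if (!t)
		return NULL;

	t->a = malloc(64);
	if (!t->a)
		goto out_free_thing;

	t->b = malloc(64);
	if (!t->b)
		goto out_free_a;

	return t;

out_free_a:
	free(t->a);
out_free_thing:
	free(t);
	return NULL;
}
#endif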

void
xlog_cil_destroy(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (cil->xc_ctx) {
		if (cil->xc_ctx->ticket)
			xfs_log_ticket_put(cil->xc_ctx->ticket);
		kfree(cil->xc_ctx);
	}

	ASSERT(test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
	free_percpu(cil->xc_pcp);
	destroy_workqueue(cil->xc_push_wq);
	kfree(cil);
}