/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID5_H
#define _RAID5_H

#include <linux/raid/xor.h>
#include <linux/dmaengine.h>
#include <linux/local_lock.h>

/*
 *
 * Each stripe contains one buffer per device.  Each buffer can be in
 * one of a number of states stored in "flags".  Changes between
 * these states happen *almost* exclusively under the protection of the
 * STRIPE_ACTIVE flag.  Some very specific changes can happen in bi_end_io, and
 * these are not protected by STRIPE_ACTIVE.
 *
 * The flag bits that are used to represent these states are:
 *   R5_UPTODATE and R5_LOCKED
 *
 * State Empty == !UPTODATE, !LOCK
 *        We have no data, and there is no active request
 * State Want == !UPTODATE, LOCK
 *        A read request is being submitted for this block
 * State Dirty == UPTODATE, LOCK
 *        Some new data is in this buffer, and it is being written out
 * State Clean == UPTODATE, !LOCK
 *        We have valid data which is the same as on disc
 *
 * The possible state transitions are:
 *
 *  Empty -> Want   - on read or write to get old data for parity calc
 *  Empty -> Dirty  - on compute_parity to satisfy write/sync request.
 *  Empty -> Clean  - on compute_block when computing a block for failed drive
 *  Want  -> Empty  - on failed read
 *  Want  -> Clean  - on successful completion of read request
 *  Dirty -> Clean  - on successful completion of write request
 *  Dirty -> Clean  - on failed write
 *  Clean -> Dirty  - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
 *
 * The Want->Empty, Want->Clean and Dirty->Clean transitions
 * all happen in bi_end_io at interrupt time.
 * Each sets the Uptodate bit before releasing the Lock bit.
 * This leaves one multi-stage transition:
 *    Want->Dirty->Clean
 * This is safe because thinking that a Clean buffer is actually dirty
 * will at worst delay some action, and the stripe will be scheduled
 * for attention after the transition is complete.
 *
 * There is one possibility that is not covered by these states.  That
 * is if one drive has failed and there is a spare being rebuilt.  We
 * can't distinguish between a clean block that has been generated
 * from parity calculations, and a clean block that has been
 * successfully written to the spare (or to parity when resyncing).
 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
 * is set whenever a write is scheduled to the spare, or to the parity
 * disc if there is no spare.  A sync request clears this bit, and
 * when we find it set with no buffers locked, we know the sync is
 * complete.
 *
 * Buffers for the md device that arrive via make_request are attached
 * to the appropriate stripe in one of two lists linked on b_reqnext.
 * One list (bh_read) is for read requests, one (bh_write) for writes.
 * There should never be more than one buffer on the two lists
 * together, but as we are not guaranteed of that, we allow for more.
 *
 * If a buffer is on the read list when the associated cache buffer is
 * Uptodate, the data is copied into the read buffer and its b_end_io
 * routine is called.  This may happen in the end_request routine only
 * if the buffer has just successfully been read.  end_request should
 * remove the buffers from the list and then set the Uptodate bit on
 * the buffer.  Other threads may do this only if they first check
 * that the Uptodate bit is set.  Once they have checked that they may
 * take buffers off the read queue.
 *
 * When a buffer on the write list is committed for write it is copied
 * into the cache buffer, which is then marked dirty, and moved onto a
 * third list, the written list (bh_written).  Once both the parity
 * block and the cached buffer are successfully written, any buffer on
 * a written list can be returned with b_end_io.
 *
 * The write list and read list both act as fifos.  The read list,
 * write list and written list are protected by the device_lock.
 * The device_lock is only for list manipulations and will only be
 * held for a very short time.  It can be claimed from interrupts.
 *
 *
 * Stripes in the stripe cache can be on one of two lists (or on
 * neither).  The "inactive_list" contains stripes which are not
 * currently being used for any request.  They can freely be reused
 * for another stripe.  The "handle_list" contains stripes that need
 * to be handled in some way.  Both of these are fifo queues.  Each
 * stripe is also (potentially) linked to a hash bucket in the hash
 * table so that it can be found by sector number.  Stripes that are
 * not hashed must be on the inactive_list, and will normally be at
 * the front.  All stripes start life this way.
 *
 * The inactive_list, handle_list and hash bucket lists are all protected by the
 * device_lock.
 *  - stripes have a reference counter. If count==0, they are on a list.
 *  - If a stripe might need handling, STRIPE_HANDLE is set.
 *  - When refcount reaches zero, then if STRIPE_HANDLE it is put on
 *    handle_list else inactive_list
 *
 * This, combined with the fact that STRIPE_HANDLE is only ever
 * cleared while a stripe has a non-zero count, means that if the
 * refcount is 0 and STRIPE_HANDLE is set, then it is on the
 * handle_list and if refcount is 0 and STRIPE_HANDLE is not set, then
 * the stripe is on inactive_list.
 *
 * The possible transitions are:
 *  activate an unhashed/inactive stripe (get_active_stripe())
 *     lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
 *  activate a hashed, possibly active stripe (get_active_stripe())
 *     lockdev check-hash if(!cnt++)unlink-stripe unlockdev
 *  attach a request to an active stripe (add_stripe_bh())
 *     lockdev attach-buffer unlockdev
 *  handle a stripe (handle_stripe())
 *     setSTRIPE_ACTIVE, clrSTRIPE_HANDLE ...
 *		(lockdev check-buffers unlockdev) ..
 *		change-state ..
 *		record io/ops needed clearSTRIPE_ACTIVE schedule io/ops
 *  release an active stripe (release_stripe())
 *     lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
 *
 * The refcount counts each thread that has activated the stripe,
 * plus raid5d if it is handling it, plus one for each active request
 * on a cached buffer, and plus one if the stripe is undergoing stripe
 * operations.
 *
 * The stripe operations are:
 * -copying data between the stripe cache and user application buffers
 * -computing blocks to save a disk access, or to recover a missing block
 * -updating the parity on a write operation (reconstruct write and
 *  read-modify-write)
 * -checking parity correctness
 * -running i/o to disk
 * These operations are carried out by raid5_run_ops which uses the async_tx
 * api to (optionally) offload operations to dedicated hardware engines.
 * When requesting an operation handle_stripe sets the pending bit for the
 * operation and increments the count.  raid5_run_ops is then run whenever
 * the count is non-zero.
 * There are some critical dependencies between the operations that prevent some
 * from being requested while another is in flight.
 * 1/ Parity check operations destroy the in-cache version of the parity block,
 *    so we prevent parity dependent operations like writes and compute_blocks
 *    from starting while a check is in progress.  Some dma engines can perform
 *    the check without damaging the parity block; in these cases the parity
 *    block is re-marked up to date (assuming the check was successful) and is
 *    not re-read from disk.
 * 2/ When a write operation is requested we immediately lock the affected
 *    blocks, and mark them as not up to date.  This causes new read requests
 *    to be held off, as well as parity checks and compute block operations.
 * 3/ Once a compute block operation has been requested handle_stripe treats
 *    that block as if it is up to date.  raid5_run_ops guarantees that any
 *    operation that is dependent on the compute block result is initiated after
 *    the compute block completes.
 */
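
/*
 * Illustrative sketch (editor's example, not driver code): the four buffer
 * states above map onto the R5_UPTODATE and R5_LOCKED flag bits defined
 * later in this file roughly as follows:
 *
 *	int uptodate = test_bit(R5_UPTODATE, &dev->flags);
 *	int locked = test_bit(R5_LOCKED, &dev->flags);
 *
 *	if (!uptodate && !locked)	state = Empty;
 *	else if (!uptodate && locked)	state = Want;
 *	else if (uptodate && locked)	state = Dirty;
 *	else				state = Clean;
 */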

/*
 * Operations state - intermediate states that are visible outside of
 *   STRIPE_ACTIVE.
 * In general _idle indicates nothing is running, _run indicates a data
 * processing operation is active, and _result means the data processing result
 * is stable and can be acted upon.  For simple operations like biofill and
 * compute that only have an _idle and _run state, the states are indicated
 * with sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN)
 */
/**
 * enum check_states - handles syncing / repairing a stripe
 * @check_state_idle - check operations are quiesced
 * @check_state_run - xor parity check operation is running
 * @check_state_run_q - q-parity check operation is running
 * @check_state_run_pq - pq dual parity check operation is running
 * @check_state_check_result - set outside lock when check result is valid
 * @check_state_compute_run - check failed and we are repairing
 * @check_state_compute_result - set outside lock when compute result is valid
 */
enum check_states {
	check_state_idle = 0,
	check_state_run, /* xor parity check */
	check_state_run_q, /* q-parity check */
	check_state_run_pq, /* pq dual parity check */
	check_state_check_result,
	check_state_compute_run, /* parity repair */
	check_state_compute_result,
};

/**
 * enum reconstruct_states - handles writing or expanding a stripe
 */
enum reconstruct_states {
	reconstruct_state_idle = 0,
	reconstruct_state_prexor_drain_run,	/* prexor-write */
	reconstruct_state_drain_run,		/* write */
	reconstruct_state_run,			/* expand */
	reconstruct_state_prexor_drain_result,
	reconstruct_state_drain_result,
	reconstruct_state_result,
};

#define DEFAULT_STRIPE_SIZE	4096
struct stripe_head {
	struct hlist_node	hash;
	struct list_head	lru;	      /* inactive_list or handle_list */
	struct llist_node	release_list;
	struct r5conf		*raid_conf;
	short			generation;	/* increments with every
						 * reshape */
	sector_t		sector;		/* sector of this row */
	short			pd_idx;		/* parity disk index */
	short			qd_idx;		/* 'Q' disk index for raid6 */
	short			ddf_layout;	/* use DDF ordering to calculate Q */
	short			hash_lock_index;
	unsigned long		state;		/* state flags */
	atomic_t		count;	      /* nr of active thread/requests */
	int			bm_seq;	/* sequence number for bitmap flushes */
	int			disks;		/* disks in stripe */
	int			overwrite_disks; /* total overwrite disks in stripe,
						  * this is only checked when stripe
						  * has STRIPE_BATCH_READY
						  */
	enum check_states	check_state;
	enum reconstruct_states reconstruct_state;
	spinlock_t		stripe_lock;
	int			cpu;
	struct r5worker_group	*group;

	struct stripe_head	*batch_head; /* protected by stripe lock */
	spinlock_t		batch_lock; /* only header's lock is useful */
	struct list_head	batch_list; /* protected by head's batch lock */

	union {
		struct r5l_io_unit	*log_io;
		struct ppl_io_unit	*ppl_io;
	};

	struct list_head	log_list;
	sector_t		log_start; /* first meta block on the journal */
	struct list_head	r5c; /* for r5c_cache->stripe_in_journal */

	struct page		*ppl_page; /* partial parity of this stripe */
	/**
	 * struct stripe_operations
	 * @target - STRIPE_OP_COMPUTE_BLK target
	 * @target2 - 2nd compute target in the raid6 case
	 * @zero_sum_result - P and Q verification flags
	 */
	struct stripe_operations {
		int		     target, target2;
		enum sum_check_flags zero_sum_result;
	} ops;

#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
	/* These pages will be used by bios in dev[i] */
	struct page	**pages;
	int	nr_pages;	/* page array size */
	int	stripes_per_page;
#endif
	struct r5dev {
		/* rreq and rvec are used for the replacement device when
		 * writing data to both devices.
		 */
		struct bio	req, rreq;
		struct bio_vec	vec, rvec;
		struct page	*page, *orig_page;
		unsigned int    offset;     /* offset of the page */
		struct bio	*toread, *read, *towrite, *written;
		sector_t	sector;			/* sector of this page */
		unsigned long	flags;
		u32		log_checksum;
		unsigned short	write_hint;
	} dev[]; /* allocated depending on RAID geometry ("disks" member) */
};

/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
 *     for handle_stripe.
 */
struct stripe_head_state {
	/* 'syncing' means that we need to read all devices, either
	 * to check/correct parity, or to reconstruct a missing device.
	 * 'replacing' means we are replacing one or more drives and
	 * the source is valid at this point so we don't need to
	 * read all devices, just the replacement targets.
	 */
	int syncing, expanding, expanded, replacing;
	int locked, uptodate, to_read, to_write, failed, written;
	int to_fill, compute, req_compute, non_overwrite;
	int injournal, just_cached;
	int failed_num[2];
	int p_failed, q_failed;
	int dec_preread_active;
	unsigned long ops_request;

	struct md_rdev *blocked_rdev;
	int handle_bad_blocks;
	int log_failed;
	int waiting_extra_page;
};

/* Flags for struct r5dev.flags */
enum r5dev_flags {
	R5_UPTODATE,	/* page contains current data */
	R5_LOCKED,	/* IO has been submitted on "req" */
	R5_DOUBLE_LOCKED,/* Cannot clear R5_LOCKED until 2 writes complete */
	R5_OVERWRITE,	/* towrite covers whole page */
/* and some that are internal to handle_stripe */
	R5_Insync,	/* rdev && rdev->in_sync at start */
	R5_Wantread,	/* want to schedule a read */
	R5_Wantwrite,	/* want to schedule a write */
	R5_Overlap,	/* There is a pending overlapping request
			 * on this block */
	R5_ReadNoMerge, /* prevent bio from merging in block-layer */
	R5_ReadError,	/* seen a read error here recently */
	R5_ReWrite,	/* have tried to over-write the readerror */

	R5_Expanded,	/* This block now has post-expand data */
	R5_Wantcompute,	/* compute_block in progress, treat as
			 * uptodate
			 */
	R5_Wantfill,	/* dev->toread contains a bio that needs
			 * filling
			 */
	R5_Wantdrain,	/* dev->towrite needs to be drained */
	R5_WantFUA,	/* Write should be FUA */
	R5_SyncIO,	/* The IO is sync */
	R5_WriteError,	/* got a write error - need to record it */
	R5_MadeGood,	/* A bad block has been fixed by writing to it */
	R5_ReadRepl,	/* Will/did read from replacement rather than orig */
	R5_MadeGoodRepl,/* A bad block on the replacement device has been
			 * fixed by writing to it */
	R5_NeedReplace,	/* This device has a replacement which is not
			 * up-to-date at this stripe. */
	R5_WantReplace, /* We need to update the replacement, we have read
			 * data in, and now is a good time to write it out.
			 */
	R5_Discard,	/* Discard the stripe */
	R5_SkipCopy,	/* Don't copy data from bio to stripe cache */
	R5_InJournal,	/* data being written is in the journal device.
			 * if R5_InJournal is set for parity pd_idx, all the
			 * data and parity being written are in the journal
			 * device
			 */
	R5_OrigPageUPTDODATE,	/* with write back cache, we read old data into
				 * dev->orig_page for prexor. When this flag is
				 * set, orig_page contains latest data in the
				 * raid disk.
				 */
};
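
/*
 * Illustrative sketch (editor's example, not used by the driver): counting
 * the devices of a stripe that currently have I/O outstanding, using the
 * flag bits above.  The real driver only inspects dev[].flags under the
 * locking rules described at the top of this file.
 */
static inline int example_count_locked_devs(struct stripe_head *sh)
{
	int i, locked = 0;

	for (i = 0; i < sh->disks; i++)
		if (test_bit(R5_LOCKED, &sh->dev[i].flags))
			locked++;
	return locked;
}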

/*
 * Stripe state
 */
enum {
	STRIPE_ACTIVE,
	STRIPE_HANDLE,
	STRIPE_SYNC_REQUESTED,
	STRIPE_SYNCING,
	STRIPE_INSYNC,
	STRIPE_REPLACED,
	STRIPE_PREREAD_ACTIVE,
	STRIPE_DELAYED,
	STRIPE_DEGRADED,
	STRIPE_BIT_DELAY,
	STRIPE_EXPANDING,
	STRIPE_EXPAND_SOURCE,
	STRIPE_EXPAND_READY,
	STRIPE_IO_STARTED,	/* do not count towards 'bypass_count' */
	STRIPE_FULL_WRITE,	/* all blocks are set to be overwritten */
	STRIPE_BIOFILL_RUN,
	STRIPE_COMPUTE_RUN,
	STRIPE_ON_UNPLUG_LIST,
	STRIPE_DISCARD,
	STRIPE_ON_RELEASE_LIST,
	STRIPE_BATCH_READY,
	STRIPE_BATCH_ERR,
	STRIPE_BITMAP_PENDING,	/* Being added to bitmap, don't add
				 * to batch yet.
				 */
	STRIPE_LOG_TRAPPED,	/* trapped into log (see raid5-cache.c)
				 * this bit is used in two scenarios:
				 *
				 * 1. write-out phase
				 *  set in first entry of r5l_write_stripe
				 *  clear in second entry of r5l_write_stripe
				 *  used to bypass logic in handle_stripe
				 *
				 * 2. caching phase
				 *  set in r5c_try_caching_write()
				 *  clear when journal write is done
				 *  used to initiate r5c_cache_data()
				 *  also used to bypass logic in handle_stripe
				 */
	STRIPE_R5C_CACHING,	/* the stripe is in caching phase
				 * see more detail in raid5-cache.c
				 */
	STRIPE_R5C_PARTIAL_STRIPE,	/* in r5c cache (to-be/being handled or
					 * in conf->r5c_partial_stripe_list)
					 */
	STRIPE_R5C_FULL_STRIPE,	/* in r5c cache (to-be/being handled or
				 * in conf->r5c_full_stripe_list)
				 */
	STRIPE_R5C_PREFLUSH,	/* need to flush journal device */
};
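
/*
 * Illustrative sketch (editor's example): the refcount/STRIPE_HANDLE
 * invariant described at the top of this file corresponds to a release
 * path shaped roughly like the following (see release_stripe() in
 * raid5.c for the real code, which runs under device_lock):
 *
 *	if (atomic_dec_and_test(&sh->count)) {
 *		if (test_bit(STRIPE_HANDLE, &sh->state))
 *			list_add_tail(&sh->lru, &conf->handle_list);
 *		else
 *			list_add_tail(&sh->lru, &conf->inactive_list[hash]);
 *	}
 */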

#define STRIPE_EXPAND_SYNC_FLAGS \
	((1 << STRIPE_EXPAND_SOURCE) |\
	(1 << STRIPE_EXPAND_READY) |\
	(1 << STRIPE_EXPANDING) |\
	(1 << STRIPE_SYNC_REQUESTED))
/*
 * Operation request flags
 */
enum {
	STRIPE_OP_BIOFILL,
	STRIPE_OP_COMPUTE_BLK,
	STRIPE_OP_PREXOR,
	STRIPE_OP_BIODRAIN,
	STRIPE_OP_RECONSTRUCT,
	STRIPE_OP_CHECK,
	STRIPE_OP_PARTIAL_PARITY,
};
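
/*
 * Illustrative sketch (editor's example): handle_stripe() requests work
 * from raid5_run_ops() by setting these bits in the ops_request word of
 * struct stripe_head_state, e.g. to rebuild a failed block:
 *
 *	sh->ops.target = failed_disk_idx;	(hypothetical index)
 *	set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
 */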

/*
 * RAID parity calculation preferences
 */
enum {
	PARITY_DISABLE_RMW = 0,
	PARITY_ENABLE_RMW,
	PARITY_PREFER_RMW,
};

/*
 * Pages requested from set_syndrome_sources()
 */
enum {
	SYNDROME_SRC_ALL,
	SYNDROME_SRC_WANT_DRAIN,
	SYNDROME_SRC_WRITTEN,
};
/*
 * Plugging:
 *
 * To improve write throughput, we need to delay the handling of some
 * stripes until there has been a chance that several write requests
 * for the one stripe have all been collected.
 * In particular, any write request that would require pre-reading
 * is put on a "delayed" queue until there are no stripes currently
 * in a pre-read phase.  Further, if the "delayed" queue is empty when
 * a stripe is put on it then we "plug" the queue and do not process it
 * until an unplug call is made (i.e. until unplug_io_fn() is called).
 *
 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
 * it to the count of prereading stripes.
 * When a write is initiated, or the stripe refcnt == 0 (just in case), we
 * clear the PREREAD_ACTIVE flag and decrement the count.
 * Whenever the 'handle' queue is empty and the device is not plugged, we
 * move any stripes from delayed to handle and clear the DELAYED flag and set
 * PREREAD_ACTIVE.
 * In stripe_handle, if we find pre-reading is necessary, we do it if
 * PREREAD_ACTIVE is set, else we set DELAYED which will send the stripe to
 * the delayed queue.
 * HANDLE gets cleared if stripe_handle leaves nothing locked.
 */
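
/*
 * Illustrative sketch (editor's example) of the delayed-queue decision
 * described above; needs_preread and schedule_prereads are hypothetical
 * stand-ins for the real logic in raid5.c:
 *
 *	if (needs_preread) {
 *		if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 *			schedule_prereads(sh);
 *		else {
 *			set_bit(STRIPE_DELAYED, &sh->state);
 *			set_bit(STRIPE_HANDLE, &sh->state);
 *		}
 *	}
 */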

/* Note: disk_info.rdev can be set to NULL asynchronously by raid5_remove_disk.
 * There are three safe ways to access disk_info.rdev.
 * 1/ when holding mddev->reconfig_mutex
 * 2/ when resync/recovery/reshape is known to be happening - i.e. in code that
 *    is called as part of performing resync/recovery/reshape.
 * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
 *    and if it is non-NULL, increment rdev->nr_pending before dropping the RCU
 *    lock.
 * When .rdev is set to NULL, the nr_pending count is checked again and if
 * it has been incremented, the pointer is put back in .rdev.
 */
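
/*
 * Illustrative sketch (editor's example) of access rule 3 above; the
 * real users live in raid5.c:
 *
 *	struct md_rdev *rdev;
 *
 *	rcu_read_lock();
 *	rdev = rcu_dereference(conf->disks[i].rdev);
 *	if (rdev)
 *		atomic_inc(&rdev->nr_pending);
 *	rcu_read_unlock();
 *	... submit I/O ...
 *	rdev_dec_pending(rdev, conf->mddev);
 */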

struct disk_info {
	struct md_rdev	*rdev;
	struct md_rdev	*replacement;
	struct page	*extra_page; /* extra page to use in prexor */
};

/*
 * Stripe cache
 */

#define NR_STRIPES		256

#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#endif

#define	IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)
#define MAX_STRIPE_BATCH	8

/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
 * This is because we sometimes take all the spinlocks
 * and creating that much locking depth can cause
 * problems.
 */
#define NR_STRIPE_HASH_LOCKS 8
#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)
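
/*
 * Illustrative sketch (editor's example): raid5.c derives a stripe's hash
 * bucket and hash lock from its sector roughly as
 *
 *	hash = (sector >> RAID5_STRIPE_SHIFT(conf)) & HASH_MASK;
 *	lock = &conf->hash_locks[hash & STRIPE_HASH_LOCKS_MASK];
 */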

struct r5worker {
	struct work_struct work;
	struct r5worker_group *group;
	struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	bool working;
};

struct r5worker_group {
	struct list_head handle_list;
	struct list_head loprio_list;
	struct r5conf *conf;
	struct r5worker *workers;
	int stripes_cnt;
};

/*
 * r5c journal modes of the array: write-back or write-through.
 * write-through mode has behavior identical to the existing log-only
 * implementation.
 */
enum r5c_journal_mode {
	R5C_JOURNAL_MODE_WRITE_THROUGH = 0,
	R5C_JOURNAL_MODE_WRITE_BACK = 1,
};

enum r5_cache_state {
	R5_INACTIVE_BLOCKED,	/* release of inactive stripes blocked,
				 * waiting for 25% to be free
				 */
	R5_ALLOC_MORE,		/* It might help to allocate another
				 * stripe.
				 */
	R5_DID_ALLOC,		/* A stripe was allocated, don't allocate
				 * more until at least one has been
				 * released.  This avoids flooding
				 * the cache.
				 */
	R5C_LOG_TIGHT,		/* log device space tight, need to
				 * prioritize stripes at last_checkpoint
				 */
	R5C_LOG_CRITICAL,	/* log device is running out of space,
				 * only process stripes that are already
				 * occupying the log
				 */
	R5C_EXTRA_PAGE_IN_USE,	/* a stripe is using disk_info.extra_page
				 * for prexor
				 */
};

#define PENDING_IO_MAX 512
#define PENDING_IO_ONE_FLUSH 128
struct r5pending_data {
	struct list_head sibling;
	sector_t sector; /* stripe sector */
	struct bio_list bios;
};

struct raid5_percpu {
	struct page	*spare_page; /* Used when checking P/Q in raid6 */
	void		*scribble;  /* space for constructing buffer
				     * lists and performing address
				     * conversions
				     */
	int             scribble_obj_size;
	local_lock_t    lock;
};

struct r5conf {
	struct hlist_head	*stripe_hashtbl;
	/* only protect corresponding hash list and inactive_list */
	spinlock_t		hash_locks[NR_STRIPE_HASH_LOCKS];
	struct mddev		*mddev;
	int			chunk_sectors;
	int			level, algorithm, rmw_level;
	int			max_degraded;
	int			raid_disks;
	int			max_nr_stripes;
	int			min_nr_stripes;
#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
	unsigned long	stripe_size;
	unsigned int	stripe_shift;
	unsigned long	stripe_sectors;
#endif

	/* reshape_progress is the leading edge of a 'reshape'.
	 * It has value MaxSector when no reshape is happening.
	 * If delta_disks < 0, it is the last sector we started work on,
	 * else it is the next sector to work on.
	 */
	sector_t		reshape_progress;
	/* reshape_safe is the trailing edge of a reshape.  We know that
	 * before (or after) this address, all reshape has completed.
	 */
	sector_t		reshape_safe;
	int			previous_raid_disks;
	int			prev_chunk_sectors;
	int			prev_algo;
	short			generation; /* increments with every reshape */
	seqcount_spinlock_t	gen_lock;	/* lock against generation changes */
	unsigned long		reshape_checkpoint; /* Time we last updated
						     * metadata */
	long long		min_offset_diff; /* minimum difference between
						  * data_offset and
						  * new_data_offset across all
						  * devices.  May be negative,
						  * but is closest to zero.
						  */

	struct list_head	handle_list; /* stripes needing handling */
	struct list_head	loprio_list; /* low priority stripes */
	struct list_head	hold_list; /* preread ready stripes */
	struct list_head	delayed_list; /* stripes that have plugged requests */
	struct list_head	bitmap_list; /* stripes delayed awaiting bitmap update */
	struct bio		*retry_read_aligned; /* currently retrying aligned bios */
	unsigned int		retry_read_offset; /* sector offset into retry_read_aligned */
	struct bio		*retry_read_aligned_list; /* aligned bios retry list */
	atomic_t		preread_active_stripes; /* stripes with scheduled io */
	atomic_t		active_aligned_reads;
	atomic_t		pending_full_writes; /* full write backlog */
	int			bypass_count; /* bypassed prereads */
	int			bypass_threshold; /* preread nice */
	int			skip_copy; /* Don't copy data from bio to stripe cache */
	struct list_head	*last_hold; /* detect hold_list promotions */

	atomic_t		reshape_stripes; /* stripes with pending writes for reshape */
	/* unfortunately we need two cache names as we temporarily have
	 * two caches.
	 */
	int			active_name;
	char			cache_name[2][32];
	struct kmem_cache	*slab_cache; /* for allocating stripes */
	struct mutex		cache_size_mutex; /* Protect changes to cache size */

	int			seq_flush, seq_write;
	int			quiesce;

	int			fullsync;  /* set to 1 if a full sync is needed
					    * (fresh device added).
					    * Cleared when a sync completes.
					    */
	int			recovery_disabled;
	/* per cpu variables */
	struct raid5_percpu __percpu *percpu;
	int scribble_disks;
	int scribble_sectors;
	struct hlist_node node;

	/*
	 * Free stripes pool
	 */
	atomic_t		active_stripes;
	struct list_head	inactive_list[NR_STRIPE_HASH_LOCKS];

	atomic_t		r5c_cached_full_stripes;
	struct list_head	r5c_full_stripe_list;
	atomic_t		r5c_cached_partial_stripes;
	struct list_head	r5c_partial_stripe_list;
	atomic_t		r5c_flushing_full_stripes;
	atomic_t		r5c_flushing_partial_stripes;

	atomic_t		empty_inactive_list_nr;
	struct llist_head	released_stripes;
	wait_queue_head_t	wait_for_quiescent;
	wait_queue_head_t	wait_for_stripe;
	wait_queue_head_t	wait_for_overlap;
	unsigned long		cache_state;
	struct shrinker		*shrinker;
	int			pool_size; /* number of disks in stripeheads in pool */
	spinlock_t		device_lock;
	struct disk_info	*disks;
	struct bio_set		bio_split;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread __rcu	*thread;
	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	struct r5worker_group	*worker_groups;
	int			group_cnt;
	int			worker_cnt_per_group;
	struct r5l_log		*log;
	void			*log_private;

	spinlock_t		pending_bios_lock;
	bool			batch_bio_dispatch;
	struct r5pending_data	*pending_data;
	struct list_head	free_list;
	struct list_head	pending_list;
	int			pending_data_cnt;
	struct r5pending_data	*next_pending_data;
};

#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
#define RAID5_STRIPE_SIZE(conf)	STRIPE_SIZE
#define RAID5_STRIPE_SHIFT(conf)	STRIPE_SHIFT
#define RAID5_STRIPE_SECTORS(conf)	STRIPE_SECTORS
#else
#define RAID5_STRIPE_SIZE(conf)	((conf)->stripe_size)
#define RAID5_STRIPE_SHIFT(conf)	((conf)->stripe_shift)
#define RAID5_STRIPE_SECTORS(conf)	((conf)->stripe_sectors)
#endif

/* bios attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bios per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This function is used to determine the 'next' bio in the list, given the
 * sector of the current stripe+device.
 */
static inline struct bio *r5_next_bio(struct r5conf *conf, struct bio *bio, sector_t sector)
{
	if (bio_end_sector(bio) < sector + RAID5_STRIPE_SECTORS(conf))
		return bio->bi_next;
	else
		return NULL;
}
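
/*
 * Illustrative usage (editor's example): walking every bio attached to a
 * stripe+device, in the pattern used throughout raid5.c:
 *
 *	struct bio *rbi = dev->toread;
 *
 *	while (rbi && rbi->bi_iter.bi_sector <
 *	       dev->sector + RAID5_STRIPE_SECTORS(conf)) {
 *		... process rbi ...
 *		rbi = r5_next_bio(conf, rbi, dev->sector);
 *	}
 */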

/*
 * Our supported algorithms
 */
#define ALGORITHM_LEFT_ASYMMETRIC	0 /* Rotating Parity N with Data Restart */
#define ALGORITHM_RIGHT_ASYMMETRIC	1 /* Rotating Parity 0 with Data Restart */
#define ALGORITHM_LEFT_SYMMETRIC	2 /* Rotating Parity N with Data Continuation */
#define ALGORITHM_RIGHT_SYMMETRIC	3 /* Rotating Parity 0 with Data Continuation */

/* Define non-rotating (raid4) algorithms.  These allow
 * conversion of raid4 to raid5.
 */
#define ALGORITHM_PARITY_0		4 /* P or P,Q are initial devices */
#define ALGORITHM_PARITY_N		5 /* P or P,Q are final devices. */

/* DDF RAID6 layouts differ from md/raid6 layouts in two ways.
 * Firstly, the exact positioning of the parity block is slightly
 * different between the 'LEFT_*' modes of md and the "_N_*" modes
 * of DDF.
 * Secondly, the order of data blocks over which the Q syndrome is computed
 * is different.
 * Consequently we have different layouts for DDF/raid6 than md/raid6.
 * These layouts are from the DDFv1.2 spec.
 * Interestingly DDFv1.2-Errata-A does not specify N_CONTINUE but
 * leaves RLQ=3 as 'Vendor Specific'.
 */

#define ALGORITHM_ROTATING_ZERO_RESTART	8 /* DDF PRL=6 RLQ=1 */
#define ALGORITHM_ROTATING_N_RESTART	9 /* DDF PRL=6 RLQ=2 */
#define ALGORITHM_ROTATING_N_CONTINUE	10 /* DDF PRL=6 RLQ=3 */
/* For every RAID5 algorithm we define a RAID6 algorithm
 * with exactly the same layout for data and parity, and
 * with the Q block always on the last device (N-1).
 * This allows trivial conversion from RAID5 to RAID6.
 */
#define ALGORITHM_LEFT_ASYMMETRIC_6	16
#define ALGORITHM_RIGHT_ASYMMETRIC_6	17
#define ALGORITHM_LEFT_SYMMETRIC_6	18
#define ALGORITHM_RIGHT_SYMMETRIC_6	19
#define ALGORITHM_PARITY_0_6		20
#define ALGORITHM_PARITY_N_6		ALGORITHM_PARITY_N

static inline int algorithm_valid_raid5(int layout)
{
	return (layout >= 0) &&
		(layout <= 5);
}

static inline int algorithm_valid_raid6(int layout)
{
	return (layout >= 0 && layout <= 5)
		||
		(layout >= 8 && layout <= 10)
		||
		(layout >= 16 && layout <= 20);
}

static inline int algorithm_is_DDF(int layout)
{
	return layout >= 8 && layout <= 10;
}

#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
/*
 * Return the offset within its page of the data for r5dev 'disk_idx'.
 */
static inline int raid5_get_page_offset(struct stripe_head *sh, int disk_idx)
{
	return (disk_idx % sh->stripes_per_page) * RAID5_STRIPE_SIZE(sh->raid_conf);
}

/*
 * Return the page holding the data for r5dev 'disk_idx'.
 */
static inline struct page *
raid5_get_dev_page(struct stripe_head *sh, int disk_idx)
{
	return sh->pages[disk_idx / sh->stripes_per_page];
}
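
/*
 * Illustrative usage (editor's example): when the stripe size is smaller
 * than PAGE_SIZE, the data of r5dev 'i' therefore lives at
 *
 *	page_address(raid5_get_dev_page(sh, i)) + raid5_get_page_offset(sh, i)
 */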
#endif

void md_raid5_kick_device(struct r5conf *conf);
int raid5_set_cache_size(struct mddev *mddev, int size);
sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
void raid5_release_stripe(struct stripe_head *sh);
sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
		int previous, int *dd_idx, struct stripe_head *sh);

struct stripe_request_ctx;
/* get stripe from previous generation (when reshaping) */
#define R5_GAS_PREVIOUS		(1 << 0)
/* do not block waiting for a free stripe */
#define R5_GAS_NOBLOCK		(1 << 1)
/* do not block waiting for quiesce to be released */
#define R5_GAS_NOQUIESCE	(1 << 2)
struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
		struct stripe_request_ctx *ctx, sector_t sector,
		unsigned int flags);
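
/*
 * Illustrative usage (editor's example): a make_request-style caller that
 * prefers not to sleep might combine the flags above as
 *
 *	sh = raid5_get_active_stripe(conf, ctx, sector,
 *				     R5_GAS_PREVIOUS | R5_GAS_NOBLOCK);
 *	if (!sh)
 *		... retry later ...
 */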

int raid5_calc_degraded(struct r5conf *conf);
int r5c_journal_mode_set(struct mddev *mddev, int journal_mode);
#endif