1/*
2 * raid10.c : Multiple Devices driver for Linux
3 *
4 * Copyright (C) 2000-2004 Neil Brown
5 *
6 * RAID-10 support for md.
7 *
8 * Based on code in raid1.c.  See raid1.c for further copyright information.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * You should have received a copy of the GNU General Public License
17 * (for example /usr/src/linux/COPYING); if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/slab.h>
22#include <linux/delay.h>
23#include <linux/blkdev.h>
24#include <linux/module.h>
25#include <linux/seq_file.h>
26#include <linux/ratelimit.h>
27#include <linux/kthread.h>
28#include "md.h"
29#include "raid10.h"
30#include "raid0.h"
31#include "bitmap.h"
32
33/*
34 * RAID10 provides a combination of RAID0 and RAID1 functionality.
35 * The layout of data is defined by
36 *    chunk_size
37 *    raid_disks
38 *    near_copies (stored in low byte of layout)
39 *    far_copies (stored in second byte of layout)
40 *    far_offset (stored in bit 16 of layout)
41 *    use_far_sets (stored in bit 17 of layout)
42 *
43 * The data to be stored is divided into chunks using chunksize.  Each device
44 * is divided into far_copies sections.   In each section, chunks are laid out
45 * in a style similar to raid0, but near_copies copies of each chunk are stored
46 * (each on a different drive).  The starting device for each section is offset
47 * near_copies from the starting device of the previous section.  Thus there
48 * are (near_copies * far_copies) of each chunk, and each is on a different
49 * drive.  near_copies and far_copies must be at least one, and their product
50 * is at most raid_disks.
51 *
52 * If far_offset is true, then the far_copies are handled a bit differently.
53 * The copies are still in different stripes, but instead of being very far
54 * apart on disk, they are in adjacent stripes.
55 *
56 * The far and offset algorithms are handled slightly differently if
57 * 'use_far_sets' is true.  In this case, the array's devices are grouped into
58 * sets that are (near_copies * far_copies) in size.  The far copied stripes
59 * are still shifted by 'near_copies' devices, but this shifting stays confined
60 * to the set rather than the entire array.  This is done to improve the number
61 * of device combinations that can fail without causing the array to fail.
62 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
63 * on a device):
64 *    A B C D    A B C D E
65 *      ...         ...
66 *    D A B C    E A B C D
67 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
68 *    [A B] [C D]    [A B] [C D E]
69 *    |...| |...|    |...| | ... |
70 *    [B A] [D C]    [B A] [E C D]
71 */
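/*
 * Purely illustrative decoding of the layout word, based only on the bit
 * assignments listed above (the value 0x102 is an example, not a
 * recommendation):
 *
 *	layout = 0x102;
 *	near_copies  =  layout        & 0xff;	// 2
 *	far_copies   = (layout >> 8)  & 0xff;	// 1
 *	far_offset   = (layout >> 16) & 1;	// 0
 *	use_far_sets = (layout >> 17) & 1;	// 0
 *
 * i.e. the common "near 2" layout: two copies of every chunk on adjacent
 * devices, no far copies.
 */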
72
73/*
74 * Number of guaranteed r10bios in case of extreme VM load:
75 */
76#define	NR_RAID10_BIOS 256
77
78/* When we get a read error on a read-only array, we redirect to another
79 * device without failing the first device or trying to over-write it to
80 * correct the read error.  To keep track of bad blocks on a per-bio
81 * level, we store IO_BLOCKED in the appropriate 'bios' pointer.
82 */
83#define IO_BLOCKED ((struct bio *)1)
84/* When we successfully write to a known bad-block, we need to remove the
85 * bad-block marking which must be done from process context.  So we record
86 * the success by setting devs[n].bio to IO_MADE_GOOD
87 */
88#define IO_MADE_GOOD ((struct bio *)2)
89
90#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
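/* Illustrative note: because NULL, IO_BLOCKED and IO_MADE_GOOD are the
 * values 0, 1 and 2, BIO_SPECIAL() is true for all three, so code that
 * tests !BIO_SPECIAL(bio) before bio_put() (e.g. put_all_bios() below)
 * only ever releases real bio pointers.
 */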
91
92/* When there are this many requests queued to be written by
93 * the raid10 thread, we become 'congested' to provide back-pressure
94 * for writeback.
95 */
96static int max_queued_requests = 1024;
97
98static void allow_barrier(struct r10conf *conf);
99static void lower_barrier(struct r10conf *conf);
100static int _enough(struct r10conf *conf, int previous, int ignore);
101static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
102				int *skipped);
103static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
104static void end_reshape_write(struct bio *bio, int error);
105static void end_reshape(struct r10conf *conf);
106
107static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
108{
109	struct r10conf *conf = data;
110	int size = offsetof(struct r10bio, devs[conf->copies]);
111
112	/* allocate a r10bio with room for conf->copies entries in the
113	 * bios array */
114	return kzalloc(size, gfp_flags);
115}
116
117static void r10bio_pool_free(void *r10_bio, void *data)
118{
119	kfree(r10_bio);
120}
121
122/* Maximum size of each resync request */
123#define RESYNC_BLOCK_SIZE (64*1024)
124#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
125/* amount of memory to reserve for resync requests */
126#define RESYNC_WINDOW (1024*1024)
127/* maximum number of concurrent requests, memory permitting */
128#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
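/*
 * Worked example of the sizes above, assuming the common 4K PAGE_SIZE
 * (illustrative only):
 *	RESYNC_PAGES  = (64*1024 + 4096 - 1) / 4096  = 16 pages per bio
 *	RESYNC_WINDOW / RESYNC_BLOCK_SIZE            = 16 reserved requests
 *	RESYNC_DEPTH  = (32*1024*1024) / (64*1024)   = 512 requests at most
 */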
129
130/*
131 * When performing a resync, we need to read and compare, so
132 * we need as many pages as there are copies.
133 * When performing a recovery, we need 2 bios, one for read,
134 * one for write (we recover only one drive per r10buf).
135 *
136 */
137static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
138{
139	struct r10conf *conf = data;
140	struct page *page;
141	struct r10bio *r10_bio;
142	struct bio *bio;
143	int i, j;
144	int nalloc;
145
146	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
147	if (!r10_bio)
148		return NULL;
149
150	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
151	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
152		nalloc = conf->copies; /* resync */
153	else
154		nalloc = 2; /* recovery */
155
156	/*
157	 * Allocate bios.
158	 */
159	for (j = nalloc ; j-- ; ) {
160		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
161		if (!bio)
162			goto out_free_bio;
163		r10_bio->devs[j].bio = bio;
164		if (!conf->have_replacement)
165			continue;
166		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
167		if (!bio)
168			goto out_free_bio;
169		r10_bio->devs[j].repl_bio = bio;
170	}
171	/*
172	 * Allocate RESYNC_PAGES data pages and attach them
173	 * where needed.
174	 */
175	for (j = 0 ; j < nalloc; j++) {
176		struct bio *rbio = r10_bio->devs[j].repl_bio;
177		bio = r10_bio->devs[j].bio;
178		for (i = 0; i < RESYNC_PAGES; i++) {
179			if (j > 0 && !test_bit(MD_RECOVERY_SYNC,
180					       &conf->mddev->recovery)) {
181				/* we can share bv_page's during recovery
182				 * and reshape */
183				struct bio *rbio = r10_bio->devs[0].bio;
184				page = rbio->bi_io_vec[i].bv_page;
185				get_page(page);
186			} else
187				page = alloc_page(gfp_flags);
188			if (unlikely(!page))
189				goto out_free_pages;
190
191			bio->bi_io_vec[i].bv_page = page;
192			if (rbio)
193				rbio->bi_io_vec[i].bv_page = page;
194		}
195	}
196
197	return r10_bio;
198
199out_free_pages:
200	for ( ; i > 0 ; i--)
201		safe_put_page(bio->bi_io_vec[i-1].bv_page);
202	while (j--)
203		for (i = 0; i < RESYNC_PAGES ; i++)
204			safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
205	j = 0;
206out_free_bio:
207	for ( ; j < nalloc; j++) {
208		if (r10_bio->devs[j].bio)
209			bio_put(r10_bio->devs[j].bio);
210		if (r10_bio->devs[j].repl_bio)
211			bio_put(r10_bio->devs[j].repl_bio);
212	}
213	r10bio_pool_free(r10_bio, conf);
214	return NULL;
215}
216
217static void r10buf_pool_free(void *__r10_bio, void *data)
218{
219	int i;
220	struct r10conf *conf = data;
221	struct r10bio *r10bio = __r10_bio;
222	int j;
223
224	for (j=0; j < conf->copies; j++) {
225		struct bio *bio = r10bio->devs[j].bio;
226		if (bio) {
227			for (i = 0; i < RESYNC_PAGES; i++) {
228				safe_put_page(bio->bi_io_vec[i].bv_page);
229				bio->bi_io_vec[i].bv_page = NULL;
230			}
231			bio_put(bio);
232		}
233		bio = r10bio->devs[j].repl_bio;
234		if (bio)
235			bio_put(bio);
236	}
237	r10bio_pool_free(r10bio, conf);
238}
239
240static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
241{
242	int i;
243
244	for (i = 0; i < conf->copies; i++) {
245		struct bio **bio = & r10_bio->devs[i].bio;
246		if (!BIO_SPECIAL(*bio))
247			bio_put(*bio);
248		*bio = NULL;
249		bio = &r10_bio->devs[i].repl_bio;
250		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
251			bio_put(*bio);
252		*bio = NULL;
253	}
254}
255
256static void free_r10bio(struct r10bio *r10_bio)
257{
258	struct r10conf *conf = r10_bio->mddev->private;
259
260	put_all_bios(conf, r10_bio);
261	mempool_free(r10_bio, conf->r10bio_pool);
262}
263
264static void put_buf(struct r10bio *r10_bio)
265{
266	struct r10conf *conf = r10_bio->mddev->private;
267
268	mempool_free(r10_bio, conf->r10buf_pool);
269
270	lower_barrier(conf);
271}
272
273static void reschedule_retry(struct r10bio *r10_bio)
274{
275	unsigned long flags;
276	struct mddev *mddev = r10_bio->mddev;
277	struct r10conf *conf = mddev->private;
278
279	spin_lock_irqsave(&conf->device_lock, flags);
280	list_add(&r10_bio->retry_list, &conf->retry_list);
281	conf->nr_queued ++;
282	spin_unlock_irqrestore(&conf->device_lock, flags);
283
284	/* wake up frozen array... */
285	wake_up(&conf->wait_barrier);
286
287	md_wakeup_thread(mddev->thread);
288}
289
290/*
291 * raid_end_bio_io() is called when we have finished servicing a mirrored
292 * operation and are ready to return a success/failure code to the buffer
293 * cache layer.
294 */
295static void raid_end_bio_io(struct r10bio *r10_bio)
296{
297	struct bio *bio = r10_bio->master_bio;
298	int done;
299	struct r10conf *conf = r10_bio->mddev->private;
300
301	if (bio->bi_phys_segments) {
302		unsigned long flags;
303		spin_lock_irqsave(&conf->device_lock, flags);
304		bio->bi_phys_segments--;
305		done = (bio->bi_phys_segments == 0);
306		spin_unlock_irqrestore(&conf->device_lock, flags);
307	} else
308		done = 1;
309	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
310		clear_bit(BIO_UPTODATE, &bio->bi_flags);
311	if (done) {
312		bio_endio(bio, 0);
313		/*
314		 * Wake up any possible resync thread that waits for the device
315		 * to go idle.
316		 */
317		allow_barrier(conf);
318	}
319	free_r10bio(r10_bio);
320}
321
322/*
323 * Update disk head position estimator based on IRQ completion info.
324 */
325static inline void update_head_pos(int slot, struct r10bio *r10_bio)
326{
327	struct r10conf *conf = r10_bio->mddev->private;
328
329	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
330		r10_bio->devs[slot].addr + (r10_bio->sectors);
331}
332
333/*
334 * Find the disk number which triggered the given bio
335 */
336static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
337			 struct bio *bio, int *slotp, int *replp)
338{
339	int slot;
340	int repl = 0;
341
342	for (slot = 0; slot < conf->copies; slot++) {
343		if (r10_bio->devs[slot].bio == bio)
344			break;
345		if (r10_bio->devs[slot].repl_bio == bio) {
346			repl = 1;
347			break;
348		}
349	}
350
351	BUG_ON(slot == conf->copies);
352	update_head_pos(slot, r10_bio);
353
354	if (slotp)
355		*slotp = slot;
356	if (replp)
357		*replp = repl;
358	return r10_bio->devs[slot].devnum;
359}
360
361static void raid10_end_read_request(struct bio *bio, int error)
362{
363	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
364	struct r10bio *r10_bio = bio->bi_private;
365	int slot, dev;
366	struct md_rdev *rdev;
367	struct r10conf *conf = r10_bio->mddev->private;
368
369	slot = r10_bio->read_slot;
370	dev = r10_bio->devs[slot].devnum;
371	rdev = r10_bio->devs[slot].rdev;
372	/*
373	 * this branch is our 'one mirror IO has finished' event handler:
374	 */
375	update_head_pos(slot, r10_bio);
376
377	if (uptodate) {
378		/*
379		 * Set R10BIO_Uptodate in our master bio, so that
380		 * we will return a good error code to the higher
381		 * levels even if IO on some other mirrored buffer fails.
382		 *
383		 * The 'master' represents the composite IO operation to
384		 * user-side. So if something waits for IO, then it will
385		 * wait for the 'master' bio.
386		 */
387		set_bit(R10BIO_Uptodate, &r10_bio->state);
388	} else {
389		/* If all other devices that store this block have
390		 * failed, we want to return the error upwards rather
391		 * than fail the last device.  Here we redefine
392		 * "uptodate" to mean "Don't want to retry"
393		 */
394		if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
395			     rdev->raid_disk))
396			uptodate = 1;
397	}
398	if (uptodate) {
399		raid_end_bio_io(r10_bio);
400		rdev_dec_pending(rdev, conf->mddev);
401	} else {
402		/*
403		 * oops, read error - keep the refcount on the rdev
404		 */
405		char b[BDEVNAME_SIZE];
406		printk_ratelimited(KERN_ERR
407				   "md/raid10:%s: %s: rescheduling sector %llu\n",
408				   mdname(conf->mddev),
409				   bdevname(rdev->bdev, b),
410				   (unsigned long long)r10_bio->sector);
411		set_bit(R10BIO_ReadError, &r10_bio->state);
412		reschedule_retry(r10_bio);
413	}
414}
415
416static void close_write(struct r10bio *r10_bio)
417{
418	/* clear the bitmap if all writes complete successfully */
419	bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
420			r10_bio->sectors,
421			!test_bit(R10BIO_Degraded, &r10_bio->state),
422			0);
423	md_write_end(r10_bio->mddev);
424}
425
426static void one_write_done(struct r10bio *r10_bio)
427{
428	if (atomic_dec_and_test(&r10_bio->remaining)) {
429		if (test_bit(R10BIO_WriteError, &r10_bio->state))
430			reschedule_retry(r10_bio);
431		else {
432			close_write(r10_bio);
433			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
434				reschedule_retry(r10_bio);
435			else
436				raid_end_bio_io(r10_bio);
437		}
438	}
439}
440
441static void raid10_end_write_request(struct bio *bio, int error)
442{
443	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
444	struct r10bio *r10_bio = bio->bi_private;
445	int dev;
446	int dec_rdev = 1;
447	struct r10conf *conf = r10_bio->mddev->private;
448	int slot, repl;
449	struct md_rdev *rdev = NULL;
450
451	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
452
453	if (repl)
454		rdev = conf->mirrors[dev].replacement;
455	if (!rdev) {
456		smp_rmb();
457		repl = 0;
458		rdev = conf->mirrors[dev].rdev;
459	}
460	/*
461	 * this branch is our 'one mirror IO has finished' event handler:
462	 */
463	if (!uptodate) {
464		if (repl)
465			/* Never record new bad blocks to replacement,
466			 * just fail it.
467			 */
468			md_error(rdev->mddev, rdev);
469		else {
470			set_bit(WriteErrorSeen,	&rdev->flags);
471			if (!test_and_set_bit(WantReplacement, &rdev->flags))
472				set_bit(MD_RECOVERY_NEEDED,
473					&rdev->mddev->recovery);
474			set_bit(R10BIO_WriteError, &r10_bio->state);
475			dec_rdev = 0;
476		}
477	} else {
478		/*
479		 * Set R10BIO_Uptodate in our master bio, so that
480		 * we will return a good error code to the higher
481		 * levels even if IO on some other mirrored buffer fails.
482		 *
483		 * The 'master' represents the composite IO operation to
484		 * user-side. So if something waits for IO, then it will
485		 * wait for the 'master' bio.
486		 */
487		sector_t first_bad;
488		int bad_sectors;
489
490		/*
491		 * Do not set R10BIO_Uptodate if the current device is
492		 * rebuilding or Faulty.  This is because we cannot use such
493		 * a device for properly reading the data back (we could
494		 * potentially use it, if the current write would have landed
495		 * before rdev->recovery_offset, but for simplicity we don't
496		 * check this here).
497		 */
498		if (test_bit(In_sync, &rdev->flags) &&
499		    !test_bit(Faulty, &rdev->flags))
500			set_bit(R10BIO_Uptodate, &r10_bio->state);
501
502		/* Maybe we can clear some bad blocks. */
503		if (is_badblock(rdev,
504				r10_bio->devs[slot].addr,
505				r10_bio->sectors,
506				&first_bad, &bad_sectors)) {
507			bio_put(bio);
508			if (repl)
509				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
510			else
511				r10_bio->devs[slot].bio = IO_MADE_GOOD;
512			dec_rdev = 0;
513			set_bit(R10BIO_MadeGood, &r10_bio->state);
514		}
515	}
516
517	/*
518	 *
519	 * Let's see if all mirrored write operations have finished
520	 * already.
521	 */
522	one_write_done(r10_bio);
523	if (dec_rdev)
524		rdev_dec_pending(rdev, conf->mddev);
525}
526
527/*
528 * RAID10 layout manager
529 * As well as the chunksize and raid_disks count, there are two
530 * parameters: near_copies and far_copies.
531 * near_copies * far_copies must be <= raid_disks.
532 * Normally one of these will be 1.
533 * If both are 1, we get raid0.
534 * If near_copies == raid_disks, we get raid1.
535 *
536 * Chunks are laid out in raid0 style with near_copies copies of the
537 * first chunk, followed by near_copies copies of the next chunk and
538 * so on.
539 * If far_copies > 1, then after 1/far_copies of the array has been assigned
540 * as described above, we start again with a device offset of near_copies.
541 * So we effectively have another copy of the whole array further down all
542 * the drives, but with blocks on different drives.
543 * With this layout, a block is never stored twice on the same device.
544 *
545 * raid10_find_phys finds the sector offset of a given virtual sector
546 * on each device that it is on.
547 *
548 * raid10_find_virt does the reverse mapping, from a device and a
549 * sector offset to a virtual address
550 */
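/*
 * Illustrative walk-through of __raid10_find_phys() below, assuming a
 * 4-device "near 2" array (near_copies=2, far_copies=1) with 64K chunks
 * (chunk_shift=7, chunk_mask=127).  For virtual sector 1000:
 *
 *	chunk  = 1000 >> 7        = 7
 *	offset = 1000 & 127       = 104
 *	chunk *= near_copies      = 14
 *	dev    = 14 % raid_disks  = 2,  stripe = 14 / 4 = 3
 *	addr   = 104 + (3 << 7)   = 488
 *
 * so the two copies land at sector 488 of devices 2 and 3.
 */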
551
552static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
553{
554	int n,f;
555	sector_t sector;
556	sector_t chunk;
557	sector_t stripe;
558	int dev;
559	int slot = 0;
560	int last_far_set_start, last_far_set_size;
561
562	last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
563	last_far_set_start *= geo->far_set_size;
564
565	last_far_set_size = geo->far_set_size;
566	last_far_set_size += (geo->raid_disks % geo->far_set_size);
567
568	/* now calculate first sector/dev */
569	chunk = r10bio->sector >> geo->chunk_shift;
570	sector = r10bio->sector & geo->chunk_mask;
571
572	chunk *= geo->near_copies;
573	stripe = chunk;
574	dev = sector_div(stripe, geo->raid_disks);
575	if (geo->far_offset)
576		stripe *= geo->far_copies;
577
578	sector += stripe << geo->chunk_shift;
579
580	/* and calculate all the others */
581	for (n = 0; n < geo->near_copies; n++) {
582		int d = dev;
583		int set;
584		sector_t s = sector;
585		r10bio->devs[slot].devnum = d;
586		r10bio->devs[slot].addr = s;
587		slot++;
588
589		for (f = 1; f < geo->far_copies; f++) {
590			set = d / geo->far_set_size;
591			d += geo->near_copies;
592
593			if ((geo->raid_disks % geo->far_set_size) &&
594			    (d > last_far_set_start)) {
595				d -= last_far_set_start;
596				d %= last_far_set_size;
597				d += last_far_set_start;
598			} else {
599				d %= geo->far_set_size;
600				d += geo->far_set_size * set;
601			}
602			s += geo->stride;
603			r10bio->devs[slot].devnum = d;
604			r10bio->devs[slot].addr = s;
605			slot++;
606		}
607		dev++;
608		if (dev >= geo->raid_disks) {
609			dev = 0;
610			sector += (geo->chunk_mask + 1);
611		}
612	}
613}
614
615static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
616{
617	struct geom *geo = &conf->geo;
618
619	if (conf->reshape_progress != MaxSector &&
620	    ((r10bio->sector >= conf->reshape_progress) !=
621	     conf->mddev->reshape_backwards)) {
622		set_bit(R10BIO_Previous, &r10bio->state);
623		geo = &conf->prev;
624	} else
625		clear_bit(R10BIO_Previous, &r10bio->state);
626
627	__raid10_find_phys(geo, r10bio);
628}
629
630static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
631{
632	sector_t offset, chunk, vchunk;
633	/* Never use conf->prev as this is only called during resync
634	 * or recovery, so reshape isn't happening
635	 */
636	struct geom *geo = &conf->geo;
637	int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
638	int far_set_size = geo->far_set_size;
639	int last_far_set_start;
640
641	if (geo->raid_disks % geo->far_set_size) {
642		last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
643		last_far_set_start *= geo->far_set_size;
644
645		if (dev >= last_far_set_start) {
646			far_set_size = geo->far_set_size;
647			far_set_size += (geo->raid_disks % geo->far_set_size);
648			far_set_start = last_far_set_start;
649		}
650	}
651
652	offset = sector & geo->chunk_mask;
653	if (geo->far_offset) {
654		int fc;
655		chunk = sector >> geo->chunk_shift;
656		fc = sector_div(chunk, geo->far_copies);
657		dev -= fc * geo->near_copies;
658		if (dev < far_set_start)
659			dev += far_set_size;
660	} else {
661		while (sector >= geo->stride) {
662			sector -= geo->stride;
663			if (dev < (geo->near_copies + far_set_start))
664				dev += far_set_size - geo->near_copies;
665			else
666				dev -= geo->near_copies;
667		}
668		chunk = sector >> geo->chunk_shift;
669	}
670	vchunk = chunk * geo->raid_disks + dev;
671	sector_div(vchunk, geo->near_copies);
672	return (vchunk << geo->chunk_shift) + offset;
673}
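/*
 * Continuing the illustrative example before __raid10_find_phys():
 * raid10_find_virt() is the inverse mapping, so with the same 4-device
 * near-2 geometry (and device sector 488 well below geo->stride, so the
 * far-copy adjustment loop does not trigger):
 *
 *	raid10_find_virt(conf, 488, 2)
 *		chunk  = 488 >> 7             = 3
 *		vchunk = (3 * 4 + 2) / 2      = 7
 *		return (7 << 7) + (488 & 127) = 1000
 *
 * which recovers the original virtual sector.
 */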
674
675/**
676 *	raid10_mergeable_bvec -- tell the bio layer if two requests can be merged
677 *	@mddev: the md device
678 *	@bvm: properties of new bio
679 *	@biovec: the request that could be merged to it.
680 *
681 *	Return the number of bytes we can accept at this offset
682 *	This requires checking for end-of-chunk if near_copies != raid_disks,
683 *	and for subordinate merge_bvec_fns if merge_check_needed.
684 */
685static int raid10_mergeable_bvec(struct mddev *mddev,
686				 struct bvec_merge_data *bvm,
687				 struct bio_vec *biovec)
688{
689	struct r10conf *conf = mddev->private;
690	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
691	int max;
692	unsigned int chunk_sectors;
693	unsigned int bio_sectors = bvm->bi_size >> 9;
694	struct geom *geo = &conf->geo;
695
696	chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1;
697	if (conf->reshape_progress != MaxSector &&
698	    ((sector >= conf->reshape_progress) !=
699	     conf->mddev->reshape_backwards))
700		geo = &conf->prev;
701
702	if (geo->near_copies < geo->raid_disks) {
703		max = (chunk_sectors - ((sector & (chunk_sectors - 1))
704					+ bio_sectors)) << 9;
705		if (max < 0)
706			/* bio_add cannot handle a negative return */
707			max = 0;
708		if (max <= biovec->bv_len && bio_sectors == 0)
709			return biovec->bv_len;
710	} else
711		max = biovec->bv_len;
712
713	if (mddev->merge_check_needed) {
714		struct {
715			struct r10bio r10_bio;
716			struct r10dev devs[conf->copies];
717		} on_stack;
718		struct r10bio *r10_bio = &on_stack.r10_bio;
719		int s;
720		if (conf->reshape_progress != MaxSector) {
721			/* Cannot give any guidance during reshape */
722			if (max <= biovec->bv_len && bio_sectors == 0)
723				return biovec->bv_len;
724			return 0;
725		}
726		r10_bio->sector = sector;
727		raid10_find_phys(conf, r10_bio);
728		rcu_read_lock();
729		for (s = 0; s < conf->copies; s++) {
730			int disk = r10_bio->devs[s].devnum;
731			struct md_rdev *rdev = rcu_dereference(
732				conf->mirrors[disk].rdev);
733			if (rdev && !test_bit(Faulty, &rdev->flags)) {
734				struct request_queue *q =
735					bdev_get_queue(rdev->bdev);
736				if (q->merge_bvec_fn) {
737					bvm->bi_sector = r10_bio->devs[s].addr
738						+ rdev->data_offset;
739					bvm->bi_bdev = rdev->bdev;
740					max = min(max, q->merge_bvec_fn(
741							  q, bvm, biovec));
742				}
743			}
744			rdev = rcu_dereference(conf->mirrors[disk].replacement);
745			if (rdev && !test_bit(Faulty, &rdev->flags)) {
746				struct request_queue *q =
747					bdev_get_queue(rdev->bdev);
748				if (q->merge_bvec_fn) {
749					bvm->bi_sector = r10_bio->devs[s].addr
750						+ rdev->data_offset;
751					bvm->bi_bdev = rdev->bdev;
752					max = min(max, q->merge_bvec_fn(
753							  q, bvm, biovec));
754				}
755			}
756		}
757		rcu_read_unlock();
758	}
759	return max;
760}
761
762/*
763 * This routine returns the disk from which the requested read should
764 * be done. There is a per-array 'next expected sequential IO' sector
765 * number - if this matches on the next IO then we use the last disk.
766 * There is also a per-disk 'last known head position' sector that is
767 * maintained from IRQ context; both the normal and the resync IO
768 * completion handlers update this position correctly. If there is no
769 * perfect sequential match then we pick the disk whose head is closest.
770 *
771 * If there are 2 mirrors in the same 2 devices, performance degrades
772 * because position is mirror-based, not device-based.
773 *
774 * The rdev for the device selected will have nr_pending incremented.
775 */
776
777/*
778 * FIXME: possibly should rethink readbalancing and do it differently
779 * depending on near_copies / far_copies geometry.
780 */
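/*
 * Illustrative summary of the balancing heuristic implemented below (not
 * a contract): an idle device wins outright on 'near' layouts; on 'far'
 * layouts the copy with the lowest device address wins; otherwise the
 * device whose last known head position is closest, i.e. the one with
 * the smallest
 *
 *	new_distance = abs(devs[slot].addr - mirrors[disk].head_position);
 *
 * is preferred.
 */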
781static struct md_rdev *read_balance(struct r10conf *conf,
782				    struct r10bio *r10_bio,
783				    int *max_sectors)
784{
785	const sector_t this_sector = r10_bio->sector;
786	int disk, slot;
787	int sectors = r10_bio->sectors;
788	int best_good_sectors;
789	sector_t new_distance, best_dist;
790	struct md_rdev *best_rdev, *rdev = NULL;
791	int do_balance;
792	int best_slot;
793	struct geom *geo = &conf->geo;
794
795	raid10_find_phys(conf, r10_bio);
796	rcu_read_lock();
797retry:
798	sectors = r10_bio->sectors;
799	best_slot = -1;
800	best_rdev = NULL;
801	best_dist = MaxSector;
802	best_good_sectors = 0;
803	do_balance = 1;
804	/*
805	 * Check if we can balance. We can balance on the whole
806	 * device if no resync is going on (recovery is ok), or below
807	 * the resync window. We take the first readable disk when
808	 * above the resync window.
809	 */
810	if (conf->mddev->recovery_cp < MaxSector
811	    && (this_sector + sectors >= conf->next_resync))
812		do_balance = 0;
813
814	for (slot = 0; slot < conf->copies ; slot++) {
815		sector_t first_bad;
816		int bad_sectors;
817		sector_t dev_sector;
818
819		if (r10_bio->devs[slot].bio == IO_BLOCKED)
820			continue;
821		disk = r10_bio->devs[slot].devnum;
822		rdev = rcu_dereference(conf->mirrors[disk].replacement);
823		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
824		    test_bit(Unmerged, &rdev->flags) ||
825		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
826			rdev = rcu_dereference(conf->mirrors[disk].rdev);
827		if (rdev == NULL ||
828		    test_bit(Faulty, &rdev->flags) ||
829		    test_bit(Unmerged, &rdev->flags))
830			continue;
831		if (!test_bit(In_sync, &rdev->flags) &&
832		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
833			continue;
834
835		dev_sector = r10_bio->devs[slot].addr;
836		if (is_badblock(rdev, dev_sector, sectors,
837				&first_bad, &bad_sectors)) {
838			if (best_dist < MaxSector)
839				/* Already have a better slot */
840				continue;
841			if (first_bad <= dev_sector) {
842				/* Cannot read here.  If this is the
843				 * 'primary' device, then we must not read
844				 * beyond 'bad_sectors' from another device.
845				 */
846				bad_sectors -= (dev_sector - first_bad);
847				if (!do_balance && sectors > bad_sectors)
848					sectors = bad_sectors;
849				if (best_good_sectors > sectors)
850					best_good_sectors = sectors;
851			} else {
852				sector_t good_sectors =
853					first_bad - dev_sector;
854				if (good_sectors > best_good_sectors) {
855					best_good_sectors = good_sectors;
856					best_slot = slot;
857					best_rdev = rdev;
858				}
859				if (!do_balance)
860					/* Must read from here */
861					break;
862			}
863			continue;
864		} else
865			best_good_sectors = sectors;
866
867		if (!do_balance)
868			break;
869
870		/* This optimisation is debatable, and completely destroys
871		 * sequential read speed for 'far copies' arrays.  So only
872		 * keep it for 'near' arrays, and review those later.
873		 */
874		if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
875			break;
876
877		/* for far > 1 always use the lowest address */
878		if (geo->far_copies > 1)
879			new_distance = r10_bio->devs[slot].addr;
880		else
881			new_distance = abs(r10_bio->devs[slot].addr -
882					   conf->mirrors[disk].head_position);
883		if (new_distance < best_dist) {
884			best_dist = new_distance;
885			best_slot = slot;
886			best_rdev = rdev;
887		}
888	}
889	if (slot >= conf->copies) {
890		slot = best_slot;
891		rdev = best_rdev;
892	}
893
894	if (slot >= 0) {
895		atomic_inc(&rdev->nr_pending);
896		if (test_bit(Faulty, &rdev->flags)) {
897			/* Cannot risk returning a device that failed
898			 * before we inc'ed nr_pending
899			 */
900			rdev_dec_pending(rdev, conf->mddev);
901			goto retry;
902		}
903		r10_bio->read_slot = slot;
904	} else
905		rdev = NULL;
906	rcu_read_unlock();
907	*max_sectors = best_good_sectors;
908
909	return rdev;
910}
911
912static int raid10_congested(struct mddev *mddev, int bits)
913{
914	struct r10conf *conf = mddev->private;
915	int i, ret = 0;
916
917	if ((bits & (1 << BDI_async_congested)) &&
918	    conf->pending_count >= max_queued_requests)
919		return 1;
920
921	rcu_read_lock();
922	for (i = 0;
923	     (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
924		     && ret == 0;
925	     i++) {
926		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
927		if (rdev && !test_bit(Faulty, &rdev->flags)) {
928			struct request_queue *q = bdev_get_queue(rdev->bdev);
929
930			ret |= bdi_congested(&q->backing_dev_info, bits);
931		}
932	}
933	rcu_read_unlock();
934	return ret;
935}
936
937static void flush_pending_writes(struct r10conf *conf)
938{
939	/* Any writes that have been queued but are awaiting
940	 * bitmap updates get flushed here.
941	 */
942	spin_lock_irq(&conf->device_lock);
943
944	if (conf->pending_bio_list.head) {
945		struct bio *bio;
946		bio = bio_list_get(&conf->pending_bio_list);
947		conf->pending_count = 0;
948		spin_unlock_irq(&conf->device_lock);
949		/* flush any pending bitmap writes to disk
950		 * before proceeding w/ I/O */
951		bitmap_unplug(conf->mddev->bitmap);
952		wake_up(&conf->wait_barrier);
953
954		while (bio) { /* submit pending writes */
955			struct bio *next = bio->bi_next;
956			bio->bi_next = NULL;
957			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
958			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
959				/* Just ignore it */
960				bio_endio(bio, 0);
961			else
962				generic_make_request(bio);
963			bio = next;
964		}
965	} else
966		spin_unlock_irq(&conf->device_lock);
967}
968
969/* Barriers....
970 * Sometimes we need to suspend IO while we do something else,
971 * either some resync/recovery, or reconfigure the array.
972 * To do this we raise a 'barrier'.
973 * The 'barrier' is a counter that can be raised multiple times
974 * to count how many activities are happening which preclude
975 * normal IO.
976 * We can only raise the barrier if there is no pending IO.
977 * i.e. if nr_pending == 0.
978 * We choose only to raise the barrier if no-one is waiting for the
979 * barrier to go down.  This means that as soon as an IO request
980 * is ready, no other operations which require a barrier will start
981 * until the IO request has had a chance.
982 *
983 * So: regular IO calls 'wait_barrier'.  When that returns there
984 *    is no background IO happening.  It must arrange to call
985 *    allow_barrier when it has finished its IO.
986 * Background IO calls must call raise_barrier.  Once that returns
987 *    there is no normal IO happening.  It must arrange to call
988 *    lower_barrier when the particular background IO completes.
989 */
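/*
 * Sketch of the expected pairing (illustration only, restating the rules
 * above):
 *
 *	regular IO:		wait_barrier(conf);
 *				... submit and complete the request ...
 *				allow_barrier(conf);
 *
 *	resync/recovery:	raise_barrier(conf, force);
 *				... background IO ...
 *				lower_barrier(conf);
 */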
990
991static void raise_barrier(struct r10conf *conf, int force)
992{
993	BUG_ON(force && !conf->barrier);
994	spin_lock_irq(&conf->resync_lock);
995
996	/* Wait until no block IO is waiting (unless 'force') */
997	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
998			    conf->resync_lock);
999
1000	/* block any new IO from starting */
1001	conf->barrier++;
1002
1003	/* Now wait for all pending IO to complete */
1004	wait_event_lock_irq(conf->wait_barrier,
1005			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
1006			    conf->resync_lock);
1007
1008	spin_unlock_irq(&conf->resync_lock);
1009}
1010
1011static void lower_barrier(struct r10conf *conf)
1012{
1013	unsigned long flags;
1014	spin_lock_irqsave(&conf->resync_lock, flags);
1015	conf->barrier--;
1016	spin_unlock_irqrestore(&conf->resync_lock, flags);
1017	wake_up(&conf->wait_barrier);
1018}
1019
1020static void wait_barrier(struct r10conf *conf)
1021{
1022	spin_lock_irq(&conf->resync_lock);
1023	if (conf->barrier) {
1024		conf->nr_waiting++;
1025		/* Wait for the barrier to drop.
1026		 * However if there are already pending
1027		 * requests (preventing the barrier from
1028		 * rising completely), and the
1029		 * pre-process bio queue isn't empty,
1030		 * then don't wait, as we need to empty
1031		 * that queue to get the nr_pending
1032		 * count down.
1033		 */
1034		wait_event_lock_irq(conf->wait_barrier,
1035				    !conf->barrier ||
1036				    (conf->nr_pending &&
1037				     current->bio_list &&
1038				     !bio_list_empty(current->bio_list)),
1039				    conf->resync_lock);
1040		conf->nr_waiting--;
1041	}
1042	conf->nr_pending++;
1043	spin_unlock_irq(&conf->resync_lock);
1044}
1045
1046static void allow_barrier(struct r10conf *conf)
1047{
1048	unsigned long flags;
1049	spin_lock_irqsave(&conf->resync_lock, flags);
1050	conf->nr_pending--;
1051	spin_unlock_irqrestore(&conf->resync_lock, flags);
1052	wake_up(&conf->wait_barrier);
1053}
1054
1055static void freeze_array(struct r10conf *conf, int extra)
1056{
1057	/* stop sync IO and normal IO and wait for everything to
1058	 * go quiet.
1059	 * We increment barrier and nr_waiting, and then
1060	 * wait until nr_pending matches nr_queued+extra.
1061	 * This is called in the context of one normal IO request
1062	 * that has failed. Thus any sync request that might be pending
1063	 * will be blocked by nr_pending, and we need to wait for
1064	 * pending IO requests to complete or be queued for re-try.
1065	 * Thus the number queued (nr_queued) plus this request (extra)
1066	 * must match the number of pending IOs (nr_pending) before
1067	 * we continue.
1068	 */
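	/* Worked example (illustration only): with extra == 1 for the
	 * request that just failed, the wait below releases once
	 * nr_pending == nr_queued + 1, i.e. once every other in-flight
	 * request has either completed (dropping nr_pending) or been
	 * queued for retry (raising nr_queued).
	 */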
1069	spin_lock_irq(&conf->resync_lock);
1070	conf->barrier++;
1071	conf->nr_waiting++;
1072	wait_event_lock_irq_cmd(conf->wait_barrier,
1073				conf->nr_pending == conf->nr_queued+extra,
1074				conf->resync_lock,
1075				flush_pending_writes(conf));
1076
1077	spin_unlock_irq(&conf->resync_lock);
1078}
1079
1080static void unfreeze_array(struct r10conf *conf)
1081{
1082	/* reverse the effect of the freeze */
1083	spin_lock_irq(&conf->resync_lock);
1084	conf->barrier--;
1085	conf->nr_waiting--;
1086	wake_up(&conf->wait_barrier);
1087	spin_unlock_irq(&conf->resync_lock);
1088}
1089
1090static sector_t choose_data_offset(struct r10bio *r10_bio,
1091				   struct md_rdev *rdev)
1092{
1093	if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1094	    test_bit(R10BIO_Previous, &r10_bio->state))
1095		return rdev->data_offset;
1096	else
1097		return rdev->new_data_offset;
1098}
1099
1100struct raid10_plug_cb {
1101	struct blk_plug_cb	cb;
1102	struct bio_list		pending;
1103	int			pending_cnt;
1104};
1105
1106static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1107{
1108	struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
1109						   cb);
1110	struct mddev *mddev = plug->cb.data;
1111	struct r10conf *conf = mddev->private;
1112	struct bio *bio;
1113
1114	if (from_schedule || current->bio_list) {
1115		spin_lock_irq(&conf->device_lock);
1116		bio_list_merge(&conf->pending_bio_list, &plug->pending);
1117		conf->pending_count += plug->pending_cnt;
1118		spin_unlock_irq(&conf->device_lock);
1119		wake_up(&conf->wait_barrier);
1120		md_wakeup_thread(mddev->thread);
1121		kfree(plug);
1122		return;
1123	}
1124
1125	/* we aren't scheduling, so we can do the write-out directly. */
1126	bio = bio_list_get(&plug->pending);
1127	bitmap_unplug(mddev->bitmap);
1128	wake_up(&conf->wait_barrier);
1129
1130	while (bio) { /* submit pending writes */
1131		struct bio *next = bio->bi_next;
1132		bio->bi_next = NULL;
1133		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
1134		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
1135			/* Just ignore it */
1136			bio_endio(bio, 0);
1137		else
1138			generic_make_request(bio);
1139		bio = next;
1140	}
1141	kfree(plug);
1142}
1143
1144static void __make_request(struct mddev *mddev, struct bio *bio)
1145{
1146	struct r10conf *conf = mddev->private;
1147	struct r10bio *r10_bio;
1148	struct bio *read_bio;
1149	int i;
1150	const int rw = bio_data_dir(bio);
1151	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
1152	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
1153	const unsigned long do_discard = (bio->bi_rw
1154					  & (REQ_DISCARD | REQ_SECURE));
1155	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
1156	unsigned long flags;
1157	struct md_rdev *blocked_rdev;
1158	struct blk_plug_cb *cb;
1159	struct raid10_plug_cb *plug = NULL;
1160	int sectors_handled;
1161	int max_sectors;
1162	int sectors;
1163
1164	/*
1165	 * Register the new request and wait if the reconstruction
1166	 * thread has put up a bar for new requests.
1167	 * Continue immediately if no resync is active currently.
1168	 */
1169	wait_barrier(conf);
1170
1171	sectors = bio_sectors(bio);
1172	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1173	    bio->bi_iter.bi_sector < conf->reshape_progress &&
1174	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1175		/* IO spans the reshape position.  Need to wait for
1176		 * reshape to pass
1177		 */
1178		allow_barrier(conf);
1179		wait_event(conf->wait_barrier,
1180			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
1181			   conf->reshape_progress >= bio->bi_iter.bi_sector +
1182			   sectors);
1183		wait_barrier(conf);
1184	}
1185	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1186	    bio_data_dir(bio) == WRITE &&
1187	    (mddev->reshape_backwards
1188	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1189		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1190	     : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1191		bio->bi_iter.bi_sector < conf->reshape_progress))) {
1192		/* Need to update reshape_position in metadata */
1193		mddev->reshape_position = conf->reshape_progress;
1194		set_bit(MD_CHANGE_DEVS, &mddev->flags);
1195		set_bit(MD_CHANGE_PENDING, &mddev->flags);
1196		md_wakeup_thread(mddev->thread);
1197		wait_event(mddev->sb_wait,
1198			   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
1199
1200		conf->reshape_safe = mddev->reshape_position;
1201	}
1202
1203	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1204
1205	r10_bio->master_bio = bio;
1206	r10_bio->sectors = sectors;
1207
1208	r10_bio->mddev = mddev;
1209	r10_bio->sector = bio->bi_iter.bi_sector;
1210	r10_bio->state = 0;
1211
1212	/* We might need to issue multiple reads to different
1213	 * devices if there are bad blocks around, so we keep
1214	 * track of the number of reads in bio->bi_phys_segments.
1215	 * If this is 0, there is only one r10_bio and no locking
1216	 * will be needed when the request completes.  If it is
1217	 * non-zero, then it is the number of not-completed requests.
1218	 */
1219	bio->bi_phys_segments = 0;
1220	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
1221
1222	if (rw == READ) {
1223		/*
1224		 * read balancing logic:
1225		 */
1226		struct md_rdev *rdev;
1227		int slot;
1228
1229read_again:
1230		rdev = read_balance(conf, r10_bio, &max_sectors);
1231		if (!rdev) {
1232			raid_end_bio_io(r10_bio);
1233			return;
1234		}
1235		slot = r10_bio->read_slot;
1236
1237		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1238		bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
1239			 max_sectors);
1240
1241		r10_bio->devs[slot].bio = read_bio;
1242		r10_bio->devs[slot].rdev = rdev;
1243
1244		read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1245			choose_data_offset(r10_bio, rdev);
1246		read_bio->bi_bdev = rdev->bdev;
1247		read_bio->bi_end_io = raid10_end_read_request;
1248		read_bio->bi_rw = READ | do_sync;
1249		read_bio->bi_private = r10_bio;
1250
1251		if (max_sectors < r10_bio->sectors) {
1252			/* Could not read all from this device, so we will
1253			 * need another r10_bio.
1254			 */
1255			sectors_handled = (r10_bio->sector + max_sectors
1256					   - bio->bi_iter.bi_sector);
1257			r10_bio->sectors = max_sectors;
1258			spin_lock_irq(&conf->device_lock);
1259			if (bio->bi_phys_segments == 0)
1260				bio->bi_phys_segments = 2;
1261			else
1262				bio->bi_phys_segments++;
1263			spin_unlock_irq(&conf->device_lock);
1264			/* Cannot call generic_make_request directly
1265			 * as that will be queued in __generic_make_request
1266			 * and subsequent mempool_alloc might block
1267			 * waiting for it, so hand the bio over to raid10d.
1268			 */
1269			reschedule_retry(r10_bio);
1270
1271			r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1272
1273			r10_bio->master_bio = bio;
1274			r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1275			r10_bio->state = 0;
1276			r10_bio->mddev = mddev;
1277			r10_bio->sector = bio->bi_iter.bi_sector +
1278				sectors_handled;
1279			goto read_again;
1280		} else
1281			generic_make_request(read_bio);
1282		return;
1283	}
1284
1285	/*
1286	 * WRITE:
1287	 */
1288	if (conf->pending_count >= max_queued_requests) {
1289		md_wakeup_thread(mddev->thread);
1290		wait_event(conf->wait_barrier,
1291			   conf->pending_count < max_queued_requests);
1292	}
1293	/* first select target devices under rcu_lock and
1294	 * inc refcount on their rdev.  Record them by setting
1295	 * bios[x] to bio
1296	 * If there are known/acknowledged bad blocks on any device
1297	 * on which we have seen a write error, we want to avoid
1298	 * writing to those blocks.  This potentially requires several
1299	 * writes to write around the bad blocks.  Each set of writes
1300	 * gets its own r10_bio with a set of bios attached.  The number
1301	 * of r10_bios is recorded in bio->bi_phys_segments just as with
1302	 * the read case.
1303	 */
1304
1305	r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1306	raid10_find_phys(conf, r10_bio);
1307retry_write:
1308	blocked_rdev = NULL;
1309	rcu_read_lock();
1310	max_sectors = r10_bio->sectors;
1311
1312	for (i = 0;  i < conf->copies; i++) {
1313		int d = r10_bio->devs[i].devnum;
1314		struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
1315		struct md_rdev *rrdev = rcu_dereference(
1316			conf->mirrors[d].replacement);
1317		if (rdev == rrdev)
1318			rrdev = NULL;
1319		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1320			atomic_inc(&rdev->nr_pending);
1321			blocked_rdev = rdev;
1322			break;
1323		}
1324		if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
1325			atomic_inc(&rrdev->nr_pending);
1326			blocked_rdev = rrdev;
1327			break;
1328		}
1329		if (rdev && (test_bit(Faulty, &rdev->flags)
1330			     || test_bit(Unmerged, &rdev->flags)))
1331			rdev = NULL;
1332		if (rrdev && (test_bit(Faulty, &rrdev->flags)
1333			      || test_bit(Unmerged, &rrdev->flags)))
1334			rrdev = NULL;
1335
1336		r10_bio->devs[i].bio = NULL;
1337		r10_bio->devs[i].repl_bio = NULL;
1338
1339		if (!rdev && !rrdev) {
1340			set_bit(R10BIO_Degraded, &r10_bio->state);
1341			continue;
1342		}
1343		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1344			sector_t first_bad;
1345			sector_t dev_sector = r10_bio->devs[i].addr;
1346			int bad_sectors;
1347			int is_bad;
1348
1349			is_bad = is_badblock(rdev, dev_sector,
1350					     max_sectors,
1351					     &first_bad, &bad_sectors);
1352			if (is_bad < 0) {
1353				/* Mustn't write here until the bad block
1354				 * is acknowledged
1355				 */
1356				atomic_inc(&rdev->nr_pending);
1357				set_bit(BlockedBadBlocks, &rdev->flags);
1358				blocked_rdev = rdev;
1359				break;
1360			}
1361			if (is_bad && first_bad <= dev_sector) {
1362				/* Cannot write here at all */
1363				bad_sectors -= (dev_sector - first_bad);
1364				if (bad_sectors < max_sectors)
1365					/* Mustn't write more than bad_sectors
1366					 * to other devices yet
1367					 */
1368					max_sectors = bad_sectors;
1369				/* We don't set R10BIO_Degraded as that
1370				 * only applies if the disk is missing,
1371				 * so it might be re-added, and we want to
1372				 * know to recover this chunk.
1373				 * In this case the device is here, and the
1374				 * fact that this chunk is not in-sync is
1375				 * recorded in the bad block log.
1376				 */
1377				continue;
1378			}
1379			if (is_bad) {
1380				int good_sectors = first_bad - dev_sector;
1381				if (good_sectors < max_sectors)
1382					max_sectors = good_sectors;
1383			}
1384		}
1385		if (rdev) {
1386			r10_bio->devs[i].bio = bio;
1387			atomic_inc(&rdev->nr_pending);
1388		}
1389		if (rrdev) {
1390			r10_bio->devs[i].repl_bio = bio;
1391			atomic_inc(&rrdev->nr_pending);
1392		}
1393	}
1394	rcu_read_unlock();
1395
1396	if (unlikely(blocked_rdev)) {
1397		/* Have to wait for this device to get unblocked, then retry */
1398		int j;
1399		int d;
1400
1401		for (j = 0; j < i; j++) {
1402			if (r10_bio->devs[j].bio) {
1403				d = r10_bio->devs[j].devnum;
1404				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1405			}
1406			if (r10_bio->devs[j].repl_bio) {
1407				struct md_rdev *rdev;
1408				d = r10_bio->devs[j].devnum;
1409				rdev = conf->mirrors[d].replacement;
1410				if (!rdev) {
1411					/* Race with remove_disk */
1412					smp_mb();
1413					rdev = conf->mirrors[d].rdev;
1414				}
1415				rdev_dec_pending(rdev, mddev);
1416			}
1417		}
1418		allow_barrier(conf);
1419		md_wait_for_blocked_rdev(blocked_rdev, mddev);
1420		wait_barrier(conf);
1421		goto retry_write;
1422	}
1423
1424	if (max_sectors < r10_bio->sectors) {
1425		/* We are splitting this into multiple parts, so
1426		 * we need to prepare for allocating another r10_bio.
1427		 */
1428		r10_bio->sectors = max_sectors;
1429		spin_lock_irq(&conf->device_lock);
1430		if (bio->bi_phys_segments == 0)
1431			bio->bi_phys_segments = 2;
1432		else
1433			bio->bi_phys_segments++;
1434		spin_unlock_irq(&conf->device_lock);
1435	}
1436	sectors_handled = r10_bio->sector + max_sectors -
1437		bio->bi_iter.bi_sector;
1438
1439	atomic_set(&r10_bio->remaining, 1);
1440	bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1441
1442	for (i = 0; i < conf->copies; i++) {
1443		struct bio *mbio;
1444		int d = r10_bio->devs[i].devnum;
1445		if (r10_bio->devs[i].bio) {
1446			struct md_rdev *rdev = conf->mirrors[d].rdev;
1447			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1448			bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
1449				 max_sectors);
1450			r10_bio->devs[i].bio = mbio;
1451
1452			mbio->bi_iter.bi_sector	= (r10_bio->devs[i].addr+
1453					   choose_data_offset(r10_bio,
1454							      rdev));
1455			mbio->bi_bdev = rdev->bdev;
1456			mbio->bi_end_io	= raid10_end_write_request;
1457			mbio->bi_rw =
1458				WRITE | do_sync | do_fua | do_discard | do_same;
1459			mbio->bi_private = r10_bio;
1460
1461			atomic_inc(&r10_bio->remaining);
1462
1463			cb = blk_check_plugged(raid10_unplug, mddev,
1464					       sizeof(*plug));
1465			if (cb)
1466				plug = container_of(cb, struct raid10_plug_cb,
1467						    cb);
1468			else
1469				plug = NULL;
1470			spin_lock_irqsave(&conf->device_lock, flags);
1471			if (plug) {
1472				bio_list_add(&plug->pending, mbio);
1473				plug->pending_cnt++;
1474			} else {
1475				bio_list_add(&conf->pending_bio_list, mbio);
1476				conf->pending_count++;
1477			}
1478			spin_unlock_irqrestore(&conf->device_lock, flags);
1479			if (!plug)
1480				md_wakeup_thread(mddev->thread);
1481		}
1482
1483		if (r10_bio->devs[i].repl_bio) {
1484			struct md_rdev *rdev = conf->mirrors[d].replacement;
1485			if (rdev == NULL) {
1486				/* Replacement just got moved to main 'rdev' */
1487				smp_mb();
1488				rdev = conf->mirrors[d].rdev;
1489			}
1490			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1491			bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
1492				 max_sectors);
1493			r10_bio->devs[i].repl_bio = mbio;
1494
1495			mbio->bi_iter.bi_sector	= (r10_bio->devs[i].addr +
1496					   choose_data_offset(
1497						   r10_bio, rdev));
1498			mbio->bi_bdev = rdev->bdev;
1499			mbio->bi_end_io	= raid10_end_write_request;
1500			mbio->bi_rw =
1501				WRITE | do_sync | do_fua | do_discard | do_same;
1502			mbio->bi_private = r10_bio;
1503
1504			atomic_inc(&r10_bio->remaining);
1505			spin_lock_irqsave(&conf->device_lock, flags);
1506			bio_list_add(&conf->pending_bio_list, mbio);
1507			conf->pending_count++;
1508			spin_unlock_irqrestore(&conf->device_lock, flags);
1509			if (!mddev_check_plugged(mddev))
1510				md_wakeup_thread(mddev->thread);
1511		}
1512	}
1513
1514	/* Don't remove the bias on 'remaining' (one_write_done) until
1515	 * after checking if we need to go around again.
1516	 */
1517
1518	if (sectors_handled < bio_sectors(bio)) {
1519		one_write_done(r10_bio);
1520		/* We need another r10_bio.  It has already been counted
1521		 * in bio->bi_phys_segments.
1522		 */
1523		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1524
1525		r10_bio->master_bio = bio;
1526		r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1527
1528		r10_bio->mddev = mddev;
1529		r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
1530		r10_bio->state = 0;
1531		goto retry_write;
1532	}
1533	one_write_done(r10_bio);
1534}
1535
1536static void make_request(struct mddev *mddev, struct bio *bio)
1537{
1538	struct r10conf *conf = mddev->private;
1539	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1540	int chunk_sects = chunk_mask + 1;
1541
1542	struct bio *split;
1543
1544	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
1545		md_flush_request(mddev, bio);
1546		return;
1547	}
1548
1549	md_write_start(mddev, bio);
1550
1551	do {
1552
1553		/*
1554		 * If this request crosses a chunk boundary, we need to split
1555		 * it.
1556		 */
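		/* Worked example (illustration only, assuming 64K chunks,
		 * i.e. chunk_sects == 128, on a striped geometry): a
		 * 100-sector bio starting at sector 1000 has
		 * 1000 & 127 == 104, so only 128 - 104 == 24 sectors fit in
		 * the current chunk; we split off those 24 and the chained
		 * 76-sector remainder goes around the loop again.
		 */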
1557		if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
1558			     bio_sectors(bio) > chunk_sects
1559			     && (conf->geo.near_copies < conf->geo.raid_disks
1560				 || conf->prev.near_copies <
1561				 conf->prev.raid_disks))) {
1562			split = bio_split(bio, chunk_sects -
1563					  (bio->bi_iter.bi_sector &
1564					   (chunk_sects - 1)),
1565					  GFP_NOIO, fs_bio_set);
1566			bio_chain(split, bio);
1567		} else {
1568			split = bio;
1569		}
1570
1571		__make_request(mddev, split);
1572	} while (split != bio);
1573
1574	/* In case raid10d snuck in to freeze_array */
1575	wake_up(&conf->wait_barrier);
1576}
1577
1578static void status(struct seq_file *seq, struct mddev *mddev)
1579{
1580	struct r10conf *conf = mddev->private;
1581	int i;
1582
1583	if (conf->geo.near_copies < conf->geo.raid_disks)
1584		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1585	if (conf->geo.near_copies > 1)
1586		seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1587	if (conf->geo.far_copies > 1) {
1588		if (conf->geo.far_offset)
1589			seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1590		else
1591			seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1592	}
1593	seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1594					conf->geo.raid_disks - mddev->degraded);
1595	for (i = 0; i < conf->geo.raid_disks; i++)
1596		seq_printf(seq, "%s",
1597			      conf->mirrors[i].rdev &&
1598			      test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
1599	seq_printf(seq, "]");
1600}
1601
1602/* check if there are enough drives for
1603 * every block to appear on at least one.
1604 * Don't consider the device numbered 'ignore'
1605 * as we might be about to remove it.
1606 */
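/*
 * Illustrative example (assuming a 4-device near-2 array, so
 * conf->copies == 2): the loop below inspects device pairs {0,1} and
 * {2,3}; the array is viable only while each pair still has an In_sync
 * member, so losing disks 0 and 2 is survivable, while losing 0 and 1
 * is not.
 */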
1607static int _enough(struct r10conf *conf, int previous, int ignore)
1608{
1609	int first = 0;
1610	int has_enough = 0;
1611	int disks, ncopies;
1612	if (previous) {
1613		disks = conf->prev.raid_disks;
1614		ncopies = conf->prev.near_copies;
1615	} else {
1616		disks = conf->geo.raid_disks;
1617		ncopies = conf->geo.near_copies;
1618	}
1619
1620	rcu_read_lock();
1621	do {
1622		int n = conf->copies;
1623		int cnt = 0;
1624		int this = first;
1625		while (n--) {
1626			struct md_rdev *rdev;
1627			if (this != ignore &&
1628			    (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
1629			    test_bit(In_sync, &rdev->flags))
1630				cnt++;
1631			this = (this+1) % disks;
1632		}
1633		if (cnt == 0)
1634			goto out;
1635		first = (first + ncopies) % disks;
1636	} while (first != 0);
1637	has_enough = 1;
1638out:
1639	rcu_read_unlock();
1640	return has_enough;
1641}
1642
1643static int enough(struct r10conf *conf, int ignore)
1644{
1645	/* when calling 'enough', both 'prev' and 'geo' must
1646	 * be stable.
1647	 * This is ensured if ->reconfig_mutex or ->device_lock
1648	 * is held.
1649	 */
1650	return _enough(conf, 0, ignore) &&
1651		_enough(conf, 1, ignore);
1652}
1653
1654static void error(struct mddev *mddev, struct md_rdev *rdev)
1655{
1656	char b[BDEVNAME_SIZE];
1657	struct r10conf *conf = mddev->private;
1658	unsigned long flags;
1659
1660	/*
1661	 * If it is not operational, then we have already marked it as dead;
1662	 * else if it is the last working disk, ignore the error and let the
1663	 * next level up know;
1664	 * else mark the drive as failed.
1665	 */
1666	spin_lock_irqsave(&conf->device_lock, flags);
1667	if (test_bit(In_sync, &rdev->flags)
1668	    && !enough(conf, rdev->raid_disk)) {
1669		/*
1670		 * Don't fail the drive, just return an IO error.
1671		 */
1672		spin_unlock_irqrestore(&conf->device_lock, flags);
1673		return;
1674	}
1675	if (test_and_clear_bit(In_sync, &rdev->flags))
1676		mddev->degraded++;
1677	/*
1678	 * If recovery is running, make sure it aborts.
1679	 */
1680	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1681	set_bit(Blocked, &rdev->flags);
1682	set_bit(Faulty, &rdev->flags);
1683	set_bit(MD_CHANGE_DEVS, &mddev->flags);
1684	spin_unlock_irqrestore(&conf->device_lock, flags);
1685	printk(KERN_ALERT
1686	       "md/raid10:%s: Disk failure on %s, disabling device.\n"
1687	       "md/raid10:%s: Operation continuing on %d devices.\n",
1688	       mdname(mddev), bdevname(rdev->bdev, b),
1689	       mdname(mddev), conf->geo.raid_disks - mddev->degraded);
1690}
1691
1692static void print_conf(struct r10conf *conf)
1693{
1694	int i;
1695	struct raid10_info *tmp;
1696
1697	printk(KERN_DEBUG "RAID10 conf printout:\n");
1698	if (!conf) {
1699		printk(KERN_DEBUG "(!conf)\n");
1700		return;
1701	}
1702	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
1703		conf->geo.raid_disks);
1704
1705	for (i = 0; i < conf->geo.raid_disks; i++) {
1706		char b[BDEVNAME_SIZE];
1707		tmp = conf->mirrors + i;
1708		if (tmp->rdev)
1709			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
1710				i, !test_bit(In_sync, &tmp->rdev->flags),
1711			        !test_bit(Faulty, &tmp->rdev->flags),
1712				bdevname(tmp->rdev->bdev,b));
1713	}
1714}
1715
1716static void close_sync(struct r10conf *conf)
1717{
1718	wait_barrier(conf);
1719	allow_barrier(conf);
1720
1721	mempool_destroy(conf->r10buf_pool);
1722	conf->r10buf_pool = NULL;
1723}
1724
1725static int raid10_spare_active(struct mddev *mddev)
1726{
1727	int i;
1728	struct r10conf *conf = mddev->private;
1729	struct raid10_info *tmp;
1730	int count = 0;
1731	unsigned long flags;
1732
1733	/*
1734	 * Find all non-in_sync disks within the RAID10 configuration
1735	 * and mark them in_sync
1736	 */
1737	for (i = 0; i < conf->geo.raid_disks; i++) {
1738		tmp = conf->mirrors + i;
1739		if (tmp->replacement
1740		    && tmp->replacement->recovery_offset == MaxSector
1741		    && !test_bit(Faulty, &tmp->replacement->flags)
1742		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
1743			/* Replacement has just become active */
1744			if (!tmp->rdev
1745			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
1746				count++;
1747			if (tmp->rdev) {
1748				/* Replaced device not technically faulty,
1749				 * but we need to be sure it gets removed
1750				 * and never re-added.
1751				 */
1752				set_bit(Faulty, &tmp->rdev->flags);
1753				sysfs_notify_dirent_safe(
1754					tmp->rdev->sysfs_state);
1755			}
1756			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
1757		} else if (tmp->rdev
1758			   && tmp->rdev->recovery_offset == MaxSector
1759			   && !test_bit(Faulty, &tmp->rdev->flags)
1760			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1761			count++;
1762			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
1763		}
1764	}
1765	spin_lock_irqsave(&conf->device_lock, flags);
1766	mddev->degraded -= count;
1767	spin_unlock_irqrestore(&conf->device_lock, flags);
1768
1769	print_conf(conf);
1770	return count;
1771}
1772
1773static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1774{
1775	struct r10conf *conf = mddev->private;
1776	int err = -EEXIST;
1777	int mirror;
1778	int first = 0;
1779	int last = conf->geo.raid_disks - 1;
1780	struct request_queue *q = bdev_get_queue(rdev->bdev);
1781
1782	if (mddev->recovery_cp < MaxSector)
1783		/* only hot-add to in-sync arrays, as recovery is
1784		 * very different from resync
1785		 */
1786		return -EBUSY;
1787	if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
1788		return -EINVAL;
1789
1790	if (rdev->raid_disk >= 0)
1791		first = last = rdev->raid_disk;
1792
1793	if (q->merge_bvec_fn) {
1794		set_bit(Unmerged, &rdev->flags);
1795		mddev->merge_check_needed = 1;
1796	}
1797
1798	if (rdev->saved_raid_disk >= first &&
1799	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1800		mirror = rdev->saved_raid_disk;
1801	else
1802		mirror = first;
1803	for ( ; mirror <= last ; mirror++) {
1804		struct raid10_info *p = &conf->mirrors[mirror];
1805		if (p->recovery_disabled == mddev->recovery_disabled)
1806			continue;
1807		if (p->rdev) {
1808			if (!test_bit(WantReplacement, &p->rdev->flags) ||
1809			    p->replacement != NULL)
1810				continue;
1811			clear_bit(In_sync, &rdev->flags);
1812			set_bit(Replacement, &rdev->flags);
1813			rdev->raid_disk = mirror;
1814			err = 0;
1815			if (mddev->gendisk)
1816				disk_stack_limits(mddev->gendisk, rdev->bdev,
1817						  rdev->data_offset << 9);
1818			conf->fullsync = 1;
1819			rcu_assign_pointer(p->replacement, rdev);
1820			break;
1821		}
1822
1823		if (mddev->gendisk)
1824			disk_stack_limits(mddev->gendisk, rdev->bdev,
1825					  rdev->data_offset << 9);
1826
1827		p->head_position = 0;
1828		p->recovery_disabled = mddev->recovery_disabled - 1;
1829		rdev->raid_disk = mirror;
1830		err = 0;
1831		if (rdev->saved_raid_disk != mirror)
1832			conf->fullsync = 1;
1833		rcu_assign_pointer(p->rdev, rdev);
1834		break;
1835	}
1836	if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
1837		/* Some requests might not have seen this new
1838		 * merge_bvec_fn.  We must wait for them to complete
1839		 * before merging the device fully.
1840		 * First we make sure any code which has tested
1841		 * our function has submitted the request, then
1842		 * we wait for all outstanding requests to complete.
1843		 */
1844		synchronize_sched();
1845		freeze_array(conf, 0);
1846		unfreeze_array(conf);
1847		clear_bit(Unmerged, &rdev->flags);
1848	}
1849	md_integrity_add_rdev(rdev, mddev);
1850	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1851		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
1852
1853	print_conf(conf);
1854	return err;
1855}
1856
1857static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1858{
1859	struct r10conf *conf = mddev->private;
1860	int err = 0;
1861	int number = rdev->raid_disk;
1862	struct md_rdev **rdevp;
1863	struct raid10_info *p = conf->mirrors + number;
1864
1865	print_conf(conf);
1866	if (rdev == p->rdev)
1867		rdevp = &p->rdev;
1868	else if (rdev == p->replacement)
1869		rdevp = &p->replacement;
1870	else
1871		return 0;
1872
1873	if (test_bit(In_sync, &rdev->flags) ||
1874	    atomic_read(&rdev->nr_pending)) {
1875		err = -EBUSY;
1876		goto abort;
1877	}
	/* Only remove a non-faulty device if recovery
	 * is not possible.
	 */
1881	if (!test_bit(Faulty, &rdev->flags) &&
1882	    mddev->recovery_disabled != p->recovery_disabled &&
1883	    (!p->replacement || p->replacement == rdev) &&
1884	    number < conf->geo.raid_disks &&
1885	    enough(conf, -1)) {
1886		err = -EBUSY;
1887		goto abort;
1888	}
1889	*rdevp = NULL;
1890	synchronize_rcu();
1891	if (atomic_read(&rdev->nr_pending)) {
1892		/* lost the race, try later */
1893		err = -EBUSY;
1894		*rdevp = rdev;
1895		goto abort;
1896	} else if (p->replacement) {
1897		/* We must have just cleared 'rdev' */
1898		p->rdev = p->replacement;
1899		clear_bit(Replacement, &p->replacement->flags);
		smp_mb(); /* Make sure other CPUs can see both pointers
			   * as identical, but never see both as NULL --
			   * if they are careful.
			   */
1903		p->replacement = NULL;
1904		clear_bit(WantReplacement, &rdev->flags);
1905	} else
		/* We might have just removed the Replacement as faulty.
		 * Clear the flag just in case.
		 */
1909		clear_bit(WantReplacement, &rdev->flags);
1910
1911	err = md_integrity_register(mddev);
1912
1913abort:
1914
1915	print_conf(conf);
1916	return err;
1917}
1918
1919static void end_sync_read(struct bio *bio, int error)
1920{
1921	struct r10bio *r10_bio = bio->bi_private;
1922	struct r10conf *conf = r10_bio->mddev->private;
1923	int d;
1924
1925	if (bio == r10_bio->master_bio) {
1926		/* this is a reshape read */
1927		d = r10_bio->read_slot; /* really the read dev */
1928	} else
1929		d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
1930
1931	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1932		set_bit(R10BIO_Uptodate, &r10_bio->state);
1933	else
1934		/* The write handler will notice the lack of
1935		 * R10BIO_Uptodate and record any errors etc
1936		 */
1937		atomic_add(r10_bio->sectors,
1938			   &conf->mirrors[d].rdev->corrected_errors);
1939
1940	/* for reconstruct, we always reschedule after a read.
1941	 * for resync, only after all reads
1942	 */
1943	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1944	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1945	    atomic_dec_and_test(&r10_bio->remaining)) {
1946		/* we have read all the blocks,
1947		 * do the comparison in process context in raid10d
1948		 */
1949		reschedule_retry(r10_bio);
1950	}
1951}
1952
1953static void end_sync_request(struct r10bio *r10_bio)
1954{
1955	struct mddev *mddev = r10_bio->mddev;
1956
1957	while (atomic_dec_and_test(&r10_bio->remaining)) {
1958		if (r10_bio->master_bio == NULL) {
1959			/* the primary of several recovery bios */
1960			sector_t s = r10_bio->sectors;
1961			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1962			    test_bit(R10BIO_WriteError, &r10_bio->state))
1963				reschedule_retry(r10_bio);
1964			else
1965				put_buf(r10_bio);
1966			md_done_sync(mddev, s, 1);
1967			break;
1968		} else {
1969			struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
1970			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1971			    test_bit(R10BIO_WriteError, &r10_bio->state))
1972				reschedule_retry(r10_bio);
1973			else
1974				put_buf(r10_bio);
1975			r10_bio = r10_bio2;
1976		}
1977	}
1978}
1979
1980static void end_sync_write(struct bio *bio, int error)
1981{
1982	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1983	struct r10bio *r10_bio = bio->bi_private;
1984	struct mddev *mddev = r10_bio->mddev;
1985	struct r10conf *conf = mddev->private;
1986	int d;
1987	sector_t first_bad;
1988	int bad_sectors;
1989	int slot;
1990	int repl;
1991	struct md_rdev *rdev = NULL;
1992
1993	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1994	if (repl)
1995		rdev = conf->mirrors[d].replacement;
1996	else
1997		rdev = conf->mirrors[d].rdev;
1998
1999	if (!uptodate) {
2000		if (repl)
2001			md_error(mddev, rdev);
2002		else {
2003			set_bit(WriteErrorSeen, &rdev->flags);
2004			if (!test_and_set_bit(WantReplacement, &rdev->flags))
2005				set_bit(MD_RECOVERY_NEEDED,
2006					&rdev->mddev->recovery);
2007			set_bit(R10BIO_WriteError, &r10_bio->state);
2008		}
2009	} else if (is_badblock(rdev,
2010			     r10_bio->devs[slot].addr,
2011			     r10_bio->sectors,
2012			     &first_bad, &bad_sectors))
2013		set_bit(R10BIO_MadeGood, &r10_bio->state);
2014
2015	rdev_dec_pending(rdev, mddev);
2016
2017	end_sync_request(r10_bio);
2018}
2019
2020/*
 * Note: sync and recover are handled very differently for raid10.
 * This code is for resync.
 * For resync, we read through virtual addresses and read all blocks.
 * If there is any error, we schedule a write.  The lowest numbered
 * drive is authoritative.
 * However, requests come in for physical addresses, so we need to map.
 * For every physical address there are raid_disks/copies virtual addresses,
 * which is always at least one, but is not necessarily an integer.
 * This means that a physical address can span multiple chunks, so we may
 * have to submit multiple IO requests for a single sync request.
2031 */
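/*
 * Illustrative example (hypothetical geometry, not taken from any
 * particular array): with raid_disks = 3 and copies = 2 there are
 * 3/2 = 1.5 virtual addresses per physical address, so one physical
 * chunk can map onto parts of two virtual chunks and needs two
 * separate IO submissions.
 */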
2032/*
2033 * We check if all blocks are in-sync and only write to blocks that
2034 * aren't in sync
2035 */
2036static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2037{
2038	struct r10conf *conf = mddev->private;
2039	int i, first;
2040	struct bio *tbio, *fbio;
2041	int vcnt;
2042
2043	atomic_set(&r10_bio->remaining, 1);
2044
2045	/* find the first device with a block */
	for (i = 0; i < conf->copies; i++)
2047		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
2048			break;
2049
2050	if (i == conf->copies)
2051		goto done;
2052
2053	first = i;
2054	fbio = r10_bio->devs[i].bio;
2055
2056	vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
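	/* For example (assuming 4K pages, so 8 sectors per page):
	 * r10_bio->sectors = 128 gives vcnt = (128 + 7) >> 3 = 16
	 * pages to compare per copy.
	 */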
2057	/* now find blocks with errors */
	for (i = 0; i < conf->copies; i++) {
2059		int  j, d;
2060
2061		tbio = r10_bio->devs[i].bio;
2062
2063		if (tbio->bi_end_io != end_sync_read)
2064			continue;
2065		if (i == first)
2066			continue;
2067		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
			/* We know that the bi_io_vec layout is the same for
			 * both 'first' and 'i', so we just compare them.
			 * All vec entries are PAGE_SIZE.
			 */
2072			int sectors = r10_bio->sectors;
2073			for (j = 0; j < vcnt; j++) {
2074				int len = PAGE_SIZE;
2075				if (sectors < (len / 512))
2076					len = sectors * 512;
2077				if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
2078					   page_address(tbio->bi_io_vec[j].bv_page),
2079					   len))
2080					break;
2081				sectors -= len/512;
2082			}
2083			if (j == vcnt)
2084				continue;
2085			atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
2086			if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2087				/* Don't fix anything. */
2088				continue;
2089		}
		/* Ok, we need to write this bio, either to correct an
		 * inconsistency or to correct an unreadable block.
		 * First we need to fix up bv_offset, bv_len and
		 * bi_vecs, as the read request might have corrupted these.
		 */
2095		bio_reset(tbio);
2096
2097		tbio->bi_vcnt = vcnt;
2098		tbio->bi_iter.bi_size = r10_bio->sectors << 9;
2099		tbio->bi_rw = WRITE;
2100		tbio->bi_private = r10_bio;
2101		tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
2102
		for (j = 0; j < vcnt; j++) {
2104			tbio->bi_io_vec[j].bv_offset = 0;
2105			tbio->bi_io_vec[j].bv_len = PAGE_SIZE;
2106
2107			memcpy(page_address(tbio->bi_io_vec[j].bv_page),
2108			       page_address(fbio->bi_io_vec[j].bv_page),
2109			       PAGE_SIZE);
2110		}
2111		tbio->bi_end_io = end_sync_write;
2112
2113		d = r10_bio->devs[i].devnum;
2114		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2115		atomic_inc(&r10_bio->remaining);
2116		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2117
2118		tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2119		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
2120		generic_make_request(tbio);
2121	}
2122
2123	/* Now write out to any replacement devices
2124	 * that are active
2125	 */
2126	for (i = 0; i < conf->copies; i++) {
2127		int j, d;
2128
2129		tbio = r10_bio->devs[i].repl_bio;
2130		if (!tbio || !tbio->bi_end_io)
2131			continue;
2132		if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2133		    && r10_bio->devs[i].bio != fbio)
2134			for (j = 0; j < vcnt; j++)
2135				memcpy(page_address(tbio->bi_io_vec[j].bv_page),
2136				       page_address(fbio->bi_io_vec[j].bv_page),
2137				       PAGE_SIZE);
2138		d = r10_bio->devs[i].devnum;
2139		atomic_inc(&r10_bio->remaining);
2140		md_sync_acct(conf->mirrors[d].replacement->bdev,
2141			     bio_sectors(tbio));
2142		generic_make_request(tbio);
2143	}
2144
2145done:
2146	if (atomic_dec_and_test(&r10_bio->remaining)) {
2147		md_done_sync(mddev, r10_bio->sectors, 1);
2148		put_buf(r10_bio);
2149	}
2150}
2151
2152/*
2153 * Now for the recovery code.
2154 * Recovery happens across physical sectors.
 * We recover all non-in_sync drives by finding the virtual address of
 * each, and then choosing a working drive that also has that virt address.
 * There is a separate r10_bio for each non-in_sync drive.
 * Only the first two slots are in use: the first for reading,
 * the second for writing.
2160 *
2161 */
2162static void fix_recovery_read_error(struct r10bio *r10_bio)
2163{
2164	/* We got a read error during recovery.
2165	 * We repeat the read in smaller page-sized sections.
2166	 * If a read succeeds, write it to the new device or record
2167	 * a bad block if we cannot.
2168	 * If a read fails, record a bad block on both old and
2169	 * new devices.
2170	 */
2171	struct mddev *mddev = r10_bio->mddev;
2172	struct r10conf *conf = mddev->private;
2173	struct bio *bio = r10_bio->devs[0].bio;
2174	sector_t sect = 0;
2175	int sectors = r10_bio->sectors;
2176	int idx = 0;
2177	int dr = r10_bio->devs[0].devnum;
2178	int dw = r10_bio->devs[1].devnum;
2179
2180	while (sectors) {
2181		int s = sectors;
2182		struct md_rdev *rdev;
2183		sector_t addr;
2184		int ok;
2185
2186		if (s > (PAGE_SIZE>>9))
2187			s = PAGE_SIZE >> 9;
2188
2189		rdev = conf->mirrors[dr].rdev;
		addr = r10_bio->devs[0].addr + sect;
2191		ok = sync_page_io(rdev,
2192				  addr,
2193				  s << 9,
2194				  bio->bi_io_vec[idx].bv_page,
2195				  READ, false);
2196		if (ok) {
2197			rdev = conf->mirrors[dw].rdev;
2198			addr = r10_bio->devs[1].addr + sect;
2199			ok = sync_page_io(rdev,
2200					  addr,
2201					  s << 9,
2202					  bio->bi_io_vec[idx].bv_page,
2203					  WRITE, false);
2204			if (!ok) {
2205				set_bit(WriteErrorSeen, &rdev->flags);
2206				if (!test_and_set_bit(WantReplacement,
2207						      &rdev->flags))
2208					set_bit(MD_RECOVERY_NEEDED,
2209						&rdev->mddev->recovery);
2210			}
2211		}
2212		if (!ok) {
2213			/* We don't worry if we cannot set a bad block -
2214			 * it really is bad so there is no loss in not
2215			 * recording it yet
2216			 */
2217			rdev_set_badblocks(rdev, addr, s, 0);
2218
2219			if (rdev != conf->mirrors[dw].rdev) {
2220				/* need bad block on destination too */
2221				struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2222				addr = r10_bio->devs[1].addr + sect;
2223				ok = rdev_set_badblocks(rdev2, addr, s, 0);
2224				if (!ok) {
2225					/* just abort the recovery */
2226					printk(KERN_NOTICE
2227					       "md/raid10:%s: recovery aborted"
2228					       " due to read error\n",
2229					       mdname(mddev));
2230
2231					conf->mirrors[dw].recovery_disabled
2232						= mddev->recovery_disabled;
2233					set_bit(MD_RECOVERY_INTR,
2234						&mddev->recovery);
2235					break;
2236				}
2237			}
2238		}
2239
2240		sectors -= s;
2241		sect += s;
2242		idx++;
2243	}
2244}
2245
2246static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2247{
2248	struct r10conf *conf = mddev->private;
2249	int d;
2250	struct bio *wbio, *wbio2;
2251
2252	if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2253		fix_recovery_read_error(r10_bio);
2254		end_sync_request(r10_bio);
2255		return;
2256	}
2257
2258	/*
2259	 * share the pages with the first bio
2260	 * and submit the write request
2261	 */
2262	d = r10_bio->devs[1].devnum;
2263	wbio = r10_bio->devs[1].bio;
2264	wbio2 = r10_bio->devs[1].repl_bio;
2265	/* Need to test wbio2->bi_end_io before we call
2266	 * generic_make_request as if the former is NULL,
2267	 * the latter is free to free wbio2.
2268	 */
2269	if (wbio2 && !wbio2->bi_end_io)
2270		wbio2 = NULL;
2271	if (wbio->bi_end_io) {
2272		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2273		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2274		generic_make_request(wbio);
2275	}
2276	if (wbio2) {
2277		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2278		md_sync_acct(conf->mirrors[d].replacement->bdev,
2279			     bio_sectors(wbio2));
2280		generic_make_request(wbio2);
2281	}
2282}
2283
2284/*
2285 * Used by fix_read_error() to decay the per rdev read_errors.
2286 * We halve the read error count for every hour that has elapsed
2287 * since the last recorded read error.
2288 *
2289 */
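/*
 * Worked example (numbers are invented, purely illustrative): if
 * read_errors is 40 and three full hours have elapsed since the last
 * recorded error, the count decays to 40 >> 3 = 5.  Once 32 or more
 * hours have elapsed (the bit width of the counter), the count is
 * simply reset to 0.
 */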
2290static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
2291{
2292	struct timespec cur_time_mon;
2293	unsigned long hours_since_last;
2294	unsigned int read_errors = atomic_read(&rdev->read_errors);
2295
2296	ktime_get_ts(&cur_time_mon);
2297
2298	if (rdev->last_read_error.tv_sec == 0 &&
2299	    rdev->last_read_error.tv_nsec == 0) {
2300		/* first time we've seen a read error */
2301		rdev->last_read_error = cur_time_mon;
2302		return;
2303	}
2304
2305	hours_since_last = (cur_time_mon.tv_sec -
2306			    rdev->last_read_error.tv_sec) / 3600;
2307
2308	rdev->last_read_error = cur_time_mon;
2309
2310	/*
2311	 * if hours_since_last is > the number of bits in read_errors
2312	 * just set read errors to 0. We do this to avoid
2313	 * overflowing the shift of read_errors by hours_since_last.
2314	 */
2315	if (hours_since_last >= 8 * sizeof(read_errors))
2316		atomic_set(&rdev->read_errors, 0);
2317	else
2318		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
2319}
2320
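/*
 * Summary of r10_sync_page_io() return values, derived from the code
 * below:
 *	-1  the range overlaps a known bad block (for a write, only
 *	    when WriteErrorSeen is already set), so no IO is attempted
 *	 1  the synchronous IO succeeded
 *	 0  the IO failed; a bad block was recorded, or the device was
 *	    failed if recording was not possible
 */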
2321static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2322			    int sectors, struct page *page, int rw)
2323{
2324	sector_t first_bad;
2325	int bad_sectors;
2326
2327	if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2328	    && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
2329		return -1;
2330	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
2331		/* success */
2332		return 1;
2333	if (rw == WRITE) {
2334		set_bit(WriteErrorSeen, &rdev->flags);
2335		if (!test_and_set_bit(WantReplacement, &rdev->flags))
2336			set_bit(MD_RECOVERY_NEEDED,
2337				&rdev->mddev->recovery);
2338	}
2339	/* need to record an error - either for the block or the device */
2340	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2341		md_error(rdev->mddev, rdev);
2342	return 0;
2343}
2344
2345/*
2346 * This is a kernel thread which:
2347 *
2348 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems are encountered.
2350 *	3.	Performs writes following reads for array synchronising.
2351 */
2352
2353static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2354{
2355	int sect = 0; /* Offset from r10_bio->sector */
2356	int sectors = r10_bio->sectors;
	struct md_rdev *rdev;
2358	int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
2359	int d = r10_bio->devs[r10_bio->read_slot].devnum;
2360
	/* We still own a reference to this rdev, so it cannot
	 * have been cleared recently.
	 */
2364	rdev = conf->mirrors[d].rdev;
2365
2366	if (test_bit(Faulty, &rdev->flags))
		/* The drive has already been failed; just ignore any
		   further fix_read_error() attempts */
2369		return;
2370
2371	check_decay_read_errors(mddev, rdev);
2372	atomic_inc(&rdev->read_errors);
2373	if (atomic_read(&rdev->read_errors) > max_read_errors) {
2374		char b[BDEVNAME_SIZE];
2375		bdevname(rdev->bdev, b);
2376
2377		printk(KERN_NOTICE
2378		       "md/raid10:%s: %s: Raid device exceeded "
2379		       "read_error threshold [cur %d:max %d]\n",
2380		       mdname(mddev), b,
2381		       atomic_read(&rdev->read_errors), max_read_errors);
2382		printk(KERN_NOTICE
2383		       "md/raid10:%s: %s: Failing raid device\n",
2384		       mdname(mddev), b);
2385		md_error(mddev, conf->mirrors[d].rdev);
2386		r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
2387		return;
2388	}
2389
	while (sectors) {
2391		int s = sectors;
2392		int sl = r10_bio->read_slot;
2393		int success = 0;
2394		int start;
2395
2396		if (s > (PAGE_SIZE>>9))
2397			s = PAGE_SIZE >> 9;
2398
2399		rcu_read_lock();
2400		do {
2401			sector_t first_bad;
2402			int bad_sectors;
2403
2404			d = r10_bio->devs[sl].devnum;
2405			rdev = rcu_dereference(conf->mirrors[d].rdev);
2406			if (rdev &&
2407			    !test_bit(Unmerged, &rdev->flags) &&
2408			    test_bit(In_sync, &rdev->flags) &&
2409			    is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2410					&first_bad, &bad_sectors) == 0) {
2411				atomic_inc(&rdev->nr_pending);
2412				rcu_read_unlock();
2413				success = sync_page_io(rdev,
2414						       r10_bio->devs[sl].addr +
2415						       sect,
2416						       s<<9,
2417						       conf->tmppage, READ, false);
2418				rdev_dec_pending(rdev, mddev);
2419				rcu_read_lock();
2420				if (success)
2421					break;
2422			}
2423			sl++;
2424			if (sl == conf->copies)
2425				sl = 0;
2426		} while (!success && sl != r10_bio->read_slot);
2427		rcu_read_unlock();
2428
2429		if (!success) {
2430			/* Cannot read from anywhere, just mark the block
2431			 * as bad on the first device to discourage future
2432			 * reads.
2433			 */
2434			int dn = r10_bio->devs[r10_bio->read_slot].devnum;
2435			rdev = conf->mirrors[dn].rdev;
2436
2437			if (!rdev_set_badblocks(
2438				    rdev,
2439				    r10_bio->devs[r10_bio->read_slot].addr
2440				    + sect,
2441				    s, 0)) {
2442				md_error(mddev, rdev);
2443				r10_bio->devs[r10_bio->read_slot].bio
2444					= IO_BLOCKED;
2445			}
2446			break;
2447		}
2448
2449		start = sl;
2450		/* write it back and re-read */
2451		rcu_read_lock();
2452		while (sl != r10_bio->read_slot) {
2453			char b[BDEVNAME_SIZE];
2454
			if (sl == 0)
2456				sl = conf->copies;
2457			sl--;
2458			d = r10_bio->devs[sl].devnum;
2459			rdev = rcu_dereference(conf->mirrors[d].rdev);
2460			if (!rdev ||
2461			    test_bit(Unmerged, &rdev->flags) ||
2462			    !test_bit(In_sync, &rdev->flags))
2463				continue;
2464
2465			atomic_inc(&rdev->nr_pending);
2466			rcu_read_unlock();
2467			if (r10_sync_page_io(rdev,
2468					     r10_bio->devs[sl].addr +
2469					     sect,
2470					     s, conf->tmppage, WRITE)
2471			    == 0) {
2472				/* Well, this device is dead */
2473				printk(KERN_NOTICE
2474				       "md/raid10:%s: read correction "
2475				       "write failed"
2476				       " (%d sectors at %llu on %s)\n",
2477				       mdname(mddev), s,
2478				       (unsigned long long)(
2479					       sect +
2480					       choose_data_offset(r10_bio,
2481								  rdev)),
2482				       bdevname(rdev->bdev, b));
2483				printk(KERN_NOTICE "md/raid10:%s: %s: failing "
2484				       "drive\n",
2485				       mdname(mddev),
2486				       bdevname(rdev->bdev, b));
2487			}
2488			rdev_dec_pending(rdev, mddev);
2489			rcu_read_lock();
2490		}
2491		sl = start;
2492		while (sl != r10_bio->read_slot) {
2493			char b[BDEVNAME_SIZE];
2494
			if (sl == 0)
2496				sl = conf->copies;
2497			sl--;
2498			d = r10_bio->devs[sl].devnum;
2499			rdev = rcu_dereference(conf->mirrors[d].rdev);
2500			if (!rdev ||
2501			    !test_bit(In_sync, &rdev->flags))
2502				continue;
2503
2504			atomic_inc(&rdev->nr_pending);
2505			rcu_read_unlock();
2506			switch (r10_sync_page_io(rdev,
2507					     r10_bio->devs[sl].addr +
2508					     sect,
2509					     s, conf->tmppage,
2510						 READ)) {
2511			case 0:
2512				/* Well, this device is dead */
2513				printk(KERN_NOTICE
2514				       "md/raid10:%s: unable to read back "
2515				       "corrected sectors"
2516				       " (%d sectors at %llu on %s)\n",
2517				       mdname(mddev), s,
2518				       (unsigned long long)(
2519					       sect +
2520					       choose_data_offset(r10_bio, rdev)),
2521				       bdevname(rdev->bdev, b));
2522				printk(KERN_NOTICE "md/raid10:%s: %s: failing "
2523				       "drive\n",
2524				       mdname(mddev),
2525				       bdevname(rdev->bdev, b));
2526				break;
2527			case 1:
2528				printk(KERN_INFO
2529				       "md/raid10:%s: read error corrected"
2530				       " (%d sectors at %llu on %s)\n",
2531				       mdname(mddev), s,
2532				       (unsigned long long)(
2533					       sect +
2534					       choose_data_offset(r10_bio, rdev)),
2535				       bdevname(rdev->bdev, b));
2536				atomic_add(s, &rdev->corrected_errors);
2537			}
2538
2539			rdev_dec_pending(rdev, mddev);
2540			rcu_read_lock();
2541		}
2542		rcu_read_unlock();
2543
2544		sectors -= s;
2545		sect += s;
2546	}
2547}
2548
2549static int narrow_write_error(struct r10bio *r10_bio, int i)
2550{
2551	struct bio *bio = r10_bio->master_bio;
2552	struct mddev *mddev = r10_bio->mddev;
2553	struct r10conf *conf = mddev->private;
2554	struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2555	/* bio has the data to be written to slot 'i' where
2556	 * we just recently had a write error.
2557	 * We repeatedly clone the bio and trim down to one block,
2558	 * then try the write.  Where the write fails we record
2559	 * a bad block.
2560	 * It is conceivable that the bio doesn't exactly align with
2561	 * blocks.  We must handle this.
2562	 *
2563	 * We currently own a reference to the rdev.
2564	 */
2565
2566	int block_sectors;
2567	sector_t sector;
2568	int sectors;
2569	int sect_to_write = r10_bio->sectors;
2570	int ok = 1;
2571
2572	if (rdev->badblocks.shift < 0)
2573		return 0;
2574
2575	block_sectors = roundup(1 << rdev->badblocks.shift,
2576				bdev_logical_block_size(rdev->bdev) >> 9);
2577	sector = r10_bio->sector;
2578	sectors = ((r10_bio->sector + block_sectors)
2579		   & ~(sector_t)(block_sectors - 1))
2580		- sector;
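	/* Worked example (invented numbers): if block_sectors is 8 and
	 * r10_bio->sector is 1003, the first pass writes
	 * ((1003 + 8) & ~7) - 1003 = 5 sectors to reach the next block
	 * boundary; every later pass writes block_sectors at a time.
	 */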
2581
2582	while (sect_to_write) {
2583		struct bio *wbio;
2584		if (sectors > sect_to_write)
2585			sectors = sect_to_write;
2586		/* Write at 'sector' for 'sectors' */
2587		wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
2588		bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
2589		wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
2590				   choose_data_offset(r10_bio, rdev) +
2591				   (sector - r10_bio->sector));
2592		wbio->bi_bdev = rdev->bdev;
2593		if (submit_bio_wait(WRITE, wbio) < 0)
2594			/* Failure! */
2595			ok = rdev_set_badblocks(rdev, sector,
2596						sectors, 0)
2597				&& ok;
2598
2599		bio_put(wbio);
2600		sect_to_write -= sectors;
2601		sector += sectors;
2602		sectors = block_sectors;
2603	}
2604	return ok;
2605}
2606
2607static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2608{
2609	int slot = r10_bio->read_slot;
2610	struct bio *bio;
2611	struct r10conf *conf = mddev->private;
2612	struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2613	char b[BDEVNAME_SIZE];
2614	unsigned long do_sync;
2615	int max_sectors;
2616
	/* We got a read error. Maybe the drive is bad.  Maybe just
	 * the block and we can fix it.
	 * We freeze all other IO, and try reading the block from
	 * other devices.  When we find one, we re-write the block
	 * and check whether that fixes the read error.
	 * This is all done synchronously while the array is
	 * frozen.
	 */
2625	bio = r10_bio->devs[slot].bio;
2626	bdevname(bio->bi_bdev, b);
2627	bio_put(bio);
2628	r10_bio->devs[slot].bio = NULL;
2629
2630	if (mddev->ro == 0) {
2631		freeze_array(conf, 1);
2632		fix_read_error(conf, mddev, r10_bio);
2633		unfreeze_array(conf);
2634	} else
2635		r10_bio->devs[slot].bio = IO_BLOCKED;
2636
2637	rdev_dec_pending(rdev, mddev);
2638
2639read_more:
2640	rdev = read_balance(conf, r10_bio, &max_sectors);
2641	if (rdev == NULL) {
2642		printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
2643		       " read error for block %llu\n",
2644		       mdname(mddev), b,
2645		       (unsigned long long)r10_bio->sector);
2646		raid_end_bio_io(r10_bio);
2647		return;
2648	}
2649
2650	do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
2651	slot = r10_bio->read_slot;
2652	printk_ratelimited(
2653		KERN_ERR
2654		"md/raid10:%s: %s: redirecting "
2655		"sector %llu to another mirror\n",
2656		mdname(mddev),
2657		bdevname(rdev->bdev, b),
2658		(unsigned long long)r10_bio->sector);
2659	bio = bio_clone_mddev(r10_bio->master_bio,
2660			      GFP_NOIO, mddev);
2661	bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
2662	r10_bio->devs[slot].bio = bio;
2663	r10_bio->devs[slot].rdev = rdev;
2664	bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
2665		+ choose_data_offset(r10_bio, rdev);
2666	bio->bi_bdev = rdev->bdev;
2667	bio->bi_rw = READ | do_sync;
2668	bio->bi_private = r10_bio;
2669	bio->bi_end_io = raid10_end_read_request;
2670	if (max_sectors < r10_bio->sectors) {
2671		/* Drat - have to split this up more */
2672		struct bio *mbio = r10_bio->master_bio;
2673		int sectors_handled =
2674			r10_bio->sector + max_sectors
2675			- mbio->bi_iter.bi_sector;
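		/* e.g. (made-up values): master bio at sector 1000 for
		 * 64 sectors, this r10_bio at sector 1016 reading 16
		 * sectors: sectors_handled = 1016 + 16 - 1000 = 32, so
		 * the follow-on r10_bio below starts at sector 1032
		 * and covers the remaining 32 sectors.
		 */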
2676		r10_bio->sectors = max_sectors;
2677		spin_lock_irq(&conf->device_lock);
2678		if (mbio->bi_phys_segments == 0)
2679			mbio->bi_phys_segments = 2;
2680		else
2681			mbio->bi_phys_segments++;
2682		spin_unlock_irq(&conf->device_lock);
2683		generic_make_request(bio);
2684
2685		r10_bio = mempool_alloc(conf->r10bio_pool,
2686					GFP_NOIO);
2687		r10_bio->master_bio = mbio;
2688		r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
2689		r10_bio->state = 0;
2690		set_bit(R10BIO_ReadError,
2691			&r10_bio->state);
2692		r10_bio->mddev = mddev;
2693		r10_bio->sector = mbio->bi_iter.bi_sector
2694			+ sectors_handled;
2695
2696		goto read_more;
2697	} else
2698		generic_make_request(bio);
2699}
2700
2701static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2702{
2703	/* Some sort of write request has finished and it
2704	 * succeeded in writing where we thought there was a
2705	 * bad block.  So forget the bad block.
	 * Or possibly it failed, and we need to record
	 * a bad block.
2708	 */
2709	int m;
2710	struct md_rdev *rdev;
2711
2712	if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2713	    test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2714		for (m = 0; m < conf->copies; m++) {
2715			int dev = r10_bio->devs[m].devnum;
2716			rdev = conf->mirrors[dev].rdev;
2717			if (r10_bio->devs[m].bio == NULL)
2718				continue;
2719			if (test_bit(BIO_UPTODATE,
2720				     &r10_bio->devs[m].bio->bi_flags)) {
2721				rdev_clear_badblocks(
2722					rdev,
2723					r10_bio->devs[m].addr,
2724					r10_bio->sectors, 0);
2725			} else {
2726				if (!rdev_set_badblocks(
2727					    rdev,
2728					    r10_bio->devs[m].addr,
2729					    r10_bio->sectors, 0))
2730					md_error(conf->mddev, rdev);
2731			}
2732			rdev = conf->mirrors[dev].replacement;
2733			if (r10_bio->devs[m].repl_bio == NULL)
2734				continue;
2735			if (test_bit(BIO_UPTODATE,
2736				     &r10_bio->devs[m].repl_bio->bi_flags)) {
2737				rdev_clear_badblocks(
2738					rdev,
2739					r10_bio->devs[m].addr,
2740					r10_bio->sectors, 0);
2741			} else {
2742				if (!rdev_set_badblocks(
2743					    rdev,
2744					    r10_bio->devs[m].addr,
2745					    r10_bio->sectors, 0))
2746					md_error(conf->mddev, rdev);
2747			}
2748		}
2749		put_buf(r10_bio);
2750	} else {
2751		for (m = 0; m < conf->copies; m++) {
2752			int dev = r10_bio->devs[m].devnum;
2753			struct bio *bio = r10_bio->devs[m].bio;
2754			rdev = conf->mirrors[dev].rdev;
2755			if (bio == IO_MADE_GOOD) {
2756				rdev_clear_badblocks(
2757					rdev,
2758					r10_bio->devs[m].addr,
2759					r10_bio->sectors, 0);
2760				rdev_dec_pending(rdev, conf->mddev);
2761			} else if (bio != NULL &&
2762				   !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
2763				if (!narrow_write_error(r10_bio, m)) {
2764					md_error(conf->mddev, rdev);
2765					set_bit(R10BIO_Degraded,
2766						&r10_bio->state);
2767				}
2768				rdev_dec_pending(rdev, conf->mddev);
2769			}
2770			bio = r10_bio->devs[m].repl_bio;
2771			rdev = conf->mirrors[dev].replacement;
2772			if (rdev && bio == IO_MADE_GOOD) {
2773				rdev_clear_badblocks(
2774					rdev,
2775					r10_bio->devs[m].addr,
2776					r10_bio->sectors, 0);
2777				rdev_dec_pending(rdev, conf->mddev);
2778			}
2779		}
2780		if (test_bit(R10BIO_WriteError,
2781			     &r10_bio->state))
2782			close_write(r10_bio);
2783		raid_end_bio_io(r10_bio);
2784	}
2785}
2786
2787static void raid10d(struct md_thread *thread)
2788{
2789	struct mddev *mddev = thread->mddev;
2790	struct r10bio *r10_bio;
2791	unsigned long flags;
2792	struct r10conf *conf = mddev->private;
2793	struct list_head *head = &conf->retry_list;
2794	struct blk_plug plug;
2795
2796	md_check_recovery(mddev);
2797
2798	blk_start_plug(&plug);
2799	for (;;) {
2800
2801		flush_pending_writes(conf);
2802
2803		spin_lock_irqsave(&conf->device_lock, flags);
2804		if (list_empty(head)) {
2805			spin_unlock_irqrestore(&conf->device_lock, flags);
2806			break;
2807		}
2808		r10_bio = list_entry(head->prev, struct r10bio, retry_list);
2809		list_del(head->prev);
2810		conf->nr_queued--;
2811		spin_unlock_irqrestore(&conf->device_lock, flags);
2812
2813		mddev = r10_bio->mddev;
2814		conf = mddev->private;
2815		if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2816		    test_bit(R10BIO_WriteError, &r10_bio->state))
2817			handle_write_completed(conf, r10_bio);
2818		else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
2819			reshape_request_write(mddev, r10_bio);
2820		else if (test_bit(R10BIO_IsSync, &r10_bio->state))
2821			sync_request_write(mddev, r10_bio);
2822		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
2823			recovery_request_write(mddev, r10_bio);
2824		else if (test_bit(R10BIO_ReadError, &r10_bio->state))
2825			handle_read_error(mddev, r10_bio);
2826		else {
2827			/* just a partial read to be scheduled from a
2828			 * separate context
2829			 */
2830			int slot = r10_bio->read_slot;
2831			generic_make_request(r10_bio->devs[slot].bio);
2832		}
2833
2834		cond_resched();
2835		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2836			md_check_recovery(mddev);
2837	}
2838	blk_finish_plug(&plug);
2839}
2840
2841static int init_resync(struct r10conf *conf)
2842{
2843	int buffs;
2844	int i;
2845
2846	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2847	BUG_ON(conf->r10buf_pool);
2848	conf->have_replacement = 0;
2849	for (i = 0; i < conf->geo.raid_disks; i++)
2850		if (conf->mirrors[i].replacement)
2851			conf->have_replacement = 1;
2852	conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
2853	if (!conf->r10buf_pool)
2854		return -ENOMEM;
2855	conf->next_resync = 0;
2856	return 0;
2857}
2858
2859/*
2860 * perform a "sync" on one "block"
2861 *
2862 * We need to make sure that no normal I/O request - particularly write
2863 * requests - conflict with active sync requests.
2864 *
2865 * This is achieved by tracking pending requests and a 'barrier' concept
2866 * that can be installed to exclude normal IO requests.
2867 *
2868 * Resync and recovery are handled very differently.
2869 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
2870 *
2871 * For resync, we iterate over virtual addresses, read all copies,
2872 * and update if there are differences.  If only one copy is live,
2873 * skip it.
2874 * For recovery, we iterate over physical addresses, read a good
2875 * value for each non-in_sync drive, and over-write.
2876 *
2877 * So, for recovery we may have several outstanding complex requests for a
2878 * given address, one for each out-of-sync device.  We model this by allocating
2879 * a number of r10_bio structures, one for each out-of-sync device.
2880 * As we setup these structures, we collect all bio's together into a list
2881 * which we then process collectively to add pages, and then process again
2882 * to pass to generic_make_request.
2883 *
2884 * The r10_bio structures are linked using a borrowed master_bio pointer.
2885 * This link is counted in ->remaining.  When the r10_bio that points to NULL
2886 * has its remaining count decremented to 0, the whole complex operation
2887 * is complete.
2888 *
2889 */
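/*
 * Sketch of the recovery case with two out-of-sync devices
 * (hypothetical, for orientation only): r10_bio B has master_bio
 * pointing at r10_bio A, whose master_bio is NULL, and the link from
 * B holds one count in A's ->remaining.  Dropping the last reference
 * on B then drops one count on A; only when A's count reaches zero is
 * the whole operation for this address complete.
 */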
2890
2891static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
2892			     int *skipped)
2893{
2894	struct r10conf *conf = mddev->private;
2895	struct r10bio *r10_bio;
2896	struct bio *biolist = NULL, *bio;
2897	sector_t max_sector, nr_sectors;
2898	int i;
2899	int max_sync;
2900	sector_t sync_blocks;
2901	sector_t sectors_skipped = 0;
2902	int chunks_skipped = 0;
2903	sector_t chunk_mask = conf->geo.chunk_mask;
2904
2905	if (!conf->r10buf_pool)
2906		if (init_resync(conf))
2907			return 0;
2908
2909	/*
2910	 * Allow skipping a full rebuild for incremental assembly
2911	 * of a clean array, like RAID1 does.
2912	 */
2913	if (mddev->bitmap == NULL &&
2914	    mddev->recovery_cp == MaxSector &&
2915	    mddev->reshape_position == MaxSector &&
2916	    !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2917	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2918	    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
2919	    conf->fullsync == 0) {
2920		*skipped = 1;
2921		return mddev->dev_sectors - sector_nr;
2922	}
2923
2924 skipped:
2925	max_sector = mddev->dev_sectors;
2926	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
2927	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2928		max_sector = mddev->resync_max_sectors;
2929	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunks (there can
		 * be several when recovering multiple devices),
		 * as we may have started syncing them but not finished.
		 * We can find the current address in
		 * mddev->curr_resync, but for recovery
		 * we need to convert that to several
		 * virtual addresses.
		 */
2939		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
2940			end_reshape(conf);
2941			close_sync(conf);
2942			return 0;
2943		}
2944
2945		if (mddev->curr_resync < max_sector) { /* aborted */
2946			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2947				bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2948						&sync_blocks, 1);
2949			else for (i = 0; i < conf->geo.raid_disks; i++) {
2950				sector_t sect =
2951					raid10_find_virt(conf, mddev->curr_resync, i);
2952				bitmap_end_sync(mddev->bitmap, sect,
2953						&sync_blocks, 1);
2954			}
2955		} else {
2956			/* completed sync */
2957			if ((!mddev->bitmap || conf->fullsync)
2958			    && conf->have_replacement
2959			    && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2960				/* Completed a full sync so the replacements
2961				 * are now fully recovered.
2962				 */
2963				for (i = 0; i < conf->geo.raid_disks; i++)
2964					if (conf->mirrors[i].replacement)
2965						conf->mirrors[i].replacement
2966							->recovery_offset
2967							= MaxSector;
2968			}
2969			conf->fullsync = 0;
2970		}
2971		bitmap_close_sync(mddev->bitmap);
2972		close_sync(conf);
2973		*skipped = 1;
2974		return sectors_skipped;
2975	}
2976
2977	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2978		return reshape_request(mddev, sector_nr, skipped);
2979
2980	if (chunks_skipped >= conf->geo.raid_disks) {
		/* if there has been nothing to do on any drive,
		 * then there is nothing to do at all.
		 */
2984		*skipped = 1;
2985		return (max_sector - sector_nr) + sectors_skipped;
2986	}
2987
2988	if (max_sector > mddev->resync_max)
2989		max_sector = mddev->resync_max; /* Don't do IO beyond here */
2990
2991	/* make sure whole request will fit in a chunk - if chunks
2992	 * are meaningful
2993	 */
2994	if (conf->geo.near_copies < conf->geo.raid_disks &&
2995	    max_sector > (sector_nr | chunk_mask))
2996		max_sector = (sector_nr | chunk_mask) + 1;
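	/* For example (illustrative values): with 64-sector chunks,
	 * chunk_mask is 63, so a request starting at sector_nr = 100
	 * is clipped at (100 | 63) + 1 = 128, the next chunk boundary.
	 */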
2997
2998	/* Again, very different code for resync and recovery.
2999	 * Both must result in an r10bio with a list of bios that
3000	 * have bi_end_io, bi_sector, bi_bdev set,
3001	 * and bi_private set to the r10bio.
3002	 * For recovery, we may actually create several r10bios
3003	 * with 2 bios in each, that correspond to the bios in the main one.
3004	 * In this case, the subordinate r10bios link back through a
3005	 * borrowed master_bio pointer, and the counter in the master
3006	 * includes a ref from each subordinate.
3007	 */
3008	/* First, we decide what to do and set ->bi_end_io
3009	 * To end_sync_read if we want to read, and
3010	 * end_sync_write if we will want to write.
3011	 */
3012
3013	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
3014	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3015		/* recovery... the complicated one */
3016		int j;
3017		r10_bio = NULL;
3018
3019		for (i = 0 ; i < conf->geo.raid_disks; i++) {
3020			int still_degraded;
3021			struct r10bio *rb2;
3022			sector_t sect;
3023			int must_sync;
3024			int any_working;
3025			struct raid10_info *mirror = &conf->mirrors[i];
3026
3027			if ((mirror->rdev == NULL ||
3028			     test_bit(In_sync, &mirror->rdev->flags))
3029			    &&
3030			    (mirror->replacement == NULL ||
3031			     test_bit(Faulty,
3032				      &mirror->replacement->flags)))
3033				continue;
3034
3035			still_degraded = 0;
3036			/* want to reconstruct this device */
3037			rb2 = r10_bio;
3038			sect = raid10_find_virt(conf, sector_nr, i);
3039			if (sect >= mddev->resync_max_sectors) {
3040				/* last stripe is not complete - don't
3041				 * try to recover this sector.
3042				 */
3043				continue;
3044			}
3045			/* Unless we are doing a full sync, or a replacement
3046			 * we only need to recover the block if it is set in
3047			 * the bitmap
3048			 */
3049			must_sync = bitmap_start_sync(mddev->bitmap, sect,
3050						      &sync_blocks, 1);
3051			if (sync_blocks < max_sync)
3052				max_sync = sync_blocks;
3053			if (!must_sync &&
3054			    mirror->replacement == NULL &&
3055			    !conf->fullsync) {
3056				/* yep, skip the sync_blocks here, but don't assume
3057				 * that there will never be anything to do here
3058				 */
3059				chunks_skipped = -1;
3060				continue;
3061			}
3062
3063			r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
3064			r10_bio->state = 0;
3065			raise_barrier(conf, rb2 != NULL);
3066			atomic_set(&r10_bio->remaining, 0);
3067
			r10_bio->master_bio = (struct bio *)rb2;
3069			if (rb2)
3070				atomic_inc(&rb2->remaining);
3071			r10_bio->mddev = mddev;
3072			set_bit(R10BIO_IsRecover, &r10_bio->state);
3073			r10_bio->sector = sect;
3074
3075			raid10_find_phys(conf, r10_bio);
3076
3077			/* Need to check if the array will still be
3078			 * degraded
3079			 */
3080			for (j = 0; j < conf->geo.raid_disks; j++)
3081				if (conf->mirrors[j].rdev == NULL ||
3082				    test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
3083					still_degraded = 1;
3084					break;
3085				}
3086
3087			must_sync = bitmap_start_sync(mddev->bitmap, sect,
3088						      &sync_blocks, still_degraded);
3089
3090			any_working = 0;
			for (j = 0; j < conf->copies; j++) {
3092				int k;
3093				int d = r10_bio->devs[j].devnum;
3094				sector_t from_addr, to_addr;
3095				struct md_rdev *rdev;
3096				sector_t sector, first_bad;
3097				int bad_sectors;
3098				if (!conf->mirrors[d].rdev ||
3099				    !test_bit(In_sync, &conf->mirrors[d].rdev->flags))
3100					continue;
3101				/* This is where we read from */
3102				any_working = 1;
3103				rdev = conf->mirrors[d].rdev;
3104				sector = r10_bio->devs[j].addr;
3105
3106				if (is_badblock(rdev, sector, max_sync,
3107						&first_bad, &bad_sectors)) {
3108					if (first_bad > sector)
3109						max_sync = first_bad - sector;
3110					else {
3111						bad_sectors -= (sector
3112								- first_bad);
3113						if (max_sync > bad_sectors)
3114							max_sync = bad_sectors;
3115						continue;
3116					}
3117				}
3118				bio = r10_bio->devs[0].bio;
3119				bio_reset(bio);
3120				bio->bi_next = biolist;
3121				biolist = bio;
3122				bio->bi_private = r10_bio;
3123				bio->bi_end_io = end_sync_read;
3124				bio->bi_rw = READ;
3125				from_addr = r10_bio->devs[j].addr;
3126				bio->bi_iter.bi_sector = from_addr +
3127					rdev->data_offset;
3128				bio->bi_bdev = rdev->bdev;
3129				atomic_inc(&rdev->nr_pending);
3130				/* and we write to 'i' (if not in_sync) */
3131
				for (k = 0; k < conf->copies; k++)
3133					if (r10_bio->devs[k].devnum == i)
3134						break;
3135				BUG_ON(k == conf->copies);
3136				to_addr = r10_bio->devs[k].addr;
3137				r10_bio->devs[0].devnum = d;
3138				r10_bio->devs[0].addr = from_addr;
3139				r10_bio->devs[1].devnum = i;
3140				r10_bio->devs[1].addr = to_addr;
3141
3142				rdev = mirror->rdev;
3143				if (!test_bit(In_sync, &rdev->flags)) {
3144					bio = r10_bio->devs[1].bio;
3145					bio_reset(bio);
3146					bio->bi_next = biolist;
3147					biolist = bio;
3148					bio->bi_private = r10_bio;
3149					bio->bi_end_io = end_sync_write;
3150					bio->bi_rw = WRITE;
3151					bio->bi_iter.bi_sector = to_addr
3152						+ rdev->data_offset;
3153					bio->bi_bdev = rdev->bdev;
3154					atomic_inc(&r10_bio->remaining);
3155				} else
3156					r10_bio->devs[1].bio->bi_end_io = NULL;
3157
3158				/* and maybe write to replacement */
3159				bio = r10_bio->devs[1].repl_bio;
3160				if (bio)
3161					bio->bi_end_io = NULL;
3162				rdev = mirror->replacement;
3163				/* Note: if rdev != NULL, then bio
3164				 * cannot be NULL as r10buf_pool_alloc will
3165				 * have allocated it.
3166				 * So the second test here is pointless.
3167				 * But it keeps semantic-checkers happy, and
3168				 * this comment keeps human reviewers
3169				 * happy.
3170				 */
3171				if (rdev == NULL || bio == NULL ||
3172				    test_bit(Faulty, &rdev->flags))
3173					break;
3174				bio_reset(bio);
3175				bio->bi_next = biolist;
3176				biolist = bio;
3177				bio->bi_private = r10_bio;
3178				bio->bi_end_io = end_sync_write;
3179				bio->bi_rw = WRITE;
3180				bio->bi_iter.bi_sector = to_addr +
3181					rdev->data_offset;
3182				bio->bi_bdev = rdev->bdev;
3183				atomic_inc(&r10_bio->remaining);
3184				break;
3185			}
3186			if (j == conf->copies) {
3187				/* Cannot recover, so abort the recovery or
3188				 * record a bad block */
3189				if (any_working) {
3190					/* problem is that there are bad blocks
3191					 * on other device(s)
3192					 */
3193					int k;
3194					for (k = 0; k < conf->copies; k++)
3195						if (r10_bio->devs[k].devnum == i)
3196							break;
3197					if (!test_bit(In_sync,
3198						      &mirror->rdev->flags)
3199					    && !rdev_set_badblocks(
3200						    mirror->rdev,
3201						    r10_bio->devs[k].addr,
3202						    max_sync, 0))
3203						any_working = 0;
3204					if (mirror->replacement &&
3205					    !rdev_set_badblocks(
3206						    mirror->replacement,
3207						    r10_bio->devs[k].addr,
3208						    max_sync, 0))
3209						any_working = 0;
3210				}
3211				if (!any_working)  {
3212					if (!test_and_set_bit(MD_RECOVERY_INTR,
3213							      &mddev->recovery))
3214						printk(KERN_INFO "md/raid10:%s: insufficient "
3215						       "working devices for recovery.\n",
3216						       mdname(mddev));
3217					mirror->recovery_disabled
3218						= mddev->recovery_disabled;
3219				}
3220				put_buf(r10_bio);
3221				if (rb2)
3222					atomic_dec(&rb2->remaining);
3223				r10_bio = rb2;
3224				break;
3225			}
3226		}
3227		if (biolist == NULL) {
3228			while (r10_bio) {
3229				struct r10bio *rb2 = r10_bio;
				r10_bio = (struct r10bio *)rb2->master_bio;
3231				rb2->master_bio = NULL;
3232				put_buf(rb2);
3233			}
3234			goto giveup;
3235		}
3236	} else {
3237		/* resync. Schedule a read for every block at this virt offset */
3238		int count = 0;
3239
3240		bitmap_cond_end_sync(mddev->bitmap, sector_nr);
3241
3242		if (!bitmap_start_sync(mddev->bitmap, sector_nr,
3243				       &sync_blocks, mddev->degraded) &&
3244		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3245						 &mddev->recovery)) {
3246			/* We can skip this block */
3247			*skipped = 1;
3248			return sync_blocks + sectors_skipped;
3249		}
3250		if (sync_blocks < max_sync)
3251			max_sync = sync_blocks;
3252		r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
3253		r10_bio->state = 0;
3254
3255		r10_bio->mddev = mddev;
3256		atomic_set(&r10_bio->remaining, 0);
3257		raise_barrier(conf, 0);
3258		conf->next_resync = sector_nr;
3259
3260		r10_bio->master_bio = NULL;
3261		r10_bio->sector = sector_nr;
3262		set_bit(R10BIO_IsSync, &r10_bio->state);
3263		raid10_find_phys(conf, r10_bio);
3264		r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
3265
3266		for (i = 0; i < conf->copies; i++) {
3267			int d = r10_bio->devs[i].devnum;
3268			sector_t first_bad, sector;
3269			int bad_sectors;
3270
3271			if (r10_bio->devs[i].repl_bio)
3272				r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3273
3274			bio = r10_bio->devs[i].bio;
3275			bio_reset(bio);
3276			clear_bit(BIO_UPTODATE, &bio->bi_flags);
3277			if (conf->mirrors[d].rdev == NULL ||
3278			    test_bit(Faulty, &conf->mirrors[d].rdev->flags))
3279				continue;
3280			sector = r10_bio->devs[i].addr;
3281			if (is_badblock(conf->mirrors[d].rdev,
3282					sector, max_sync,
3283					&first_bad, &bad_sectors)) {
3284				if (first_bad > sector)
3285					max_sync = first_bad - sector;
3286				else {
3287					bad_sectors -= (sector - first_bad);
3288					if (max_sync > bad_sectors)
3289						max_sync = bad_sectors;
3290					continue;
3291				}
3292			}
3293			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
3294			atomic_inc(&r10_bio->remaining);
3295			bio->bi_next = biolist;
3296			biolist = bio;
3297			bio->bi_private = r10_bio;
3298			bio->bi_end_io = end_sync_read;
3299			bio->bi_rw = READ;
3300			bio->bi_iter.bi_sector = sector +
3301				conf->mirrors[d].rdev->data_offset;
3302			bio->bi_bdev = conf->mirrors[d].rdev->bdev;
3303			count++;
3304
3305			if (conf->mirrors[d].replacement == NULL ||
3306			    test_bit(Faulty,
3307				     &conf->mirrors[d].replacement->flags))
3308				continue;
3309
3310			/* Need to set up for writing to the replacement */
3311			bio = r10_bio->devs[i].repl_bio;
3312			bio_reset(bio);
3313			clear_bit(BIO_UPTODATE, &bio->bi_flags);
3314
3315			sector = r10_bio->devs[i].addr;
3316			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
3317			bio->bi_next = biolist;
3318			biolist = bio;
3319			bio->bi_private = r10_bio;
3320			bio->bi_end_io = end_sync_write;
3321			bio->bi_rw = WRITE;
3322			bio->bi_iter.bi_sector = sector +
3323				conf->mirrors[d].replacement->data_offset;
3324			bio->bi_bdev = conf->mirrors[d].replacement->bdev;
3325			count++;
3326		}
3327
3328		if (count < 2) {
			for (i = 0; i < conf->copies; i++) {
3330				int d = r10_bio->devs[i].devnum;
3331				if (r10_bio->devs[i].bio->bi_end_io)
3332					rdev_dec_pending(conf->mirrors[d].rdev,
3333							 mddev);
3334				if (r10_bio->devs[i].repl_bio &&
3335				    r10_bio->devs[i].repl_bio->bi_end_io)
3336					rdev_dec_pending(
3337						conf->mirrors[d].replacement,
3338						mddev);
3339			}
3340			put_buf(r10_bio);
3341			biolist = NULL;
3342			goto giveup;
3343		}
3344	}
3345
3346	nr_sectors = 0;
3347	if (sector_nr + max_sync < max_sector)
3348		max_sector = sector_nr + max_sync;
3349	do {
3350		struct page *page;
3351		int len = PAGE_SIZE;
3352		if (sector_nr + (len>>9) > max_sector)
3353			len = (max_sector - sector_nr) << 9;
3354		if (len == 0)
3355			break;
		for (bio = biolist; bio; bio = bio->bi_next) {
3357			struct bio *bio2;
3358			page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
3359			if (bio_add_page(bio, page, len, 0))
3360				continue;
3361
3362			/* stop here */
3363			bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
3364			for (bio2 = biolist;
3365			     bio2 && bio2 != bio;
3366			     bio2 = bio2->bi_next) {
3367				/* remove last page from this bio */
3368				bio2->bi_vcnt--;
3369				bio2->bi_iter.bi_size -= len;
3370				__clear_bit(BIO_SEG_VALID, &bio2->bi_flags);
3371			}
3372			goto bio_full;
3373		}
3374		nr_sectors += len>>9;
3375		sector_nr += len>>9;
3376	} while (biolist->bi_vcnt < RESYNC_PAGES);
3377 bio_full:
3378	r10_bio->sectors = nr_sectors;
3379
3380	while (biolist) {
3381		bio = biolist;
3382		biolist = biolist->bi_next;
3383
3384		bio->bi_next = NULL;
3385		r10_bio = bio->bi_private;
3386		r10_bio->sectors = nr_sectors;
3387
3388		if (bio->bi_end_io == end_sync_read) {
3389			md_sync_acct(bio->bi_bdev, nr_sectors);
3390			set_bit(BIO_UPTODATE, &bio->bi_flags);
3391			generic_make_request(bio);
3392		}
3393	}
3394
3395	if (sectors_skipped)
		/* pretend they weren't skipped; it makes
		 * no important difference in this case
		 */
3399		md_done_sync(mddev, sectors_skipped, 1);
3400
3401	return sectors_skipped + nr_sectors;
3402 giveup:
	/* There is nowhere to write, so all non-sync
	 * drives must be failed or in resync, or all drives
	 * have a bad block, so try the next chunk...
	 */
3407	if (sector_nr + max_sync < max_sector)
3408		max_sector = sector_nr + max_sync;
3409
3410	sectors_skipped += (max_sector - sector_nr);
	chunks_skipped++;
3412	sector_nr = max_sector;
3413	goto skipped;
3414}
3415
3416static sector_t
3417raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3418{
3419	sector_t size;
3420	struct r10conf *conf = mddev->private;
3421
3422	if (!raid_disks)
3423		raid_disks = min(conf->geo.raid_disks,
3424				 conf->prev.raid_disks);
3425	if (!sectors)
3426		sectors = conf->dev_sectors;
3427
3428	size = sectors >> conf->geo.chunk_shift;
3429	sector_div(size, conf->geo.far_copies);
3430	size = size * raid_disks;
3431	sector_div(size, conf->geo.near_copies);
3432
3433	return size << conf->geo.chunk_shift;
3434}
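/*
 * Worked example for raid10_size() with invented parameters: 4 disks
 * in a near-2 layout (near_copies = 2, far_copies = 1), each device
 * contributing 100 chunks, gives 100 / 1 * 4 / 2 = 200 chunks of
 * array capacity.
 */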
3435
3436static void calc_sectors(struct r10conf *conf, sector_t size)
3437{
	/* Calculate the number of sectors-per-device that will
	 * actually be used, and set conf->dev_sectors and
	 * conf->geo.stride.
	 */
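	/* Worked example (hypothetical numbers): 4 devices of 100
	 * chunks each in a near-2 layout (copies = 2, far_copies = 1)
	 * give 100 / 1 * 4 / 2 = 200 array chunks, 200 * 2 = 400 used
	 * chunks in total, and DIV_ROUND_UP(400, 4) = 100 used chunks
	 * per device, so the whole of each device is used.
	 */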
3442
3443	size = size >> conf->geo.chunk_shift;
3444	sector_div(size, conf->geo.far_copies);
3445	size = size * conf->geo.raid_disks;
3446	sector_div(size, conf->geo.near_copies);
3447	/* 'size' is now the number of chunks in the array */
3448	/* calculate "used chunks per device" */
3449	size = size * conf->copies;
3450
3451	/* We need to round up when dividing by raid_disks to
3452	 * get the stride size.
3453	 */
3454	size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
3455
3456	conf->dev_sectors = size << conf->geo.chunk_shift;
3457
3458	if (conf->geo.far_offset)
3459		conf->geo.stride = 1 << conf->geo.chunk_shift;
3460	else {
3461		sector_div(size, conf->geo.far_copies);
3462		conf->geo.stride = size << conf->geo.chunk_shift;
3463	}
3464}
3465
3466enum geo_type {geo_new, geo_old, geo_start};
3467static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3468{
3469	int nc, fc, fo;
3470	int layout, chunk, disks;
3471	switch (new) {
3472	case geo_old:
3473		layout = mddev->layout;
3474		chunk = mddev->chunk_sectors;
3475		disks = mddev->raid_disks - mddev->delta_disks;
3476		break;
3477	case geo_new:
3478		layout = mddev->new_layout;
3479		chunk = mddev->new_chunk_sectors;
3480		disks = mddev->raid_disks;
3481		break;
3482	default: /* avoid 'may be unused' warnings */
3483	case geo_start: /* new when starting reshape - raid_disks not
3484			 * updated yet. */
3485		layout = mddev->new_layout;
3486		chunk = mddev->new_chunk_sectors;
3487		disks = mddev->raid_disks + mddev->delta_disks;
3488		break;
3489	}
3490	if (layout >> 18)
3491		return -1;
3492	if (chunk < (PAGE_SIZE >> 9) ||
3493	    !is_power_of_2(chunk))
3494		return -2;
3495	nc = layout & 255;
3496	fc = (layout >> 8) & 255;
3497	fo = layout & (1<<16);
3498	geo->raid_disks = disks;
3499	geo->near_copies = nc;
3500	geo->far_copies = fc;
3501	geo->far_offset = fo;
3502	geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks;
3503	geo->chunk_mask = chunk - 1;
3504	geo->chunk_shift = ffz(~chunk);
3505	return nc*fc;
3506}
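/*
 * Layout decoding example (a plausible value, not taken from any
 * specific array):
 *
 *	layout = 0x102;
 *	nc = layout & 255;		-> 2
 *	fc = (layout >> 8) & 255;	-> 1
 *	fo = layout & (1 << 16);	-> 0
 *
 * i.e. the common 'n2' arrangement; setup_geo() returns nc * fc = 2.
 */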
3507
3508static struct r10conf *setup_conf(struct mddev *mddev)
3509{
3510	struct r10conf *conf = NULL;
3511	int err = -EINVAL;
3512	struct geom geo;
3513	int copies;
3514
3515	copies = setup_geo(&geo, mddev, geo_new);
3516
3517	if (copies == -2) {
3518		printk(KERN_ERR "md/raid10:%s: chunk size must be "
3519		       "at least PAGE_SIZE(%ld) and be a power of 2.\n",
3520		       mdname(mddev), PAGE_SIZE);
3521		goto out;
3522	}
3523
3524	if (copies < 2 || copies > mddev->raid_disks) {
3525		printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
3526		       mdname(mddev), mddev->new_layout);
3527		goto out;
3528	}
3529
3530	err = -ENOMEM;
3531	conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
3532	if (!conf)
3533		goto out;
3534
3535	/* FIXME calc properly */
3536	conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks +
3537							    max(0,-mddev->delta_disks)),
3538				GFP_KERNEL);
3539	if (!conf->mirrors)
3540		goto out;
3541
3542	conf->tmppage = alloc_page(GFP_KERNEL);
3543	if (!conf->tmppage)
3544		goto out;
3545
3546	conf->geo = geo;
3547	conf->copies = copies;
3548	conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
3549					   r10bio_pool_free, conf);
3550	if (!conf->r10bio_pool)
3551		goto out;
3552
3553	calc_sectors(conf, mddev->dev_sectors);
3554	if (mddev->reshape_position == MaxSector) {
3555		conf->prev = conf->geo;
3556		conf->reshape_progress = MaxSector;
3557	} else {
3558		if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
3559			err = -EINVAL;
3560			goto out;
3561		}
3562		conf->reshape_progress = mddev->reshape_position;
3563		if (conf->prev.far_offset)
3564			conf->prev.stride = 1 << conf->prev.chunk_shift;
3565		else
3566			/* far_copies must be 1 */
3567			conf->prev.stride = conf->dev_sectors;
3568	}
3569	conf->reshape_safe = conf->reshape_progress;
3570	spin_lock_init(&conf->device_lock);
3571	INIT_LIST_HEAD(&conf->retry_list);
3572
3573	spin_lock_init(&conf->resync_lock);
3574	init_waitqueue_head(&conf->wait_barrier);
3575
3576	conf->thread = md_register_thread(raid10d, mddev, "raid10");
3577	if (!conf->thread)
3578		goto out;
3579
3580	conf->mddev = mddev;
3581	return conf;
3582
3583 out:
3584	if (err == -ENOMEM)
3585		printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
3586		       mdname(mddev));
3587	if (conf) {
3588		if (conf->r10bio_pool)
3589			mempool_destroy(conf->r10bio_pool);
3590		kfree(conf->mirrors);
3591		safe_put_page(conf->tmppage);
3592		kfree(conf);
3593	}
3594	return ERR_PTR(err);
3595}
3596
3597static int run(struct mddev *mddev)
3598{
3599	struct r10conf *conf;
3600	int i, disk_idx, chunk_size;
3601	struct raid10_info *disk;
3602	struct md_rdev *rdev;
3603	sector_t size;
3604	sector_t min_offset_diff = 0;
3605	int first = 1;
3606	bool discard_supported = false;
3607
3608	if (mddev->private == NULL) {
3609		conf = setup_conf(mddev);
3610		if (IS_ERR(conf))
3611			return PTR_ERR(conf);
3612		mddev->private = conf;
3613	}
3614	conf = mddev->private;
3615	if (!conf)
3616		goto out;
3617
3618	mddev->thread = conf->thread;
3619	conf->thread = NULL;
3620
3621	chunk_size = mddev->chunk_sectors << 9;
3622	if (mddev->queue) {
3623		blk_queue_max_discard_sectors(mddev->queue,
3624					      mddev->chunk_sectors);
3625		blk_queue_max_write_same_sectors(mddev->queue, 0);
3626		blk_queue_io_min(mddev->queue, chunk_size);
3627		if (conf->geo.raid_disks % conf->geo.near_copies)
3628			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
3629		else
3630			blk_queue_io_opt(mddev->queue, chunk_size *
3631					 (conf->geo.raid_disks / conf->geo.near_copies));
3632	}
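	/* Illustration of the io_min/io_opt settings above (hypothetical
	 * values): with a 512KiB chunk (chunk_size = 524288),
	 * raid_disks = 4 and near_copies = 2, 4 % 2 == 0, so io_min is
	 * 512KiB and io_opt is chunk_size * (4 / 2) = 1MiB.
	 */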
3633
3634	rdev_for_each(rdev, mddev) {
3635		long long diff;
3636		struct request_queue *q;
3637
3638		disk_idx = rdev->raid_disk;
3639		if (disk_idx < 0)
3640			continue;
3641		if (disk_idx >= conf->geo.raid_disks &&
3642		    disk_idx >= conf->prev.raid_disks)
3643			continue;
3644		disk = conf->mirrors + disk_idx;
3645
3646		if (test_bit(Replacement, &rdev->flags)) {
3647			if (disk->replacement)
3648				goto out_free_conf;
3649			disk->replacement = rdev;
3650		} else {
3651			if (disk->rdev)
3652				goto out_free_conf;
3653			disk->rdev = rdev;
3654		}
3655		q = bdev_get_queue(rdev->bdev);
3656		if (q->merge_bvec_fn)
3657			mddev->merge_check_needed = 1;
3658		diff = (rdev->new_data_offset - rdev->data_offset);
3659		if (!mddev->reshape_backwards)
3660			diff = -diff;
3661		if (diff < 0)
3662			diff = 0;
		if (first || diff < min_offset_diff)
			min_offset_diff = diff;
		first = 0;
3665
3666		if (mddev->gendisk)
3667			disk_stack_limits(mddev->gendisk, rdev->bdev,
3668					  rdev->data_offset << 9);
3669
3670		disk->head_position = 0;
3671
3672		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3673			discard_supported = true;
3674	}
3675
3676	if (mddev->queue) {
3677		if (discard_supported)
3678			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
3679						mddev->queue);
3680		else
3681			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
3682						  mddev->queue);
3683	}
3684	/* need to check that every block has at least one working mirror */
3685	if (!enough(conf, -1)) {
3686		printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
3687		       mdname(mddev));
3688		goto out_free_conf;
3689	}
3690
3691	if (conf->reshape_progress != MaxSector) {
3692		/* must ensure that shape change is supported */
3693		if (conf->geo.far_copies != 1 &&
3694		    conf->geo.far_offset == 0)
3695			goto out_free_conf;
3696		if (conf->prev.far_copies != 1 &&
3697		    conf->prev.far_offset == 0)
3698			goto out_free_conf;
3699	}
3700
3701	mddev->degraded = 0;
3702	for (i = 0;
3703	     i < conf->geo.raid_disks
3704		     || i < conf->prev.raid_disks;
3705	     i++) {
3706
3707		disk = conf->mirrors + i;
3708
3709		if (!disk->rdev && disk->replacement) {
3710			/* The replacement is all we have - use it */
3711			disk->rdev = disk->replacement;
3712			disk->replacement = NULL;
3713			clear_bit(Replacement, &disk->rdev->flags);
3714		}
3715
3716		if (!disk->rdev ||
3717		    !test_bit(In_sync, &disk->rdev->flags)) {
3718			disk->head_position = 0;
3719			mddev->degraded++;
3720			if (disk->rdev &&
3721			    disk->rdev->saved_raid_disk < 0)
3722				conf->fullsync = 1;
3723		}
3724		disk->recovery_disabled = mddev->recovery_disabled - 1;
3725	}
3726
3727	if (mddev->recovery_cp != MaxSector)
3728		printk(KERN_NOTICE "md/raid10:%s: not clean"
3729		       " -- starting background reconstruction\n",
3730		       mdname(mddev));
3731	printk(KERN_INFO
3732		"md/raid10:%s: active with %d out of %d devices\n",
3733		mdname(mddev), conf->geo.raid_disks - mddev->degraded,
3734		conf->geo.raid_disks);
3735	/*
3736	 * Ok, everything is just fine now
3737	 */
3738	mddev->dev_sectors = conf->dev_sectors;
3739	size = raid10_size(mddev, 0, 0);
3740	md_set_array_sectors(mddev, size);
3741	mddev->resync_max_sectors = size;
3742
3743	if (mddev->queue) {
3744		int stripe = conf->geo.raid_disks *
3745			((mddev->chunk_sectors << 9) / PAGE_SIZE);
3746
3747		/* Calculate max read-ahead size.
3748		 * We need to readahead at least twice a whole stripe....
3749		 * maybe...
3750		 */
3751		stripe /= conf->geo.near_copies;
3752		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
3753			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
3754	}
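	/* Read-ahead illustration (hypothetical values, assuming 4KiB
	 * pages): raid_disks = 4, near_copies = 2 and a 512KiB chunk give
	 * stripe = 4 * 128 / 2 = 256 pages, so ra_pages is raised to at
	 * least 512 pages (2MiB) if it was smaller.
	 */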
3755
3756	if (md_integrity_register(mddev))
3757		goto out_free_conf;
3758
3759	if (conf->reshape_progress != MaxSector) {
3760		unsigned long before_length, after_length;
3761
3762		before_length = ((1 << conf->prev.chunk_shift) *
3763				 conf->prev.far_copies);
3764		after_length = ((1 << conf->geo.chunk_shift) *
3765				conf->geo.far_copies);
3766
3767		if (max(before_length, after_length) > min_offset_diff) {
3768			/* This cannot work */
			printk(KERN_ERR "md/raid10:%s: offset difference not enough to continue reshape\n",
			       mdname(mddev));
3770			goto out_free_conf;
3771		}
3772		conf->offset_diff = min_offset_diff;
3773
3774		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3775		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3776		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
3777		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3778		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
3779							"reshape");
3780	}
3781
3782	return 0;
3783
3784out_free_conf:
3785	md_unregister_thread(&mddev->thread);
3786	if (conf->r10bio_pool)
3787		mempool_destroy(conf->r10bio_pool);
3788	safe_put_page(conf->tmppage);
3789	kfree(conf->mirrors);
3790	kfree(conf);
3791	mddev->private = NULL;
3792out:
3793	return -EIO;
3794}
3795
3796static void raid10_free(struct mddev *mddev, void *priv)
3797{
3798	struct r10conf *conf = priv;
3799
3800	if (conf->r10bio_pool)
3801		mempool_destroy(conf->r10bio_pool);
3802	safe_put_page(conf->tmppage);
3803	kfree(conf->mirrors);
3804	kfree(conf->mirrors_old);
3805	kfree(conf->mirrors_new);
3806	kfree(conf);
3807}
3808
3809static void raid10_quiesce(struct mddev *mddev, int state)
3810{
3811	struct r10conf *conf = mddev->private;
3812
3813	switch(state) {
3814	case 1:
3815		raise_barrier(conf, 0);
3816		break;
3817	case 0:
3818		lower_barrier(conf);
3819		break;
3820	}
3821}
3822
3823static int raid10_resize(struct mddev *mddev, sector_t sectors)
3824{
3825	/* Resize of 'far' arrays is not supported.
3826	 * For 'near' and 'offset' arrays we can set the
3827	 * number of sectors used to be an appropriate multiple
3828	 * of the chunk size.
3829	 * For 'offset', this is far_copies*chunksize.
3830	 * For 'near' the multiplier is the LCM of
3831	 * near_copies and raid_disks.
3832	 * So if far_copies > 1 && !far_offset, fail.
	 * Else find LCM(raid_disks, near_copies)*far_copies and
3834	 * multiply by chunk_size.  Then round to this number.
3835	 * This is mostly done by raid10_size()
3836	 */
3837	struct r10conf *conf = mddev->private;
3838	sector_t oldsize, size;
3839
3840	if (mddev->reshape_position != MaxSector)
3841		return -EBUSY;
3842
3843	if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
3844		return -EINVAL;
3845
3846	oldsize = raid10_size(mddev, 0, 0);
3847	size = raid10_size(mddev, sectors, 0);
3848	if (mddev->external_size &&
3849	    mddev->array_sectors > size)
3850		return -EINVAL;
3851	if (mddev->bitmap) {
3852		int ret = bitmap_resize(mddev->bitmap, size, 0, 0);
3853		if (ret)
3854			return ret;
3855	}
3856	md_set_array_sectors(mddev, size);
3857	set_capacity(mddev->gendisk, mddev->array_sectors);
3858	revalidate_disk(mddev->gendisk);
3859	if (sectors > mddev->dev_sectors &&
3860	    mddev->recovery_cp > oldsize) {
3861		mddev->recovery_cp = oldsize;
3862		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3863	}
3864	calc_sectors(conf, sectors);
3865	mddev->dev_sectors = conf->dev_sectors;
3866	mddev->resync_max_sectors = size;
3867	return 0;
3868}
3869
3870static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
3871{
3872	struct md_rdev *rdev;
3873	struct r10conf *conf;
3874
3875	if (mddev->degraded > 0) {
3876		printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
3877		       mdname(mddev));
3878		return ERR_PTR(-EINVAL);
3879	}
3880	sector_div(size, devs);
3881
3882	/* Set new parameters */
3883	mddev->new_level = 10;
3884	/* new layout: far_copies = 1, near_copies = 2 */
3885	mddev->new_layout = (1<<8) + 2;
3886	mddev->new_chunk_sectors = mddev->chunk_sectors;
3887	mddev->delta_disks = mddev->raid_disks;
3888	mddev->raid_disks *= 2;
	/* make sure it will not be marked as dirty */
3890	mddev->recovery_cp = MaxSector;
3891	mddev->dev_sectors = size;
3892
3893	conf = setup_conf(mddev);
3894	if (!IS_ERR(conf)) {
3895		rdev_for_each(rdev, mddev)
3896			if (rdev->raid_disk >= 0) {
3897				rdev->new_raid_disk = rdev->raid_disk * 2;
3898				rdev->sectors = size;
3899			}
3900		conf->barrier = 1;
3901	}
3902
3903	return conf;
3904}
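
/* Illustration of the raid0 takeover above (hypothetical values): a
 * two-drive raid0 whose single zone ends at 4194304 sectors becomes a
 * four-slot raid10 with layout 0x102 (near_copies = 2, far_copies = 1),
 * 2097152 sectors used per device and the original drives moved to the
 * even slots; the array runs degraded until mirrors are added for the odd
 * slots.
 */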
3905
3906static void *raid10_takeover(struct mddev *mddev)
3907{
3908	struct r0conf *raid0_conf;
3909
	/* raid10 can take over:
	 *  raid0 - providing it has only one zone (i.e. all member
	 *          devices are effectively the same size)
	 */
3913	if (mddev->level == 0) {
3914		/* for raid0 takeover only one zone is supported */
3915		raid0_conf = mddev->private;
3916		if (raid0_conf->nr_strip_zones > 1) {
3917			printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
3918			       " with more than one zone.\n",
3919			       mdname(mddev));
3920			return ERR_PTR(-EINVAL);
3921		}
3922		return raid10_takeover_raid0(mddev,
3923			raid0_conf->strip_zone->zone_end,
3924			raid0_conf->strip_zone->nb_dev);
3925	}
3926	return ERR_PTR(-EINVAL);
3927}
3928
3929static int raid10_check_reshape(struct mddev *mddev)
3930{
3931	/* Called when there is a request to change
3932	 * - layout (to ->new_layout)
3933	 * - chunk size (to ->new_chunk_sectors)
3934	 * - raid_disks (by delta_disks)
3935	 * or when trying to restart a reshape that was ongoing.
3936	 *
3937	 * We need to validate the request and possibly allocate
3938	 * space if that might be an issue later.
3939	 *
3940	 * Currently we reject any reshape of a 'far' mode array,
3941	 * allow chunk size to change if new is generally acceptable,
3942	 * allow raid_disks to increase, and allow
3943	 * a switch between 'near' mode and 'offset' mode.
3944	 */
3945	struct r10conf *conf = mddev->private;
3946	struct geom geo;
3947
3948	if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
3949		return -EINVAL;
3950
3951	if (setup_geo(&geo, mddev, geo_start) != conf->copies)
3952		/* mustn't change number of copies */
3953		return -EINVAL;
3954	if (geo.far_copies > 1 && !geo.far_offset)
3955		/* Cannot switch to 'far' mode */
3956		return -EINVAL;
3957
3958	if (mddev->array_sectors & geo.chunk_mask)
3959			/* not factor of array size */
3960			return -EINVAL;
3961
3962	if (!enough(conf, -1))
3963		return -EINVAL;
3964
3965	kfree(conf->mirrors_new);
3966	conf->mirrors_new = NULL;
3967	if (mddev->delta_disks > 0) {
3968		/* allocate new 'mirrors' list */
3969		conf->mirrors_new = kzalloc(
3970			sizeof(struct raid10_info)
3971			*(mddev->raid_disks +
3972			  mddev->delta_disks),
3973			GFP_KERNEL);
3974		if (!conf->mirrors_new)
3975			return -ENOMEM;
3976	}
3977	return 0;
3978}
3979
3980/*
3981 * Need to check if array has failed when deciding whether to:
3982 *  - start an array
3983 *  - remove non-faulty devices
3984 *  - add a spare
3985 *  - allow a reshape
3986 * This determination is simple when no reshape is happening.
3987 * However if there is a reshape, we need to carefully check
3988 * both the before and after sections.
3989 * This is because some failed devices may only affect one
3990 * of the two sections, and some non-in_sync devices may
 * be in-sync in the section most affected by failed devices.
3992 */
3993static int calc_degraded(struct r10conf *conf)
3994{
3995	int degraded, degraded2;
3996	int i;
3997
3998	rcu_read_lock();
3999	degraded = 0;
4000	/* 'prev' section first */
4001	for (i = 0; i < conf->prev.raid_disks; i++) {
4002		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4003		if (!rdev || test_bit(Faulty, &rdev->flags))
4004			degraded++;
4005		else if (!test_bit(In_sync, &rdev->flags))
4006			/* When we can reduce the number of devices in
4007			 * an array, this might not contribute to
4008			 * 'degraded'.  It does now.
4009			 */
4010			degraded++;
4011	}
4012	rcu_read_unlock();
4013	if (conf->geo.raid_disks == conf->prev.raid_disks)
4014		return degraded;
4015	rcu_read_lock();
4016	degraded2 = 0;
4017	for (i = 0; i < conf->geo.raid_disks; i++) {
4018		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4019		if (!rdev || test_bit(Faulty, &rdev->flags))
4020			degraded2++;
4021		else if (!test_bit(In_sync, &rdev->flags)) {
4022			/* If reshape is increasing the number of devices,
4023			 * this section has already been recovered, so
4024			 * it doesn't contribute to degraded.
4025			 * else it does.
4026			 */
4027			if (conf->geo.raid_disks <= conf->prev.raid_disks)
4028				degraded2++;
4029		}
4030	}
4031	rcu_read_unlock();
4032	if (degraded2 > degraded)
4033		return degraded2;
4034	return degraded;
4035}
4036
4037static int raid10_start_reshape(struct mddev *mddev)
4038{
4039	/* A 'reshape' has been requested. This commits
	 * the various 'new' fields and sets MD_RECOVERY_RESHAPE
4041	 * This also checks if there are enough spares and adds them
4042	 * to the array.
4043	 * We currently require enough spares to make the final
4044	 * array non-degraded.  We also require that the difference
4045	 * between old and new data_offset - on each device - is
4046	 * enough that we never risk over-writing.
4047	 */
4048
4049	unsigned long before_length, after_length;
4050	sector_t min_offset_diff = 0;
4051	int first = 1;
4052	struct geom new;
4053	struct r10conf *conf = mddev->private;
4054	struct md_rdev *rdev;
4055	int spares = 0;
4056	int ret;
4057
4058	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4059		return -EBUSY;
4060
4061	if (setup_geo(&new, mddev, geo_start) != conf->copies)
4062		return -EINVAL;
4063
4064	before_length = ((1 << conf->prev.chunk_shift) *
4065			 conf->prev.far_copies);
4066	after_length = ((1 << conf->geo.chunk_shift) *
4067			conf->geo.far_copies);
4068
4069	rdev_for_each(rdev, mddev) {
4070		if (!test_bit(In_sync, &rdev->flags)
4071		    && !test_bit(Faulty, &rdev->flags))
4072			spares++;
4073		if (rdev->raid_disk >= 0) {
4074			long long diff = (rdev->new_data_offset
4075					  - rdev->data_offset);
4076			if (!mddev->reshape_backwards)
4077				diff = -diff;
4078			if (diff < 0)
4079				diff = 0;
			if (first || diff < min_offset_diff)
				min_offset_diff = diff;
			first = 0;
4082		}
4083	}
4084
4085	if (max(before_length, after_length) > min_offset_diff)
4086		return -EINVAL;
4087
4088	if (spares < mddev->delta_disks)
4089		return -EINVAL;
4090
4091	conf->offset_diff = min_offset_diff;
4092	spin_lock_irq(&conf->device_lock);
4093	if (conf->mirrors_new) {
4094		memcpy(conf->mirrors_new, conf->mirrors,
4095		       sizeof(struct raid10_info)*conf->prev.raid_disks);
4096		smp_mb();
4097		kfree(conf->mirrors_old);
4098		conf->mirrors_old = conf->mirrors;
4099		conf->mirrors = conf->mirrors_new;
4100		conf->mirrors_new = NULL;
4101	}
4102	setup_geo(&conf->geo, mddev, geo_start);
4103	smp_mb();
4104	if (mddev->reshape_backwards) {
4105		sector_t size = raid10_size(mddev, 0, 0);
4106		if (size < mddev->array_sectors) {
4107			spin_unlock_irq(&conf->device_lock);
			printk(KERN_ERR "md/raid10:%s: array size must be reduced before number of disks\n",
4109			       mdname(mddev));
4110			return -EINVAL;
4111		}
4112		mddev->resync_max_sectors = size;
4113		conf->reshape_progress = size;
4114	} else
4115		conf->reshape_progress = 0;
4116	conf->reshape_safe = conf->reshape_progress;
4117	spin_unlock_irq(&conf->device_lock);
4118
4119	if (mddev->delta_disks && mddev->bitmap) {
4120		ret = bitmap_resize(mddev->bitmap,
4121				    raid10_size(mddev, 0,
4122						conf->geo.raid_disks),
4123				    0, 0);
4124		if (ret)
4125			goto abort;
4126	}
4127	if (mddev->delta_disks > 0) {
4128		rdev_for_each(rdev, mddev)
4129			if (rdev->raid_disk < 0 &&
4130			    !test_bit(Faulty, &rdev->flags)) {
4131				if (raid10_add_disk(mddev, rdev) == 0) {
4132					if (rdev->raid_disk >=
4133					    conf->prev.raid_disks)
4134						set_bit(In_sync, &rdev->flags);
4135					else
4136						rdev->recovery_offset = 0;
4137
4138					if (sysfs_link_rdev(mddev, rdev))
4139						/* Failure here  is OK */;
4140				}
4141			} else if (rdev->raid_disk >= conf->prev.raid_disks
4142				   && !test_bit(Faulty, &rdev->flags)) {
4143				/* This is a spare that was manually added */
4144				set_bit(In_sync, &rdev->flags);
4145			}
4146	}
4147	/* When a reshape changes the number of devices,
4148	 * ->degraded is measured against the larger of the
4149	 * pre and  post numbers.
4150	 */
4151	spin_lock_irq(&conf->device_lock);
4152	mddev->degraded = calc_degraded(conf);
4153	spin_unlock_irq(&conf->device_lock);
4154	mddev->raid_disks = conf->geo.raid_disks;
4155	mddev->reshape_position = conf->reshape_progress;
4156	set_bit(MD_CHANGE_DEVS, &mddev->flags);
4157
4158	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4159	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4160	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4161	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4162	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4163
4164	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4165						"reshape");
4166	if (!mddev->sync_thread) {
4167		ret = -EAGAIN;
4168		goto abort;
4169	}
4170	conf->reshape_checkpoint = jiffies;
4171	md_wakeup_thread(mddev->sync_thread);
4172	md_new_event(mddev);
4173	return 0;
4174
4175abort:
4176	mddev->recovery = 0;
4177	spin_lock_irq(&conf->device_lock);
4178	conf->geo = conf->prev;
4179	mddev->raid_disks = conf->geo.raid_disks;
4180	rdev_for_each(rdev, mddev)
4181		rdev->new_data_offset = rdev->data_offset;
4182	smp_wmb();
4183	conf->reshape_progress = MaxSector;
4184	conf->reshape_safe = MaxSector;
4185	mddev->reshape_position = MaxSector;
4186	spin_unlock_irq(&conf->device_lock);
4187	return ret;
4188}
4189
4190/* Calculate the last device-address that could contain
4191 * any block from the chunk that includes the array-address 's'
4192 * and report the next address.
4193 * i.e. the address returned will be chunk-aligned and after
4194 * any data that is in the chunk containing 's'.
4195 */
4196static sector_t last_dev_address(sector_t s, struct geom *geo)
4197{
4198	s = (s | geo->chunk_mask) + 1;
4199	s >>= geo->chunk_shift;
4200	s *= geo->near_copies;
4201	s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4202	s *= geo->far_copies;
4203	s <<= geo->chunk_shift;
4204	return s;
4205}
4206
4207/* Calculate the first device-address that could contain
4208 * any block from the chunk that includes the array-address 's'.
4209 * This too will be the start of a chunk
4210 */
4211static sector_t first_dev_address(sector_t s, struct geom *geo)
4212{
4213	s >>= geo->chunk_shift;
4214	s *= geo->near_copies;
4215	sector_div(s, geo->raid_disks);
4216	s *= geo->far_copies;
4217	s <<= geo->chunk_shift;
4218	return s;
4219}
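
/* Example of the two helpers above (illustrative geometry only): with
 * chunk_shift = 7 (chunk_mask = 127), near_copies = 2, far_copies = 1 and
 * raid_disks = 4, the array address 1000 gives
 * first_dev_address() = 384 and last_dev_address() = 512, i.e. every block
 * of that chunk lives in device sectors [384, 512).
 */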
4220
4221static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4222				int *skipped)
4223{
4224	/* We simply copy at most one chunk (smallest of old and new)
4225	 * at a time, possibly less if that exceeds RESYNC_PAGES,
4226	 * or we hit a bad block or something.
4227	 * This might mean we pause for normal IO in the middle of
	 * a chunk, but that is not a problem as mddev->reshape_position
4229	 * can record any location.
4230	 *
4231	 * If we will want to write to a location that isn't
4232	 * yet recorded as 'safe' (i.e. in metadata on disk) then
4233	 * we need to flush all reshape requests and update the metadata.
4234	 *
4235	 * When reshaping forwards (e.g. to more devices), we interpret
4236	 * 'safe' as the earliest block which might not have been copied
4237	 * down yet.  We divide this by previous stripe size and multiply
4238	 * by previous stripe length to get lowest device offset that we
4239	 * cannot write to yet.
4240	 * We interpret 'sector_nr' as an address that we want to write to.
	 * From this we use last_dev_address() to find where we might
	 * write to, and first_dev_address() on the 'safe' position.
4243	 * If this 'next' write position is after the 'safe' position,
4244	 * we must update the metadata to increase the 'safe' position.
4245	 *
4246	 * When reshaping backwards, we round in the opposite direction
4247	 * and perform the reverse test:  next write position must not be
4248	 * less than current safe position.
4249	 *
4250	 * In all this the minimum difference in data offsets
4251	 * (conf->offset_diff - always positive) allows a bit of slack,
	 * so next can be after 'safe', but not by more than offset_diff.
4253	 *
4254	 * We need to prepare all the bios here before we start any IO
4255	 * to ensure the size we choose is acceptable to all devices.
	 * That means one for each copy for write-out and an extra one for
4257	 * read-in.
4258	 * We store the read-in bio in ->master_bio and the others in
4259	 * ->devs[x].bio and ->devs[x].repl_bio.
4260	 */
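	/* Illustration (hypothetical numbers): when reshaping forwards with
	 * conf->offset_diff = 2048, if 'safe' works out to 10240 and 'next'
	 * to 12416, then next > safe + offset_diff, so need_flush is set
	 * and the metadata is updated before any copying starts.
	 */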
4261	struct r10conf *conf = mddev->private;
4262	struct r10bio *r10_bio;
4263	sector_t next, safe, last;
4264	int max_sectors;
4265	int nr_sectors;
4266	int s;
4267	struct md_rdev *rdev;
4268	int need_flush = 0;
4269	struct bio *blist;
4270	struct bio *bio, *read_bio;
4271	int sectors_done = 0;
4272
4273	if (sector_nr == 0) {
4274		/* If restarting in the middle, skip the initial sectors */
4275		if (mddev->reshape_backwards &&
4276		    conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4277			sector_nr = (raid10_size(mddev, 0, 0)
4278				     - conf->reshape_progress);
4279		} else if (!mddev->reshape_backwards &&
4280			   conf->reshape_progress > 0)
4281			sector_nr = conf->reshape_progress;
4282		if (sector_nr) {
4283			mddev->curr_resync_completed = sector_nr;
4284			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4285			*skipped = 1;
4286			return sector_nr;
4287		}
4288	}
4289
4290	/* We don't use sector_nr to track where we are up to
4291	 * as that doesn't work well for ->reshape_backwards.
4292	 * So just use ->reshape_progress.
4293	 */
4294	if (mddev->reshape_backwards) {
4295		/* 'next' is the earliest device address that we might
4296		 * write to for this chunk in the new layout
4297		 */
4298		next = first_dev_address(conf->reshape_progress - 1,
4299					 &conf->geo);
4300
4301		/* 'safe' is the last device address that we might read from
4302		 * in the old layout after a restart
4303		 */
4304		safe = last_dev_address(conf->reshape_safe - 1,
4305					&conf->prev);
4306
4307		if (next + conf->offset_diff < safe)
4308			need_flush = 1;
4309
4310		last = conf->reshape_progress - 1;
4311		sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4312					       & conf->prev.chunk_mask);
4313		if (sector_nr + RESYNC_BLOCK_SIZE/512 < last)
4314			sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512;
4315	} else {
4316		/* 'next' is after the last device address that we
4317		 * might write to for this chunk in the new layout
4318		 */
4319		next = last_dev_address(conf->reshape_progress, &conf->geo);
4320
4321		/* 'safe' is the earliest device address that we might
4322		 * read from in the old layout after a restart
4323		 */
4324		safe = first_dev_address(conf->reshape_safe, &conf->prev);
4325
4326		/* Need to update metadata if 'next' might be beyond 'safe'
4327		 * as that would possibly corrupt data
4328		 */
4329		if (next > safe + conf->offset_diff)
4330			need_flush = 1;
4331
4332		sector_nr = conf->reshape_progress;
4333		last  = sector_nr | (conf->geo.chunk_mask
4334				     & conf->prev.chunk_mask);
4335
4336		if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last)
4337			last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1;
4338	}
4339
4340	if (need_flush ||
4341	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4342		/* Need to update reshape_position in metadata */
4343		wait_barrier(conf);
4344		mddev->reshape_position = conf->reshape_progress;
4345		if (mddev->reshape_backwards)
4346			mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4347				- conf->reshape_progress;
4348		else
4349			mddev->curr_resync_completed = conf->reshape_progress;
4350		conf->reshape_checkpoint = jiffies;
4351		set_bit(MD_CHANGE_DEVS, &mddev->flags);
4352		md_wakeup_thread(mddev->thread);
4353		wait_event(mddev->sb_wait, mddev->flags == 0 ||
4354			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4355		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4356			allow_barrier(conf);
4357			return sectors_done;
4358		}
4359		conf->reshape_safe = mddev->reshape_position;
4360		allow_barrier(conf);
4361	}
4362
4363read_more:
4364	/* Now schedule reads for blocks from sector_nr to last */
4365	r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
4366	r10_bio->state = 0;
4367	raise_barrier(conf, sectors_done != 0);
4368	atomic_set(&r10_bio->remaining, 0);
4369	r10_bio->mddev = mddev;
4370	r10_bio->sector = sector_nr;
4371	set_bit(R10BIO_IsReshape, &r10_bio->state);
4372	r10_bio->sectors = last - sector_nr + 1;
4373	rdev = read_balance(conf, r10_bio, &max_sectors);
4374	BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4375
4376	if (!rdev) {
4377		/* Cannot read from here, so need to record bad blocks
4378		 * on all the target devices.
4379		 */
4380		// FIXME
4381		mempool_free(r10_bio, conf->r10buf_pool);
4382		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4383		return sectors_done;
4384	}
4385
4386	read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
4387
4388	read_bio->bi_bdev = rdev->bdev;
4389	read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4390			       + rdev->data_offset);
4391	read_bio->bi_private = r10_bio;
4392	read_bio->bi_end_io = end_sync_read;
4393	read_bio->bi_rw = READ;
4394	read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
4395	__set_bit(BIO_UPTODATE, &read_bio->bi_flags);
4396	read_bio->bi_vcnt = 0;
4397	read_bio->bi_iter.bi_size = 0;
4398	r10_bio->master_bio = read_bio;
4399	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4400
4401	/* Now find the locations in the new layout */
4402	__raid10_find_phys(&conf->geo, r10_bio);
4403
4404	blist = read_bio;
4405	read_bio->bi_next = NULL;
4406
4407	for (s = 0; s < conf->copies*2; s++) {
4408		struct bio *b;
4409		int d = r10_bio->devs[s/2].devnum;
4410		struct md_rdev *rdev2;
4411		if (s&1) {
4412			rdev2 = conf->mirrors[d].replacement;
4413			b = r10_bio->devs[s/2].repl_bio;
4414		} else {
4415			rdev2 = conf->mirrors[d].rdev;
4416			b = r10_bio->devs[s/2].bio;
4417		}
4418		if (!rdev2 || test_bit(Faulty, &rdev2->flags))
4419			continue;
4420
4421		bio_reset(b);
4422		b->bi_bdev = rdev2->bdev;
4423		b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
4424			rdev2->new_data_offset;
4425		b->bi_private = r10_bio;
4426		b->bi_end_io = end_reshape_write;
4427		b->bi_rw = WRITE;
4428		b->bi_next = blist;
4429		blist = b;
4430	}
4431
4432	/* Now add as many pages as possible to all of these bios. */
4433
4434	nr_sectors = 0;
4435	for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
4436		struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page;
4437		int len = (max_sectors - s) << 9;
4438		if (len > PAGE_SIZE)
4439			len = PAGE_SIZE;
4440		for (bio = blist; bio ; bio = bio->bi_next) {
4441			struct bio *bio2;
4442			if (bio_add_page(bio, page, len, 0))
4443				continue;
4444
4445			/* Didn't fit, must stop */
4446			for (bio2 = blist;
4447			     bio2 && bio2 != bio;
4448			     bio2 = bio2->bi_next) {
4449				/* Remove last page from this bio */
4450				bio2->bi_vcnt--;
4451				bio2->bi_iter.bi_size -= len;
4452				__clear_bit(BIO_SEG_VALID, &bio2->bi_flags);
4453			}
4454			goto bio_full;
4455		}
4456		sector_nr += len >> 9;
4457		nr_sectors += len >> 9;
4458	}
4459bio_full:
4460	r10_bio->sectors = nr_sectors;
4461
4462	/* Now submit the read */
4463	md_sync_acct(read_bio->bi_bdev, r10_bio->sectors);
4464	atomic_inc(&r10_bio->remaining);
4465	read_bio->bi_next = NULL;
4466	generic_make_request(read_bio);
4467	sector_nr += nr_sectors;
4468	sectors_done += nr_sectors;
4469	if (sector_nr <= last)
4470		goto read_more;
4471
4472	/* Now that we have done the whole section we can
4473	 * update reshape_progress
4474	 */
4475	if (mddev->reshape_backwards)
4476		conf->reshape_progress -= sectors_done;
4477	else
4478		conf->reshape_progress += sectors_done;
4479
4480	return sectors_done;
4481}
4482
4483static void end_reshape_request(struct r10bio *r10_bio);
4484static int handle_reshape_read_error(struct mddev *mddev,
4485				     struct r10bio *r10_bio);
4486static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4487{
4488	/* Reshape read completed.  Hopefully we have a block
4489	 * to write out.
4490	 * If we got a read error then we do sync 1-page reads from
4491	 * elsewhere until we find the data - or give up.
4492	 */
4493	struct r10conf *conf = mddev->private;
4494	int s;
4495
4496	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
4497		if (handle_reshape_read_error(mddev, r10_bio) < 0) {
4498			/* Reshape has been aborted */
4499			md_done_sync(mddev, r10_bio->sectors, 0);
4500			return;
4501		}
4502
4503	/* We definitely have the data in the pages, schedule the
4504	 * writes.
4505	 */
4506	atomic_set(&r10_bio->remaining, 1);
4507	for (s = 0; s < conf->copies*2; s++) {
4508		struct bio *b;
4509		int d = r10_bio->devs[s/2].devnum;
4510		struct md_rdev *rdev;
4511		if (s&1) {
4512			rdev = conf->mirrors[d].replacement;
4513			b = r10_bio->devs[s/2].repl_bio;
4514		} else {
4515			rdev = conf->mirrors[d].rdev;
4516			b = r10_bio->devs[s/2].bio;
4517		}
4518		if (!rdev || test_bit(Faulty, &rdev->flags))
4519			continue;
4520		atomic_inc(&rdev->nr_pending);
4521		md_sync_acct(b->bi_bdev, r10_bio->sectors);
4522		atomic_inc(&r10_bio->remaining);
4523		b->bi_next = NULL;
4524		generic_make_request(b);
4525	}
4526	end_reshape_request(r10_bio);
4527}
4528
4529static void end_reshape(struct r10conf *conf)
4530{
4531	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
4532		return;
4533
4534	spin_lock_irq(&conf->device_lock);
4535	conf->prev = conf->geo;
4536	md_finish_reshape(conf->mddev);
4537	smp_wmb();
4538	conf->reshape_progress = MaxSector;
4539	conf->reshape_safe = MaxSector;
4540	spin_unlock_irq(&conf->device_lock);
4541
	/* read-ahead size must cover two whole stripes, which is
	 * 2 * (raid_disks / near_copies) * chunksize
	 */
4545	if (conf->mddev->queue) {
4546		int stripe = conf->geo.raid_disks *
4547			((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
4548		stripe /= conf->geo.near_copies;
4549		if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4550			conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
4551	}
4552	conf->fullsync = 0;
4553}
4554
4555static int handle_reshape_read_error(struct mddev *mddev,
4556				     struct r10bio *r10_bio)
4557{
4558	/* Use sync reads to get the blocks from somewhere else */
4559	int sectors = r10_bio->sectors;
4560	struct r10conf *conf = mddev->private;
4561	struct {
4562		struct r10bio r10_bio;
4563		struct r10dev devs[conf->copies];
4564	} on_stack;
4565	struct r10bio *r10b = &on_stack.r10_bio;
4566	int slot = 0;
4567	int idx = 0;
4568	struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec;
4569
4570	r10b->sector = r10_bio->sector;
4571	__raid10_find_phys(&conf->prev, r10b);
4572
4573	while (sectors) {
4574		int s = sectors;
4575		int success = 0;
4576		int first_slot = slot;
4577
4578		if (s > (PAGE_SIZE >> 9))
4579			s = PAGE_SIZE >> 9;
4580
4581		while (!success) {
4582			int d = r10b->devs[slot].devnum;
4583			struct md_rdev *rdev = conf->mirrors[d].rdev;
4584			sector_t addr;
4585			if (rdev == NULL ||
4586			    test_bit(Faulty, &rdev->flags) ||
4587			    !test_bit(In_sync, &rdev->flags))
4588				goto failed;
4589
4590			addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
4591			success = sync_page_io(rdev,
4592					       addr,
4593					       s << 9,
4594					       bvec[idx].bv_page,
4595					       READ, false);
4596			if (success)
4597				break;
4598		failed:
4599			slot++;
4600			if (slot >= conf->copies)
4601				slot = 0;
4602			if (slot == first_slot)
4603				break;
4604		}
4605		if (!success) {
4606			/* couldn't read this block, must give up */
4607			set_bit(MD_RECOVERY_INTR,
4608				&mddev->recovery);
4609			return -EIO;
4610		}
4611		sectors -= s;
4612		idx++;
4613	}
4614	return 0;
4615}
4616
4617static void end_reshape_write(struct bio *bio, int error)
4618{
4619	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
4620	struct r10bio *r10_bio = bio->bi_private;
4621	struct mddev *mddev = r10_bio->mddev;
4622	struct r10conf *conf = mddev->private;
4623	int d;
4624	int slot;
4625	int repl;
4626	struct md_rdev *rdev = NULL;
4627
4628	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
4629	if (repl)
4630		rdev = conf->mirrors[d].replacement;
4631	if (!rdev) {
4632		smp_mb();
4633		rdev = conf->mirrors[d].rdev;
4634	}
4635
4636	if (!uptodate) {
4637		/* FIXME should record badblock */
4638		md_error(mddev, rdev);
4639	}
4640
4641	rdev_dec_pending(rdev, mddev);
4642	end_reshape_request(r10_bio);
4643}
4644
4645static void end_reshape_request(struct r10bio *r10_bio)
4646{
4647	if (!atomic_dec_and_test(&r10_bio->remaining))
4648		return;
4649	md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
4650	bio_put(r10_bio->master_bio);
4651	put_buf(r10_bio);
4652}
4653
4654static void raid10_finish_reshape(struct mddev *mddev)
4655{
4656	struct r10conf *conf = mddev->private;
4657
4658	if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
4659		return;
4660
4661	if (mddev->delta_disks > 0) {
4662		sector_t size = raid10_size(mddev, 0, 0);
4663		md_set_array_sectors(mddev, size);
4664		if (mddev->recovery_cp > mddev->resync_max_sectors) {
4665			mddev->recovery_cp = mddev->resync_max_sectors;
4666			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4667		}
4668		mddev->resync_max_sectors = size;
4669		set_capacity(mddev->gendisk, mddev->array_sectors);
4670		revalidate_disk(mddev->gendisk);
4671	} else {
4672		int d;
4673		for (d = conf->geo.raid_disks ;
4674		     d < conf->geo.raid_disks - mddev->delta_disks;
4675		     d++) {
4676			struct md_rdev *rdev = conf->mirrors[d].rdev;
4677			if (rdev)
4678				clear_bit(In_sync, &rdev->flags);
4679			rdev = conf->mirrors[d].replacement;
4680			if (rdev)
4681				clear_bit(In_sync, &rdev->flags);
4682		}
4683	}
4684	mddev->layout = mddev->new_layout;
4685	mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
4686	mddev->reshape_position = MaxSector;
4687	mddev->delta_disks = 0;
4688	mddev->reshape_backwards = 0;
4689}
4690
4691static struct md_personality raid10_personality =
4692{
4693	.name		= "raid10",
4694	.level		= 10,
4695	.owner		= THIS_MODULE,
4696	.make_request	= make_request,
4697	.run		= run,
4698	.free		= raid10_free,
4699	.status		= status,
4700	.error_handler	= error,
4701	.hot_add_disk	= raid10_add_disk,
4702	.hot_remove_disk= raid10_remove_disk,
4703	.spare_active	= raid10_spare_active,
4704	.sync_request	= sync_request,
4705	.quiesce	= raid10_quiesce,
4706	.size		= raid10_size,
4707	.resize		= raid10_resize,
4708	.takeover	= raid10_takeover,
4709	.check_reshape	= raid10_check_reshape,
4710	.start_reshape	= raid10_start_reshape,
4711	.finish_reshape	= raid10_finish_reshape,
4712	.congested	= raid10_congested,
4713	.mergeable_bvec	= raid10_mergeable_bvec,
4714};
4715
4716static int __init raid_init(void)
4717{
4718	return register_md_personality(&raid10_personality);
4719}
4720
4721static void raid_exit(void)
4722{
4723	unregister_md_personality(&raid10_personality);
4724}
4725
4726module_init(raid_init);
4727module_exit(raid_exit);
4728MODULE_LICENSE("GPL");
4729MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
4730MODULE_ALIAS("md-personality-9"); /* RAID10 */
4731MODULE_ALIAS("md-raid10");
4732MODULE_ALIAS("md-level-10");
4733
4734module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
4735