Lines matching refs:dd_idx in drivers/md/raid5.c
764 int dd_idx; in stripe_add_to_batch_list() local
808 dd_idx = 0; in stripe_add_to_batch_list()
809 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in stripe_add_to_batch_list()
810 dd_idx++; in stripe_add_to_batch_list()
811 if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw) in stripe_add_to_batch_list()
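The loop at lines 808-810 finds the first data slot in a stripe by skipping whichever slots hold the P and Q parity, before line 811 compares the first queued writes of the two stripes being batched. A minimal userspace sketch of that scan, assuming RAID-5 stripes model the absent Q slot as -1 (first_data_idx() is a hypothetical name, not a kernel function):

#include <stdio.h>

static int first_data_idx(int pd_idx, int qd_idx)
{
        int dd_idx = 0;

        /* at most two parity slots, so this advances at most twice */
        while (dd_idx == pd_idx || dd_idx == qd_idx)
                dd_idx++;
        return dd_idx;
}

int main(void)
{
        printf("%d\n", first_data_idx(0, 1));   /* RAID-6, P=0 Q=1 -> first data is 2 */
        printf("%d\n", first_data_idx(3, -1));  /* RAID-5, P=3, no Q -> first data is 0 */
        return 0;
}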
2535 int previous, int *dd_idx, in raid5_compute_sector() argument
2564 *dd_idx = sector_div(stripe, data_disks); in raid5_compute_sector()
2578 if (*dd_idx >= pd_idx) in raid5_compute_sector()
2579 (*dd_idx)++; in raid5_compute_sector()
2583 if (*dd_idx >= pd_idx) in raid5_compute_sector()
2584 (*dd_idx)++; in raid5_compute_sector()
2588 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; in raid5_compute_sector()
2592 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; in raid5_compute_sector()
2596 (*dd_idx)++; in raid5_compute_sector()
2612 (*dd_idx)++; /* Q D D D P */ in raid5_compute_sector()
2614 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
2615 (*dd_idx) += 2; /* D D P Q D */ in raid5_compute_sector()
2621 (*dd_idx)++; /* Q D D D P */ in raid5_compute_sector()
2623 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
2624 (*dd_idx) += 2; /* D D P Q D */ in raid5_compute_sector()
2629 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; in raid5_compute_sector()
2634 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; in raid5_compute_sector()
2640 (*dd_idx) += 2; in raid5_compute_sector()
2654 (*dd_idx)++; /* Q D D D P */ in raid5_compute_sector()
2656 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
2657 (*dd_idx) += 2; /* D D P Q D */ in raid5_compute_sector()
2670 (*dd_idx)++; /* Q D D D P */ in raid5_compute_sector()
2672 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
2673 (*dd_idx) += 2; /* D D P Q D */ in raid5_compute_sector()
2681 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; in raid5_compute_sector()
2688 if (*dd_idx >= pd_idx) in raid5_compute_sector()
2689 (*dd_idx)++; in raid5_compute_sector()
2695 if (*dd_idx >= pd_idx) in raid5_compute_sector()
2696 (*dd_idx)++; in raid5_compute_sector()
2702 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); in raid5_compute_sector()
2708 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); in raid5_compute_sector()
2714 (*dd_idx)++; in raid5_compute_sector()
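Lines 2564-2714 are the heart of raid5_compute_sector(): sector_div() splits the logical position into a stripe number and a data-disk index, then each layout case rotates dd_idx around the parity slot(s), as the `Q D D D P` and `D D P Q D` comments show. A userspace sketch of the RAID-5 left-symmetric case, the arithmetic visible at lines 2588/2592 (map_left_symmetric() is a hypothetical helper; the kernel uses sector_div() on 64-bit types rather than plain division):

#include <stdio.h>

struct mapping { long long stripe; int pd_idx; int dd_idx; };

static struct mapping map_left_symmetric(long long logical_sector,
                                         int raid_disks, int sectors_per_chunk)
{
        int data_disks = raid_disks - 1;        /* one disk's worth is parity */
        long long chunk = logical_sector / sectors_per_chunk;
        long long stripe = chunk / data_disks;
        int dd_idx = (int)(chunk % data_disks); /* data disk within the stripe */
        int pd_idx = data_disks - (int)(stripe % raid_disks); /* parity rotates left */
        struct mapping m = {
                .stripe = stripe,
                .pd_idx = pd_idx,
                /* line 2588: data slots follow the parity slot, wrapping around */
                .dd_idx = (pd_idx + 1 + dd_idx) % raid_disks,
        };
        return m;
}

int main(void)
{
        /* 4 disks, 8-sector chunks: watch parity rotate and data shift */
        for (long long s = 0; s < 6 * 3 * 8; s += 8) {
                struct mapping m = map_left_symmetric(s, 4, 8);
                printf("sector %3lld -> stripe %lld P=%d D=%d\n",
                       s, m.stripe, m.pd_idx, m.dd_idx);
        }
        return 0;
}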
2749 int dummy1, dd_idx = i; in raid5_compute_blocknr() local
2847 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx in raid5_compute_blocknr()
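Line 2847 is a self-check: raid5_compute_blocknr() inverts the layout mapping and re-runs raid5_compute_sector() on the result, complaining if the round trip fails to reproduce the original sector, dd_idx, and pd_idx. A self-contained sketch of that round trip for the left-symmetric layout (map() and unmap() are hypothetical stand-ins for the two kernel functions):

#include <assert.h>
#include <stdio.h>

static void map(long long chunk, int nd, long long *stripe, int *pd, int *dd)
{
        *stripe = chunk / (nd - 1);
        *pd = (nd - 1) - (int)(*stripe % nd);
        *dd = (*pd + 1 + (int)(chunk % (nd - 1))) % nd;
}

static long long unmap(long long stripe, int nd, int pd, int dd)
{
        /* undo the rotation: position of dd among the data slots */
        int i = (dd - (pd + 1) + nd) % nd;

        return stripe * (nd - 1) + i;
}

int main(void)
{
        int nd = 5;     /* raid_disks */

        for (long long chunk = 0; chunk < 100; chunk++) {
                long long stripe;
                int pd, dd;

                map(chunk, nd, &stripe, &pd, &dd);
                assert(dd != pd);       /* data never lands on the parity slot */
                assert(unmap(stripe, nd, pd, dd) == chunk);
        }
        printf("round trip ok\n");
        return 0;
}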
2951 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in add_stripe_bio() argument
2975 bip = &sh->dev[dd_idx].towrite; in add_stripe_bio()
2979 bip = &sh->dev[dd_idx].toread; in add_stripe_bio()
2999 sector_t sector = sh->dev[dd_idx].sector; in add_stripe_bio()
3000 for (bi=sh->dev[dd_idx].towrite; in add_stripe_bio()
3001 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && in add_stripe_bio()
3003 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { in add_stripe_bio()
3007 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) in add_stripe_bio()
3008 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) in add_stripe_bio()
3014 (unsigned long long)sh->sector, dd_idx); in add_stripe_bio()
3047 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); in add_stripe_bio()
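Lines 2999-3008 decide whether the writes queued on one device now cover its whole STRIPE_SECTORS range: a cursor advances through the sorted towrite list while each bio still overlaps it, and if the cursor clears the range, R5_OVERWRITE is set and the block needs no read-modify-write. A sketch with plain intervals standing in for bios (fully_overwritten() is a hypothetical name):

#include <stdbool.h>
#include <stdio.h>

struct bio_span { long long start, end; };      /* [start, end) in sectors */

static bool fully_overwritten(const struct bio_span *spans, int n,
                              long long dev_sector, int stripe_sectors)
{
        long long sector = dev_sector;

        for (int i = 0; i < n && sector < dev_sector + stripe_sectors; i++) {
                if (spans[i].start > sector)
                        break;                  /* gap: coverage fails */
                if (spans[i].end > sector)
                        sector = spans[i].end;  /* extend the covered prefix */
        }
        return sector >= dev_sector + stripe_sectors;
}

int main(void)
{
        struct bio_span full[] = { { 0, 3 }, { 2, 8 } };
        struct bio_span gap[]  = { { 0, 3 }, { 5, 8 } };

        printf("%d %d\n",
               fully_overwritten(full, 2, 0, 8),        /* 1: covers 0..8 */
               fully_overwritten(gap, 2, 0, 8));        /* 0: hole at 3..5 */
        return 0;
}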
3059 int dd_idx; in stripe_set_idx() local
3067 &dd_idx, sh); in stripe_set_idx()
3951 int dd_idx, j; in handle_stripe_expansion() local
3957 &dd_idx, NULL); in handle_stripe_expansion()
3966 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { in handle_stripe_expansion()
3974 tx = async_memcpy(sh2->dev[dd_idx].page, in handle_stripe_expansion()
3978 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); in handle_stripe_expansion()
3979 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); in handle_stripe_expansion()
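In handle_stripe_expansion(), raid5_compute_sector() (line 3957) locates each old block's slot in the new geometry, and lines 3974-3979 copy the page there and mark it expanded and up to date. A sketch with memcpy() and plain bit flags standing in for async_memcpy() and the kernel's R5_* flag bits:

#include <stdio.h>
#include <string.h>

#define R5_Expanded (1u << 0)
#define R5_UPTODATE (1u << 1)
#define PAGE_SZ 8

struct dev { unsigned char page[PAGE_SZ]; unsigned flags; };

static void copy_expanded(struct dev *dst, const struct dev *src)
{
        memcpy(dst->page, src->page, PAGE_SZ);  /* async_memcpy() in the kernel */
        dst->flags |= R5_Expanded | R5_UPTODATE;
}

int main(void)
{
        struct dev src = { "olddata", 0 }, dst = { { 0 }, 0 };

        copy_expanded(&dst, &src);
        printf("%s flags=%#x\n", (char *)dst.page, dst.flags);
        return 0;
}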
4785 int dd_idx; in raid5_read_one_chunk() local
4811 0, &dd_idx, NULL); in raid5_read_one_chunk()
4815 rdev = rcu_dereference(conf->disks[dd_idx].replacement); in raid5_read_one_chunk()
4818 rdev = rcu_dereference(conf->disks[dd_idx].rdev); in raid5_read_one_chunk()
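Lines 4815-4818 pick the device an aligned read is sent to: a usable replacement device is preferred over the original rdev. The kernel does this under rcu_read_lock() and also checks Faulty and recovery state, which this sketch omits; struct disk and pick_rdev() here are hypothetical stand-ins:

#include <stddef.h>
#include <stdio.h>

struct rdev { const char *name; int in_sync; };
struct disk { struct rdev *rdev, *replacement; };

static struct rdev *pick_rdev(struct disk *d)
{
        struct rdev *r = d->replacement;

        if (r && r->in_sync)    /* usable replacement: read from it */
                return r;
        return d->rdev;         /* otherwise fall back to the original */
}

int main(void)
{
        struct rdev orig = { "sda", 1 }, repl = { "sdb", 1 };
        struct disk d = { &orig, &repl };

        printf("%s\n", pick_rdev(&d)->name);    /* sdb: replacement wins */
        d.replacement = NULL;
        printf("%s\n", pick_rdev(&d)->name);    /* sda */
        return 0;
}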
5146 int dd_idx; in make_request() local
5232 &dd_idx, NULL); in make_request()
5292 !add_stripe_bio(sh, bi, dd_idx, rw, previous)) { in make_request()
5350 int dd_idx; in reshape_request() local
5526 1, &dd_idx, NULL); in reshape_request()
5530 1, &dd_idx, NULL); in reshape_request()
5688 int dd_idx; in retry_aligned_read() local
5697 0, &dd_idx, NULL); in retry_aligned_read()
5718 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { in retry_aligned_read()
5725 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); in retry_aligned_read()