Lines matching defs:dd_idx (drivers/md/raid5.c)

921 int dd_idx;
952 dd_idx = 0;
953 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
954 dd_idx++;
955 if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf ||
956 bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
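
The cluster above (stripe_add_to_batch_list) advances dd_idx past the parity slots so it names the first data disk, whose pending write bio is then compared between the batch head and the candidate stripe. A minimal userspace sketch of that skip-parity idiom; pd_idx/qd_idx are illustrative stand-ins for sh->pd_idx and sh->qd_idx, not the kernel code:

    /* Find the first data-disk slot by stepping over the one or two
     * parity slots; qd_idx is -1 on RAID5, so only pd_idx is skipped. */
    static int first_data_disk(int pd_idx, int qd_idx)
    {
            int dd_idx = 0;

            while (dd_idx == pd_idx || dd_idx == qd_idx)
                    dd_idx++;
            return dd_idx;
    }

    #include <stdio.h>
    int main(void)
    {
            /* e.g. RAID6 with parity on slots 0 and 1: first data disk is 2 */
            printf("%d\n", first_data_disk(0, 1));
            return 0;
    }
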
2959 int previous, int *dd_idx,
2988 *dd_idx = sector_div(stripe, data_disks);
3002 if (*dd_idx >= pd_idx)
3003 (*dd_idx)++;
3007 if (*dd_idx >= pd_idx)
3008 (*dd_idx)++;
3012 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
3016 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
3020 (*dd_idx)++;
3036 (*dd_idx)++; /* Q D D D P */
3038 } else if (*dd_idx >= pd_idx)
3039 (*dd_idx) += 2; /* D D P Q D */
3045 (*dd_idx)++; /* Q D D D P */
3047 } else if (*dd_idx >= pd_idx)
3048 (*dd_idx) += 2; /* D D P Q D */
3053 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
3058 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
3064 (*dd_idx) += 2;
3078 (*dd_idx)++; /* Q D D D P */
3080 } else if (*dd_idx >= pd_idx)
3081 (*dd_idx) += 2; /* D D P Q D */
3094 (*dd_idx)++; /* Q D D D P */
3096 } else if (*dd_idx >= pd_idx)
3097 (*dd_idx) += 2; /* D D P Q D */
3105 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
3112 if (*dd_idx >= pd_idx)
3113 (*dd_idx)++;
3119 if (*dd_idx >= pd_idx)
3120 (*dd_idx)++;
3126 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
3132 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
3138 (*dd_idx)++;
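
Lines 2959-3138 are the layout switch inside raid5_compute_sector: *dd_idx starts as the chunk's position among the data disks (from sector_div) and is then rotated around the parity slot(s) according to the array's layout algorithm; the "Q D D D P" and "D D P Q D" comments show where RAID6 puts P and Q. A self-contained sketch of just one arm, RAID5 left-symmetric (the md default); the names, chunk granularity, and main() are illustrative, not the kernel's API:

    #include <stdio.h>

    /* Map a logical chunk number to (pd_idx, dd_idx) the way the
     * ALGORITHM_LEFT_SYMMETRIC arm above does: parity moves back one
     * disk per stripe, and data fills the slots after it. */
    static void left_symmetric(unsigned long long chunk, int raid_disks,
                               int *pd_idx, int *dd_idx)
    {
            int data_disks = raid_disks - 1;
            unsigned long long stripe = chunk / data_disks;

            *dd_idx = (int)(chunk % data_disks);   /* position among data chunks */
            *pd_idx = data_disks - (int)(stripe % raid_disks);
            *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; /* data follows parity */
    }

    int main(void)
    {
            int raid_disks = 5, pd, dd;

            for (unsigned long long chunk = 0; chunk < 20; chunk++) {
                    left_symmetric(chunk, raid_disks, &pd, &dd);
                    printf("chunk %2llu -> disk %d (parity on %d)\n",
                           chunk, dd, pd);
            }
            return 0;
    }
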
3173 int dummy1, dd_idx = i;
3271 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
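
Line 3271 is the sanity check in raid5_compute_blocknr: after inverting the layout (disk slot back to logical sector), the forward mapping is recomputed and must reproduce the same stripe sector, dd_idx, pd_idx and qd_idx. A round-trip sketch for the left-symmetric case only, under the same illustrative naming as above:

    #include <assert.h>

    /* forward: logical chunk -> parity slot and data slot */
    static void ls_forward(unsigned long long chunk, int raid_disks,
                           int *pd_idx, int *dd_idx)
    {
            int data_disks = raid_disks - 1;
            unsigned long long stripe = chunk / data_disks;

            *pd_idx = data_disks - (int)(stripe % raid_disks);
            *dd_idx = (*pd_idx + 1 + (int)(chunk % data_disks)) % raid_disks;
    }

    /* inverse: (stripe, data slot) -> logical chunk, mirroring the
     * "i -= (pd_idx + 1)" style step of raid5_compute_blocknr */
    static unsigned long long ls_inverse(unsigned long long stripe, int disk,
                                         int raid_disks)
    {
            int data_disks = raid_disks - 1;
            int pd_idx = data_disks - (int)(stripe % raid_disks);
            int i = disk - (pd_idx + 1);

            if (i < 0)
                    i += raid_disks;
            return stripe * data_disks + i;
    }

    int main(void)
    {
            int raid_disks = 5, pd, dd;

            for (unsigned long long chunk = 0; chunk < 10000; chunk++) {
                    ls_forward(chunk, raid_disks, &pd, &dd);
                    assert(ls_inverse(chunk / (raid_disks - 1), dd,
                                      raid_disks) == chunk);
            }
            return 0;
    }
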
3446 int dd_idx, int forwrite)
3459 bip = &sh->dev[dd_idx].towrite;
3461 bip = &sh->dev[dd_idx].toread;
3488 (i == dd_idx || sh->dev[i].towrite)) {
3506 int dd_idx, int forwrite, int previous)
3513 bip = &sh->dev[dd_idx].towrite;
3517 bip = &sh->dev[dd_idx].toread;
3535 sector_t sector = sh->dev[dd_idx].sector;
3536 for (bi=sh->dev[dd_idx].towrite;
3537 sector < sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf) &&
3539 bi = r5_next_bio(conf, bi, sh->dev[dd_idx].sector)) {
3543 if (sector >= sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf))
3544 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags))
3549 (*bip)->bi_iter.bi_sector, sh->sector, dd_idx,
3550 sh->dev[dd_idx].sector);
3584 int dd_idx, int forwrite, int previous)
3588 if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) {
3589 set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
3594 __add_stripe_bio(sh, bi, dd_idx, forwrite, previous);
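
Lines 3446-3594 (stripe_bio_overlaps, __add_stripe_bio and the add_stripe_bio wrapper) attach a bio to the per-device ->towrite/->toread list, then walk that sorted list to decide whether the pending writes now cover the whole stripe chunk, which sets R5_OVERWRITE and lets the stripe skip reading old data. A sketch of that coverage scan; the struct and names are stand-ins, not the kernel's bio types:

    #include <stdbool.h>
    #include <stddef.h>

    struct mini_bio {
            unsigned long long sector;      /* first sector written */
            unsigned int nr_sectors;        /* length of the write */
            struct mini_bio *next;          /* list sorted by ->sector */
    };

    /* Return true when the sorted, possibly overlapping writes cover
     * every sector in [dev_sector, dev_sector + stripe_sectors). */
    static bool fully_overwritten(const struct mini_bio *bi,
                                  unsigned long long dev_sector,
                                  unsigned int stripe_sectors)
    {
            unsigned long long sector = dev_sector;

            for (; bi && sector < dev_sector + stripe_sectors &&
                   bi->sector <= sector; bi = bi->next)
                    if (bi->sector + bi->nr_sectors >= sector)
                            sector = bi->sector + bi->nr_sectors;
            return sector >= dev_sector + stripe_sectors; /* -> R5_OVERWRITE */
    }

    int main(void)
    {
            struct mini_bio b2 = { 32, 32, NULL };  /* covers [32,64) */
            struct mini_bio b1 = { 0, 40, &b2 };    /* covers [0,40)  */

            /* together they cover a whole 64-sector chunk */
            return fully_overwritten(&b1, 0, 64) ? 0 : 1;
    }
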
3606 int dd_idx;
3614 &dd_idx, sh);
4587 int dd_idx, j;
4593 &dd_idx, NULL);
4603 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
4611 tx = async_memcpy(sh2->dev[dd_idx].page,
4612 sh->dev[i].page, sh2->dev[dd_idx].offset,
4616 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
4617 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
5455 int dd_idx;
5464 &dd_idx, NULL);
5470 rdev = conf->disks[dd_idx].replacement;
5473 rdev = conf->disks[dd_idx].rdev;
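
Lines 5455-5473 come from the stripe-cache bypass for aligned reads (raid5_read_one_chunk): once dd_idx names the data disk holding the sector, the read is steered to that slot's replacement device if it is healthy and has been rebuilt past the sectors needed, otherwise to the primary rdev. A sketch of the selection with illustrative types:

    #include <stdbool.h>
    #include <stddef.h>

    struct mini_rdev {
            bool faulty;
            unsigned long long recovery_offset; /* recovered up to here */
    };

    /* Prefer the replacement only when it is present, healthy, and
     * rebuilt past the end of the sectors this read touches. */
    static struct mini_rdev *pick_rdev(struct mini_rdev *replacement,
                                       struct mini_rdev *primary,
                                       unsigned long long end_sector)
    {
            if (replacement && !replacement->faulty &&
                replacement->recovery_offset >= end_sector)
                    return replacement;
            return primary;
    }

    int main(void)
    {
            struct mini_rdev repl = { false, 1000 }, prim = { false, ~0ULL };

            /* a read ending at sector 500 can use the replacement */
            return pick_rdev(&repl, &prim, 500) == &repl ? 0 : 1;
    }
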
5832 int dd_idx;
5834 for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
5835 if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
5838 min_sector = min(min_sector, sh->dev[dd_idx].sector);
5839 max_sector = max(max_sector, sh->dev[dd_idx].sector);
5858 int dd_idx;
5863 for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
5864 struct r5dev *dev = &sh->dev[dd_idx];
5866 if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
5873 if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) {
5883 for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
5884 struct r5dev *dev = &sh->dev[dd_idx];
5886 if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
5893 __add_stripe_bio(sh, bi, dd_idx, forwrite, previous);
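
Lines 5858-5893 (add_all_stripe_bios) apply the same overlap test and attach step as above, but across every data device of the stripe in two passes: the first pass only checks for conflicts, and the bio is attached in the second pass, so the add is all-or-nothing. A simplified sketch of that check-then-commit shape; names and the busy flag are illustrative, not the kernel's R5_Overlap handling:

    #include <stdbool.h>

    struct mini_dev { bool busy; };

    /* Pass 1: any conflicting data device aborts the whole add.
     * Pass 2: only reached when every device accepted, so either all
     * data devices take the bio or none do. pd/qd are parity slots. */
    static bool add_to_all_devs(struct mini_dev *devs, int ndevs,
                                int pd, int qd)
    {
            int i;

            for (i = 0; i < ndevs; i++)
                    if (i != pd && i != qd && devs[i].busy)
                            return false;           /* conflict: add nothing */
            for (i = 0; i < ndevs; i++)
                    if (i != pd && i != qd)
                            devs[i].busy = true;    /* commit to every device */
            return true;
    }

    int main(void)
    {
            struct mini_dev devs[5] = { { false }, { false }, { false },
                                        { false }, { false } };

            return add_to_all_devs(devs, 5, 4, -1) ? 0 : 1; /* RAID5: qd = -1 */
    }
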
5912 int seq, dd_idx;
5942 &dd_idx, NULL);
6032 int dd_idx;
6039 sector = raid5_compute_sector(conf, r_sector, 0, &dd_idx, &sh);
6047 dd_idx++;
6048 while (dd_idx == sh.pd_idx || dd_idx == sh.qd_idx)
6049 dd_idx++;
6050 if (dd_idx >= raid_disks)
6209 int dd_idx;
6395 1, &dd_idx, NULL);
6399 1, &dd_idx, NULL);
6565 * there will be only one 'dd_idx' and only one call to raid5_compute_sector is needed.
6568 int dd_idx;
6576 0, &dd_idx, NULL);
6597 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {
6604 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
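
The comment at line 6565 (above retry_aligned_read) relies on dd_idx being a function of the logical chunk number alone: an aligned read never crosses a chunk boundary, so every sector it touches maps to the same chunk and therefore the same dd_idx, and one raid5_compute_sector call covers the whole bio. A two-line arithmetic check; the chunk size is illustrative:

    #include <assert.h>

    int main(void)
    {
            unsigned int sectors_per_chunk = 1024;        /* 512 KiB chunks */
            unsigned long long first = 4096, last = 5119; /* same chunk */

            /* equal chunk numbers guarantee an equal dd_idx for the bio */
            assert(first / sectors_per_chunk == last / sectors_per_chunk);
            return 0;
    }
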