Lines Matching defs:stripe

25 * When we discover that we will need to write to any block in a stripe
29 * we plug the array and queue the stripe for later.
120 /* Find first data disk in a raid6 stripe */
237 * In the following cases, the stripe cannot be released to cached
238 * lists. Therefore, we make the stripe write out and set
241 * 2. when resync is requested for the stripe.
289 /* full stripe */
427 pr_debug("remove_hash(), stripe %llu\n",
437 pr_debug("insert_hash(), stripe %llu\n",
443 /* find an idle stripe, make sure it is unhashed, and return it. */
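The remove_hash()/insert_hash() lines above manage a hash table of stripe heads keyed by sector. A minimal userspace sketch of that idea follows; the bucket count, shift and names (stripe_hash_sketch and friends) are illustrative assumptions, not the driver's actual constants:

#include <stdint.h>
#include <stdio.h>

#define NR_HASH_SKETCH 512                  /* bucket count, power of two (assumed) */
#define HASH_MASK_SKETCH (NR_HASH_SKETCH - 1)
#define STRIPE_SHIFT_SKETCH 3               /* 8 sectors per stripe unit (assumed) */

/* every sector of one stripe unit lands in the same bucket */
static unsigned int stripe_hash_sketch(uint64_t sector)
{
        return (unsigned int)((sector >> STRIPE_SHIFT_SKETCH) & HASH_MASK_SKETCH);
}

int main(void)
{
        printf("sector 1024 -> bucket %u\n", stripe_hash_sketch(1024));
        printf("sector 1031 -> bucket %u (same stripe unit)\n", stripe_hash_sketch(1031));
        return 0;
}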
574 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
587 pr_debug("init_stripe called, stripe %llu\n",
646 * Slow path. The reference count is zero which means the stripe must
647 * be on a list (sh->lru). Must remove the stripe from the list that
778 * bitmap to track stripe sectors that have been added to stripes
790 * and there is an inactive stripe available.
821 * hold a reference to a stripe and raid5_quiesce()
902 /* Only a fresh full-stripe normal-write stripe can be added to a batch list */
923 /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
967 * We must assign batch_head of this stripe within the
969 * stripe could clear BATCH_READY bit of this stripe and
970 * this stripe->batch_head doesn't get assigned, which
971 * could confuse clear_batch_ready for this stripe
977 * can still add the stripe to batch list
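Lines 902-977 spell out the batching rules: only fresh full-stripe normal writes may batch, and a batch must not cross a chunk, since pd_idx/qd_idx are derived from the chunk number. A hedged sketch of that same-chunk precondition (function name and values are assumptions, not the driver's code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool same_chunk_sketch(uint64_t sect_a, uint64_t sect_b,
                              uint32_t sectors_per_chunk)
{
        /* stripes within one chunk see the same parity-disk rotation */
        return (sect_a / sectors_per_chunk) == (sect_b / sectors_per_chunk);
}

int main(void)
{
        printf("%d\n", same_chunk_sketch(1000, 1008, 128)); /* 1: same chunk */
        printf("%d\n", same_chunk_sketch(1000, 2048, 128)); /* 0: crosses a chunk */
        return 0;
}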
1025 /* We are in a reshape, and this is a new-generation stripe,
1440 pr_debug("%s: stripe %llu\n", __func__,
1480 pr_debug("%s: stripe %llu\n", __func__,
1523 pr_debug("%s: stripe %llu\n", __func__,
1576 pr_debug("%s: stripe %llu block: %d\n",
1606 * Populates srcs in proper layout order for the stripe and returns the
1681 pr_debug("%s: stripe %llu block: %d\n",
1737 pr_debug("%s: stripe %llu block1: %d block2: %d\n",
1768 pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
1844 pr_debug("%s: stripe %llu\n", __func__,
1870 pr_debug("%s: stripe %llu\n", __func__,
1906 pr_debug("%s: stripe %llu\n", __func__,
1927 pr_debug("%s: stripe %llu\n", __func__,
2000 pr_debug("%s: stripe %llu\n", __func__,
2055 pr_debug("%s: stripe %llu\n", __func__,
2149 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
2203 pr_debug("%s: stripe %llu\n", __func__,
2225 pr_debug("%s: stripe %llu\n", __func__,
2259 pr_debug("%s: stripe %llu checkp: %d\n", __func__,
2405 /* we just created an active stripe so... */
2519 * New slots in each stripe get 'page' set to a new page.
2527 * no IO will be possible. Old stripe heads are freed once their
2962 sector_t stripe, stripe2;
2985 * Compute the stripe number
2987 stripe = chunk_number;
2988 *dd_idx = sector_div(stripe, data_disks);
2989 stripe2 = stripe;
3086 /* Same as left_asymmetric, but first stripe is
3156 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
3170 sector_t stripe;
3178 stripe = new_sector;
3266 chunk_number = stripe * data_disks + i;
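Lines 2962-3266 are fragments of the sector mapping: the logical sector is divided into a chunk number, the chunk number is split into a stripe number and a data-disk index, and the inverse direction reassembles chunk_number = stripe * data_disks + i. A userspace sketch under simplifying assumptions (no parity rotation or layout variants; plain / and % stand in for the kernel's in-place sector_div()):

#include <stdint.h>
#include <stdio.h>

struct mapping { uint64_t stripe; int dd_idx; };

static struct mapping compute_sketch(uint64_t logical_sector,
                                     uint32_t sectors_per_chunk,
                                     int data_disks)
{
        uint64_t chunk_number = logical_sector / sectors_per_chunk;
        struct mapping m = {
                .stripe = chunk_number / data_disks,        /* stripe number */
                .dd_idx = (int)(chunk_number % data_disks), /* data disk in stripe */
        };
        return m;
}

/* inverse direction, as in the chunk_number = stripe * data_disks + i line */
static uint64_t blocknr_sketch(struct mapping m, uint32_t sectors_per_chunk,
                               int data_disks, uint32_t chunk_offset)
{
        uint64_t chunk_number = m.stripe * (uint64_t)data_disks + m.dd_idx;
        return chunk_number * sectors_per_chunk + chunk_offset;
}

int main(void)
{
        struct mapping m = compute_sketch(70000, 128, 4);
        printf("stripe %llu dd_idx %d\n",
               (unsigned long long)m.stripe, m.dd_idx);
        printf("round trip: %llu\n",
               (unsigned long long)blocknr_sketch(m, 128, 4, 70000 % 128));
        return 0;
}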
3282 * schedule_reconstruction() to delay towrite to some dev of a stripe.
3287 * 1. degraded stripe has a non-overwrite to the missing dev, AND this
3288 * stripe has data in journal (for other devices).
3301 * stripe, we need to reserve (conf->raid_disks + 1) pages per stripe
3303 * operation, we only need (conf->max_degraded + 1) pages per stripe.
3307 * Note: To make sure the stripe makes progress, we only delay
3314 * based on data in stripe cache. The array is read-only to upper
3369 * stripe cache
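The reservation arithmetic quoted at lines 3301-3303 is easy to sanity-check. The sketch below simply evaluates both expressions for a hypothetical 8-disk RAID6 (max_degraded = 2); the numbers are examples, not measured driver behaviour:

#include <stdio.h>

int main(void)
{
        int raid_disks = 8, max_degraded = 2;   /* hypothetical RAID6 array */
        printf("whole stripe in journal: %d pages per stripe\n", raid_disks + 1);
        printf("after the operation:     %d pages per stripe\n", max_degraded + 1);
        return 0;
}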
3440 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
3451 pr_debug("checking bi b#%llu to stripe s#%llu\n",
3475 * stripe are allowed because for a single stripe_head we can
3548 pr_debug("added bi b#%llu to stripe s#%llu, disk %d, logical %llu\n",
3561 * STRIPE_BIT_DELAY. This is important as once a stripe
3579 * Each stripe/dev can have one or more bios attached.
3601 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
3607 int chunk_offset = sector_div(stripe, sectors_per_chunk);
3611 stripe * (disks - conf->max_degraded)
3929 pr_debug("Computing stripe %llu block %d\n",
3958 pr_debug("Computing stripe %llu blocks %d,%d\n",
3992 * is already in flight, or if the stripe contents are in the
3999 * For degraded stripe with data in journal, do not handle
4000 * read requests yet, instead, flush the stripe to raid
4096 * SCSI discard will change some bio fields and the stripe has
4097 * no updated data, so remove it from hash list and the stripe
4331 /* check that a write has not made the stripe insync */
4352 * STRIPE_INSYNC not set and let the stripe be handled again
4460 /* check that a write has not made the stripe insync */
4579 /* We have read all the blocks in this stripe and now we need to
4580 * copy some of them into a target stripe for the expansion.
4597 /* so far only the early blocks of this stripe
4635 * handle_stripe - do things to a stripe.
4637 * We lock the stripe by setting STRIPE_ACTIVE and then examine the
4839 * Return '1' if this is a member of a batch, or '0' if it is a lone stripe or
4854 * this stripe could be added to a batch list before we check
4897 "stripe state: %lx\n", sh->state);
4900 "head stripe state: %lx\n", head_sh->state);
4949 * handle_stripe should not continue to handle a batched stripe; only
4950 * the head of a batch list or a lone stripe can continue. Otherwise we
4952 * is set for the batched stripe.
4985 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
5027 * the stripe if there is data that needs writing to the raid disks
5119 * When the stripe finishes a full journal write cycle (write to journal
5148 * stripe under reclaim: !caching && injournal
5153 /* stripe under reclaim: !caching && injournal */
5164 /* maybe we need to check and possibly fix the parity for this stripe
5546 /* __get_priority_stripe - get the next stripe to process
5548 * Full stripe writes are allowed to pass preread active stripes up until
5552 * stripe with in flight i/o. The bypass_count will be reset when the
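Lines 5546-5552 describe the bypass accounting in __get_priority_stripe: full-stripe writes may pass preread-active stripes until a threshold is exceeded, and the count resets once the held stripes are served. The toy model below illustrates that back-and-forth; the struct, field names and threshold value are assumptions for illustration, not the driver's data structures:

#include <stdio.h>

#define BYPASS_THRESHOLD_SKETCH 1

struct sched_sketch {
        int handle_len;   /* normal-priority stripes */
        int hold_len;     /* preread-active stripes being bypassed */
        int bypass_count;
};

/* returns 'h' when the handle list is served, 'p' for the hold list */
static char pick_sketch(struct sched_sketch *s)
{
        if (s->handle_len &&
            (!s->hold_len || s->bypass_count < BYPASS_THRESHOLD_SKETCH)) {
                s->handle_len--;
                s->bypass_count++;      /* a hold-list stripe was bypassed */
                return 'h';
        }
        if (s->hold_len) {
                s->hold_len--;
                s->bypass_count = 0;    /* reset once the hold list is served */
                return 'p';
        }
        return '-';
}

int main(void)
{
        struct sched_sketch s = { .handle_len = 3, .hold_len = 2 };
        for (int i = 0; i < 5; i++)
                putchar(pick_sketch(&s));
        putchar('\n');                  /* prints "hphph" with threshold 1 */
        return 0;
}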
5672 * STRIPE_ON_UNPLUG_LIST clear but the stripe
5923 * to the stripe that we think it is, we will have
5952 /* cannot get a stripe, just give up */
5960 * Expansion moved on while waiting for a stripe.
5965 * won't proceed until we finish with the stripe.
5998 /* we only need a flush for one stripe */
6127 * Let's start with the stripe with the lowest chunk offset in the first
6151 * otherwise the batch_last stripe head could prevent
6199 * into the destination stripe and release that stripe.
6357 /* If any of this stripe is beyond the end of the old
6388 /* Ok, those stripes are ready. We can start scheduling
6496 * stripe, and as resync_max will always be on a chunk boundary,
6528 /* make sure we don't swamp the stripe cache if someone else
6561 * So we do one stripe head at a time and record in
6585 /* already done this stripe */
6591 /* failed to get a stripe - must wait */
7617 pr_info("md/raid:%s: force stripe size %d for reshape\n",
7631 * Losing a stripe head costs more than the time to refill it,
7699 int data_disks, stripe;
7709 * We can only discard a whole stripe. It doesn't make sense to
7712 stripe = roundup_pow_of_two(data_disks * (mddev->chunk_sectors << 9));
7718 lim.discard_granularity = stripe;
7728 * Consider a scenario: discard a stripe (the stripe could be
7730 * stripe (the stripe could be inconsistent again depending on which
7731 * disks are used to calculate parity); the disk is broken; the stripe
7740 lim.max_discard_sectors < (stripe >> 9) ||
7741 lim.discard_granularity < stripe)
7745 * Requests require having a bitmap for each stripe.
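Lines 7699-7741 set up discard limits: the granularity is a whole stripe, computed in bytes as roundup_pow_of_two(data_disks * (chunk_sectors << 9)). A userspace sketch of that calculation, with roundup_pow_of_two() modelled by a simple loop and illustrative numbers:

#include <stdint.h>
#include <stdio.h>

static uint64_t roundup_pow_of_two_sketch(uint64_t n)
{
        uint64_t p = 1;
        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        int data_disks = 3;             /* e.g. 5-disk RAID6: 5 - 2 */
        uint64_t chunk_sectors = 1024;  /* 512 KiB chunks */
        uint64_t stripe = roundup_pow_of_two_sketch(
                        data_disks * (chunk_sectors << 9)); /* bytes, as in the source */
        printf("discard_granularity = %llu bytes\n",
               (unsigned long long)stripe); /* 1.5 MiB rounds up to 2 MiB */
        return 0;
}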
7802 * Difficulties arise if the stripe we would write to
7803 * next is at or after the stripe we would read from next.
7831 /* reshape_position must be on a new-stripe boundary, and one
7842 pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n",
7847 /* here_new is the stripe we will write to */
7850 /* here_old is the first stripe that we might need to read
7873 /* Reading from the same stripe as writing to - bad */
8329 * We need a minimum of one full stripe, and for sensible progress