Searched refs:disks (Results 1 - 25 of 36) sorted by last modified time


/linux-master/fs/bcachefs/
ec.c
33 static void raid5_recov(unsigned disks, unsigned failed_idx, argument
38 BUG_ON(failed_idx >= disks);
43 while (i < disks) {
44 nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
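The ec.c loop above XORs the surviving buffers into the failed slot in MAX_XOR_BLOCKS-sized batches. A minimal userspace sketch of the same single-failure recovery, with the batching dropped (function and buffer names are illustrative, not bcachefs's):

#include <stddef.h>
#include <stdint.h>

/* Recover bufs[failed_idx] by XORing every surviving buffer together;
 * valid because parity P = D0 ^ D1 ^ ..., so any one missing term
 * equals the XOR of all the others. */
static void raid5_recov_sketch(unsigned disks, unsigned failed_idx,
                               uint8_t **bufs, size_t bytes)
{
        for (size_t b = 0; b < bytes; b++) {
                uint8_t acc = 0;

                for (unsigned i = 0; i < disks; i++)
                        if (i != failed_idx)
                                acc ^= bufs[i][b];
                bufs[failed_idx][b] = acc;
        }
}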
/linux-master/drivers/md/
raid1.c
529 * disks. Setting the Returned bit ensures that this
783 /* At least two disks to choose from so failfast is OK */
826 * If all disks are rotational, choose the closest disk. If any disk is
829 * mixed rotational/non-rotational disks depending on workload.
844 * 2) Now that there is no resync, loop through all disks, skipping slow
845 * disks and disks with bad blocks for now. Only pay attention to key disk
848 * 3) If we've made it this far, now look for disks with bad blocks and choose
1427 int i, disks; local
1483 disks
2328 int disks = conf->raid_disks * 2; local
[all...]
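The numbered comment above (raid1.c:826-848) describes the read-balance heuristic in prose. A hedged, much-simplified sketch of step 2's skip-and-pick pass; the struct and its fields are placeholders, not raid1.c's types:

#include <stdbool.h>

struct mirror_sketch {
        bool in_sync;
        bool slow;              /* e.g. a write-mostly member */
        bool has_badblocks;     /* bad blocks overlap the read range */
        int pending;            /* outstanding I/O; lower is better */
};

/* Loop through all disks, skipping slow disks and disks with bad
 * blocks, and keep the least-loaded candidate (-1 if none remain). */
static int read_balance_sketch(const struct mirror_sketch *m, int disks)
{
        int best = -1;

        for (int i = 0; i < disks; i++) {
                if (!m[i].in_sync || m[i].slow || m[i].has_badblocks)
                        continue;
                if (best < 0 || m[i].pending < m[best].pending)
                        best = i;
        }
        return best;
}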
dm-raid.c
903 list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks);
909 list_add_tail(&rs->journal_dev.rdev.same_set, &rs->md.disks);
1392 /* Define the +/-# of disks to add to/remove from the given raid set */
1651 rs->ti->error = "Bogus raid10 data copies or delta disks";
1748 /* raid0 with multiple disks -> raid4/5/6 */
1765 /* Can takeover raid10_near with raid disks divisible by data copies! */
1786 /* raid10_{near,far} with 2 disks -> raid4/5 */
1793 /* raid1 with 2 disks -> raid4/5 */
1815 /* raid4 -> raid1/5 with 2 disks */
1837 /* raid5 with 2 disks
[all...]
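Several takeover paths above are gated on divisibility, e.g. "Can takeover raid10_near with raid disks divisible by data copies!". A hedged sketch of that constraint (the helper name is illustrative):

#include <stdbool.h>

/* A raid10_near set only maps cleanly onto a striped level when each
 * group of 'copies' mirrors corresponds to a whole number of stripes. */
static bool raid10_near_takeover_ok(unsigned int raid_disks,
                                    unsigned int copies)
{
        return copies > 1 && raid_disks % copies == 0;
}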
raid5.c
127 if (sh->qd_idx == sh->disks - 1)
138 /* When walking through the disks in a raid5, starting at raid6_d0,
139 * we need to map each disk to a 'slot', where the data disks are slot
233 for (i = sh->disks; i--; )
502 init_stripe_shared_pages(struct stripe_head *sh, struct r5conf *conf, int disks) argument
511 nr_pages = (disks + cnt - 1) / cnt;
592 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
597 for (i = sh->disks; i--; ) {
699 struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);
702 rdev = READ_ONCE(conf->disks[
1143 int i, disks = sh->disks; local
1562 int disks = sh->disks; local
1616 int disks = sh->disks; local
1659 int disks = sh->disks; local
1723 int i, count, disks = sh->disks; local
1859 int disks = sh->disks; local
1923 int disks = sh->disks; local
1994 int disks = sh->disks; local
2042 int disks = sh->disks; local
2213 int disks = sh->disks; local
2277 int overlap_clear = 0, i, disks = sh->disks; local
2357 alloc_stripe(struct kmem_cache *sc, gfp_t gfp, int disks, struct r5conf *conf) argument
2722 int disks = sh->disks, i; local
2850 int disks = sh->disks, i; local
3340 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; local
3608 int disks = previous ? conf->previous_raid_disks : conf->raid_disks; local
3618 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s, int disks) argument
3792 need_this_block(struct stripe_head *sh, struct stripe_head_state *s, int disk_idx, int disks) argument
3899 fetch_block(struct stripe_head *sh, struct stripe_head_state *s, int disk_idx, int disks) argument
3985 handle_stripe_fill(struct stripe_head *sh, struct stripe_head_state *s, int disks) argument
4026 handle_stripe_clean_event(struct r5conf *conf, struct stripe_head *sh, int disks) argument
4141 handle_stripe_dirtying(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s, int disks) argument
4305 handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s, int disks) argument
4398 handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s, int disks) argument
4651 int disks = sh->disks; local
4943 int disks = sh->disks; local
[all...]
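The comment at raid5.c:138 describes mapping disks to syndrome "slots". A hedged, simplified sketch: walking disks from raid6_d0(), data disks take slots 0..syndrome_disks-1 while P and Q are pinned to the two slots after them (the real raid6_idx_to_slot() additionally handles DDF layouts):

static int idx_to_slot_sketch(int idx, int pd_idx, int qd_idx,
                              int syndrome_disks, int *count)
{
        if (idx == pd_idx)
                return syndrome_disks;          /* P parity slot */
        if (idx == qd_idx)
                return syndrome_disks + 1;      /* Q syndrome slot */
        return (*count)++;                      /* next free data slot */
}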
raid5-ppl.c
36 * disks. Every stripe_head in the entry must write to the same data disks.
81 * If write-back cache is enabled for any of the disks in the array, its data
161 int disks = sh->disks; local
176 * rmw: xor old data and parity from updated disks
182 /* rcw: xor data from all non-updated disks */
183 for (i = disks; i--;) {
193 NULL, sh, (void *) (srcs + sh->disks + 2));
299 for (i = 0; i < sh->disks;
[all...]
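The comments at raid5-ppl.c:176 and :182 give the two ways a partial parity can be formed; both reduce to the XOR of the stripe's not-updated data blocks. A hedged sketch with plain byte buffers standing in for stripe pages:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* rmw: XOR the old parity with the old data of the disks being
 * rewritten; rcw: XOR the data of all disks NOT being updated.
 * Either way pp ends up as the XOR of the untouched data blocks. */
static void partial_parity_sketch(uint8_t **data, const uint8_t *old_parity,
                                  const bool *updated, int data_disks,
                                  size_t bytes, bool rmw, uint8_t *pp)
{
        for (size_t b = 0; b < bytes; b++) {
                uint8_t acc = rmw ? old_parity[b] : 0;

                for (int i = 0; i < data_disks; i++)
                        if (updated[i] == rmw) /* rmw: updated set; rcw: the rest */
                                acc ^= data[i][b];
                pp[b] = acc;
        }
}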
raid10.c
819 /* At least 2 disks to choose from so failfast is OK */
1903 int disks, ncopies; local
1905 disks = conf->prev.raid_disks;
1908 disks = conf->geo.raid_disks;
1922 this = (this+1) % disks;
1926 first = (first + ncopies) % disks;
2030 * Find all non-in_sync disks within the RAID10 configuration
3816 int layout, chunk, disks; local
3821 disks = mddev->raid_disks - mddev->delta_disks;
3826 disks
[all...]
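The modular walk above (raid10.c:1922-1926) checks that the array still has "enough" members: every copy set must retain at least one usable disk. A hedged sketch for the near layout, where ncopies adjacent disks hold the same data (the in_sync callback is a placeholder for the rdev flag test):

#include <stdbool.h>

static bool enough_sketch(int disks, int ncopies, bool (*in_sync)(int))
{
        int first = 0;

        do {
                int cnt = 0, this = first;
                int n = ncopies;

                while (n--) {
                        if (in_sync(this))
                                cnt++;
                        this = (this + 1) % disks;
                }
                if (cnt == 0)
                        return false;   /* an entire copy set is gone */
                first = (first + ncopies) % disks;
        } while (first != 0);
        return true;                    /* wrapped around: all sets OK */
}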
md.h
313 struct list_head disks; member in struct:mddev
406 /* resync even though the same disks are shared among md-devices */
737 * iterates through the 'same array disks' ringlist
740 list_for_each_entry(rdev, &((mddev)->disks), same_set)
743 list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)
746 list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
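The md.h iterators above are the standard way to walk an mddev's disk ringlist. A small hedged usage sketch (the counting function itself is illustrative); the _rcu variant requires rcu_read_lock() across the walk:

static int count_in_sync(struct mddev *mddev)
{
        struct md_rdev *rdev;
        int cnt = 0;

        rcu_read_lock();
        rdev_for_each_rcu(rdev, mddev)
                if (test_bit(In_sync, &rdev->flags))
                        cnt++;
        rcu_read_unlock();
        return cnt;
}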
md.c
692 if (mddev->raid_disks || !list_empty(&mddev->disks) ||
748 INIT_LIST_HEAD(&mddev->disks);
1320 sb->disks[rdev->desc_nr].state & ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1449 if (sb->disks[rdev->desc_nr].state & (
1467 desc = sb->disks + rdev->desc_nr;
1503 * 1/ zero out disks
1505 * 3/ any empty disks < next_spare become removed
1507 * disks[0] gets initialised to REMOVED because
1567 sb->disks[0].state = (1<<MD_DISK_REMOVED);
1588 d = &sb->disks[rdev
[all...]
md-bitmap.c
168 /* Iterate the disks of an mddev, using rcu to protect access to the
184 rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
189 list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
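The seeding at md-bitmap.c:184 is a deliberate trick: list_entry() on the list head itself yields a pseudo-entry whose same_set link is the head, so list_for_each_entry_continue_rcu() begins at the first real rdev, and a later pass can resume after the last rdev visited instead. A hedged sketch of that restartable walk (walk_disks and visit are illustrative):

static void walk_disks(struct mddev *mddev, struct md_rdev *resume_after,
                       void (*visit)(struct md_rdev *))
{
        struct md_rdev *rdev;

        rcu_read_lock();
        rdev = resume_after ?:
               list_entry(&mddev->disks, struct md_rdev, same_set);
        list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set)
                visit(rdev);
        rcu_read_unlock();
}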
raid5.h
215 int disks; /* disks in stripe */ member in struct:stripe_head
216 int overwrite_disks; /* total overwrite disks in stripe,
271 } dev[]; /* allocated depending on RAID geometry ("disks" member) */
674 int pool_size; /* number of disks in stripeheads in pool */
676 struct disk_info *disks; member in struct:r5conf
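Because dev[] at raid5.h:271 is a flexible array, each stripe_head is sized for its disk count when allocated. A hedged sketch of such an allocation (the real alloc_stripe() at raid5.c:2357 draws from a kmem_cache that was sized the same way):

#include <linux/overflow.h>
#include <linux/slab.h>

/* One allocation covers the header plus 'disks' r5dev slots. */
static struct stripe_head *alloc_sh_sketch(int disks, gfp_t gfp)
{
        struct stripe_head *sh;

        sh = kzalloc(struct_size(sh, dev, disks), gfp);
        if (sh)
                sh->disks = disks;
        return sh;
}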
raid5-cache.c
69 * Stripes in caching phase do not write the raid disks. Instead, all
78 * - write data and parity to raid disks
206 * first and then start moving data to raid disks, there is no requirement to
308 struct stripe_head *sh, int disks)
312 for (i = sh->disks; i--; ) {
470 for (i = sh->disks; i--; )
486 for (i = sh->disks; i--; )
926 for (i = 0; i < sh->disks; i++) {
991 * data from log to raid disks), so we shouldn't wait for reclaim here
1015 for (i = 0; i < sh->disks;
307 r5c_handle_cached_data_endio(struct r5conf *conf, struct stripe_head *sh, int disks) argument
2627 r5c_try_caching_write(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s, int disks) argument
[all...]
md-autodetect.c
180 if (!list_empty(&mddev->disks) || mddev->raid_disks) {
raid5-log.h
16 struct stripe_head_state *s, int disks);
23 struct stripe_head *sh, int disks);
/linux-master/drivers/block/
floppy.c
87 /* 1995/4/24 -- Dan Fandrich -- added support for Commodore 1581 3.5" disks
423 * The LSB (bit 2) is flipped. For most disks, the first sector
425 * disks (such as Ensoniq EPS 16plus) it is 0 (represented as 0x01<<2).
426 * For Amstrad CPC disks it is 0xC1 (represented as 0xC0<<2).
478 static struct gendisk *disks[N_DRIVE][ARRAY_SIZE(floppy_type)]; variable in typeref:struct:gendisk
2586 /* 2M disks have phantom sectors on the first track */
3417 * We do this in order to provide a means to eject floppy disks before
4049 set_capacity(disks[drive][ITYPE(new_dev)], floppy_sizes[new_dev]);
4541 disks[drive][type] = disk;
4557 if (disks[driv
[all...]
swim3.c
12 * handle GCR disks
42 static struct gendisk *disks[MAX_FLOPPIES]; variable in typeref:struct:gendisk
842 struct request_queue *q = disks[fs->index]->queue;
1236 disks[floppy_count++] = disk;
/linux-master/include/uapi/linux/raid/
md_p.h
32 * 128 - 511 : 12 32-word descriptors of the disks in the raid set.
144 __u32 nr_disks; /* 9 total disks in the raid set */
145 __u32 raid_disks; /* 10 disks in a fully functional raid set */
158 __u32 active_disks; /* 2 Number of currently active disks */
159 __u32 working_disks; /* 3 Number of working disks */
160 __u32 failed_disks; /* 4 Number of failed disks */
161 __u32 spare_disks; /* 5 Number of spare disks */
197 mdp_disk_t disks[MD_SB_DISKS]; member in struct:mdp_superblock_s
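The layout comment at md_p.h:32 pins the descriptor region down exactly: 12 descriptors of 32 words fill superblock words 128-511. A hedged arithmetic check; the macro names mirror md_p.h, and the values are taken from that comment:

#define MD_SB_DESCRIPTOR_WORDS  32
#define MD_SB_DISKS_WORDS       384     /* words 128..511 */
#define MD_SB_DISKS             (MD_SB_DISKS_WORDS / MD_SB_DESCRIPTOR_WORDS)

_Static_assert(MD_SB_DISKS == 12, "12 descriptors per the layout comment");
_Static_assert(128 + MD_SB_DISKS_WORDS == 512, "descriptors end at word 511");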
/linux-master/lib/raid6/
algos.c
156 void *(*const dptrs)[RAID6_TEST_DISKS], const int disks)
159 int start = (disks>>1)-1, stop = disks-3; /* work on the second half of the disks */
181 (*algo)->gen_syndrome(disks, PAGE_SIZE, *dptrs);
191 (perf * HZ * (disks-2)) >>
211 (bestgenperf * HZ * (disks - 2)) >>
223 best->xor_syndrome(disks, start, stop,
230 (perf * HZ * (disks - 2)) >>
244 const int disks local
155 raid6_choose_gen( void *(*const dptrs)[RAID6_TEST_DISKS], const int disks) argument
[all...]
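The shift expressions above (algos.c:191, :211, :230) turn a raw iteration count into MB/s: 'perf' syndrome generations over (disks-2) data pages were timed across (1 << RAID6_TIME_JIFFIES_LG2) jiffies, and PAGE_SIZE together with the 2^20 of a megabyte folds into a single right shift. Written out as a helper:

/* MB/s = perf * (disks-2) * PAGE_SIZE * HZ
 *        / (1 << RAID6_TIME_JIFFIES_LG2) / 2^20 */
static unsigned long raid6_mbps_sketch(unsigned long perf, int disks)
{
        return (perf * HZ * (disks - 2)) >>
                (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2);
}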
loongarch_simd.c
33 static void raid6_lsx_gen_syndrome(int disks, size_t bytes, void **ptrs) argument
39 z0 = disks - 3; /* Highest data disk */
114 static void raid6_lsx_xor_syndrome(int disks, int start, int stop, argument
122 p = dptr[disks-2]; /* XOR parity */
123 q = dptr[disks-1]; /* RS syndrome */
264 static void raid6_lasx_gen_syndrome(int disks, size_t bytes, void **ptrs) argument
270 z0 = disks - 3; /* Highest data disk */
323 static void raid6_lasx_xor_syndrome(int disks, int start, int stop, argument
331 p = dptr[disks-2]; /* XOR parity */
332 q = dptr[disks
[all...]
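The LSX/LASX routines above vectorize the standard syndrome recurrence: P is the plain XOR of the data disks, and Q is a Horner evaluation in GF(2^8) (generator 2, polynomial 0x11d) walking from the highest data disk, disks-3, down to 0. A portable hedged sketch of the scalar version:

#include <stddef.h>
#include <stdint.h>

/* Multiply a GF(2^8) element by the generator 2 (polynomial 0x11d). */
static uint8_t gf_mul2(uint8_t v)
{
        return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0));
}

/* dptr[0..disks-3] are data, dptr[disks-2] is P, dptr[disks-1] is Q. */
static void gen_syndrome_sketch(int disks, size_t bytes, void **ptrs)
{
        uint8_t **dptr = (uint8_t **)ptrs;
        uint8_t *p = dptr[disks - 2];   /* XOR parity */
        uint8_t *q = dptr[disks - 1];   /* RS syndrome */
        int z0 = disks - 3;             /* highest data disk */

        for (size_t b = 0; b < bytes; b++) {
                uint8_t wp = dptr[z0][b];
                uint8_t wq = wp;

                for (int z = z0 - 1; z >= 0; z--) {
                        wp ^= dptr[z][b];               /* P: plain XOR */
                        wq = gf_mul2(wq) ^ dptr[z][b];  /* Q: Horner step */
                }
                p[b] = wp;
                q[b] = wq;
        }
}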
recov_loongarch_simd.c
29 static void raid6_2data_recov_lsx(int disks, size_t bytes, int faila, argument
36 p = (u8 *)ptrs[disks - 2];
37 q = (u8 *)ptrs[disks - 1];
46 ptrs[disks - 2] = dp;
49 ptrs[disks - 1] = dq;
51 raid6_call.gen_syndrome(disks, bytes, ptrs);
56 ptrs[disks - 2] = p;
57 ptrs[disks - 1] = q;
186 static void raid6_datap_recov_lsx(int disks, size_t bytes, int faila, argument
192 p = (u8 *)ptrs[disks
303 raid6_2data_recov_lasx(int disks, size_t bytes, int faila, int failb, void **ptrs) argument
425 raid6_datap_recov_lasx(int disks, size_t bytes, int faila, void **ptrs) argument
[all...]
recov.c
19 static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, argument
27 p = (u8 *)ptrs[disks-2];
28 q = (u8 *)ptrs[disks-1];
35 ptrs[disks-2] = dp;
38 ptrs[disks-1] = dq;
40 raid6_call.gen_syndrome(disks, bytes, ptrs);
45 ptrs[disks-2] = p;
46 ptrs[disks-1] = q;
63 static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila, argument
69 p = (u8 *)ptrs[disks
107 raid6_dual_recov(int disks, size_t bytes, int faila, int failb, void **ptrs) argument
[all...]
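Both recovery paths above use the same trick, visible in the snippet: repoint ptrs[] so the failed slots read as the zero page and the recomputed P'/Q' land in scratch buffers, rerun gen_syndrome(), then restore the table and close the gap algebraically. For the data+P case, Q ^ Q' equals the lost block times g^faila, so multiplying by g^-faila recovers it, and XORing it into P' finishes the parity. A hedged userspace sketch of that algebra (the gf_* helpers are illustrative stand-ins for the kernel's lookup tables):

#include <stddef.h>
#include <stdint.h>

static uint8_t gf_mul(uint8_t a, uint8_t b)     /* GF(2^8), poly 0x11d */
{
        uint8_t r = 0;

        while (b) {
                if (b & 1)
                        r ^= a;
                a = (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1d : 0));
                b >>= 1;
        }
        return r;
}

static uint8_t gf_inv(uint8_t a)        /* a^254 == a^-1, since a^255 == 1 */
{
        uint8_t r = 1;

        for (int i = 0; i < 254; i++)
                r = gf_mul(r, a);
        return r;
}

static uint8_t gf_exp2(int e)           /* g^e with generator g = 2 */
{
        uint8_t r = 1;

        while (e--)
                r = gf_mul(r, 2);
        return r;
}

/* q: original Q; qprime: Q recomputed with disk 'faila' zeroed;
 * pprime: P recomputed the same way, repaired in place;
 * data: receives the lost strip. */
static void datap_recov_sketch(size_t bytes, int faila,
                               const uint8_t *q, const uint8_t *qprime,
                               uint8_t *pprime, uint8_t *data)
{
        uint8_t coef = gf_inv(gf_exp2(faila));  /* g^-faila */

        for (size_t i = 0; i < bytes; i++) {
                data[i] = gf_mul(q[i] ^ qprime[i], coef);
                pprime[i] ^= data[i];   /* fold the recovered block into P */
        }
}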
recov_neon.c
23 static void raid6_2data_recov_neon(int disks, size_t bytes, int faila, argument
30 p = (u8 *)ptrs[disks - 2];
31 q = (u8 *)ptrs[disks - 1];
40 ptrs[disks - 2] = dp;
43 ptrs[disks - 1] = dq;
45 raid6_call.gen_syndrome(disks, bytes, ptrs);
50 ptrs[disks - 2] = p;
51 ptrs[disks - 1] = q;
63 static void raid6_datap_recov_neon(int disks, size_t bytes, int faila, argument
69 p = (u8 *)ptrs[disks
[all...]
neon.h
3 void raid6_neon1_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs);
4 void raid6_neon1_xor_syndrome_real(int disks, int start, int stop,
6 void raid6_neon2_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs);
7 void raid6_neon2_xor_syndrome_real(int disks, int start, int stop,
9 void raid6_neon4_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs);
10 void raid6_neon4_xor_syndrome_real(int disks, int start, int stop,
12 void raid6_neon8_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs);
13 void raid6_neon8_xor_syndrome_real(int disks, int start, int stop,
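The _real suffix above marks the NEON bodies; NEON registers may only be touched between kernel_neon_begin() and kernel_neon_end(), so each entry point is wrapped in a C-callable shim. A hedged sketch of the pattern (lib/raid6/neon.c generates these wrappers for all four unroll factors):

#include <asm/neon.h>

static void raid6_neon1_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
        kernel_neon_begin();
        raid6_neon1_gen_syndrome_real(disks, (unsigned long)bytes, ptrs);
        kernel_neon_end();
}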
/linux-master/include/linux/raid/
pq.h
154 extern void (*raid6_2data_recov)(int disks, size_t bytes, int faila, int failb,
156 extern void (*raid6_datap_recov)(int disks, size_t bytes, int faila,
158 void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
/linux-master/crypto/async_tx/
async_pq.c
22 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
36 const unsigned char *scfs, int disks,
46 int src_cnt = disks - 2;
76 dma_dest[0] = unmap->addr[disks - 2];
77 dma_dest[1] = unmap->addr[disks - 1];
107 do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks, argument
112 int start = -1, stop = disks - 3;
119 for (i = 0; i < disks; i++) {
121 BUG_ON(i > disks
35 do_async_gen_syndrome(struct dma_chan *chan, const unsigned char *scfs, int disks, struct dmaengine_unmap_data *unmap, enum dma_ctrl_flags dma_flags, struct async_submit_ctl *submit) argument
177 async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks, size_t len, struct async_submit_ctl *submit) argument
272 pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len) argument
298 async_syndrome_val(struct page **blocks, unsigned int *offsets, int disks, size_t len, enum sum_check_flags *pqres, struct page *spare, unsigned int s_off, struct async_submit_ctl *submit) argument
[all...]
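Per the comment at async_pq.c:22, callers hand async_gen_syndrome() a blocks[] array whose last two slots are the P and Q destinations. A hedged usage sketch; the wrapper, its parameter names, and the callback are placeholders:

#include <linux/async_tx.h>

static struct dma_async_tx_descriptor *
start_pq_sketch(struct page **blocks, unsigned int *offsets, int disks,
                size_t len, dma_async_tx_callback done, void *ctx,
                addr_conv_t *scribble)
{
        struct async_submit_ctl submit;

        /* blocks[0..disks-3]: data; blocks[disks-2]: P; blocks[disks-1]: Q */
        init_async_submit(&submit, ASYNC_TX_ACK, NULL, done, ctx, scribble);
        return async_gen_syndrome(blocks, offsets, disks, len, &submit);
}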
raid6test.c
35 static void makedata(int disks) argument
39 for (i = 0; i < disks; i++) {
46 static char disk_type(int d, int disks) argument
48 if (d == disks - 2)
50 else if (d == disks - 1)
57 static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, argument
68 if (failb == disks-1) {
69 if (faila == disks-2) {
73 disks, bytes, &submit);
80 BUG_ON(disks > NDISK
127 test_disks(int i, int j, int disks) argument
152 test(int disks, int *tests) argument
[all...]
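With disk_type() above placing P at disks-2 and Q at disks-1, raid6test's raid6_dual_recov() dispatches on which two strips died (faila < failb is assumed). A hedged synchronous sketch of that dispatch, using the pq.h entry points listed earlier:

static void dual_recov_sketch(int disks, size_t bytes, int faila, int failb,
                              void **ptrs)
{
        if (failb == disks - 1 && faila == disks - 2) {
                /* P and Q failed: data is intact, just redo the syndrome */
                raid6_call.gen_syndrome(disks, bytes, ptrs);
        } else if (failb == disks - 1) {
                /* one data disk plus Q: XOR-recover the data via P
                 * (RAID5-style), then regenerate Q (elided here) */
        } else if (failb == disks - 2) {
                /* one data disk plus P failed */
                raid6_datap_recov(disks, bytes, faila, ptrs);
        } else {
                /* two data disks failed */
                raid6_2data_recov(disks, bytes, faila, failb, ptrs);
        }
}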

Completed in 366 milliseconds
