Searched refs:disks (Results 1 - 25 of 36) sorted by path

/linux-master/lib/raid6/
mmx.c
35 static void raid6_mmx1_gen_syndrome(int disks, size_t bytes, void **ptrs) argument
41 z0 = disks - 3; /* Highest data disk */
83 static void raid6_mmx2_gen_syndrome(int disks, size_t bytes, void **ptrs) argument
89 z0 = disks - 3; /* Highest data disk */
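Every generator above shares one contract: ptrs[0..disks-3] are data pages, ptrs[disks-2] is P, ptrs[disks-1] is Q, and z0 = disks - 3 is the highest data disk. A portable byte-at-a-time sketch of that contract, modeled on the generic lib/raid6 algorithm (gen_syndrome_ref is an illustrative name; the MMX/SSE files do the same work on vector registers):

    #include <stddef.h>
    #include <stdint.h>

    /* Reference syndrome generator: P is plain XOR parity; Q is the
     * GF(2^8) sum of g^z * D_z over all data disks, evaluated by
     * Horner's rule, where multiplying by g means shift-left and
     * reduce modulo the RAID-6 polynomial 0x11d. */
    static void gen_syndrome_ref(int disks, size_t bytes, void **ptrs)
    {
        uint8_t **d = (uint8_t **)ptrs;
        uint8_t *p = d[disks - 2];      /* XOR parity */
        uint8_t *q = d[disks - 1];      /* RS syndrome */
        int z0 = disks - 3;             /* highest data disk */

        for (size_t i = 0; i < bytes; i++) {
            uint8_t wp = d[z0][i], wq = d[z0][i];

            for (int z = z0 - 1; z >= 0; z--) {
                wp ^= d[z][i];                                  /* P */
                wq = (uint8_t)((wq << 1) ^ ((wq & 0x80) ? 0x1d : 0));
                wq ^= d[z][i];                                  /* Q */
            }
            p[i] = wp;
            q[i] = wq;
        }
    }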
neon.c
30 static void raid6_neon ## _n ## _gen_syndrome(int disks, \
36 raid6_neon ## _n ## _gen_syndrome_real(disks, \
40 static void raid6_neon ## _n ## _xor_syndrome(int disks, \
47 raid6_neon ## _n ## _xor_syndrome_real(disks, \
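The neon.c hits are lines of a token-pasting template: each unroll factor _n gets a thin entry point that brackets the real NEON routine with kernel-mode FPU enable/disable. Assumed, simplified shape of that wrapper:

    #define RAID6_NEON_WRAPPER(_n)                                      \
        static void raid6_neon ## _n ## _gen_syndrome(int disks,        \
                                        size_t bytes, void **ptrs)      \
        {                                                               \
            kernel_neon_begin();                                        \
            raid6_neon ## _n ## _gen_syndrome_real(disks, bytes, ptrs); \
            kernel_neon_end();                                          \
        }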
recov_avx512.c
24 static void raid6_2data_recov_avx512(int disks, size_t bytes, int faila, argument
32 p = (u8 *)ptrs[disks-2];
33 q = (u8 *)ptrs[disks-1];
43 ptrs[disks-2] = dp;
46 ptrs[disks-1] = dq;
48 raid6_call.gen_syndrome(disks, bytes, ptrs);
53 ptrs[disks-2] = p;
54 ptrs[disks-1] = q;
227 static void raid6_datap_recov_avx512(int disks, size_t bytes, int faila, argument
234 p = (u8 *)ptrs[disks
[all...]
recov_s390xc.c
23 static void raid6_2data_recov_s390xc(int disks, size_t bytes, int faila, argument
31 p = (u8 *)ptrs[disks-2];
32 q = (u8 *)ptrs[disks-1];
39 ptrs[disks-2] = dp;
42 ptrs[disks-1] = dq;
44 raid6_call.gen_syndrome(disks, bytes, ptrs);
49 ptrs[disks-2] = p;
50 ptrs[disks-1] = q;
72 static void raid6_datap_recov_s390xc(int disks, size_t bytes, int faila, argument
79 p = (u8 *)ptrs[disks
[all...]
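recov_avx512.c and recov_s390xc.c open identically, and the pattern deserves spelling out: to rebuild two lost data blocks, point the failed slots at a zero page, let their real buffers stand in for P and Q, and a single gen_syndrome() call produces the P/Q deltas. A hedged outline (raid6_2data_recov_outline is illustrative; raid6_call and raid6_empty_zero_page are the pq.h names; the per-byte GF solve at the end is elided):

    static void raid6_2data_recov_outline(int disks, size_t bytes,
                                          int faila, int failb, void **ptrs)
    {
        u8 *p, *q, *dp, *dq;

        p = (u8 *)ptrs[disks - 2];
        q = (u8 *)ptrs[disks - 1];

        /* Redirect the failed data slots to a zero page and reuse
         * their buffers as the syndrome destinations. */
        dp = (u8 *)ptrs[faila];
        ptrs[faila] = (void *)raid6_empty_zero_page;
        ptrs[disks - 2] = dp;
        dq = (u8 *)ptrs[failb];
        ptrs[failb] = (void *)raid6_empty_zero_page;
        ptrs[disks - 1] = dq;

        raid6_call.gen_syndrome(disks, bytes, ptrs);  /* dp = P', dq = Q' */

        /* Restore the pointer table. */
        ptrs[faila] = dp;
        ptrs[failb] = dq;
        ptrs[disks - 2] = p;
        ptrs[disks - 1] = q;

        /* ... then, per byte, solve Db = pbmul[P^P'] ^ qmul[Q^Q'] and
         * Da = Db ^ (P^P') using the GF multiply/inverse tables ... */
    }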
sse1.c
40 static void raid6_sse11_gen_syndrome(int disks, size_t bytes, void **ptrs) argument
46 z0 = disks - 3; /* Highest data disk */
99 static void raid6_sse12_gen_syndrome(int disks, size_t bytes, void **ptrs) argument
105 z0 = disks - 3; /* Highest data disk */
sse2.c
36 static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs) argument
42 z0 = disks - 3; /* Highest data disk */
87 static void raid6_sse21_xor_syndrome(int disks, int start, int stop, argument
95 p = dptr[disks-2]; /* XOR parity */
96 q = dptr[disks-1]; /* RS syndrome */
146 static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs) argument
152 z0 = disks - 3; /* Highest data disk */
198 static void raid6_sse22_xor_syndrome(int disks, int start, int stop, argument
206 p = dptr[disks-2]; /* XOR parity */
207 q = dptr[disks
278 raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs) argument
364 raid6_sse24_xor_syndrome(int disks, int start, int stop, size_t bytes, void **ptrs) argument
[all...]
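sse2.c adds the xor_syndrome variant used for read-modify-write updates: rather than regenerating P/Q from every disk, it folds only data disks [start, stop] into the existing parity. A portable sketch of the assumed contract, again modeled on the generic lib/raid6 code (xor_syndrome_ref is an illustrative name):

    static void xor_syndrome_ref(int disks, int start, int stop,
                                 size_t bytes, void **ptrs)
    {
        uint8_t **d = (uint8_t **)ptrs;
        uint8_t *p = d[disks - 2], *q = d[disks - 1];

        for (size_t i = 0; i < bytes; i++) {
            uint8_t wp = d[stop][i], wq = d[stop][i];

            for (int z = stop - 1; z >= start; z--) {
                wp ^= d[z][i];
                wq = (uint8_t)((wq << 1) ^ ((wq & 0x80) ? 0x1d : 0));
                wq ^= d[z][i];
            }
            p[i] ^= wp;
            /* shift Q's contribution past the untouched low disks */
            for (int z = start - 1; z >= 0; z--)
                wq = (uint8_t)((wq << 1) ^ ((wq & 0x80) ? 0x1d : 0));
            q[i] ^= wq;
        }
    }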
/linux-master/crypto/async_tx/
async_pq.c
22 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
36 const unsigned char *scfs, int disks,
46 int src_cnt = disks - 2;
76 dma_dest[0] = unmap->addr[disks - 2];
77 dma_dest[1] = unmap->addr[disks - 1];
107 do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks, argument
112 int start = -1, stop = disks - 3;
119 for (i = 0; i < disks; i++) {
121 BUG_ON(i > disks
35 do_async_gen_syndrome(struct dma_chan *chan, const unsigned char *scfs, int disks, struct dmaengine_unmap_data *unmap, enum dma_ctrl_flags dma_flags, struct async_submit_ctl *submit) argument
177 async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks, size_t len, struct async_submit_ctl *submit) argument
272 pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len) argument
298 async_syndrome_val(struct page **blocks, unsigned int *offsets, int disks, size_t len, enum sum_check_flags *pqres, struct page *spare, unsigned int s_off, struct async_submit_ctl *submit) argument
[all...]
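The do_sync_gen_syndrome() hits show the software fallback path: NULL entries in blocks[] (which may never be the P or Q slots) are substituted with the shared zero page before the flat source array is handed to raid6_call.gen_syndrome(). Shape of that loop, per the snippet:

    for (i = 0; i < disks; i++) {
        if (blocks[i] == NULL) {
            BUG_ON(i > disks - 3);      /* P or Q can't be absent */
            srcs[i] = (void *)raid6_empty_zero_page;
        } else {
            srcs[i] = page_address(blocks[i]) + offsets[i];
        }
    }
    raid6_call.gen_syndrome(disks, len, srcs);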
async_raid6_recov.c
153 __2data_recov_4(int disks, size_t bytes, int faila, int failb, argument
168 p = blocks[disks-2];
169 p_off = offs[disks-2];
170 q = blocks[disks-1];
171 q_off = offs[disks-1];
203 __2data_recov_5(int disks, size_t bytes, int faila, int failb, argument
221 for (i = 0; i < disks-2; i++) {
231 p = blocks[disks-2];
232 p_off = offs[disks-2];
233 q = blocks[disks
294 __2data_recov_n(int disks, size_t bytes, int faila, int failb, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit) argument
393 async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit) argument
471 async_raid6_datap_recov(int disks, size_t bytes, int faila, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit) argument
[all...]
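The __2data_recov_4/_5/_n hits are tied together by a dispatcher in async_raid6_2data_recov() that counts the data pages present; DMA engines do not uniformly accept zero- or single-source P+Q operations, so the 4- and 5-disk geometries get explicit special cases. Assumed shape:

    /* Count the data pages present (the two failed destinations count;
     * pages absent from the stripe are NULL). */
    non_zero_srcs = 0;
    for (i = 0; i < disks - 2; i++)
        if (blocks[i])
            non_zero_srcs++;

    switch (non_zero_srcs) {
    case 0:
    case 1:
        BUG();          /* at least the two failed devices must exist */
    case 2:             /* 4-disk array, both data disks failed */
        return __2data_recov_4(disks, bytes, faila, failb, blocks, offs, submit);
    case 3:             /* 5-disk array, 2 of 3 data disks failed */
        return __2data_recov_5(disks, bytes, faila, failb, blocks, offs, submit);
    default:            /* general case */
        return __2data_recov_n(disks, bytes, faila, failb, blocks, offs, submit);
    }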
raid6test.c
35 static void makedata(int disks) argument
39 for (i = 0; i < disks; i++) {
46 static char disk_type(int d, int disks) argument
48 if (d == disks - 2)
50 else if (d == disks - 1)
57 static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, argument
68 if (failb == disks-1) {
69 if (faila == disks-2) {
73 disks, bytes, &submit);
80 BUG_ON(disks > NDISK
127 test_disks(int i, int j, int disks) argument
152 test(int disks, int *tests) argument
[all...]
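Taken together, the harness exercises every (faila, failb) pair: fill random data, compute P/Q, knock out two disks, recover, and compare against saved copies. A hedged outline of one iteration (the buffer names are assumptions; the real test substitutes scratch pages rather than corrupting in place):

    makedata(disks);                            /* random-fill data pages */
    raid6_call.gen_syndrome(disks, PAGE_SIZE, ptrs);

    memcpy(saved_a, ptrs[faila], PAGE_SIZE);    /* golden copies */
    memcpy(saved_b, ptrs[failb], PAGE_SIZE);
    memset(ptrs[faila], 0xf0, PAGE_SIZE);       /* simulate the failures */
    memset(ptrs[failb], 0x0f, PAGE_SIZE);

    raid6_dual_recov(disks, PAGE_SIZE, faila, failb, ptrs);

    if (memcmp(saved_a, ptrs[faila], PAGE_SIZE) ||
        memcmp(saved_b, ptrs[failb], PAGE_SIZE))
        pr_err("raid6: recovery failed at %c/%c\n",
               disk_type(faila, disks), disk_type(failb, disks));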
/linux-master/drivers/block/
floppy.c
87 /* 1995/4/24 -- Dan Fandrich -- added support for Commodore 1581 3.5" disks
423 * The LSB (bit 2) is flipped. For most disks, the first sector
425 * disks (such as Ensoniq EPS 16plus) it is 0 (represented as 0x01<<2).
426 * For Amstrad CPC disks it is 0xC1 (represented as 0xC0<<2).
478 static struct gendisk *disks[N_DRIVE][ARRAY_SIZE(floppy_type)]; variable in typeref:struct:gendisk
2586 /* 2M disks have phantom sectors on the first track */
3417 * We do this in order to provide a means to eject floppy disks before
4049 set_capacity(disks[drive][ITYPE(new_dev)], floppy_sizes[new_dev]);
4541 disks[drive][type] = disk;
4557 if (disks[driv
[all...]
swim3.c
12 * handle GCR disks
42 static struct gendisk *disks[MAX_FLOPPIES]; variable in typeref:struct:gendisk
842 struct request_queue *q = disks[fs->index]->queue;
1236 disks[floppy_count++] = disk;
/linux-master/drivers/md/
dm-raid.c
903 list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks);
909 list_add_tail(&rs->journal_dev.rdev.same_set, &rs->md.disks);
1392 /* Define the +/-# of disks to add to/remove from the given raid set */
1651 rs->ti->error = "Bogus raid10 data copies or delta disks";
1748 /* raid0 with multiple disks -> raid4/5/6 */
1765 /* Can takeover raid10_near with raid disks divisible by data copies! */
1786 /* raid10_{near,far} with 2 disks -> raid4/5 */
1793 /* raid1 with 2 disks -> raid4/5 */
1815 /* raid4 -> raid1/5 with 2 disks */
1837 /* raid5 with 2 disks
[all...]
md-autodetect.c
180 if (!list_empty(&mddev->disks) || mddev->raid_disks) {
md-bitmap.c
168 /* Iterate the disks of an mddev, using rcu to protect access to the
184 rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
189 list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
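The md-bitmap.c hits show the idiom for a resumable rcu walk: seed the cursor with the list head itself, then iterate with list_for_each_entry_continue_rcu() so the walk can be dropped at any rdev and picked up again in a later rcu read section. Shape of the loop, per the snippet:

    rcu_read_lock();
    if (!rdev)
        /* starting from scratch: aim the cursor at the head */
        rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
    list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
        /* examine rdev; remember it and break to resume here later */
    }
    rcu_read_unlock();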
md.c
692 if (mddev->raid_disks || !list_empty(&mddev->disks) ||
748 INIT_LIST_HEAD(&mddev->disks);
1320 sb->disks[rdev->desc_nr].state & ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1449 if (sb->disks[rdev->desc_nr].state & (
1467 desc = sb->disks + rdev->desc_nr;
1503 * 1/ zero out disks
1505 * 3/ any empty disks < next_spare become removed
1507 * disks[0] gets initialised to REMOVED because
1567 sb->disks[0].state = (1<<MD_DISK_REMOVED);
1588 d = &sb->disks[rdev
[all...]
md.h
313 struct list_head disks; member in struct:mddev
406 /* resync even though the same disks are shared among md-devices */
737 * iterates through the 'same array disks' ringlist
740 list_for_each_entry(rdev, &((mddev)->disks), same_set)
743 list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)
746 list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
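Typical usage of the md.h iteration macros above (the loop bodies and both helpers are illustrative assumptions):

    struct md_rdev *rdev, *tmp;

    /* plain walk over the member disks */
    rdev_for_each(rdev, mddev)
        inspect(rdev);                          /* assumed callback */

    /* deletion-safe walk, e.g. when kicking faulty members */
    rdev_for_each_safe(rdev, tmp, mddev)
        if (test_bit(Faulty, &rdev->flags))
            remove_member(rdev);                /* assumed helper */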
raid1.c
529 * disks. Setting the Returned bit ensures that this
783 /* At least two disks to choose from so failfast is OK */
826 * If all disks are rotational, choose the closest disk. If any disk is
829 * mixed rotational/non-rotational disks depending on workload.
844 * 2) Now that there is no resync, loop through all disks and skipping slow
845 * disks and disks with bad blocks for now. Only pay attention to key disk
848 * 3) If we've made it this far, now look for disks with bad blocks and choose
1427 int i, disks; local
1483 disks
2328 int disks = conf->raid_disks * 2; local
[all...]
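The raid1.c comments above lay out the read-balance policy in numbered passes; a standalone, deliberately simplified sketch of passes 2 and 3 (the struct and every field name here are illustrative assumptions, not the kernel's):

    #include <stdbool.h>

    struct mirror {
        bool in_sync, slow, has_badblocks, nonrot;
        long long head_pos;
    };

    static int choose_read_mirror(const struct mirror *m, int disks,
                                  long long sector)
    {
        int best = -1;
        long long best_dist = 0;

        for (int i = 0; i < disks; i++) {
            long long dist;

            /* pass 2: skip slow disks and disks with bad blocks */
            if (!m[i].in_sync || m[i].slow || m[i].has_badblocks)
                continue;
            if (m[i].nonrot)
                return i;       /* non-rotational disk wins (simplified) */
            dist = m[i].head_pos - sector;  /* else closest head wins */
            if (dist < 0)
                dist = -dist;
            if (best < 0 || dist < best_dist) {
                best = i;
                best_dist = dist;
            }
        }
        /* pass 3 (elided): fall back to disks whose bad blocks do not
         * cover the requested range */
        return best;
    }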
raid10.c
819 /* At least 2 disks to choose from so failfast is OK */
1903 int disks, ncopies; local
1905 disks = conf->prev.raid_disks;
1908 disks = conf->geo.raid_disks;
1922 this = (this+1) % disks;
1926 first = (first + ncopies) % disks;
2030 * Find all non-in_sync disks within the RAID10 configuration
3816 int layout, chunk, disks; local
3821 disks = mddev->raid_disks - mddev->delta_disks;
3826 disks
[all...]
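The modular walk in the raid10.c hits at lines 1922-1926 is the degraded-array liveness check: step through the layout in groups of ncopies and require at least one usable device per group. A standalone sketch under assumed semantics (enough_copies is an illustrative name):

    #include <stdbool.h>

    static bool enough_copies(int disks, int ncopies, const bool *in_sync)
    {
        int first = 0;

        do {
            int this = first, n = ncopies;
            bool ok = false;

            while (n--) {
                ok |= in_sync[this];
                this = (this + 1) % disks;
            }
            if (!ok)
                return false;       /* an entire copy-set is dead */
            first = (first + ncopies) % disks;
        } while (first != 0);

        return true;
    }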
raid5-cache.c
69 * Stripes in caching phase do not write the raid disks. Instead, all
78 * - write data and parity to raid disks
206 * first and then start move data to raid disks, there is no requirement to
308 struct stripe_head *sh, int disks)
312 for (i = sh->disks; i--; ) {
470 for (i = sh->disks; i--; )
486 for (i = sh->disks; i--; )
926 for (i = 0; i < sh->disks; i++) {
991 * data from log to raid disks), so we shouldn't wait for reclaim here
1015 for (i = 0; i < sh->disks;
307 r5c_handle_cached_data_endio(struct r5conf *conf, struct stripe_head *sh, int disks) argument
2627 r5c_try_caching_write(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s, int disks) argument
[all...]
raid5-log.h
16 struct stripe_head_state *s, int disks);
23 struct stripe_head *sh, int disks);
raid5-ppl.c
36 * disks. Every stripe_head in the entry must write to the same data disks.
81 * If write-back cache is enabled for any of the disks in the array, its data
161 int disks = sh->disks; local
176 * rmw: xor old data and parity from updated disks
182 /* rcw: xor data from all not updated disks */
183 for (i = disks; i--;) {
193 NULL, sh, (void *) (srcs + sh->disks + 2));
299 for (i = 0; i < sh->disks;
[all...]
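The raid5-ppl.c hits contrast the two ways a partial-parity buffer is assembled; a hedged fragment making that explicit (updated[], srcs[], old_parity, old_data[], data[], and xor_many() are all assumptions):

    count = 0;
    if (rmw) {
        /* rmw: partial parity = old parity ^ old data of the
         * disks being rewritten */
        srcs[count++] = old_parity;
        for (i = disks; i--; )
            if (updated[i])
                srcs[count++] = old_data[i];
    } else {
        /* rcw: partial parity = xor of current data on every disk
         * NOT being rewritten */
        for (i = disks; i--; )
            if (!updated[i])
                srcs[count++] = data[i];
    }
    xor_many(dest, srcs, count, bytes);         /* assumed helper */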
raid5.c
127 if (sh->qd_idx == sh->disks - 1)
138 /* When walking through the disks in a raid5, starting at raid6_d0,
139 * We need to map each disk to a 'slot', where the data disks are slot
233 for (i = sh->disks; i--; )
502 init_stripe_shared_pages(struct stripe_head *sh, struct r5conf *conf, int disks) argument
511 nr_pages = (disks + cnt - 1) / cnt;
592 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
597 for (i = sh->disks; i--; ) {
699 struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);
702 rdev = READ_ONCE(conf->disks[
1143 int i, disks = sh->disks; local
1562 int disks = sh->disks; local
1616 int disks = sh->disks; local
1659 int disks = sh->disks; local
1723 int i, count, disks = sh->disks; local
1859 int disks = sh->disks; local
1923 int disks = sh->disks; local
1994 int disks = sh->disks; local
2042 int disks = sh->disks; local
2213 int disks = sh->disks; local
2277 int overlap_clear = 0, i, disks = sh->disks; local
2357 alloc_stripe(struct kmem_cache *sc, gfp_t gfp, int disks, struct r5conf *conf) argument
2722 int disks = sh->disks, i; local
2850 int disks = sh->disks, i; local
3340 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; local
3608 int disks = previous ? conf->previous_raid_disks : conf->raid_disks; local
3618 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s, int disks) argument
3792 need_this_block(struct stripe_head *sh, struct stripe_head_state *s, int disk_idx, int disks) argument
3899 fetch_block(struct stripe_head *sh, struct stripe_head_state *s, int disk_idx, int disks) argument
3985 handle_stripe_fill(struct stripe_head *sh, struct stripe_head_state *s, int disks) argument
4026 handle_stripe_clean_event(struct r5conf *conf, struct stripe_head *sh, int disks) argument
4141 handle_stripe_dirtying(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s, int disks) argument
4305 handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s, int disks) argument
4398 handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s, int disks) argument
4651 int disks = sh->disks; local
4943 int disks = sh->disks; local
[all...]
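The comment at raid5.c line 138 explains the disk-to-slot mapping that feeds the generic raid6 code: walking from raid6_d0, data disks take slots 0..disks-3 in syndrome order while P and Q always land in the two trailing slots, wherever pd_idx/qd_idx have rotated to. A simplified sketch (idx_to_slot is an illustrative name; the ddf-layout special case is ignored):

    static int idx_to_slot(int idx, int pd_idx, int qd_idx,
                           int syndrome_disks, int *count)
    {
        if (idx == pd_idx)
            return syndrome_disks;          /* P slot */
        if (idx == qd_idx)
            return syndrome_disks + 1;      /* Q slot */
        return (*count)++;                  /* next data slot in order */
    }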
raid5.h
215 int disks; /* disks in stripe */ member in struct:stripe_head
216 int overwrite_disks; /* total overwrite disks in stripe,
271 } dev[]; /* allocated depending on RAID geometry ("disks" member) */
674 int pool_size; /* number of disks in stripeheads in pool */
676 struct disk_info *disks; member in struct:r5conf
/linux-master/fs/bcachefs/
ec.c
33 static void raid5_recov(unsigned disks, unsigned failed_idx, argument
38 BUG_ON(failed_idx >= disks);
43 while (i < disks) {
44 nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
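bcachefs's raid5_recov() is plain XOR reconstruction, chunked through xor_blocks() in groups of MAX_XOR_BLOCKS; with single (P-only) parity, any one lost block is the XOR of everything else. A standalone byte-wise equivalent (raid5_recov_ref is an illustrative name):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static void raid5_recov_ref(unsigned disks, unsigned failed_idx,
                                size_t bytes, void **v)
    {
        uint8_t *out = v[failed_idx];

        memset(out, 0, bytes);
        for (unsigned i = 0; i < disks; i++) {
            const uint8_t *in = v[i];

            if (i == failed_idx)
                continue;
            for (size_t b = 0; b < bytes; b++)
                out[b] ^= in[b];
        }
    }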
/linux-master/include/linux/raid/
pq.h
154 extern void (*raid6_2data_recov)(int disks, size_t bytes, int faila, int failb,
156 extern void (*raid6_datap_recov)(int disks, size_t bytes, int faila,
158 void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
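raid6_dual_recov() ties the two pq.h hooks together by dispatching on which pair of devices failed; its assumed shape, consistent with the raid6test.c hits above:

    if (faila > failb)
        swap(faila, failb);

    if (failb == disks - 1) {
        if (faila == disks - 2) {
            /* P+Q both lost: just recompute the syndrome */
            raid6_call.gen_syndrome(disks, bytes, ptrs);
        } else {
            /* data+Q lost: rebuild the data block from P by XOR,
             * then regenerate Q (elided) */
        }
    } else if (failb == disks - 2) {
        /* data+P lost: recover through Q */
        raid6_datap_recov(disks, bytes, faila, ptrs);
    } else {
        /* two data disks lost: the general case */
        raid6_2data_recov(disks, bytes, faila, failb, ptrs);
    }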

Completed in 679 milliseconds
