Lines matching refs: disks

22  * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
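
These matches track the Linux async_tx P+Q helpers (crypto/async_tx/async_pq.c in mainline). The comment above pins down the layout convention the whole file relies on: data blocks occupy indices 0..disks-3, with the P and Q destinations appended at the tail. The P()/Q() accessors seen in the later matches reduce, in mainline, to plain tail indexing; a minimal sketch:

    /* Tail-indexing convention: for a 4+2 stripe (disks == 6),
     * blocks[0..3] are data, blocks[4] is P, blocks[5] is Q.
     */
    #define P(b, d)    (b[(d)-2])    /* 'P' destination: blocks[disks-2] */
    #define Q(b, d)    (b[(d)-1])    /* 'Q' destination: blocks[disks-1] */
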
36 const unsigned char *scfs, int disks,
46 int src_cnt = disks - 2;
76 dma_dest[0] = unmap->addr[disks - 2];
77 dma_dest[1] = unmap->addr[disks - 1];
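
Lines 76-77 pull the two tail addresses out of the DMA unmap descriptor and hand them to the engine as the P and Q destinations. A hedged sketch of how they feed the prep call (device_prep_dma_pq is the real dmaengine hook; the surrounding names follow the matches above, and mainline additionally splits the call when src_cnt exceeds dma_maxpq()):

    /* The two tail addresses become the engine's P/Q destinations; the
     * first src_cnt (= disks - 2) unmap entries are the sources, and
     * scfs carries the RAID-6 coefficient for each source.
     */
    tx = device->device_prep_dma_pq(chan, dma_dest, unmap->addr,
                                    src_cnt, scfs, len, dma_flags);
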
107 do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
112 int start = -1, stop = disks - 3;
119 for (i = 0; i < disks; i++) {
121 BUG_ON(i > disks - 3); /* P or Q can't be zero */
126 if (i < disks - 2) {
136 raid6_call.xor_syndrome(disks, start, stop, len, srcs);
138 raid6_call.gen_syndrome(disks, len, srcs);
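
Lines 107-138 belong to do_sync_gen_syndrome(), the synchronous fallback: it builds the raw source vector for the lib/raid6 routines, substituting the shared zero page for absent data blocks and tracking the [start, stop] window of data blocks actually present. A condensed sketch of the loop these matches outline (raid6_empty_zero_page comes from include/linux/raid/pq.h; srcs points at the caller's scribble buffer or at blocks itself):

    /* Build srcs[] for lib/raid6: NULL data blocks read as all-zero,
     * and [start, stop] brackets the data blocks that are present.
     */
    for (i = 0; i < disks; i++) {
        if (blocks[i] == NULL) {
            BUG_ON(i > disks - 3);      /* P and Q must be real pages here */
            srcs[i] = (void *)raid6_empty_zero_page;
        } else {
            srcs[i] = page_address(blocks[i]) + offsets[i];
            if (i < disks - 2) {        /* data block: widen the window */
                stop = i;
                if (start == -1)
                    start = i;
            }
        }
    }

With ASYNC_TX_PQ_XOR_DST set, xor_syndrome(disks, start, stop, len, srcs) folds just that window into the existing P/Q; otherwise gen_syndrome(disks, len, srcs) recomputes both from all sources.
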
157 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
159 * @disks: number of blocks (including missing P or Q, see below)
166 * 'disks' note: callers can optionally omit either P or Q (but not
167 * both) from the calculation by setting blocks[disks-2] or
168 * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <=
170 * synchronous path. 'disks' always accounts for both destination
171 * buffers. If any source buffers (blocks[i] where i < disks - 2) are
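
The doc comment above (lines 157-171) defines the calling convention, including the NULL trick for skipping one destination; in mainline the truncated "len must be <=" clause caps len at PAGE_SIZE, because a single throwaway page backs the omitted destination on the synchronous path. A hedged caller sketch, recomputing only Q for a 4+2 stripe:

    /* Hypothetical caller: update only Q (disks == 6). stripe_pages[6],
     * stripe_offs[6], scribble, done_cb and cb_arg are illustrative
     * names, not from this file.
     */
    struct async_submit_ctl submit;
    struct dma_async_tx_descriptor *tx;

    init_async_submit(&submit, 0, NULL, done_cb, cb_arg, scribble);
    stripe_pages[4] = NULL;             /* omit P: blocks[disks-2] == NULL */
    tx = async_gen_syndrome(stripe_pages, stripe_offs, 6, PAGE_SIZE,
                            &submit);
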
177 async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
180 int src_cnt = disks - 2;
182 &P(blocks, disks), 2,
187 BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks)));
190 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
196 is_dma_pq_aligned_offs(device, offsets, disks, len)) {
203 pr_debug("%s: (async) disks: %d len: %zu\n",
204 __func__, disks, len);
225 if (P(blocks, disks))
226 unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
227 P(offsets, disks),
235 if (Q(blocks, disks))
236 unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
237 Q(offsets, disks),
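
Lines 225-237 map P and Q bidirectionally, since an engine may read the old contents as well as write new ones. The else branches fall outside the match filter; a sketch of the absent-destination case, using the real dmaengine flags DMA_PREP_PQ_DISABLE_P/_Q:

    /* When a destination was passed as NULL, nothing is mapped and the
     * engine is told to suppress that output.
     */
    if (P(blocks, disks)) {
        unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
                                        P(offsets, disks), len,
                                        DMA_BIDIRECTIONAL);
    } else {
        unmap->addr[j++] = 0;
        dma_flags |= DMA_PREP_PQ_DISABLE_P;
    }
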
252 pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);
257 if (!P(blocks, disks)) {
258 P(blocks, disks) = pq_scribble_page;
259 P(offsets, disks) = 0;
261 if (!Q(blocks, disks)) {
262 Q(blocks, disks) = pq_scribble_page;
263 Q(offsets, disks) = 0;
265 do_sync_gen_syndrome(blocks, offsets, disks, len, submit);
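
Lines 257-265 are the synchronous counterpart of the NULL trick: the lib/raid6 gen_syndrome() implementations always write both outputs, so an omitted destination still needs a real page to land in, and pq_scribble_page absorbs the discarded result. In mainline that page is a single shared scratch page set up once at module init, roughly:

    static struct page *pq_scribble_page;

    static int __init async_pq_init(void)
    {
        pq_scribble_page = alloc_page(GFP_KERNEL); /* throwaway P/Q sink */
        return pq_scribble_page ? 0 : -ENOMEM;
    }
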
272 pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
278 disks, len);
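
pq_val_chan() (lines 272-278) just locates a channel advertising the DMA_PQ_VAL capability; its body reduces to the generic channel lookup, roughly:

    return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0,
                                 blocks, disks, len);
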
283 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
285 * @disks: number of blocks (including missing P or Q, see below)
293 * and 'disks' parameters of this routine. The synchronous path
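
The doc comment at lines 283-293 describes async_syndrome_val(), which checks the stored P/Q against the data blocks. A hedged caller sketch; pqres and the SUM_CHECK_*_RESULT bits are the real sum_check_flags interface, while the stripe variables are illustrative:

    /* Hypothetical check of a 4+2 stripe; 'spare' is scratch space the
     * synchronous path uses to regenerate P and Q for comparison.
     */
    enum sum_check_flags pqres = 0;

    init_async_submit(&submit, 0, NULL, NULL, NULL, scribble);
    tx = async_syndrome_val(stripe_pages, stripe_offs, 6, PAGE_SIZE,
                            &pqres, spare, 0, &submit);
    async_tx_quiesce(&tx);              /* wait for the check to finish */
    if (pqres & SUM_CHECK_P_RESULT)
        pr_warn("P mismatch\n");
    if (pqres & SUM_CHECK_Q_RESULT)
        pr_warn("Q mismatch\n");
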
298 async_syndrome_val(struct page **blocks, unsigned int *offsets, int disks,
302 struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
309 BUG_ON(disks < 4 || disks > MAX_DISKS);
312 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
314 if (unmap && disks <= dma_maxpq(device, 0) &&
315 is_dma_pq_aligned_offs(device, offsets, disks, len)) {
320 pr_debug("%s: (async) disks: %d len: %zu\n",
321 __func__, disks, len);
324 for (i = 0; i < disks-2; i++)
335 if (!P(blocks, disks)) {
339 pq[0] = dma_map_page(dev, P(blocks, disks),
340 P(offsets, disks), len,
345 if (!Q(blocks, disks)) {
349 pq[1] = dma_map_page(dev, Q(blocks, disks),
350 Q(offsets, disks), len,
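
In the hardware path (lines 309-350) the disks-2 sources are mapped to the device, the stored P and Q land in pq[0]/pq[1], and the whole set goes to the engine's validate op. A sketch of the prep call these matches lead up to (device_prep_dma_pq_val is the real dmaengine hook; mainline passes raid6_gfexp, the standard RAID-6 generator powers, as the coefficients):

    /* The engine recomputes the syndrome from the sources, compares it
     * with the stored P/Q in pq[], and reports mismatches via pqres.
     */
    tx = device->device_prep_dma_pq_val(chan, pq, unmap->addr, disks - 2,
                                        (u8 *)raid6_gfexp, len, pqres,
                                        dma_flags);
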
374 struct page *p_src = P(blocks, disks);
375 unsigned int p_off = P(offsets, disks);
376 struct page *q_src = Q(blocks, disks);
377 unsigned int q_off = Q(offsets, disks);
384 pr_debug("%s: (sync) disks: %d len: %zu\n",
385 __func__, disks, len);
404 blocks, offsets, disks-2, len, submit);
412 P(blocks, disks) = NULL;
413 Q(blocks, disks) = spare;
414 Q(offsets, disks) = s_off;
416 tx = async_gen_syndrome(blocks, offsets, disks,
425 P(blocks, disks) = p_src;
426 P(offsets, disks) = p_off;
427 Q(blocks, disks) = q_src;
428 Q(offsets, disks) = q_off;
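
Lines 374-428 are the synchronous check: the caller's P/Q pointers are saved (374-377), P is verified by XOR-summing the data blocks into the spare page (404) and comparing, then Q is regenerated into the spare with P suppressed (412-416) and compared against the stored copy, after which the saved pointers are restored (425-428). A condensed sketch of the Q leg, assuming the same variable names:

    /* Regenerate Q into scratch space with P disabled, then compare. */
    P(blocks, disks) = NULL;            /* skip P on the regeneration pass */
    Q(blocks, disks) = spare;           /* redirect Q into the spare page */
    Q(offsets, disks) = s_off;
    tx = async_gen_syndrome(blocks, offsets, disks, len, submit);
    async_tx_quiesce(&tx);
    if (memcmp(page_address(spare) + s_off,
               page_address(q_src) + q_off, len) != 0)
        *pqres |= SUM_CHECK_Q_RESULT;

    /* restore the caller's view of the block array */
    P(blocks, disks) = p_src;
    P(offsets, disks) = p_off;
    Q(blocks, disks) = q_src;
    Q(offsets, disks) = q_off;
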