rf_dagdegrd.c revision 1.4
1/*	$NetBSD: rf_dagdegrd.c,v 1.4 1999/08/13 03:41:53 oster Exp $	*/
2/*
3 * Copyright (c) 1995 Carnegie-Mellon University.
4 * All rights reserved.
5 *
6 * Author: Mark Holland, Daniel Stodolsky, William V. Courtright II
7 *
8 * Permission to use, copy, modify and distribute this software and
9 * its documentation is hereby granted, provided that both the copyright
10 * notice and this permission notice appear in all copies of the
11 * software, derivative works or modified versions, and any portions
12 * thereof, and that both notices appear in supporting documentation.
13 *
14 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
15 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
16 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
17 *
18 * Carnegie Mellon requests users of this software to return to
19 *
20 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
21 *  School of Computer Science
22 *  Carnegie Mellon University
23 *  Pittsburgh PA 15213-3890
24 *
25 * any improvements or extensions that they make and grant Carnegie the
26 * rights to redistribute these changes.
27 */
28
29/*
30 * rf_dagdegrd.c
31 *
32 * code for creating degraded read DAGs
33 */
34
35#include "rf_types.h"
36#include "rf_raid.h"
37#include "rf_dag.h"
38#include "rf_dagutils.h"
39#include "rf_dagfuncs.h"
40#include "rf_threadid.h"
41#include "rf_debugMem.h"
42#include "rf_memchunk.h"
43#include "rf_general.h"
44#include "rf_dagdegrd.h"
45
46
47/******************************************************************************
48 *
49 * General comments on DAG creation:
50 *
51 * All DAGs in this file use roll-away error recovery.  Each DAG has a single
52 * commit node, usually called "Cmt."  If an error occurs before the Cmt node
53 * is reached, the execution engine will halt forward execution and work
54 * backward through the graph, executing the undo functions.  Assuming that
55 * each node in the graph prior to the Cmt node are undoable and atomic - or -
56 * does not make changes to permanent state, the graph will fail atomically.
57 * If an error occurs after the Cmt node executes, the engine will roll-forward
58 * through the graph, blindly executing nodes until it reaches the end.
59 * If a graph reaches the end, it is assumed to have completed successfully.
60 *
61 * A graph has only 1 Cmt node.
62 *
63 */
64
65
66/******************************************************************************
67 *
68 * The following wrappers map the standard DAG creation interface to the
69 * DAG creation routines.  Additionally, these wrappers enable experimentation
70 * with new DAG structures by providing an extra level of indirection, allowing
71 * the DAG creation routines to be replaced at this single point.
72 */
73
/*
 * Wrapper: build a degraded-mode read DAG for RAID level 5 by delegating to
 * the generic single-fault degraded-read creator, supplying the XOR-based
 * redundancy recovery functions.  All other arguments pass straight through.
 */
void
rf_CreateRaidFiveDegradedReadDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList)
{
	rf_CreateDegradedReadDAG(raidPtr, asmap, dag_h, bp, flags, allocList,
	    &rf_xorRecoveryFuncs);
}
86
87
88/******************************************************************************
89 *
90 * DAG creation code begins here
91 */
92
93
94/******************************************************************************
95 * Create a degraded read DAG for RAID level 1
96 *
97 * Hdr -> Nil -> R(p/s)d -> Commit -> Trm
98 *
99 * The "Rd" node reads data from the surviving disk in the mirror pair
100 *   Rpd - read of primary copy
101 *   Rsd - read of secondary copy
102 *
103 * Parameters:  raidPtr   - description of the physical array
104 *              asmap     - logical & physical addresses for this access
105 *              bp        - buffer ptr (for holding write data)
106 *              flags     - general flags (e.g. disk locking)
107 *              allocList - list of memory allocated in DAG creation
108 *****************************************************************************/
109
110void
111rf_CreateRaidOneDegradedReadDAG(
112    RF_Raid_t * raidPtr,
113    RF_AccessStripeMap_t * asmap,
114    RF_DagHeader_t * dag_h,
115    void *bp,
116    RF_RaidAccessFlags_t flags,
117    RF_AllocListElem_t * allocList)
118{
119	RF_DagNode_t *nodes, *rdNode, *blockNode, *commitNode, *termNode;
120	RF_StripeNum_t parityStripeID;
121	RF_ReconUnitNum_t which_ru;
122	RF_PhysDiskAddr_t *pda;
123	int     useMirror, i;
124
125	useMirror = 0;
126	parityStripeID = rf_RaidAddressToParityStripeID(&(raidPtr->Layout),
127	    asmap->raidAddress, &which_ru);
128	if (rf_dagDebug) {
129		printf("[Creating RAID level 1 degraded read DAG]\n");
130	}
131	dag_h->creator = "RaidOneDegradedReadDAG";
132	/* alloc the Wnd nodes and the Wmir node */
133	if (asmap->numDataFailed == 0)
134		useMirror = RF_FALSE;
135	else
136		useMirror = RF_TRUE;
137
138	/* total number of nodes = 1 + (block + commit + terminator) */
139	RF_CallocAndAdd(nodes, 4, sizeof(RF_DagNode_t), (RF_DagNode_t *), allocList);
140	i = 0;
141	rdNode = &nodes[i];
142	i++;
143	blockNode = &nodes[i];
144	i++;
145	commitNode = &nodes[i];
146	i++;
147	termNode = &nodes[i];
148	i++;
149
150	/* this dag can not commit until the commit node is reached.   errors
151	 * prior to the commit point imply the dag has failed and must be
152	 * retried */
153	dag_h->numCommitNodes = 1;
154	dag_h->numCommits = 0;
155	dag_h->numSuccedents = 1;
156
157	/* initialize the block, commit, and terminator nodes */
158	rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
159	    NULL, 1, 0, 0, 0, dag_h, "Nil", allocList);
160	rf_InitNode(commitNode, rf_wait, RF_TRUE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
161	    NULL, 1, 1, 0, 0, dag_h, "Cmt", allocList);
162	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc,
163	    NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);
164
165	pda = asmap->physInfo;
166	RF_ASSERT(pda != NULL);
167	/* parityInfo must describe entire parity unit */
168	RF_ASSERT(asmap->parityInfo->next == NULL);
169
170	/* initialize the data node */
171	if (!useMirror) {
172		/* read primary copy of data */
173		rf_InitNode(rdNode, rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc,
174		    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rpd", allocList);
175		rdNode->params[0].p = pda;
176		rdNode->params[1].p = pda->bufPtr;
177		rdNode->params[2].v = parityStripeID;
178		rdNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
179	} else {
180		/* read secondary copy of data */
181		rf_InitNode(rdNode, rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc,
182		    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rsd", allocList);
183		rdNode->params[0].p = asmap->parityInfo;
184		rdNode->params[1].p = pda->bufPtr;
185		rdNode->params[2].v = parityStripeID;
186		rdNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
187	}
188
189	/* connect header to block node */
190	RF_ASSERT(dag_h->numSuccedents == 1);
191	RF_ASSERT(blockNode->numAntecedents == 0);
192	dag_h->succedents[0] = blockNode;
193
194	/* connect block node to rdnode */
195	RF_ASSERT(blockNode->numSuccedents == 1);
196	RF_ASSERT(rdNode->numAntecedents == 1);
197	blockNode->succedents[0] = rdNode;
198	rdNode->antecedents[0] = blockNode;
199	rdNode->antType[0] = rf_control;
200
201	/* connect rdnode to commit node */
202	RF_ASSERT(rdNode->numSuccedents == 1);
203	RF_ASSERT(commitNode->numAntecedents == 1);
204	rdNode->succedents[0] = commitNode;
205	commitNode->antecedents[0] = rdNode;
206	commitNode->antType[0] = rf_control;
207
208	/* connect commit node to terminator */
209	RF_ASSERT(commitNode->numSuccedents == 1);
210	RF_ASSERT(termNode->numAntecedents == 1);
211	RF_ASSERT(termNode->numSuccedents == 0);
212	commitNode->succedents[0] = termNode;
213	termNode->antecedents[0] = commitNode;
214	termNode->antType[0] = rf_control;
215}
216
217
218
219/******************************************************************************
220 *
221 * creates a DAG to perform a degraded-mode read of data within one stripe.
222 * This DAG is as follows:
223 *
224 * Hdr -> Block -> Rud -> Xor -> Cmt -> T
225 *              -> Rrd ->
226 *              -> Rp -->
227 *
228 * Each R node is a successor of the L node
229 * One successor arc from each R node goes to C, and the other to X
230 * There is one Rud for each chunk of surviving user data requested by the
231 * user, and one Rrd for each chunk of surviving user data _not_ being read by
232 * the user
233 * R = read, ud = user data, rd = recovery (surviving) data, p = parity
234 * X = XOR, C = Commit, T = terminate
235 *
236 * The block node guarantees a single source node.
237 *
238 * Note:  The target buffer for the XOR node is set to the actual user buffer
239 * where the failed data is supposed to end up.  This buffer is zero'd by the
240 * code here.  Thus, if you create a degraded read dag, use it, and then
241 * re-use, you have to be sure to zero the target buffer prior to the re-use.
242 *
 * The recFunc argument at the end names and supplies the redundancy
 * recovery function used to reconstruct the lost data.
246 *
247 *****************************************************************************/
248
void
rf_CreateDegradedReadDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    RF_RedFuncs_t * recFunc)
{
	RF_DagNode_t *nodes, *rudNodes, *rrdNodes, *xorNode, *blockNode;
	RF_DagNode_t *commitNode, *rpNode, *termNode;
	int     nNodes, nRrdNodes, nRudNodes, nXorBufs, i;
	int     j, paramNum;
	RF_SectorCount_t sectorsPerSU;
	RF_ReconUnitNum_t which_ru;
	char   *overlappingPDAs;/* temporary flag array, one entry per user
				 * pda; set by rf_GenerateFailedAccessASMs
				 * for pdas overlapping the failed unit */
	RF_AccessStripeMapHeader_t *new_asm_h[2];
	RF_PhysDiskAddr_t *pda, *parityPDA;
	RF_StripeNum_t parityStripeID;
	RF_PhysDiskAddr_t *failedPDA;
	RF_RaidLayout_t *layoutPtr;
	char   *rpBuf;		/* buffer the parity unit is read into */

	layoutPtr = &(raidPtr->Layout);
	/* failedPDA points to the pda within the asm that targets the failed
	 * disk */
	failedPDA = asmap->failedPDAs[0];
	parityStripeID = rf_RaidAddressToParityStripeID(layoutPtr,
	    asmap->raidAddress, &which_ru);
	sectorsPerSU = layoutPtr->sectorsPerStripeUnit;

	if (rf_dagDebug) {
		printf("[Creating degraded read DAG]\n");
	}
	/* this DAG only handles a single failed data unit */
	RF_ASSERT(asmap->numDataFailed == 1);
	dag_h->creator = "DegradedReadDAG";

	/*
         * generate two ASMs identifying the surviving data we need
         * in order to recover the lost data
         */

	/* overlappingPDAs array must be zero'd; note it is allocated with
	 * plain RF_Calloc (not on allocList) and freed explicitly below
	 * once the xor parameters have been assembled */
	RF_Calloc(overlappingPDAs, asmap->numStripeUnitsAccessed, sizeof(char), (char *));
	rf_GenerateFailedAccessASMs(raidPtr, asmap, failedPDA, dag_h, new_asm_h, &nXorBufs,
	    &rpBuf, overlappingPDAs, allocList);

	/*
         * create all the nodes at once
         *
         * -1 because no access is generated for the failed pda
         */
	nRudNodes = asmap->numStripeUnitsAccessed - 1;
	nRrdNodes = ((new_asm_h[0]) ? new_asm_h[0]->stripeMap->numStripeUnitsAccessed : 0) +
	    ((new_asm_h[1]) ? new_asm_h[1]->stripeMap->numStripeUnitsAccessed : 0);
	nNodes = 5 + nRudNodes + nRrdNodes;	/* block, commit, xor, Rp,
						 * term + Rud + Rrd nodes */
	RF_CallocAndAdd(nodes, nNodes, sizeof(RF_DagNode_t), (RF_DagNode_t *),
	    allocList);
	/* carve the node array into the individual nodes */
	i = 0;
	blockNode = &nodes[i];
	i++;
	commitNode = &nodes[i];
	i++;
	xorNode = &nodes[i];
	i++;
	rpNode = &nodes[i];
	i++;
	termNode = &nodes[i];
	i++;
	rudNodes = &nodes[i];
	i += nRudNodes;
	rrdNodes = &nodes[i];
	i += nRrdNodes;
	RF_ASSERT(i == nNodes);

	/* initialize nodes */
	dag_h->numCommitNodes = 1;
	dag_h->numCommits = 0;
	/* this dag can not commit until the commit node is reached errors
	 * prior to the commit point imply the dag has failed */
	dag_h->numSuccedents = 1;

	/* the block node fans out to every read node (Rp + Rud + Rrd) */
	rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
	    NULL, nRudNodes + nRrdNodes + 1, 0, 0, 0, dag_h, "Nil", allocList);
	rf_InitNode(commitNode, rf_wait, RF_TRUE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
	    NULL, 1, 1, 0, 0, dag_h, "Cmt", allocList);
	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc,
	    NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);
	/* xor node: one (pda, buf) parameter pair per read, plus the
	 * trailing failedPDA/raidPtr pair, one result buffer */
	rf_InitNode(xorNode, rf_wait, RF_FALSE, recFunc->simple, rf_NullNodeUndoFunc,
	    NULL, 1, nRudNodes + nRrdNodes + 1, 2 * nXorBufs + 2, 1, dag_h,
	    recFunc->SimpleName, allocList);

	/* fill in the Rud nodes */
	for (pda = asmap->physInfo, i = 0; i < nRudNodes; i++, pda = pda->next) {
		if (pda == failedPDA) {
			/* no read for the failed unit; back up i so this
			 * node slot is reused for the next surviving pda */
			i--;
			continue;
		}
		rf_InitNode(&rudNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc,
		    rf_DiskReadUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h,
		    "Rud", allocList);
		RF_ASSERT(pda);
		rudNodes[i].params[0].p = pda;
		rudNodes[i].params[1].p = pda->bufPtr;
		rudNodes[i].params[2].v = parityStripeID;
		rudNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
	}

	/* fill in the Rrd nodes: surviving data not requested by the user,
	 * described by the (up to two) ASMs generated above */
	i = 0;
	if (new_asm_h[0]) {
		for (pda = new_asm_h[0]->stripeMap->physInfo;
		    i < new_asm_h[0]->stripeMap->numStripeUnitsAccessed;
		    i++, pda = pda->next) {
			rf_InitNode(&rrdNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc,
			    rf_DiskReadUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0,
			    dag_h, "Rrd", allocList);
			RF_ASSERT(pda);
			rrdNodes[i].params[0].p = pda;
			rrdNodes[i].params[1].p = pda->bufPtr;
			rrdNodes[i].params[2].v = parityStripeID;
			rrdNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
		}
	}
	if (new_asm_h[1]) {
		for (j = 0, pda = new_asm_h[1]->stripeMap->physInfo;
		    j < new_asm_h[1]->stripeMap->numStripeUnitsAccessed;
		    j++, pda = pda->next) {
			rf_InitNode(&rrdNodes[i + j], rf_wait, RF_FALSE, rf_DiskReadFunc,
			    rf_DiskReadUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0,
			    dag_h, "Rrd", allocList);
			RF_ASSERT(pda);
			rrdNodes[i + j].params[0].p = pda;
			rrdNodes[i + j].params[1].p = pda->bufPtr;
			rrdNodes[i + j].params[2].v = parityStripeID;
			rrdNodes[i + j].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
		}
	}
	/* make a PDA for the parity unit, aligned to the failed region
	 * within the parity stripe unit */
	RF_MallocAndAdd(parityPDA, sizeof(RF_PhysDiskAddr_t), (RF_PhysDiskAddr_t *), allocList);
	parityPDA->row = asmap->parityInfo->row;
	parityPDA->col = asmap->parityInfo->col;
	parityPDA->startSector = ((asmap->parityInfo->startSector / sectorsPerSU)
	    * sectorsPerSU) + (failedPDA->startSector % sectorsPerSU);
	parityPDA->numSector = failedPDA->numSector;

	/* initialize the Rp node */
	rf_InitNode(rpNode, rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc,
	    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rp ", allocList);
	rpNode->params[0].p = parityPDA;
	rpNode->params[1].p = rpBuf;
	rpNode->params[2].v = parityStripeID;
	rpNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);

	/*
         * the last and nastiest step is to assign all
         * the parameters of the Xor node
         */
	paramNum = 0;
	for (i = 0; i < nRrdNodes; i++) {
		/* all the Rrd nodes need to be xored together */
		xorNode->params[paramNum++] = rrdNodes[i].params[0];
		xorNode->params[paramNum++] = rrdNodes[i].params[1];
	}
	for (i = 0; i < nRudNodes; i++) {
		/* any Rud nodes that overlap the failed access need to be
		 * xored in */
		if (overlappingPDAs[i]) {
			/* clone the pda, then restrict the copy to just the
			 * overlapping range (DOBUFFER adjusts bufPtr too) */
			RF_MallocAndAdd(pda, sizeof(RF_PhysDiskAddr_t), (RF_PhysDiskAddr_t *), allocList);
			bcopy((char *) rudNodes[i].params[0].p, (char *) pda, sizeof(RF_PhysDiskAddr_t));
			rf_RangeRestrictPDA(raidPtr, failedPDA, pda, RF_RESTRICT_DOBUFFER, 0);
			xorNode->params[paramNum++].p = pda;
			xorNode->params[paramNum++].p = pda->bufPtr;
		}
	}
	/* flags no longer needed; this was not on allocList */
	RF_Free(overlappingPDAs, asmap->numStripeUnitsAccessed * sizeof(char));

	/* install parity pda as last set of params to be xor'd */
	xorNode->params[paramNum++].p = parityPDA;
	xorNode->params[paramNum++].p = rpBuf;

	/*
         * the last 2 params to the recovery xor node are
         * the failed PDA and the raidPtr
         */
	xorNode->params[paramNum++].p = failedPDA;
	xorNode->params[paramNum++].p = raidPtr;
	RF_ASSERT(paramNum == 2 * nXorBufs + 2);

	/*
         * The xor node uses results[0] as the target buffer.
         * Set pointer and zero the buffer. In the kernel, this
         * may be a user buffer in which case we have to remap it.
         */
	xorNode->results[0] = failedPDA->bufPtr;
	RF_BZERO(bp, failedPDA->bufPtr, rf_RaidAddressToByte(raidPtr,
		failedPDA->numSector));

	/* connect nodes to form graph */
	/* connect the header to the block node */
	RF_ASSERT(dag_h->numSuccedents == 1);
	RF_ASSERT(blockNode->numAntecedents == 0);
	dag_h->succedents[0] = blockNode;

	/* connect the block node to the read nodes */
	RF_ASSERT(blockNode->numSuccedents == (1 + nRrdNodes + nRudNodes));
	RF_ASSERT(rpNode->numAntecedents == 1);
	blockNode->succedents[0] = rpNode;
	rpNode->antecedents[0] = blockNode;
	rpNode->antType[0] = rf_control;
	for (i = 0; i < nRrdNodes; i++) {
		RF_ASSERT(rrdNodes[i].numSuccedents == 1);
		blockNode->succedents[1 + i] = &rrdNodes[i];
		rrdNodes[i].antecedents[0] = blockNode;
		rrdNodes[i].antType[0] = rf_control;
	}
	for (i = 0; i < nRudNodes; i++) {
		RF_ASSERT(rudNodes[i].numSuccedents == 1);
		blockNode->succedents[1 + nRrdNodes + i] = &rudNodes[i];
		rudNodes[i].antecedents[0] = blockNode;
		rudNodes[i].antType[0] = rf_control;
	}

	/* connect the read nodes to the xor node */
	RF_ASSERT(xorNode->numAntecedents == (1 + nRrdNodes + nRudNodes));
	RF_ASSERT(rpNode->numSuccedents == 1);
	rpNode->succedents[0] = xorNode;
	xorNode->antecedents[0] = rpNode;
	xorNode->antType[0] = rf_trueData;
	for (i = 0; i < nRrdNodes; i++) {
		RF_ASSERT(rrdNodes[i].numSuccedents == 1);
		rrdNodes[i].succedents[0] = xorNode;
		xorNode->antecedents[1 + i] = &rrdNodes[i];
		xorNode->antType[1 + i] = rf_trueData;
	}
	for (i = 0; i < nRudNodes; i++) {
		RF_ASSERT(rudNodes[i].numSuccedents == 1);
		rudNodes[i].succedents[0] = xorNode;
		xorNode->antecedents[1 + nRrdNodes + i] = &rudNodes[i];
		xorNode->antType[1 + nRrdNodes + i] = rf_trueData;
	}

	/* connect the xor node to the commit node */
	RF_ASSERT(xorNode->numSuccedents == 1);
	RF_ASSERT(commitNode->numAntecedents == 1);
	xorNode->succedents[0] = commitNode;
	commitNode->antecedents[0] = xorNode;
	commitNode->antType[0] = rf_control;

	/* connect the termNode to the commit node */
	RF_ASSERT(commitNode->numSuccedents == 1);
	RF_ASSERT(termNode->numAntecedents == 1);
	RF_ASSERT(termNode->numSuccedents == 0);
	commitNode->succedents[0] = termNode;
	termNode->antType[0] = rf_control;
	termNode->antecedents[0] = commitNode;
}
508
509
510/******************************************************************************
511 * Create a degraded read DAG for Chained Declustering
512 *
513 * Hdr -> Nil -> R(p/s)d -> Cmt -> Trm
514 *
515 * The "Rd" node reads data from the surviving disk in the mirror pair
516 *   Rpd - read of primary copy
517 *   Rsd - read of secondary copy
518 *
519 * Parameters:  raidPtr   - description of the physical array
520 *              asmap     - logical & physical addresses for this access
521 *              bp        - buffer ptr (for holding write data)
522 *              flags     - general flags (e.g. disk locking)
523 *              allocList - list of memory allocated in DAG creation
524 *****************************************************************************/
525
void
rf_CreateRaidCDegradedReadDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList)
{
	RF_DagNode_t *nodes, *rdNode, *blockNode, *commitNode, *termNode;
	RF_StripeNum_t parityStripeID;
	int     useMirror, i, shiftable;
	RF_ReconUnitNum_t which_ru;
	RF_PhysDiskAddr_t *pda;

	/* the read may only be shifted to the next disk in line when the
	 * mirror pair is fully intact (no data or parity failure) */
	if ((asmap->numDataFailed + asmap->numParityFailed) == 0) {
		shiftable = RF_TRUE;
	} else {
		shiftable = RF_FALSE;
	}
	useMirror = 0;
	parityStripeID = rf_RaidAddressToParityStripeID(&(raidPtr->Layout),
	    asmap->raidAddress, &which_ru);

	if (rf_dagDebug) {
		printf("[Creating RAID C degraded read DAG]\n");
	}
	dag_h->creator = "RaidCDegradedReadDAG";
	/* read the mirror (secondary) copy iff the data disk has failed */
	if (asmap->numDataFailed == 0)
		useMirror = RF_FALSE;
	else
		useMirror = RF_TRUE;

	/* total number of nodes = 1 + (block + commit + terminator) */
	RF_CallocAndAdd(nodes, 4, sizeof(RF_DagNode_t), (RF_DagNode_t *), allocList);
	i = 0;
	rdNode = &nodes[i];
	i++;
	blockNode = &nodes[i];
	i++;
	commitNode = &nodes[i];
	i++;
	termNode = &nodes[i];
	i++;

	/*
         * This dag can not commit until the commit node is reached.
         * Errors prior to the commit point imply the dag has failed
         * and must be retried.
         */
	dag_h->numCommitNodes = 1;
	dag_h->numCommits = 0;
	dag_h->numSuccedents = 1;

	/* initialize the block, commit, and terminator nodes */
	rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
	    NULL, 1, 0, 0, 0, dag_h, "Nil", allocList);
	rf_InitNode(commitNode, rf_wait, RF_TRUE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
	    NULL, 1, 1, 0, 0, dag_h, "Cmt", allocList);
	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc,
	    NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);

	pda = asmap->physInfo;
	RF_ASSERT(pda != NULL);
	/* parityInfo must describe entire parity unit */
	RF_ASSERT(asmap->parityInfo->next == NULL);

	/* initialize the single data-read node; either the primary copy,
	 * the shifted (load-balanced) copy, or the secondary copy */
	if (!useMirror) {
		rf_InitNode(rdNode, rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc,
		    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rpd", allocList);
		if (shiftable && rf_compute_workload_shift(raidPtr, pda)) {
			/* shift this read to the next disk in line; the
			 * parity pda names the alternate copy's location */
			rdNode->params[0].p = asmap->parityInfo;
			rdNode->params[1].p = pda->bufPtr;
			rdNode->params[2].v = parityStripeID;
			rdNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
		} else {
			/* read primary copy */
			rdNode->params[0].p = pda;
			rdNode->params[1].p = pda->bufPtr;
			rdNode->params[2].v = parityStripeID;
			rdNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
		}
	} else {
		/* read secondary copy of data */
		rf_InitNode(rdNode, rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc,
		    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rsd", allocList);
		rdNode->params[0].p = asmap->parityInfo;
		rdNode->params[1].p = pda->bufPtr;
		rdNode->params[2].v = parityStripeID;
		rdNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
	}

	/* connect header to block node */
	RF_ASSERT(dag_h->numSuccedents == 1);
	RF_ASSERT(blockNode->numAntecedents == 0);
	dag_h->succedents[0] = blockNode;

	/* connect block node to rdnode */
	RF_ASSERT(blockNode->numSuccedents == 1);
	RF_ASSERT(rdNode->numAntecedents == 1);
	blockNode->succedents[0] = rdNode;
	rdNode->antecedents[0] = blockNode;
	rdNode->antType[0] = rf_control;

	/* connect rdnode to commit node */
	RF_ASSERT(rdNode->numSuccedents == 1);
	RF_ASSERT(commitNode->numAntecedents == 1);
	rdNode->succedents[0] = commitNode;
	commitNode->antecedents[0] = rdNode;
	commitNode->antType[0] = rf_control;

	/* connect commit node to terminator */
	RF_ASSERT(commitNode->numSuccedents == 1);
	RF_ASSERT(termNode->numAntecedents == 1);
	RF_ASSERT(termNode->numSuccedents == 0);
	commitNode->succedents[0] = termNode;
	termNode->antecedents[0] = commitNode;
	termNode->antType[0] = rf_control;
}
648/*
649 * XXX move this elsewhere?
650 */
651void
652rf_DD_GenerateFailedAccessASMs(
653    RF_Raid_t * raidPtr,
654    RF_AccessStripeMap_t * asmap,
655    RF_PhysDiskAddr_t ** pdap,
656    int *nNodep,
657    RF_PhysDiskAddr_t ** pqpdap,
658    int *nPQNodep,
659    RF_AllocListElem_t * allocList)
660{
661	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
662	int     PDAPerDisk, i;
663	RF_SectorCount_t secPerSU = layoutPtr->sectorsPerStripeUnit;
664	int     numDataCol = layoutPtr->numDataCol;
665	int     state;
666	RF_SectorNum_t suoff, suend;
667	unsigned firstDataCol, napdas, count;
668	RF_SectorNum_t fone_start, fone_end, ftwo_start = 0, ftwo_end = 0;
669	RF_PhysDiskAddr_t *fone = asmap->failedPDAs[0], *ftwo = asmap->failedPDAs[1];
670	RF_PhysDiskAddr_t *pda_p;
671	RF_PhysDiskAddr_t *phys_p;
672	RF_RaidAddr_t sosAddr;
673
674	/* determine how many pda's we will have to generate per unaccess
675	 * stripe. If there is only one failed data unit, it is one; if two,
676	 * possibly two, depending wether they overlap. */
677
678	fone_start = rf_StripeUnitOffset(layoutPtr, fone->startSector);
679	fone_end = fone_start + fone->numSector;
680
681#define CONS_PDA(if,start,num) \
682  pda_p->row = asmap->if->row;    pda_p->col = asmap->if->col; \
683  pda_p->startSector = ((asmap->if->startSector / secPerSU) * secPerSU) + start; \
684  pda_p->numSector = num; \
685  pda_p->next = NULL; \
686  RF_MallocAndAdd(pda_p->bufPtr,rf_RaidAddressToByte(raidPtr,num),(char *), allocList)
687
688	if (asmap->numDataFailed == 1) {
689		PDAPerDisk = 1;
690		state = 1;
691		RF_MallocAndAdd(*pqpdap, 2 * sizeof(RF_PhysDiskAddr_t), (RF_PhysDiskAddr_t *), allocList);
692		pda_p = *pqpdap;
693		/* build p */
694		CONS_PDA(parityInfo, fone_start, fone->numSector);
695		pda_p->type = RF_PDA_TYPE_PARITY;
696		pda_p++;
697		/* build q */
698		CONS_PDA(qInfo, fone_start, fone->numSector);
699		pda_p->type = RF_PDA_TYPE_Q;
700	} else {
701		ftwo_start = rf_StripeUnitOffset(layoutPtr, ftwo->startSector);
702		ftwo_end = ftwo_start + ftwo->numSector;
703		if (fone->numSector + ftwo->numSector > secPerSU) {
704			PDAPerDisk = 1;
705			state = 2;
706			RF_MallocAndAdd(*pqpdap, 2 * sizeof(RF_PhysDiskAddr_t), (RF_PhysDiskAddr_t *), allocList);
707			pda_p = *pqpdap;
708			CONS_PDA(parityInfo, 0, secPerSU);
709			pda_p->type = RF_PDA_TYPE_PARITY;
710			pda_p++;
711			CONS_PDA(qInfo, 0, secPerSU);
712			pda_p->type = RF_PDA_TYPE_Q;
713		} else {
714			PDAPerDisk = 2;
715			state = 3;
716			/* four of them, fone, then ftwo */
717			RF_MallocAndAdd(*pqpdap, 4 * sizeof(RF_PhysDiskAddr_t), (RF_PhysDiskAddr_t *), allocList);
718			pda_p = *pqpdap;
719			CONS_PDA(parityInfo, fone_start, fone->numSector);
720			pda_p->type = RF_PDA_TYPE_PARITY;
721			pda_p++;
722			CONS_PDA(qInfo, fone_start, fone->numSector);
723			pda_p->type = RF_PDA_TYPE_Q;
724			pda_p++;
725			CONS_PDA(parityInfo, ftwo_start, ftwo->numSector);
726			pda_p->type = RF_PDA_TYPE_PARITY;
727			pda_p++;
728			CONS_PDA(qInfo, ftwo_start, ftwo->numSector);
729			pda_p->type = RF_PDA_TYPE_Q;
730		}
731	}
732	/* figure out number of nonaccessed pda */
733	napdas = PDAPerDisk * (numDataCol - asmap->numStripeUnitsAccessed - (ftwo == NULL ? 1 : 0));
734	*nPQNodep = PDAPerDisk;
735
736	/* sweep over the over accessed pda's, figuring out the number of
737	 * additional pda's to generate. Of course, skip the failed ones */
738
739	count = 0;
740	for (pda_p = asmap->physInfo; pda_p; pda_p = pda_p->next) {
741		if ((pda_p == fone) || (pda_p == ftwo))
742			continue;
743		suoff = rf_StripeUnitOffset(layoutPtr, pda_p->startSector);
744		suend = suoff + pda_p->numSector;
745		switch (state) {
746		case 1:	/* one failed PDA to overlap */
747			/* if a PDA doesn't contain the failed unit, it can
748			 * only miss the start or end, not both */
749			if ((suoff > fone_start) || (suend < fone_end))
750				count++;
751			break;
752		case 2:	/* whole stripe */
753			if (suoff)	/* leak at begining */
754				count++;
755			if (suend < numDataCol)	/* leak at end */
756				count++;
757			break;
758		case 3:	/* two disjoint units */
759			if ((suoff > fone_start) || (suend < fone_end))
760				count++;
761			if ((suoff > ftwo_start) || (suend < ftwo_end))
762				count++;
763			break;
764		default:
765			RF_PANIC();
766		}
767	}
768
769	napdas += count;
770	*nNodep = napdas;
771	if (napdas == 0)
772		return;		/* short circuit */
773
774	/* allocate up our list of pda's */
775
776	RF_CallocAndAdd(pda_p, napdas, sizeof(RF_PhysDiskAddr_t), (RF_PhysDiskAddr_t *), allocList);
777	*pdap = pda_p;
778
779	/* linkem together */
780	for (i = 0; i < (napdas - 1); i++)
781		pda_p[i].next = pda_p + (i + 1);
782
783	/* march through the one's up to the first accessed disk */
784	firstDataCol = rf_RaidAddressToStripeUnitID(&(raidPtr->Layout), asmap->physInfo->raidAddress) % numDataCol;
785	sosAddr = rf_RaidAddressOfPrevStripeBoundary(layoutPtr, asmap->raidAddress);
786	for (i = 0; i < firstDataCol; i++) {
787		if ((pda_p - (*pdap)) == napdas)
788			continue;
789		pda_p->type = RF_PDA_TYPE_DATA;
790		pda_p->raidAddress = sosAddr + (i * secPerSU);
791		(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
792		/* skip over dead disks */
793		if (RF_DEAD_DISK(raidPtr->Disks[pda_p->row][pda_p->col].status))
794			continue;
795		switch (state) {
796		case 1:	/* fone */
797			pda_p->numSector = fone->numSector;
798			pda_p->raidAddress += fone_start;
799			pda_p->startSector += fone_start;
800			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
801			break;
802		case 2:	/* full stripe */
803			pda_p->numSector = secPerSU;
804			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, secPerSU), (char *), allocList);
805			break;
806		case 3:	/* two slabs */
807			pda_p->numSector = fone->numSector;
808			pda_p->raidAddress += fone_start;
809			pda_p->startSector += fone_start;
810			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
811			pda_p++;
812			pda_p->type = RF_PDA_TYPE_DATA;
813			pda_p->raidAddress = sosAddr + (i * secPerSU);
814			(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
815			pda_p->numSector = ftwo->numSector;
816			pda_p->raidAddress += ftwo_start;
817			pda_p->startSector += ftwo_start;
818			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
819			break;
820		default:
821			RF_PANIC();
822		}
823		pda_p++;
824	}
825
826	/* march through the touched stripe units */
827	for (phys_p = asmap->physInfo; phys_p; phys_p = phys_p->next, i++) {
828		if ((phys_p == asmap->failedPDAs[0]) || (phys_p == asmap->failedPDAs[1]))
829			continue;
830		suoff = rf_StripeUnitOffset(layoutPtr, phys_p->startSector);
831		suend = suoff + phys_p->numSector;
832		switch (state) {
833		case 1:	/* single buffer */
834			if (suoff > fone_start) {
835				RF_ASSERT(suend >= fone_end);
836				/* The data read starts after the mapped
837				 * access, snip off the begining */
838				pda_p->numSector = suoff - fone_start;
839				pda_p->raidAddress = sosAddr + (i * secPerSU) + fone_start;
840				(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
841				RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
842				pda_p++;
843			}
844			if (suend < fone_end) {
845				RF_ASSERT(suoff <= fone_start);
846				/* The data read stops before the end of the
847				 * failed access, extend */
848				pda_p->numSector = fone_end - suend;
849				pda_p->raidAddress = sosAddr + (i * secPerSU) + suend;	/* off by one? */
850				(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
851				RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
852				pda_p++;
853			}
854			break;
855		case 2:	/* whole stripe unit */
856			RF_ASSERT((suoff == 0) || (suend == secPerSU));
857			if (suend < secPerSU) {	/* short read, snip from end
858						 * on */
859				pda_p->numSector = secPerSU - suend;
860				pda_p->raidAddress = sosAddr + (i * secPerSU) + suend;	/* off by one? */
861				(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
862				RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
863				pda_p++;
864			} else
865				if (suoff > 0) {	/* short at front */
866					pda_p->numSector = suoff;
867					pda_p->raidAddress = sosAddr + (i * secPerSU);
868					(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
869					RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
870					pda_p++;
871				}
872			break;
873		case 3:	/* two nonoverlapping failures */
874			if ((suoff > fone_start) || (suend < fone_end)) {
875				if (suoff > fone_start) {
876					RF_ASSERT(suend >= fone_end);
877					/* The data read starts after the
878					 * mapped access, snip off the
879					 * begining */
880					pda_p->numSector = suoff - fone_start;
881					pda_p->raidAddress = sosAddr + (i * secPerSU) + fone_start;
882					(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
883					RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
884					pda_p++;
885				}
886				if (suend < fone_end) {
887					RF_ASSERT(suoff <= fone_start);
888					/* The data read stops before the end
889					 * of the failed access, extend */
890					pda_p->numSector = fone_end - suend;
891					pda_p->raidAddress = sosAddr + (i * secPerSU) + suend;	/* off by one? */
892					(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
893					RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
894					pda_p++;
895				}
896			}
897			if ((suoff > ftwo_start) || (suend < ftwo_end)) {
898				if (suoff > ftwo_start) {
899					RF_ASSERT(suend >= ftwo_end);
900					/* The data read starts after the
901					 * mapped access, snip off the
902					 * begining */
903					pda_p->numSector = suoff - ftwo_start;
904					pda_p->raidAddress = sosAddr + (i * secPerSU) + ftwo_start;
905					(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
906					RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
907					pda_p++;
908				}
909				if (suend < ftwo_end) {
910					RF_ASSERT(suoff <= ftwo_start);
911					/* The data read stops before the end
912					 * of the failed access, extend */
913					pda_p->numSector = ftwo_end - suend;
914					pda_p->raidAddress = sosAddr + (i * secPerSU) + suend;	/* off by one? */
915					(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
916					RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
917					pda_p++;
918				}
919			}
920			break;
921		default:
922			RF_PANIC();
923		}
924	}
925
926	/* after the last accessed disk */
927	for (; i < numDataCol; i++) {
928		if ((pda_p - (*pdap)) == napdas)
929			continue;
930		pda_p->type = RF_PDA_TYPE_DATA;
931		pda_p->raidAddress = sosAddr + (i * secPerSU);
932		(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
933		/* skip over dead disks */
934		if (RF_DEAD_DISK(raidPtr->Disks[pda_p->row][pda_p->col].status))
935			continue;
936		switch (state) {
937		case 1:	/* fone */
938			pda_p->numSector = fone->numSector;
939			pda_p->raidAddress += fone_start;
940			pda_p->startSector += fone_start;
941			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
942			break;
943		case 2:	/* full stripe */
944			pda_p->numSector = secPerSU;
945			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, secPerSU), (char *), allocList);
946			break;
947		case 3:	/* two slabs */
948			pda_p->numSector = fone->numSector;
949			pda_p->raidAddress += fone_start;
950			pda_p->startSector += fone_start;
951			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
952			pda_p++;
953			pda_p->type = RF_PDA_TYPE_DATA;
954			pda_p->raidAddress = sosAddr + (i * secPerSU);
955			(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
956			pda_p->numSector = ftwo->numSector;
957			pda_p->raidAddress += ftwo_start;
958			pda_p->startSector += ftwo_start;
959			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
960			break;
961		default:
962			RF_PANIC();
963		}
964		pda_p++;
965	}
966
967	RF_ASSERT(pda_p - *pdap == napdas);
968	return;
969}
/*
 * Initialize a disk-read node with the standard degraded-read wiring:
 * two successors (the unblock node and the recovery node) and one
 * control antecedent (the block node).
 *
 * NOTE: relies on `blockNode', `unblockNode', `recoveryNode', `dag_h',
 * and `allocList' being in scope at the expansion site.
 */
#define INIT_DISK_NODE(node,name) \
rf_InitNode(node, rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc, rf_GenericWakeupFunc, 2,1,4,0, dag_h, name, allocList); \
(node)->succedents[0] = unblockNode; \
(node)->succedents[1] = recoveryNode; \
(node)->antecedents[0] = blockNode; \
(node)->antType[0] = rf_control

/*
 * Fill in the four standard parameters of a disk-read node from a
 * physical disk address _p_: the pda itself, its data buffer, the
 * parity stripe ID, and the packed priority/reconstruction-unit word.
 *
 * NOTE: relies on `parityStripeID' and `which_ru' being in scope at
 * the expansion site.
 */
#define DISK_NODE_PARAMS(_node_,_p_) \
  (_node_).params[0].p = _p_ ; \
  (_node_).params[1].p = (_p_)->bufPtr; \
  (_node_).params[2].v = parityStripeID; \
  (_node_).params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru)
982
/*
 * Build a DAG for a degraded read in an architecture with two redundancy
 * units per stripe, where one or two of the accessed data units have
 * failed.  DAG shape:
 *
 *                  Hdr -> Block
 *                           |
 *             +--- all read nodes (Rud, Rrd, Rp, Rq) ---+
 *             |                                         |
 *          Unblock (commit)                          Recovery
 *             |                                         |
 *             +---------------> Trm <-------------------+
 *
 * Rud nodes read the surviving units the user requested; Rrd nodes read
 * the surviving units needed only for reconstruction; Rp/Rq read the two
 * kinds of redundancy unit.  The recovery node executes the caller-
 * supplied recovFunc with every read's pda (plus raidPtr and asmap) as
 * params, and reports the reconstructed data through its results: one
 * failed pda per failed data unit.
 *
 * raidPtr               - in: global raid state
 * asmap                 - in: mapping of this access; failedPDAs[0] and
 *                         failedPDAs[1] identify the failed unit(s)
 * dag_h                 - out: header of the DAG constructed here
 * bp, flags             - not referenced by this function
 * allocList             - allocation list for later cleanup
 * redundantReadNodeName - node name used for the "Rq" redundancy reads
 * recoveryNodeName      - node name used for the recovery node
 * recovFunc             - do-function executed by the recovery node
 */
void
rf_DoubleDegRead(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    char *redundantReadNodeName,
    char *recoveryNodeName,
    int (*recovFunc) (RF_DagNode_t *))
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_DagNode_t *nodes, *rudNodes, *rrdNodes, *recoveryNode, *blockNode,
	       *unblockNode, *rpNodes, *rqNodes, *termNode;
	RF_PhysDiskAddr_t *pda, *pqPDAs;
	RF_PhysDiskAddr_t *npdas;	/* pdas for the recovery-only (Rrd) reads */
	int     nNodes, nRrdNodes, nRudNodes, i;
	RF_ReconUnitNum_t which_ru;
	int     nReadNodes, nPQNodes;
	RF_PhysDiskAddr_t *failedPDA = asmap->failedPDAs[0];
	RF_PhysDiskAddr_t *failedPDAtwo = asmap->failedPDAs[1];
	RF_StripeNum_t parityStripeID = rf_RaidAddressToParityStripeID(layoutPtr, asmap->raidAddress, &which_ru);

	if (rf_dagDebug)
		printf("[Creating Double Degraded Read DAG]\n");
	/* compute the pdas for the surviving data needed only for recovery
	 * (npdas / nRrdNodes) and for the redundancy units (pqPDAs /
	 * nPQNodes) */
	rf_DD_GenerateFailedAccessASMs(raidPtr, asmap, &npdas, &nRrdNodes, &pqPDAs, &nPQNodes, allocList);

	/* surviving user-requested units get plain "Rud" read nodes; each
	 * redundancy type contributes nPQNodes reads */
	nRudNodes = asmap->numStripeUnitsAccessed - (asmap->numDataFailed);
	nReadNodes = nRrdNodes + nRudNodes + 2 * nPQNodes;
	nNodes = 4 /* block, unblock, recovery, term */ + nReadNodes;

	/* carve all nodes out of one array; the read nodes must end up
	 * contiguous (rud, rrd, rp, rq, in that order) because loops below
	 * walk all of them as rudNodes[0 .. nReadNodes-1] */
	RF_CallocAndAdd(nodes, nNodes, sizeof(RF_DagNode_t), (RF_DagNode_t *), allocList);
	i = 0;
	blockNode = &nodes[i];
	i += 1;
	unblockNode = &nodes[i];
	i += 1;
	recoveryNode = &nodes[i];
	i += 1;
	termNode = &nodes[i];
	i += 1;
	rudNodes = &nodes[i];
	i += nRudNodes;
	rrdNodes = &nodes[i];
	i += nRrdNodes;
	rpNodes = &nodes[i];
	i += nPQNodes;
	rqNodes = &nodes[i];
	i += nPQNodes;
	RF_ASSERT(i == nNodes);

	/* wire the DAG header: its only successor is the block node */
	dag_h->numSuccedents = 1;
	dag_h->succedents[0] = blockNode;
	dag_h->creator = "DoubleDegRead";
	dag_h->numCommits = 0;
	dag_h->numCommitNodes = 1;	/* unblock */

	/* the term node waits on both the unblock and recovery nodes */
	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc, NULL, 0, 2, 0, 0, dag_h, "Trm", allocList);
	termNode->antecedents[0] = unblockNode;
	termNode->antType[0] = rf_control;
	termNode->antecedents[1] = recoveryNode;
	termNode->antType[1] = rf_control;

	/* init the block and unblock nodes */
	/* The block node has all nodes except itself, unblock and recovery as
	 * successors. Similarly for predecessors of the unblock. */
	rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, nReadNodes, 0, 0, 0, dag_h, "Nil", allocList);
	rf_InitNode(unblockNode, rf_wait, RF_TRUE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, 1, nReadNodes, 0, 0, dag_h, "Nil", allocList);

	/* rudNodes + i spans every read node — see the contiguity note at
	 * the allocation above */
	for (i = 0; i < nReadNodes; i++) {
		blockNode->succedents[i] = rudNodes + i;
		unblockNode->antecedents[i] = rudNodes + i;
		unblockNode->antType[i] = rf_control;
	}
	unblockNode->succedents[0] = termNode;

	/* The recovery node has all the reads as predecessors, and the term
	 * node as successors. It gets a pda as a param from each of the read
	 * nodes plus the raidPtr. For each failed unit is has a result pda. */
	rf_InitNode(recoveryNode, rf_wait, RF_FALSE, recovFunc, rf_NullNodeUndoFunc, NULL,
	    1,			/* successors */
	    nReadNodes,		/* preds */
	    nReadNodes + 2,	/* params */
	    asmap->numDataFailed,	/* results */
	    dag_h, recoveryNodeName, allocList);

	recoveryNode->succedents[0] = termNode;
	for (i = 0; i < nReadNodes; i++) {
		recoveryNode->antecedents[i] = rudNodes + i;
		recoveryNode->antType[i] = rf_trueData;
	}

	/* build the read nodes, then come back and fill in recovery params
	 * and results */
	/* Rud nodes: walk the user's physInfo chain, skipping the failed
	 * pdas; i is only advanced for surviving units */
	pda = asmap->physInfo;
	for (i = 0; i < nRudNodes; pda = pda->next) {
		if ((pda == failedPDA) || (pda == failedPDAtwo))
			continue;
		INIT_DISK_NODE(rudNodes + i, "Rud");
		RF_ASSERT(pda);
		DISK_NODE_PARAMS(rudNodes[i], pda);
		i++;
	}

	/* Rrd nodes: one per recovery-only pda generated above */
	pda = npdas;
	for (i = 0; i < nRrdNodes; i++, pda = pda->next) {
		INIT_DISK_NODE(rrdNodes + i, "Rrd");
		RF_ASSERT(pda);
		DISK_NODE_PARAMS(rrdNodes[i], pda);
	}

	/* redundancy pdas */
	/* pqPDAs holds the redundancy pdas interleaved Rp, Rq [, Rp, Rq] */
	pda = pqPDAs;
	INIT_DISK_NODE(rpNodes, "Rp");
	RF_ASSERT(pda);
	DISK_NODE_PARAMS(rpNodes[0], pda);
	pda++;
	INIT_DISK_NODE(rqNodes, redundantReadNodeName);
	RF_ASSERT(pda);
	DISK_NODE_PARAMS(rqNodes[0], pda);
	if (nPQNodes == 2) {
		pda++;
		INIT_DISK_NODE(rpNodes + 1, "Rp");
		RF_ASSERT(pda);
		DISK_NODE_PARAMS(rpNodes[1], pda);
		pda++;
		INIT_DISK_NODE(rqNodes + 1, redundantReadNodeName);
		RF_ASSERT(pda);
		DISK_NODE_PARAMS(rqNodes[1], pda);
	}
	/* fill in recovery node params */
	/* again relies on the read nodes being contiguous after rudNodes */
	for (i = 0; i < nReadNodes; i++)
		recoveryNode->params[i] = rudNodes[i].params[0];	/* pda */
	recoveryNode->params[i++].p = (void *) raidPtr;
	recoveryNode->params[i++].p = (void *) asmap;
	recoveryNode->results[0] = failedPDA;
	if (asmap->numDataFailed == 2)
		recoveryNode->results[1] = failedPDAtwo;

	/* zero fill the target data buffers? */
}
1125