/* rf_dagdegrd.c revision 1.5 */
1/*	$NetBSD: rf_dagdegrd.c,v 1.5 2000/01/07 03:40:57 oster Exp $	*/
2/*
3 * Copyright (c) 1995 Carnegie-Mellon University.
4 * All rights reserved.
5 *
6 * Author: Mark Holland, Daniel Stodolsky, William V. Courtright II
7 *
8 * Permission to use, copy, modify and distribute this software and
9 * its documentation is hereby granted, provided that both the copyright
10 * notice and this permission notice appear in all copies of the
11 * software, derivative works or modified versions, and any portions
12 * thereof, and that both notices appear in supporting documentation.
13 *
14 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
15 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
16 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
17 *
18 * Carnegie Mellon requests users of this software to return to
19 *
20 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
21 *  School of Computer Science
22 *  Carnegie Mellon University
23 *  Pittsburgh PA 15213-3890
24 *
25 * any improvements or extensions that they make and grant Carnegie the
26 * rights to redistribute these changes.
27 */
28
29/*
30 * rf_dagdegrd.c
31 *
32 * code for creating degraded read DAGs
33 */
34
35#include "rf_types.h"
36#include "rf_raid.h"
37#include "rf_dag.h"
38#include "rf_dagutils.h"
39#include "rf_dagfuncs.h"
40#include "rf_debugMem.h"
41#include "rf_memchunk.h"
42#include "rf_general.h"
43#include "rf_dagdegrd.h"
44
45
46/******************************************************************************
47 *
48 * General comments on DAG creation:
49 *
50 * All DAGs in this file use roll-away error recovery.  Each DAG has a single
51 * commit node, usually called "Cmt."  If an error occurs before the Cmt node
52 * is reached, the execution engine will halt forward execution and work
53 * backward through the graph, executing the undo functions.  Assuming that
54 * each node in the graph prior to the Cmt node are undoable and atomic - or -
55 * does not make changes to permanent state, the graph will fail atomically.
56 * If an error occurs after the Cmt node executes, the engine will roll-forward
57 * through the graph, blindly executing nodes until it reaches the end.
58 * If a graph reaches the end, it is assumed to have completed successfully.
59 *
60 * A graph has only 1 Cmt node.
61 *
62 */
63
64
65/******************************************************************************
66 *
67 * The following wrappers map the standard DAG creation interface to the
68 * DAG creation routines.  Additionally, these wrappers enable experimentation
69 * with new DAG structures by providing an extra level of indirection, allowing
70 * the DAG creation routines to be replaced at this single point.
71 */
72
/*
 * Standard DAG-creation entry point for a RAID level 5 degraded read.
 * Forwards to the generic degraded-read DAG creator, selecting the
 * XOR-based redundancy recovery functions (rf_xorRecoveryFuncs).
 *
 * raidPtr   - description of the physical array
 * asmap     - logical & physical addresses for this access
 * dag_h     - DAG header to fill in
 * bp        - buffer ptr, passed through unchanged
 * flags     - general flags (e.g. disk locking)
 * allocList - list of memory allocated in DAG creation
 */
void
rf_CreateRaidFiveDegradedReadDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList)
{
	rf_CreateDegradedReadDAG(raidPtr, asmap, dag_h, bp, flags, allocList,
	    &rf_xorRecoveryFuncs);
}
85
86
87/******************************************************************************
88 *
89 * DAG creation code begins here
90 */
91
92
93/******************************************************************************
94 * Create a degraded read DAG for RAID level 1
95 *
96 * Hdr -> Nil -> R(p/s)d -> Commit -> Trm
97 *
98 * The "Rd" node reads data from the surviving disk in the mirror pair
99 *   Rpd - read of primary copy
100 *   Rsd - read of secondary copy
101 *
102 * Parameters:  raidPtr   - description of the physical array
103 *              asmap     - logical & physical addresses for this access
104 *              bp        - buffer ptr (for holding write data)
105 *              flags     - general flags (e.g. disk locking)
106 *              allocList - list of memory allocated in DAG creation
107 *****************************************************************************/
108
109void
110rf_CreateRaidOneDegradedReadDAG(
111    RF_Raid_t * raidPtr,
112    RF_AccessStripeMap_t * asmap,
113    RF_DagHeader_t * dag_h,
114    void *bp,
115    RF_RaidAccessFlags_t flags,
116    RF_AllocListElem_t * allocList)
117{
118	RF_DagNode_t *nodes, *rdNode, *blockNode, *commitNode, *termNode;
119	RF_StripeNum_t parityStripeID;
120	RF_ReconUnitNum_t which_ru;
121	RF_PhysDiskAddr_t *pda;
122	int     useMirror, i;
123
124	useMirror = 0;
125	parityStripeID = rf_RaidAddressToParityStripeID(&(raidPtr->Layout),
126	    asmap->raidAddress, &which_ru);
127	if (rf_dagDebug) {
128		printf("[Creating RAID level 1 degraded read DAG]\n");
129	}
130	dag_h->creator = "RaidOneDegradedReadDAG";
131	/* alloc the Wnd nodes and the Wmir node */
132	if (asmap->numDataFailed == 0)
133		useMirror = RF_FALSE;
134	else
135		useMirror = RF_TRUE;
136
137	/* total number of nodes = 1 + (block + commit + terminator) */
138	RF_CallocAndAdd(nodes, 4, sizeof(RF_DagNode_t), (RF_DagNode_t *), allocList);
139	i = 0;
140	rdNode = &nodes[i];
141	i++;
142	blockNode = &nodes[i];
143	i++;
144	commitNode = &nodes[i];
145	i++;
146	termNode = &nodes[i];
147	i++;
148
149	/* this dag can not commit until the commit node is reached.   errors
150	 * prior to the commit point imply the dag has failed and must be
151	 * retried */
152	dag_h->numCommitNodes = 1;
153	dag_h->numCommits = 0;
154	dag_h->numSuccedents = 1;
155
156	/* initialize the block, commit, and terminator nodes */
157	rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
158	    NULL, 1, 0, 0, 0, dag_h, "Nil", allocList);
159	rf_InitNode(commitNode, rf_wait, RF_TRUE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
160	    NULL, 1, 1, 0, 0, dag_h, "Cmt", allocList);
161	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc,
162	    NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);
163
164	pda = asmap->physInfo;
165	RF_ASSERT(pda != NULL);
166	/* parityInfo must describe entire parity unit */
167	RF_ASSERT(asmap->parityInfo->next == NULL);
168
169	/* initialize the data node */
170	if (!useMirror) {
171		/* read primary copy of data */
172		rf_InitNode(rdNode, rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc,
173		    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rpd", allocList);
174		rdNode->params[0].p = pda;
175		rdNode->params[1].p = pda->bufPtr;
176		rdNode->params[2].v = parityStripeID;
177		rdNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
178	} else {
179		/* read secondary copy of data */
180		rf_InitNode(rdNode, rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc,
181		    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rsd", allocList);
182		rdNode->params[0].p = asmap->parityInfo;
183		rdNode->params[1].p = pda->bufPtr;
184		rdNode->params[2].v = parityStripeID;
185		rdNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
186	}
187
188	/* connect header to block node */
189	RF_ASSERT(dag_h->numSuccedents == 1);
190	RF_ASSERT(blockNode->numAntecedents == 0);
191	dag_h->succedents[0] = blockNode;
192
193	/* connect block node to rdnode */
194	RF_ASSERT(blockNode->numSuccedents == 1);
195	RF_ASSERT(rdNode->numAntecedents == 1);
196	blockNode->succedents[0] = rdNode;
197	rdNode->antecedents[0] = blockNode;
198	rdNode->antType[0] = rf_control;
199
200	/* connect rdnode to commit node */
201	RF_ASSERT(rdNode->numSuccedents == 1);
202	RF_ASSERT(commitNode->numAntecedents == 1);
203	rdNode->succedents[0] = commitNode;
204	commitNode->antecedents[0] = rdNode;
205	commitNode->antType[0] = rf_control;
206
207	/* connect commit node to terminator */
208	RF_ASSERT(commitNode->numSuccedents == 1);
209	RF_ASSERT(termNode->numAntecedents == 1);
210	RF_ASSERT(termNode->numSuccedents == 0);
211	commitNode->succedents[0] = termNode;
212	termNode->antecedents[0] = commitNode;
213	termNode->antType[0] = rf_control;
214}
215
216
217
218/******************************************************************************
219 *
220 * creates a DAG to perform a degraded-mode read of data within one stripe.
221 * This DAG is as follows:
222 *
223 * Hdr -> Block -> Rud -> Xor -> Cmt -> T
224 *              -> Rrd ->
225 *              -> Rp -->
226 *
227 * Each R node is a successor of the L node
228 * One successor arc from each R node goes to C, and the other to X
229 * There is one Rud for each chunk of surviving user data requested by the
230 * user, and one Rrd for each chunk of surviving user data _not_ being read by
231 * the user
232 * R = read, ud = user data, rd = recovery (surviving) data, p = parity
233 * X = XOR, C = Commit, T = terminate
234 *
235 * The block node guarantees a single source node.
236 *
237 * Note:  The target buffer for the XOR node is set to the actual user buffer
238 * where the failed data is supposed to end up.  This buffer is zero'd by the
239 * code here.  Thus, if you create a degraded read dag, use it, and then
240 * re-use, you have to be sure to zero the target buffer prior to the re-use.
241 *
242 * The recfunc argument at the end specifies the name and function used for
243 * the redundancy
244 * recovery function.
245 *
246 *****************************************************************************/
247
void
rf_CreateDegradedReadDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    RF_RedFuncs_t * recFunc)
{
	RF_DagNode_t *nodes, *rudNodes, *rrdNodes, *xorNode, *blockNode;
	RF_DagNode_t *commitNode, *rpNode, *termNode;
	int     nNodes, nRrdNodes, nRudNodes, nXorBufs, i;
	int     j, paramNum;
	RF_SectorCount_t sectorsPerSU;
	RF_ReconUnitNum_t which_ru;
	char   *overlappingPDAs;/* a temporary array of flags, one per
				 * user stripe unit accessed */
	RF_AccessStripeMapHeader_t *new_asm_h[2];
	RF_PhysDiskAddr_t *pda, *parityPDA;
	RF_StripeNum_t parityStripeID;
	RF_PhysDiskAddr_t *failedPDA;
	RF_RaidLayout_t *layoutPtr;
	char   *rpBuf;

	layoutPtr = &(raidPtr->Layout);
	/* failedPDA points to the pda within the asm that targets the failed
	 * disk */
	failedPDA = asmap->failedPDAs[0];
	parityStripeID = rf_RaidAddressToParityStripeID(layoutPtr,
	    asmap->raidAddress, &which_ru);
	sectorsPerSU = layoutPtr->sectorsPerStripeUnit;

	if (rf_dagDebug) {
		printf("[Creating degraded read DAG]\n");
	}
	/* this DAG handles exactly one failed data unit */
	RF_ASSERT(asmap->numDataFailed == 1);
	dag_h->creator = "DegradedReadDAG";

	/*
         * generate two ASMs identifying the surviving data we need
         * in order to recover the lost data
         */

	/* overlappingPDAs array must be zero'd */
	RF_Calloc(overlappingPDAs, asmap->numStripeUnitsAccessed, sizeof(char), (char *));
	rf_GenerateFailedAccessASMs(raidPtr, asmap, failedPDA, dag_h, new_asm_h, &nXorBufs,
	    &rpBuf, overlappingPDAs, allocList);

	/*
         * create all the nodes at once
         *
         * -1 because no access is generated for the failed pda
         */
	nRudNodes = asmap->numStripeUnitsAccessed - 1;
	nRrdNodes = ((new_asm_h[0]) ? new_asm_h[0]->stripeMap->numStripeUnitsAccessed : 0) +
	    ((new_asm_h[1]) ? new_asm_h[1]->stripeMap->numStripeUnitsAccessed : 0);
	nNodes = 5 + nRudNodes + nRrdNodes;	/* block, commit, xor, Rp,
						 * term, + Rud and Rrd nodes */
	RF_CallocAndAdd(nodes, nNodes, sizeof(RF_DagNode_t), (RF_DagNode_t *),
	    allocList);
	i = 0;
	blockNode = &nodes[i];
	i++;
	commitNode = &nodes[i];
	i++;
	xorNode = &nodes[i];
	i++;
	rpNode = &nodes[i];
	i++;
	termNode = &nodes[i];
	i++;
	rudNodes = &nodes[i];
	i += nRudNodes;
	rrdNodes = &nodes[i];
	i += nRrdNodes;
	RF_ASSERT(i == nNodes);

	/* initialize nodes */
	dag_h->numCommitNodes = 1;
	dag_h->numCommits = 0;
	/* this dag can not commit until the commit node is reached errors
	 * prior to the commit point imply the dag has failed */
	dag_h->numSuccedents = 1;

	/* block fans out to Rp + all Rud + all Rrd; xor fans in from the
	 * same set and needs 2 params (pda, buf) per xor buffer plus the
	 * trailing failedPDA and raidPtr params, and 1 result slot */
	rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
	    NULL, nRudNodes + nRrdNodes + 1, 0, 0, 0, dag_h, "Nil", allocList);
	rf_InitNode(commitNode, rf_wait, RF_TRUE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
	    NULL, 1, 1, 0, 0, dag_h, "Cmt", allocList);
	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc,
	    NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);
	rf_InitNode(xorNode, rf_wait, RF_FALSE, recFunc->simple, rf_NullNodeUndoFunc,
	    NULL, 1, nRudNodes + nRrdNodes + 1, 2 * nXorBufs + 2, 1, dag_h,
	    recFunc->SimpleName, allocList);

	/* fill in the Rud nodes (surviving user data the user asked for) */
	for (pda = asmap->physInfo, i = 0; i < nRudNodes; i++, pda = pda->next) {
		if (pda == failedPDA) {
			/* skip the failed unit without consuming a node slot */
			i--;
			continue;
		}
		rf_InitNode(&rudNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc,
		    rf_DiskReadUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h,
		    "Rud", allocList);
		RF_ASSERT(pda);
		rudNodes[i].params[0].p = pda;
		rudNodes[i].params[1].p = pda->bufPtr;
		rudNodes[i].params[2].v = parityStripeID;
		rudNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
	}

	/* fill in the Rrd nodes (surviving data not requested by the user,
	 * described by the one or two generated ASMs) */
	i = 0;
	if (new_asm_h[0]) {
		for (pda = new_asm_h[0]->stripeMap->physInfo;
		    i < new_asm_h[0]->stripeMap->numStripeUnitsAccessed;
		    i++, pda = pda->next) {
			rf_InitNode(&rrdNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc,
			    rf_DiskReadUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0,
			    dag_h, "Rrd", allocList);
			RF_ASSERT(pda);
			rrdNodes[i].params[0].p = pda;
			rrdNodes[i].params[1].p = pda->bufPtr;
			rrdNodes[i].params[2].v = parityStripeID;
			rrdNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
		}
	}
	if (new_asm_h[1]) {
		for (j = 0, pda = new_asm_h[1]->stripeMap->physInfo;
		    j < new_asm_h[1]->stripeMap->numStripeUnitsAccessed;
		    j++, pda = pda->next) {
			rf_InitNode(&rrdNodes[i + j], rf_wait, RF_FALSE, rf_DiskReadFunc,
			    rf_DiskReadUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0,
			    dag_h, "Rrd", allocList);
			RF_ASSERT(pda);
			rrdNodes[i + j].params[0].p = pda;
			rrdNodes[i + j].params[1].p = pda->bufPtr;
			rrdNodes[i + j].params[2].v = parityStripeID;
			rrdNodes[i + j].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
		}
	}
	/* make a PDA for the parity unit, covering the same stripe-unit
	 * offset/length as the failed data unit */
	RF_MallocAndAdd(parityPDA, sizeof(RF_PhysDiskAddr_t), (RF_PhysDiskAddr_t *), allocList);
	parityPDA->row = asmap->parityInfo->row;
	parityPDA->col = asmap->parityInfo->col;
	parityPDA->startSector = ((asmap->parityInfo->startSector / sectorsPerSU)
	    * sectorsPerSU) + (failedPDA->startSector % sectorsPerSU);
	parityPDA->numSector = failedPDA->numSector;

	/* initialize the Rp node (parity read into rpBuf) */
	rf_InitNode(rpNode, rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc,
	    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rp ", allocList);
	rpNode->params[0].p = parityPDA;
	rpNode->params[1].p = rpBuf;
	rpNode->params[2].v = parityStripeID;
	rpNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);

	/*
         * the last and nastiest step is to assign all
         * the parameters of the Xor node
         */
	paramNum = 0;
	for (i = 0; i < nRrdNodes; i++) {
		/* all the Rrd nodes need to be xored together */
		xorNode->params[paramNum++] = rrdNodes[i].params[0];
		xorNode->params[paramNum++] = rrdNodes[i].params[1];
	}
	for (i = 0; i < nRudNodes; i++) {
		/* any Rud nodes that overlap the failed access need to be
		 * xored in */
		if (overlappingPDAs[i]) {
			/* clone the pda and restrict it to the overlapping
			 * range so only those sectors enter the xor */
			RF_MallocAndAdd(pda, sizeof(RF_PhysDiskAddr_t), (RF_PhysDiskAddr_t *), allocList);
			bcopy((char *) rudNodes[i].params[0].p, (char *) pda, sizeof(RF_PhysDiskAddr_t));
			rf_RangeRestrictPDA(raidPtr, failedPDA, pda, RF_RESTRICT_DOBUFFER, 0);
			xorNode->params[paramNum++].p = pda;
			xorNode->params[paramNum++].p = pda->bufPtr;
		}
	}
	/* flags array is no longer needed; it was allocated with RF_Calloc,
	 * not on the allocList */
	RF_Free(overlappingPDAs, asmap->numStripeUnitsAccessed * sizeof(char));

	/* install parity pda as last set of params to be xor'd */
	xorNode->params[paramNum++].p = parityPDA;
	xorNode->params[paramNum++].p = rpBuf;

	/*
         * the last 2 params to the recovery xor node are
         * the failed PDA and the raidPtr
         */
	xorNode->params[paramNum++].p = failedPDA;
	xorNode->params[paramNum++].p = raidPtr;
	RF_ASSERT(paramNum == 2 * nXorBufs + 2);

	/*
         * The xor node uses results[0] as the target buffer.
         * Set pointer and zero the buffer. In the kernel, this
         * may be a user buffer in which case we have to remap it.
         */
	xorNode->results[0] = failedPDA->bufPtr;
	RF_BZERO(bp, failedPDA->bufPtr, rf_RaidAddressToByte(raidPtr,
		failedPDA->numSector));

	/* connect nodes to form graph */
	/* connect the header to the block node */
	RF_ASSERT(dag_h->numSuccedents == 1);
	RF_ASSERT(blockNode->numAntecedents == 0);
	dag_h->succedents[0] = blockNode;

	/* connect the block node to the read nodes */
	RF_ASSERT(blockNode->numSuccedents == (1 + nRrdNodes + nRudNodes));
	RF_ASSERT(rpNode->numAntecedents == 1);
	blockNode->succedents[0] = rpNode;
	rpNode->antecedents[0] = blockNode;
	rpNode->antType[0] = rf_control;
	for (i = 0; i < nRrdNodes; i++) {
		RF_ASSERT(rrdNodes[i].numSuccedents == 1);
		blockNode->succedents[1 + i] = &rrdNodes[i];
		rrdNodes[i].antecedents[0] = blockNode;
		rrdNodes[i].antType[0] = rf_control;
	}
	for (i = 0; i < nRudNodes; i++) {
		RF_ASSERT(rudNodes[i].numSuccedents == 1);
		blockNode->succedents[1 + nRrdNodes + i] = &rudNodes[i];
		rudNodes[i].antecedents[0] = blockNode;
		rudNodes[i].antType[0] = rf_control;
	}

	/* connect the read nodes to the xor node */
	RF_ASSERT(xorNode->numAntecedents == (1 + nRrdNodes + nRudNodes));
	RF_ASSERT(rpNode->numSuccedents == 1);
	rpNode->succedents[0] = xorNode;
	xorNode->antecedents[0] = rpNode;
	xorNode->antType[0] = rf_trueData;
	for (i = 0; i < nRrdNodes; i++) {
		RF_ASSERT(rrdNodes[i].numSuccedents == 1);
		rrdNodes[i].succedents[0] = xorNode;
		xorNode->antecedents[1 + i] = &rrdNodes[i];
		xorNode->antType[1 + i] = rf_trueData;
	}
	for (i = 0; i < nRudNodes; i++) {
		RF_ASSERT(rudNodes[i].numSuccedents == 1);
		rudNodes[i].succedents[0] = xorNode;
		xorNode->antecedents[1 + nRrdNodes + i] = &rudNodes[i];
		xorNode->antType[1 + nRrdNodes + i] = rf_trueData;
	}

	/* connect the xor node to the commit node */
	RF_ASSERT(xorNode->numSuccedents == 1);
	RF_ASSERT(commitNode->numAntecedents == 1);
	xorNode->succedents[0] = commitNode;
	commitNode->antecedents[0] = xorNode;
	commitNode->antType[0] = rf_control;

	/* connect the termNode to the commit node */
	RF_ASSERT(commitNode->numSuccedents == 1);
	RF_ASSERT(termNode->numAntecedents == 1);
	RF_ASSERT(termNode->numSuccedents == 0);
	commitNode->succedents[0] = termNode;
	termNode->antType[0] = rf_control;
	termNode->antecedents[0] = commitNode;
}
507
508
509/******************************************************************************
510 * Create a degraded read DAG for Chained Declustering
511 *
512 * Hdr -> Nil -> R(p/s)d -> Cmt -> Trm
513 *
514 * The "Rd" node reads data from the surviving disk in the mirror pair
515 *   Rpd - read of primary copy
516 *   Rsd - read of secondary copy
517 *
518 * Parameters:  raidPtr   - description of the physical array
519 *              asmap     - logical & physical addresses for this access
520 *              bp        - buffer ptr (for holding write data)
521 *              flags     - general flags (e.g. disk locking)
522 *              allocList - list of memory allocated in DAG creation
523 *****************************************************************************/
524
void
rf_CreateRaidCDegradedReadDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList)
{
	RF_DagNode_t *nodes, *rdNode, *blockNode, *commitNode, *termNode;
	RF_StripeNum_t parityStripeID;
	int     useMirror, i, shiftable;
	RF_ReconUnitNum_t which_ru;
	RF_PhysDiskAddr_t *pda;

	/* the read may only be shifted to the next disk for load balancing
	 * when both copies are intact */
	if ((asmap->numDataFailed + asmap->numParityFailed) == 0) {
		shiftable = RF_TRUE;
	} else {
		shiftable = RF_FALSE;
	}
	useMirror = 0;
	parityStripeID = rf_RaidAddressToParityStripeID(&(raidPtr->Layout),
	    asmap->raidAddress, &which_ru);

	if (rf_dagDebug) {
		printf("[Creating RAID C degraded read DAG]\n");
	}
	dag_h->creator = "RaidCDegradedReadDAG";
	/* read the mirror copy only if a data disk has failed */
	if (asmap->numDataFailed == 0)
		useMirror = RF_FALSE;
	else
		useMirror = RF_TRUE;

	/* total number of nodes = 1 read + (block + commit + terminator) */
	RF_CallocAndAdd(nodes, 4, sizeof(RF_DagNode_t), (RF_DagNode_t *), allocList);
	i = 0;
	rdNode = &nodes[i];
	i++;
	blockNode = &nodes[i];
	i++;
	commitNode = &nodes[i];
	i++;
	termNode = &nodes[i];
	i++;

	/*
         * This dag can not commit until the commit node is reached.
         * Errors prior to the commit point imply the dag has failed
         * and must be retried.
         */
	dag_h->numCommitNodes = 1;
	dag_h->numCommits = 0;
	dag_h->numSuccedents = 1;

	/* initialize the block, commit, and terminator nodes */
	rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
	    NULL, 1, 0, 0, 0, dag_h, "Nil", allocList);
	rf_InitNode(commitNode, rf_wait, RF_TRUE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
	    NULL, 1, 1, 0, 0, dag_h, "Cmt", allocList);
	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc,
	    NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);

	pda = asmap->physInfo;
	RF_ASSERT(pda != NULL);
	/* parityInfo must describe entire parity unit */
	RF_ASSERT(asmap->parityInfo->next == NULL);

	/* initialize the data node; in every case the read lands in the
	 * user's buffer (pda->bufPtr), only the source PDA differs */
	if (!useMirror) {
		rf_InitNode(rdNode, rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc,
		    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rpd", allocList);
		if (shiftable && rf_compute_workload_shift(raidPtr, pda)) {
			/* shift this read to the next disk in line */
			rdNode->params[0].p = asmap->parityInfo;
			rdNode->params[1].p = pda->bufPtr;
			rdNode->params[2].v = parityStripeID;
			rdNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
		} else {
			/* read primary copy */
			rdNode->params[0].p = pda;
			rdNode->params[1].p = pda->bufPtr;
			rdNode->params[2].v = parityStripeID;
			rdNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
		}
	} else {
		/* read secondary copy of data */
		rf_InitNode(rdNode, rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc,
		    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rsd", allocList);
		rdNode->params[0].p = asmap->parityInfo;
		rdNode->params[1].p = pda->bufPtr;
		rdNode->params[2].v = parityStripeID;
		rdNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
	}

	/* connect header to block node */
	RF_ASSERT(dag_h->numSuccedents == 1);
	RF_ASSERT(blockNode->numAntecedents == 0);
	dag_h->succedents[0] = blockNode;

	/* connect block node to rdnode */
	RF_ASSERT(blockNode->numSuccedents == 1);
	RF_ASSERT(rdNode->numAntecedents == 1);
	blockNode->succedents[0] = rdNode;
	rdNode->antecedents[0] = blockNode;
	rdNode->antType[0] = rf_control;

	/* connect rdnode to commit node */
	RF_ASSERT(rdNode->numSuccedents == 1);
	RF_ASSERT(commitNode->numAntecedents == 1);
	rdNode->succedents[0] = commitNode;
	commitNode->antecedents[0] = rdNode;
	commitNode->antType[0] = rf_control;

	/* connect commit node to terminator */
	RF_ASSERT(commitNode->numSuccedents == 1);
	RF_ASSERT(termNode->numAntecedents == 1);
	RF_ASSERT(termNode->numSuccedents == 0);
	commitNode->succedents[0] = termNode;
	termNode->antecedents[0] = commitNode;
	termNode->antType[0] = rf_control;
}
647/*
648 * XXX move this elsewhere?
649 */
650void
651rf_DD_GenerateFailedAccessASMs(
652    RF_Raid_t * raidPtr,
653    RF_AccessStripeMap_t * asmap,
654    RF_PhysDiskAddr_t ** pdap,
655    int *nNodep,
656    RF_PhysDiskAddr_t ** pqpdap,
657    int *nPQNodep,
658    RF_AllocListElem_t * allocList)
659{
660	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
661	int     PDAPerDisk, i;
662	RF_SectorCount_t secPerSU = layoutPtr->sectorsPerStripeUnit;
663	int     numDataCol = layoutPtr->numDataCol;
664	int     state;
665	RF_SectorNum_t suoff, suend;
666	unsigned firstDataCol, napdas, count;
667	RF_SectorNum_t fone_start, fone_end, ftwo_start = 0, ftwo_end = 0;
668	RF_PhysDiskAddr_t *fone = asmap->failedPDAs[0], *ftwo = asmap->failedPDAs[1];
669	RF_PhysDiskAddr_t *pda_p;
670	RF_PhysDiskAddr_t *phys_p;
671	RF_RaidAddr_t sosAddr;
672
673	/* determine how many pda's we will have to generate per unaccess
674	 * stripe. If there is only one failed data unit, it is one; if two,
675	 * possibly two, depending wether they overlap. */
676
677	fone_start = rf_StripeUnitOffset(layoutPtr, fone->startSector);
678	fone_end = fone_start + fone->numSector;
679
680#define CONS_PDA(if,start,num) \
681  pda_p->row = asmap->if->row;    pda_p->col = asmap->if->col; \
682  pda_p->startSector = ((asmap->if->startSector / secPerSU) * secPerSU) + start; \
683  pda_p->numSector = num; \
684  pda_p->next = NULL; \
685  RF_MallocAndAdd(pda_p->bufPtr,rf_RaidAddressToByte(raidPtr,num),(char *), allocList)
686
687	if (asmap->numDataFailed == 1) {
688		PDAPerDisk = 1;
689		state = 1;
690		RF_MallocAndAdd(*pqpdap, 2 * sizeof(RF_PhysDiskAddr_t), (RF_PhysDiskAddr_t *), allocList);
691		pda_p = *pqpdap;
692		/* build p */
693		CONS_PDA(parityInfo, fone_start, fone->numSector);
694		pda_p->type = RF_PDA_TYPE_PARITY;
695		pda_p++;
696		/* build q */
697		CONS_PDA(qInfo, fone_start, fone->numSector);
698		pda_p->type = RF_PDA_TYPE_Q;
699	} else {
700		ftwo_start = rf_StripeUnitOffset(layoutPtr, ftwo->startSector);
701		ftwo_end = ftwo_start + ftwo->numSector;
702		if (fone->numSector + ftwo->numSector > secPerSU) {
703			PDAPerDisk = 1;
704			state = 2;
705			RF_MallocAndAdd(*pqpdap, 2 * sizeof(RF_PhysDiskAddr_t), (RF_PhysDiskAddr_t *), allocList);
706			pda_p = *pqpdap;
707			CONS_PDA(parityInfo, 0, secPerSU);
708			pda_p->type = RF_PDA_TYPE_PARITY;
709			pda_p++;
710			CONS_PDA(qInfo, 0, secPerSU);
711			pda_p->type = RF_PDA_TYPE_Q;
712		} else {
713			PDAPerDisk = 2;
714			state = 3;
715			/* four of them, fone, then ftwo */
716			RF_MallocAndAdd(*pqpdap, 4 * sizeof(RF_PhysDiskAddr_t), (RF_PhysDiskAddr_t *), allocList);
717			pda_p = *pqpdap;
718			CONS_PDA(parityInfo, fone_start, fone->numSector);
719			pda_p->type = RF_PDA_TYPE_PARITY;
720			pda_p++;
721			CONS_PDA(qInfo, fone_start, fone->numSector);
722			pda_p->type = RF_PDA_TYPE_Q;
723			pda_p++;
724			CONS_PDA(parityInfo, ftwo_start, ftwo->numSector);
725			pda_p->type = RF_PDA_TYPE_PARITY;
726			pda_p++;
727			CONS_PDA(qInfo, ftwo_start, ftwo->numSector);
728			pda_p->type = RF_PDA_TYPE_Q;
729		}
730	}
731	/* figure out number of nonaccessed pda */
732	napdas = PDAPerDisk * (numDataCol - asmap->numStripeUnitsAccessed - (ftwo == NULL ? 1 : 0));
733	*nPQNodep = PDAPerDisk;
734
735	/* sweep over the over accessed pda's, figuring out the number of
736	 * additional pda's to generate. Of course, skip the failed ones */
737
738	count = 0;
739	for (pda_p = asmap->physInfo; pda_p; pda_p = pda_p->next) {
740		if ((pda_p == fone) || (pda_p == ftwo))
741			continue;
742		suoff = rf_StripeUnitOffset(layoutPtr, pda_p->startSector);
743		suend = suoff + pda_p->numSector;
744		switch (state) {
745		case 1:	/* one failed PDA to overlap */
746			/* if a PDA doesn't contain the failed unit, it can
747			 * only miss the start or end, not both */
748			if ((suoff > fone_start) || (suend < fone_end))
749				count++;
750			break;
751		case 2:	/* whole stripe */
752			if (suoff)	/* leak at begining */
753				count++;
754			if (suend < numDataCol)	/* leak at end */
755				count++;
756			break;
757		case 3:	/* two disjoint units */
758			if ((suoff > fone_start) || (suend < fone_end))
759				count++;
760			if ((suoff > ftwo_start) || (suend < ftwo_end))
761				count++;
762			break;
763		default:
764			RF_PANIC();
765		}
766	}
767
768	napdas += count;
769	*nNodep = napdas;
770	if (napdas == 0)
771		return;		/* short circuit */
772
773	/* allocate up our list of pda's */
774
775	RF_CallocAndAdd(pda_p, napdas, sizeof(RF_PhysDiskAddr_t), (RF_PhysDiskAddr_t *), allocList);
776	*pdap = pda_p;
777
778	/* linkem together */
779	for (i = 0; i < (napdas - 1); i++)
780		pda_p[i].next = pda_p + (i + 1);
781
782	/* march through the one's up to the first accessed disk */
783	firstDataCol = rf_RaidAddressToStripeUnitID(&(raidPtr->Layout), asmap->physInfo->raidAddress) % numDataCol;
784	sosAddr = rf_RaidAddressOfPrevStripeBoundary(layoutPtr, asmap->raidAddress);
785	for (i = 0; i < firstDataCol; i++) {
786		if ((pda_p - (*pdap)) == napdas)
787			continue;
788		pda_p->type = RF_PDA_TYPE_DATA;
789		pda_p->raidAddress = sosAddr + (i * secPerSU);
790		(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
791		/* skip over dead disks */
792		if (RF_DEAD_DISK(raidPtr->Disks[pda_p->row][pda_p->col].status))
793			continue;
794		switch (state) {
795		case 1:	/* fone */
796			pda_p->numSector = fone->numSector;
797			pda_p->raidAddress += fone_start;
798			pda_p->startSector += fone_start;
799			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
800			break;
801		case 2:	/* full stripe */
802			pda_p->numSector = secPerSU;
803			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, secPerSU), (char *), allocList);
804			break;
805		case 3:	/* two slabs */
806			pda_p->numSector = fone->numSector;
807			pda_p->raidAddress += fone_start;
808			pda_p->startSector += fone_start;
809			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
810			pda_p++;
811			pda_p->type = RF_PDA_TYPE_DATA;
812			pda_p->raidAddress = sosAddr + (i * secPerSU);
813			(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
814			pda_p->numSector = ftwo->numSector;
815			pda_p->raidAddress += ftwo_start;
816			pda_p->startSector += ftwo_start;
817			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
818			break;
819		default:
820			RF_PANIC();
821		}
822		pda_p++;
823	}
824
825	/* march through the touched stripe units */
826	for (phys_p = asmap->physInfo; phys_p; phys_p = phys_p->next, i++) {
827		if ((phys_p == asmap->failedPDAs[0]) || (phys_p == asmap->failedPDAs[1]))
828			continue;
829		suoff = rf_StripeUnitOffset(layoutPtr, phys_p->startSector);
830		suend = suoff + phys_p->numSector;
831		switch (state) {
832		case 1:	/* single buffer */
833			if (suoff > fone_start) {
834				RF_ASSERT(suend >= fone_end);
835				/* The data read starts after the mapped
836				 * access, snip off the begining */
837				pda_p->numSector = suoff - fone_start;
838				pda_p->raidAddress = sosAddr + (i * secPerSU) + fone_start;
839				(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
840				RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
841				pda_p++;
842			}
843			if (suend < fone_end) {
844				RF_ASSERT(suoff <= fone_start);
845				/* The data read stops before the end of the
846				 * failed access, extend */
847				pda_p->numSector = fone_end - suend;
848				pda_p->raidAddress = sosAddr + (i * secPerSU) + suend;	/* off by one? */
849				(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
850				RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
851				pda_p++;
852			}
853			break;
854		case 2:	/* whole stripe unit */
855			RF_ASSERT((suoff == 0) || (suend == secPerSU));
856			if (suend < secPerSU) {	/* short read, snip from end
857						 * on */
858				pda_p->numSector = secPerSU - suend;
859				pda_p->raidAddress = sosAddr + (i * secPerSU) + suend;	/* off by one? */
860				(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
861				RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
862				pda_p++;
863			} else
864				if (suoff > 0) {	/* short at front */
865					pda_p->numSector = suoff;
866					pda_p->raidAddress = sosAddr + (i * secPerSU);
867					(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
868					RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
869					pda_p++;
870				}
871			break;
872		case 3:	/* two nonoverlapping failures */
873			if ((suoff > fone_start) || (suend < fone_end)) {
874				if (suoff > fone_start) {
875					RF_ASSERT(suend >= fone_end);
876					/* The data read starts after the
877					 * mapped access, snip off the
878					 * begining */
879					pda_p->numSector = suoff - fone_start;
880					pda_p->raidAddress = sosAddr + (i * secPerSU) + fone_start;
881					(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
882					RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
883					pda_p++;
884				}
885				if (suend < fone_end) {
886					RF_ASSERT(suoff <= fone_start);
887					/* The data read stops before the end
888					 * of the failed access, extend */
889					pda_p->numSector = fone_end - suend;
890					pda_p->raidAddress = sosAddr + (i * secPerSU) + suend;	/* off by one? */
891					(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
892					RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
893					pda_p++;
894				}
895			}
896			if ((suoff > ftwo_start) || (suend < ftwo_end)) {
897				if (suoff > ftwo_start) {
898					RF_ASSERT(suend >= ftwo_end);
899					/* The data read starts after the
900					 * mapped access, snip off the
901					 * begining */
902					pda_p->numSector = suoff - ftwo_start;
903					pda_p->raidAddress = sosAddr + (i * secPerSU) + ftwo_start;
904					(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
905					RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
906					pda_p++;
907				}
908				if (suend < ftwo_end) {
909					RF_ASSERT(suoff <= ftwo_start);
910					/* The data read stops before the end
911					 * of the failed access, extend */
912					pda_p->numSector = ftwo_end - suend;
913					pda_p->raidAddress = sosAddr + (i * secPerSU) + suend;	/* off by one? */
914					(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
915					RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
916					pda_p++;
917				}
918			}
919			break;
920		default:
921			RF_PANIC();
922		}
923	}
924
925	/* after the last accessed disk */
926	for (; i < numDataCol; i++) {
927		if ((pda_p - (*pdap)) == napdas)
928			continue;
929		pda_p->type = RF_PDA_TYPE_DATA;
930		pda_p->raidAddress = sosAddr + (i * secPerSU);
931		(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
932		/* skip over dead disks */
933		if (RF_DEAD_DISK(raidPtr->Disks[pda_p->row][pda_p->col].status))
934			continue;
935		switch (state) {
936		case 1:	/* fone */
937			pda_p->numSector = fone->numSector;
938			pda_p->raidAddress += fone_start;
939			pda_p->startSector += fone_start;
940			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
941			break;
942		case 2:	/* full stripe */
943			pda_p->numSector = secPerSU;
944			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, secPerSU), (char *), allocList);
945			break;
946		case 3:	/* two slabs */
947			pda_p->numSector = fone->numSector;
948			pda_p->raidAddress += fone_start;
949			pda_p->startSector += fone_start;
950			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
951			pda_p++;
952			pda_p->type = RF_PDA_TYPE_DATA;
953			pda_p->raidAddress = sosAddr + (i * secPerSU);
954			(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
955			pda_p->numSector = ftwo->numSector;
956			pda_p->raidAddress += ftwo_start;
957			pda_p->startSector += ftwo_start;
958			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
959			break;
960		default:
961			RF_PANIC();
962		}
963		pda_p++;
964	}
965
966	RF_ASSERT(pda_p - *pdap == napdas);
967	return;
968}
/*
 * INIT_DISK_NODE(node, name): initialize a disk-read DAG node for the
 * double-degraded read graph: 2 successors (the unblock node and the
 * recovery node), 1 antecedent (the block node, a control dependence),
 * 4 params, 0 results.  This is a textual macro: it relies on blockNode,
 * unblockNode, recoveryNode, dag_h, and allocList being in scope at the
 * expansion site (see rf_DoubleDegRead below).
 */
#define INIT_DISK_NODE(node,name) \
rf_InitNode(node, rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc, rf_GenericWakeupFunc, 2,1,4,0, dag_h, name, allocList); \
(node)->succedents[0] = unblockNode; \
(node)->succedents[1] = recoveryNode; \
(node)->antecedents[0] = blockNode; \
(node)->antType[0] = rf_control
975
/*
 * DISK_NODE_PARAMS(node, pda): fill in the standard four parameters of a
 * disk-read node: the physical disk address, its data buffer, the parity
 * stripe ID, and the packed priority/reconstruction-unit word.  Textual
 * macro: parityStripeID and which_ru must be in scope at the expansion
 * site.
 */
#define DISK_NODE_PARAMS(_node_,_p_) \
  (_node_).params[0].p = _p_ ; \
  (_node_).params[1].p = (_p_)->bufPtr; \
  (_node_).params[2].v = parityStripeID; \
  (_node_).params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru)
981
/*
 * Create a DAG for a degraded read with two failed units in the stripe
 * (asmap->failedPDAs[0] and, when asmap->numDataFailed == 2,
 * asmap->failedPDAs[1]).  Everything that survives is read: the user data
 * units untouched by a failure ("Rud" nodes), the additional stripe-unit
 * data mapped out by rf_DD_GenerateFailedAccessASMs ("Rrd" nodes), and the
 * redundancy units ("Rp" plus the caller-named node, redundantReadNodeName).
 * A caller-supplied recovery node (recovFunc / recoveryNodeName) receives
 * the PDA of every read node plus raidPtr and asmap as params; its results
 * are the failed PDA(s).
 *
 * Graph shape:
 *
 *   Hdr -> Block -> {Rud, Rrd, Rp, Rq} -> Unblock (commit) -> Trm
 *                          \__________ -> Recovery ---------> Trm
 *
 * The unblock node is the single commit node.  bp and flags are accepted
 * for interface symmetry with the other DAG-creation routines but are not
 * used here.
 */
void
rf_DoubleDegRead(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    char *redundantReadNodeName,
    char *recoveryNodeName,
    int (*recovFunc) (RF_DagNode_t *))
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_DagNode_t *nodes, *rudNodes, *rrdNodes, *recoveryNode, *blockNode,
	       *unblockNode, *rpNodes, *rqNodes, *termNode;
	RF_PhysDiskAddr_t *pda, *pqPDAs;
	RF_PhysDiskAddr_t *npdas;	/* pda list for the Rrd nodes */
	int     nNodes, nRrdNodes, nRudNodes, i;
	RF_ReconUnitNum_t which_ru;
	int     nReadNodes, nPQNodes;
	RF_PhysDiskAddr_t *failedPDA = asmap->failedPDAs[0];
	RF_PhysDiskAddr_t *failedPDAtwo = asmap->failedPDAs[1];
	RF_StripeNum_t parityStripeID = rf_RaidAddressToParityStripeID(layoutPtr, asmap->raidAddress, &which_ru);

	if (rf_dagDebug)
		printf("[Creating Double Degraded Read DAG]\n");
	/* map out the surviving portions of the failed stripe units and the
	 * P/Q redundancy units to be read */
	rf_DD_GenerateFailedAccessASMs(raidPtr, asmap, &npdas, &nRrdNodes, &pqPDAs, &nPQNodes, allocList);

	/* one Rud node per accessed stripe unit that did not fail */
	nRudNodes = asmap->numStripeUnitsAccessed - (asmap->numDataFailed);
	/* 2 * nPQNodes: one P and one Q read per parity stripe covered */
	nReadNodes = nRrdNodes + nRudNodes + 2 * nPQNodes;
	nNodes = 4 /* block, unblock, recovery, term */ + nReadNodes;

	/* carve up the node array.  NB: the order below matters -- the read
	 * nodes (rud, rrd, rp, rq) must be contiguous, because the wiring
	 * loops below index across all of them from rudNodes. */
	RF_CallocAndAdd(nodes, nNodes, sizeof(RF_DagNode_t), (RF_DagNode_t *), allocList);
	i = 0;
	blockNode = &nodes[i];
	i += 1;
	unblockNode = &nodes[i];
	i += 1;
	recoveryNode = &nodes[i];
	i += 1;
	termNode = &nodes[i];
	i += 1;
	rudNodes = &nodes[i];
	i += nRudNodes;
	rrdNodes = &nodes[i];
	i += nRrdNodes;
	rpNodes = &nodes[i];
	i += nPQNodes;
	rqNodes = &nodes[i];
	i += nPQNodes;
	RF_ASSERT(i == nNodes);

	dag_h->numSuccedents = 1;
	dag_h->succedents[0] = blockNode;
	dag_h->creator = "DoubleDegRead";
	dag_h->numCommits = 0;
	dag_h->numCommitNodes = 1;	/* unblock */

	/* term node waits on both the unblock and the recovery node */
	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc, NULL, 0, 2, 0, 0, dag_h, "Trm", allocList);
	termNode->antecedents[0] = unblockNode;
	termNode->antType[0] = rf_control;
	termNode->antecedents[1] = recoveryNode;
	termNode->antType[1] = rf_control;

	/* init the block and unblock nodes */
	/* The block node has all nodes except itself, unblock and recovery as
	 * successors. Similarly for predecessors of the unblock. */
	rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, nReadNodes, 0, 0, 0, dag_h, "Nil", allocList);
	rf_InitNode(unblockNode, rf_wait, RF_TRUE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, 1, nReadNodes, 0, 0, dag_h, "Nil", allocList);

	/* rudNodes + i walks over ALL read nodes (rud, rrd, rp, rq), which
	 * are contiguous in the nodes array -- see the carving above */
	for (i = 0; i < nReadNodes; i++) {
		blockNode->succedents[i] = rudNodes + i;
		unblockNode->antecedents[i] = rudNodes + i;
		unblockNode->antType[i] = rf_control;
	}
	unblockNode->succedents[0] = termNode;

	/* The recovery node has all the reads as predecessors, and the term
	 * node as successors. It gets a pda as a param from each of the read
	 * nodes plus the raidPtr. For each failed unit is has a result pda. */
	rf_InitNode(recoveryNode, rf_wait, RF_FALSE, recovFunc, rf_NullNodeUndoFunc, NULL,
	    1,			/* successors */
	    nReadNodes,		/* preds */
	    nReadNodes + 2,	/* params: one pda per read node + raidPtr + asmap */
	    asmap->numDataFailed,	/* results */
	    dag_h, recoveryNodeName, allocList);

	recoveryNode->succedents[0] = termNode;
	for (i = 0; i < nReadNodes; i++) {
		recoveryNode->antecedents[i] = rudNodes + i;	/* again spans all read nodes */
		recoveryNode->antType[i] = rf_trueData;
	}

	/* build the read nodes, then come back and fill in recovery params
	 * and results */
	/* Rud nodes: i advances only for pdas that did not fail, so exactly
	 * nRudNodes are built */
	pda = asmap->physInfo;
	for (i = 0; i < nRudNodes; pda = pda->next) {
		if ((pda == failedPDA) || (pda == failedPDAtwo))
			continue;
		INIT_DISK_NODE(rudNodes + i, "Rud");
		RF_ASSERT(pda);
		DISK_NODE_PARAMS(rudNodes[i], pda);
		i++;
	}

	/* Rrd nodes: one per pda generated by rf_DD_GenerateFailedAccessASMs */
	pda = npdas;
	for (i = 0; i < nRrdNodes; i++, pda = pda->next) {
		INIT_DISK_NODE(rrdNodes + i, "Rrd");
		RF_ASSERT(pda);
		DISK_NODE_PARAMS(rrdNodes[i], pda);
	}

	/* redundancy pdas: laid out as P, Q [, P, Q] in the pqPDAs array */
	pda = pqPDAs;
	INIT_DISK_NODE(rpNodes, "Rp");
	RF_ASSERT(pda);
	DISK_NODE_PARAMS(rpNodes[0], pda);
	pda++;
	INIT_DISK_NODE(rqNodes, redundantReadNodeName);
	RF_ASSERT(pda);
	DISK_NODE_PARAMS(rqNodes[0], pda);
	if (nPQNodes == 2) {
		pda++;
		INIT_DISK_NODE(rpNodes + 1, "Rp");
		RF_ASSERT(pda);
		DISK_NODE_PARAMS(rpNodes[1], pda);
		pda++;
		INIT_DISK_NODE(rqNodes + 1, redundantReadNodeName);
		RF_ASSERT(pda);
		DISK_NODE_PARAMS(rqNodes[1], pda);
	}
	/* fill in recovery node params: each read node's pda (contiguous
	 * indexing from rudNodes again), then raidPtr and asmap */
	for (i = 0; i < nReadNodes; i++)
		recoveryNode->params[i] = rudNodes[i].params[0];	/* pda */
	recoveryNode->params[i++].p = (void *) raidPtr;
	recoveryNode->params[i++].p = (void *) asmap;
	recoveryNode->results[0] = failedPDA;
	if (asmap->numDataFailed == 2)
		recoveryNode->results[1] = failedPDAtwo;

	/* zero fill the target data buffers? */
}
1124