/*	$NetBSD: rf_dagfuncs.c,v 1.7 2001/02/03 12:51:10 mrg Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland, William V. Courtright II
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * dagfuncs.c -- DAG node execution routines
 *
 * Rules:
 * 1. Every DAG execution function must eventually cause node->status to
 *    get set to "good" or "bad", and "FinishNode" to be called. In the
 *    case of nodes that complete immediately (xor, NullNodeFunc, etc),
 *    the node execution function can do these two things directly. In
 *    the case of nodes that have to wait for some event (a disk read to
 *    complete, a lock to be released, etc) to occur before they can
 *    complete, this is typically achieved by having whatever module
 *    is doing the operation call GenericWakeupFunc upon completion.
 * 2. DAG execution functions should check the status in the DAG header
 *    and NOP out their operations if the status is not "enable". However,
 *    execution functions that release resources must be sure to release
 *    them even when they NOP out the function that would use them.
 *    Functions that acquire resources should go ahead and acquire them
 *    even when they NOP, so that a downstream release node will not have
 *    to check to find out whether or not the acquire was suppressed.
 */

#include <sys/param.h>
#include <sys/ioctl.h>

#include "rf_archs.h"
#include "rf_raid.h"
#include "rf_dag.h"
#include "rf_layout.h"
#include "rf_etimer.h"
#include "rf_acctrace.h"
#include "rf_diskqueue.h"
#include "rf_dagfuncs.h"
#include "rf_general.h"
#include "rf_engine.h"
#include "rf_dagutils.h"

#include "rf_kintf.h"

#if RF_INCLUDE_PARITYLOGGING > 0
#include "rf_paritylog.h"
#endif				/* RF_INCLUDE_PARITYLOGGING > 0 */

int     (*rf_DiskReadFunc) (RF_DagNode_t *);
int     (*rf_DiskWriteFunc) (RF_DagNode_t *);
int     (*rf_DiskReadUndoFunc) (RF_DagNode_t *);
int     (*rf_DiskWriteUndoFunc) (RF_DagNode_t *);
int     (*rf_DiskUnlockFunc) (RF_DagNode_t *);
int     (*rf_DiskUnlockUndoFunc) (RF_DagNode_t *);
int     (*rf_RegularXorUndoFunc) (RF_DagNode_t *);
int     (*rf_SimpleXorUndoFunc) (RF_DagNode_t *);
int     (*rf_RecoveryXorUndoFunc) (RF_DagNode_t *);

/*****************************************************************************************
 * main (only) configuration routine for this module
 ****************************************************************************************/
int
rf_ConfigureDAGFuncs(listp)
	RF_ShutdownList_t **listp;
{
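	/*
	 * The longword xor routines below convert byte counts to longword
	 * counts with ">> RF_LONGSHIFT", so RF_LONGSHIFT must agree with
	 * the size of a long on this machine.
	 */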
	RF_ASSERT(((sizeof(long) == 8) && RF_LONGSHIFT == 3) || ((sizeof(long) == 4) && RF_LONGSHIFT == 2));
	rf_DiskReadFunc = rf_DiskReadFuncForThreads;
	rf_DiskReadUndoFunc = rf_DiskUndoFunc;
	rf_DiskWriteFunc = rf_DiskWriteFuncForThreads;
	rf_DiskWriteUndoFunc = rf_DiskUndoFunc;
	rf_DiskUnlockFunc = rf_DiskUnlockFuncForThreads;
	rf_DiskUnlockUndoFunc = rf_NullNodeUndoFunc;
	rf_RegularXorUndoFunc = rf_NullNodeUndoFunc;
	rf_SimpleXorUndoFunc = rf_NullNodeUndoFunc;
	rf_RecoveryXorUndoFunc = rf_NullNodeUndoFunc;
	return (0);
}



/*****************************************************************************************
 * the execution function associated with a terminate node
 ****************************************************************************************/
int
rf_TerminateFunc(node)
	RF_DagNode_t *node;
{
	RF_ASSERT(node->dagHdr->numCommits == node->dagHdr->numCommitNodes);
	node->status = rf_good;
	return (rf_FinishNode(node, RF_THREAD_CONTEXT));
}

int
rf_TerminateUndoFunc(node)
	RF_DagNode_t *node;
{
	return (0);
}


/*****************************************************************************************
 * execution functions associated with a mirror node
 *
 * parameters:
 *
 * 0 - physical disk address of data
 * 1 - buffer for holding read data
 * 2 - parity stripe ID
 * 3 - flags
 * 4 - physical disk address of mirror (parity)
 *
 ****************************************************************************************/

int
rf_DiskReadMirrorIdleFunc(node)
	RF_DagNode_t *node;
{
	/* select the mirror copy with the shortest queue and fill in node
	 * parameters with physical disk address */

	rf_SelectMirrorDiskIdle(node);
	return (rf_DiskReadFunc(node));
}

int
rf_DiskReadMirrorPartitionFunc(node)
	RF_DagNode_t *node;
{
	/* select the mirror copy with the shortest queue and fill in node
	 * parameters with physical disk address */

	rf_SelectMirrorDiskPartition(node);
	return (rf_DiskReadFunc(node));
}

int
rf_DiskReadMirrorUndoFunc(node)
	RF_DagNode_t *node;
{
	return (0);
}



#if RF_INCLUDE_PARITYLOGGING > 0
/*****************************************************************************************
 * the execution function associated with a parity log update node
 ****************************************************************************************/
int
rf_ParityLogUpdateFunc(node)
	RF_DagNode_t *node;
{
	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
	caddr_t buf = (caddr_t) node->params[1].p;
	RF_ParityLogData_t *logData;
	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
	RF_Etimer_t timer;

	if (node->dagHdr->status == rf_enable) {
		RF_ETIMER_START(timer);
		logData = rf_CreateParityLogData(RF_UPDATE, pda, buf,
		    (RF_Raid_t *) (node->dagHdr->raidPtr),
		    node->wakeFunc, (void *) node,
		    node->dagHdr->tracerec, timer);
		if (logData)
			rf_ParityLogAppend(logData, RF_FALSE, NULL, RF_FALSE);
		else {
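			/* could not allocate parity log data; charge the time
			 * spent and fail the node with ENOMEM */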
			RF_ETIMER_STOP(timer);
			RF_ETIMER_EVAL(timer);
			tracerec->plog_us += RF_ETIMER_VAL_US(timer);
			(node->wakeFunc) (node, ENOMEM);
		}
	}
	return (0);
}


/*****************************************************************************************
 * the execution function associated with a parity log overwrite node
 ****************************************************************************************/
int
rf_ParityLogOverwriteFunc(node)
	RF_DagNode_t *node;
{
	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
	caddr_t buf = (caddr_t) node->params[1].p;
	RF_ParityLogData_t *logData;
	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
	RF_Etimer_t timer;

	if (node->dagHdr->status == rf_enable) {
		RF_ETIMER_START(timer);
		logData = rf_CreateParityLogData(RF_OVERWRITE, pda, buf, (RF_Raid_t *) (node->dagHdr->raidPtr),
		    node->wakeFunc, (void *) node, node->dagHdr->tracerec, timer);
		if (logData)
			rf_ParityLogAppend(logData, RF_FALSE, NULL, RF_FALSE);
		else {
			RF_ETIMER_STOP(timer);
			RF_ETIMER_EVAL(timer);
			tracerec->plog_us += RF_ETIMER_VAL_US(timer);
			(node->wakeFunc) (node, ENOMEM);
		}
	}
	return (0);
}
#else				/* RF_INCLUDE_PARITYLOGGING > 0 */

int
rf_ParityLogUpdateFunc(node)
	RF_DagNode_t *node;
{
	return (0);
}
int
rf_ParityLogOverwriteFunc(node)
	RF_DagNode_t *node;
{
	return (0);
}
#endif				/* RF_INCLUDE_PARITYLOGGING > 0 */

int
rf_ParityLogUpdateUndoFunc(node)
	RF_DagNode_t *node;
{
	return (0);
}

int
rf_ParityLogOverwriteUndoFunc(node)
	RF_DagNode_t *node;
{
	return (0);
}
/*****************************************************************************************
 * the execution function associated with a NOP node
 ****************************************************************************************/
int
rf_NullNodeFunc(node)
	RF_DagNode_t *node;
{
	node->status = rf_good;
	return (rf_FinishNode(node, RF_THREAD_CONTEXT));
}

int
rf_NullNodeUndoFunc(node)
	RF_DagNode_t *node;
{
	node->status = rf_undone;
	return (rf_FinishNode(node, RF_THREAD_CONTEXT));
}


/*****************************************************************************************
 * the execution function associated with a disk-read node
 ****************************************************************************************/
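/*
 * parameters for a disk-read node (the disk-write node below uses the same
 * layout):
 *
 * 0 - physical disk address of the access
 * 1 - buffer for the data
 * 2 - parity stripe ID
 * 3 - priority, lock/unlock flags, and reconstruction unit, packed into a
 *     single word and unpacked with the RF_EXTRACT_* macros
 */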
int
rf_DiskReadFuncForThreads(node)
	RF_DagNode_t *node;
{
	RF_DiskQueueData_t *req;
	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
	caddr_t buf = (caddr_t) node->params[1].p;
	RF_StripeNum_t parityStripeID = (RF_StripeNum_t) node->params[2].v;
	unsigned priority = RF_EXTRACT_PRIORITY(node->params[3].v);
	unsigned lock = RF_EXTRACT_LOCK_FLAG(node->params[3].v);
	unsigned unlock = RF_EXTRACT_UNLOCK_FLAG(node->params[3].v);
	unsigned which_ru = RF_EXTRACT_RU(node->params[3].v);
	RF_DiskQueueDataFlags_t flags = 0;
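	/* if the DAG has been disabled, issue the request as a NOP so that
	 * any queue lock/unlock side effects still happen (see rule 2 in
	 * the header comment above) */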
	RF_IoType_t iotype = (node->dagHdr->status == rf_enable) ? RF_IO_TYPE_READ : RF_IO_TYPE_NOP;
	RF_DiskQueue_t **dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;
	void   *b_proc = NULL;

	if (node->dagHdr->bp)
		b_proc = (void *) ((struct buf *) node->dagHdr->bp)->b_proc;

	RF_ASSERT(!(lock && unlock));
	flags |= (lock) ? RF_LOCK_DISK_QUEUE : 0;
	flags |= (unlock) ? RF_UNLOCK_DISK_QUEUE : 0;

	req = rf_CreateDiskQueueData(iotype, pda->startSector, pda->numSector,
	    buf, parityStripeID, which_ru,
	    (int (*) (void *, int)) node->wakeFunc,
	    node, NULL, node->dagHdr->tracerec,
	    (void *) (node->dagHdr->raidPtr), flags, b_proc);
	if (!req) {
		(node->wakeFunc) (node, ENOMEM);
	} else {
		node->dagFuncData = (void *) req;
		rf_DiskIOEnqueue(&(dqs[pda->row][pda->col]), req, priority);
	}
	return (0);
}


/*****************************************************************************************
 * the execution function associated with a disk-write node
 ****************************************************************************************/
int
rf_DiskWriteFuncForThreads(node)
	RF_DagNode_t *node;
{
	RF_DiskQueueData_t *req;
	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
	caddr_t buf = (caddr_t) node->params[1].p;
	RF_StripeNum_t parityStripeID = (RF_StripeNum_t) node->params[2].v;
	unsigned priority = RF_EXTRACT_PRIORITY(node->params[3].v);
	unsigned lock = RF_EXTRACT_LOCK_FLAG(node->params[3].v);
	unsigned unlock = RF_EXTRACT_UNLOCK_FLAG(node->params[3].v);
	unsigned which_ru = RF_EXTRACT_RU(node->params[3].v);
	RF_DiskQueueDataFlags_t flags = 0;
	RF_IoType_t iotype = (node->dagHdr->status == rf_enable) ? RF_IO_TYPE_WRITE : RF_IO_TYPE_NOP;
	RF_DiskQueue_t **dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;
	void   *b_proc = NULL;

	if (node->dagHdr->bp)
		b_proc = (void *) ((struct buf *) node->dagHdr->bp)->b_proc;

	/* normal processing (rollaway or forward recovery) begins here */
	RF_ASSERT(!(lock && unlock));
	flags |= (lock) ? RF_LOCK_DISK_QUEUE : 0;
	flags |= (unlock) ? RF_UNLOCK_DISK_QUEUE : 0;
	req = rf_CreateDiskQueueData(iotype, pda->startSector, pda->numSector,
	    buf, parityStripeID, which_ru,
	    (int (*) (void *, int)) node->wakeFunc,
	    (void *) node, NULL,
	    node->dagHdr->tracerec,
	    (void *) (node->dagHdr->raidPtr),
	    flags, b_proc);

	if (!req) {
		(node->wakeFunc) (node, ENOMEM);
	} else {
		node->dagFuncData = (void *) req;
		rf_DiskIOEnqueue(&(dqs[pda->row][pda->col]), req, priority);
	}

	return (0);
}
/*****************************************************************************************
 * the undo function for disk nodes
 * Note:  this is not a proper undo of a write node, only locks are released.
 *        old data is not restored to disk!
 ****************************************************************************************/
int
rf_DiskUndoFunc(node)
	RF_DagNode_t *node;
{
	RF_DiskQueueData_t *req;
	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
	RF_DiskQueue_t **dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;

	req = rf_CreateDiskQueueData(RF_IO_TYPE_NOP,
	    0L, 0, NULL, 0L, 0,
	    (int (*) (void *, int)) node->wakeFunc,
	    (void *) node,
	    NULL, node->dagHdr->tracerec,
	    (void *) (node->dagHdr->raidPtr),
	    RF_UNLOCK_DISK_QUEUE, NULL);
	if (!req)
		(node->wakeFunc) (node, ENOMEM);
	else {
		node->dagFuncData = (void *) req;
		rf_DiskIOEnqueue(&(dqs[pda->row][pda->col]), req, RF_IO_NORMAL_PRIORITY);
	}

	return (0);
}
/*****************************************************************************************
 * the execution function associated with an "unlock disk queue" node
 ****************************************************************************************/
int
rf_DiskUnlockFuncForThreads(node)
	RF_DagNode_t *node;
{
	RF_DiskQueueData_t *req;
	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
	RF_DiskQueue_t **dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;

	req = rf_CreateDiskQueueData(RF_IO_TYPE_NOP,
	    0L, 0, NULL, 0L, 0,
	    (int (*) (void *, int)) node->wakeFunc,
	    (void *) node,
	    NULL, node->dagHdr->tracerec,
	    (void *) (node->dagHdr->raidPtr),
	    RF_UNLOCK_DISK_QUEUE, NULL);
	if (!req)
		(node->wakeFunc) (node, ENOMEM);
	else {
		node->dagFuncData = (void *) req;
		rf_DiskIOEnqueue(&(dqs[pda->row][pda->col]), req, RF_IO_NORMAL_PRIORITY);
	}

	return (0);
}
/*****************************************************************************************
 * Callback routine for DiskRead and DiskWrite nodes.  When the disk op completes,
 * the routine is called to set the node status and inform the execution engine that
 * the node has fired.
 ****************************************************************************************/
int
rf_GenericWakeupFunc(node, status)
	RF_DagNode_t *node;
	int     status;
{
	switch (node->status) {
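	/*
	 * rf_bwd1: the first of the node's two phases has completed
	 * (apparently part of backward error recovery); free the disk queue
	 * data from the completed request and re-fire the node as a write.
	 */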
	case rf_bwd1:
		node->status = rf_bwd2;
		if (node->dagFuncData)
			rf_FreeDiskQueueData((RF_DiskQueueData_t *) node->dagFuncData);
		return (rf_DiskWriteFuncForThreads(node));
		break;
	case rf_fired:
		if (status)
			node->status = rf_bad;
		else
			node->status = rf_good;
		break;
	case rf_recover:
		/* probably should never reach this case */
		if (status)
			node->status = rf_panic;
		else
			node->status = rf_undone;
		break;
	default:
		printf("rf_GenericWakeupFunc:");
		printf("node->status is %d,", node->status);
		printf("status is %d \n", status);
		RF_PANIC();
		break;
	}
	if (node->dagFuncData)
		rf_FreeDiskQueueData((RF_DiskQueueData_t *) node->dagFuncData);
	return (rf_FinishNode(node, RF_INTR_CONTEXT));
}


/*****************************************************************************************
 * there are three distinct types of xor nodes
 * A "regular xor" is used in the fault-free case where the access spans a complete
 * stripe unit.  It assumes that the result buffer is one full stripe unit in size,
 * and uses the stripe-unit-offset values that it computes from the PDAs to determine
 * where within the stripe unit to XOR each argument buffer.
 *
 * A "simple xor" is used in the fault-free case where the access touches only a portion
 * of one (or two, in some cases) stripe unit(s).  It assumes that all the argument
 * buffers are of the same size and have the same stripe unit offset.
 *
 * A "recovery xor" is used in the degraded-mode case.  It's similar to the regular
 * xor function except that it takes the failed PDA as an additional parameter, and
 * uses it to determine what portions of the argument buffers need to be xor'd into
 * the result buffer, and where in the result buffer they should go.
 ****************************************************************************************/

/* xor the params together and store the result in the result field.
 * assume the result field points to a buffer that is the size of one SU,
 * and use the pda params to determine where within the buffer to XOR
 * the input buffers.
 */
int
rf_RegularXorFunc(node)
	RF_DagNode_t *node;
{
	RF_Raid_t *raidPtr = (RF_Raid_t *) node->params[node->numParams - 1].p;
	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
	RF_Etimer_t timer;
	int     i, retcode;

	retcode = 0;
	if (node->dagHdr->status == rf_enable) {
		/* don't do the XOR if the input is the same as the output */
		RF_ETIMER_START(timer);
		for (i = 0; i < node->numParams - 1; i += 2)
			if (node->params[i + 1].p != node->results[0]) {
				retcode = rf_XorIntoBuffer(raidPtr, (RF_PhysDiskAddr_t *) node->params[i].p,
				    (char *) node->params[i + 1].p, (char *) node->results[0], node->dagHdr->bp);
			}
		RF_ETIMER_STOP(timer);
		RF_ETIMER_EVAL(timer);
		tracerec->xor_us += RF_ETIMER_VAL_US(timer);
	}
	return (rf_GenericWakeupFunc(node, retcode));	/* call wake func
							 * explicitly since no
							 * I/O in this node */
}
/* xor the inputs into the result buffer, ignoring placement issues */
int
rf_SimpleXorFunc(node)
	RF_DagNode_t *node;
{
	RF_Raid_t *raidPtr = (RF_Raid_t *) node->params[node->numParams - 1].p;
	int     i, retcode = 0;
	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
	RF_Etimer_t timer;

	if (node->dagHdr->status == rf_enable) {
		RF_ETIMER_START(timer);
		/* don't do the XOR if the input is the same as the output */
		for (i = 0; i < node->numParams - 1; i += 2)
			if (node->params[i + 1].p != node->results[0]) {
				retcode = rf_bxor((char *) node->params[i + 1].p, (char *) node->results[0],
				    rf_RaidAddressToByte(raidPtr, ((RF_PhysDiskAddr_t *) node->params[i].p)->numSector),
				    (struct buf *) node->dagHdr->bp);
			}
		RF_ETIMER_STOP(timer);
		RF_ETIMER_EVAL(timer);
		tracerec->xor_us += RF_ETIMER_VAL_US(timer);
	}
	return (rf_GenericWakeupFunc(node, retcode));	/* call wake func
							 * explicitly since no
							 * I/O in this node */
}
/* this xor is used by the degraded-mode dag functions to recover lost data.
 * the second-to-last parameter is the PDA for the failed portion of the access.
 * the code here looks at this PDA and assumes that the xor target buffer is
 * equal in size to the number of sectors in the failed PDA.  It then uses
 * the other PDAs in the parameter list to determine where within the target
 * buffer the corresponding data should be xored.
 */
int
rf_RecoveryXorFunc(node)
	RF_DagNode_t *node;
{
	RF_Raid_t *raidPtr = (RF_Raid_t *) node->params[node->numParams - 1].p;
	RF_RaidLayout_t *layoutPtr = (RF_RaidLayout_t *) & raidPtr->Layout;
	RF_PhysDiskAddr_t *failedPDA = (RF_PhysDiskAddr_t *) node->params[node->numParams - 2].p;
	int     i, retcode = 0;
	RF_PhysDiskAddr_t *pda;
	int     suoffset, failedSUOffset = rf_StripeUnitOffset(layoutPtr, failedPDA->startSector);
	char   *srcbuf, *destbuf;
	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
	RF_Etimer_t timer;

	if (node->dagHdr->status == rf_enable) {
		RF_ETIMER_START(timer);
		for (i = 0; i < node->numParams - 2; i += 2)
			if (node->params[i + 1].p != node->results[0]) {
				pda = (RF_PhysDiskAddr_t *) node->params[i].p;
				srcbuf = (char *) node->params[i + 1].p;
				suoffset = rf_StripeUnitOffset(layoutPtr, pda->startSector);
				destbuf = ((char *) node->results[0]) + rf_RaidAddressToByte(raidPtr, suoffset - failedSUOffset);
				retcode = rf_bxor(srcbuf, destbuf, rf_RaidAddressToByte(raidPtr, pda->numSector), node->dagHdr->bp);
			}
		RF_ETIMER_STOP(timer);
		RF_ETIMER_EVAL(timer);
		tracerec->xor_us += RF_ETIMER_VAL_US(timer);
	}
	return (rf_GenericWakeupFunc(node, retcode));
}
/*****************************************************************************************
 * The next three functions are utilities used by the above xor-execution functions.
 ****************************************************************************************/


/*
 * this is just a glorified buffer xor.  targbuf points to a buffer that is one full stripe unit
 * in size.  srcbuf points to a buffer that may be less than 1 SU, but never more.  When the
 * access described by pda is one SU in size (which by implication means it's SU-aligned),
 * all that happens is (targbuf) <- (srcbuf ^ targbuf).  When the access is less than one
 * SU in size the XOR occurs on only the portion of targbuf identified in the pda.
 */

int
rf_XorIntoBuffer(raidPtr, pda, srcbuf, targbuf, bp)
	RF_Raid_t *raidPtr;
	RF_PhysDiskAddr_t *pda;
	char   *srcbuf;
	char   *targbuf;
	void   *bp;
{
	char   *targptr;
	int     sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
	int     SUOffset = pda->startSector % sectPerSU;
	int     length, retcode = 0;

	RF_ASSERT(pda->numSector <= sectPerSU);

	targptr = targbuf + rf_RaidAddressToByte(raidPtr, SUOffset);
	length = rf_RaidAddressToByte(raidPtr, pda->numSector);
	retcode = rf_bxor(srcbuf, targptr, length, bp);
	return (retcode);
}
/* it really should be the case that the buffer pointers (returned by malloc)
 * are aligned to the natural word size of the machine, so this is the only
 * case we optimize for.  The length should always be a multiple of the sector
 * size, so there should be no problem with leftover bytes at the end.
 */
int
rf_bxor(src, dest, len, bp)
	char   *src;
	char   *dest;
	int     len;
	void   *bp;
{
	unsigned mask = sizeof(long) - 1, retcode = 0;

	if (!(((unsigned long) src) & mask) && !(((unsigned long) dest) & mask) && !(len & mask)) {
		retcode = rf_longword_bxor((unsigned long *) src, (unsigned long *) dest, len >> RF_LONGSHIFT, bp);
	} else {
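		/* unaligned buffers or odd lengths are not handled */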
		RF_ASSERT(0);
	}
	return (retcode);
}
/* map a user buffer into kernel space, if necessary */
#define REMAP_VA(_bp,x,y) (y) = (x)
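/* in this implementation the remap is an identity assignment; the pg_*
 * pointers below simply alias the original buffer pointers, on the
 * assumption that the buffers are already addressable from this context */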

/* When XORing in kernel mode, we need to map each user page to kernel space before we can access it.
 * We don't want to assume anything about which input buffers are in kernel/user
 * space, nor about their alignment, so in each loop we compute the maximum number
 * of bytes that we can xor without crossing any page boundaries, and do only this many
 * bytes before the next remap.
 */
int
rf_longword_bxor(src, dest, len, bp)
	unsigned long *src;
	unsigned long *dest;
	int     len;		/* longwords */
	void   *bp;
{
	unsigned long *end = src + len;
	unsigned long d0, d1, d2, d3, s0, s1, s2, s3;	/* temps */
	unsigned long *pg_src, *pg_dest;	/* per-page source/dest
						 * pointers */
	int     longs_this_time;/* # longwords to xor in the current iteration */

	REMAP_VA(bp, src, pg_src);
	REMAP_VA(bp, dest, pg_dest);
	if (!pg_src || !pg_dest)
		return (EFAULT);

	while (len >= 4) {
		longs_this_time = RF_MIN(len, RF_MIN(RF_BLIP(pg_src), RF_BLIP(pg_dest)) >> RF_LONGSHIFT);	/* note len in longwords */
		src += longs_this_time;
		dest += longs_this_time;
		len -= longs_this_time;
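		/* xor four longwords per iteration to expose some
		 * instruction-level parallelism; the RF_BLIP() bound above
		 * keeps pg_src/pg_dest within their current pages */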
		while (longs_this_time >= 4) {
			d0 = pg_dest[0];
			d1 = pg_dest[1];
			d2 = pg_dest[2];
			d3 = pg_dest[3];
			s0 = pg_src[0];
			s1 = pg_src[1];
			s2 = pg_src[2];
			s3 = pg_src[3];
			pg_dest[0] = d0 ^ s0;
			pg_dest[1] = d1 ^ s1;
			pg_dest[2] = d2 ^ s2;
			pg_dest[3] = d3 ^ s3;
			pg_src += 4;
			pg_dest += 4;
			longs_this_time -= 4;
		}
		while (longs_this_time > 0) {	/* cannot cross any page
						 * boundaries here */
			*pg_dest++ ^= *pg_src++;
			longs_this_time--;
		}

		/* either we're done, or we've reached a page boundary on one
		 * (or possibly both) of the pointers */
		if (len) {
			if (RF_PAGE_ALIGNED(src))
				REMAP_VA(bp, src, pg_src);
			if (RF_PAGE_ALIGNED(dest))
				REMAP_VA(bp, dest, pg_dest);
			if (!pg_src || !pg_dest)
				return (EFAULT);
		}
	}
	while (src < end) {
		*pg_dest++ ^= *pg_src++;
		src++;
		dest++;
		len--;
		if (RF_PAGE_ALIGNED(src))
			REMAP_VA(bp, src, pg_src);
		if (RF_PAGE_ALIGNED(dest))
			REMAP_VA(bp, dest, pg_dest);
	}
	RF_ASSERT(len == 0);
	return (0);
}


/*
   dst = a ^ b ^ c;
   a may equal dst
   see comment above longword_bxor
*/
int
rf_longword_bxor3(dst, a, b, c, len, bp)
	unsigned long *dst;
	unsigned long *a;
	unsigned long *b;
	unsigned long *c;
	int     len;		/* length in longwords */
	void   *bp;
{
	unsigned long a0, a1, a2, a3, b0, b1, b2, b3;
	unsigned long *pg_a, *pg_b, *pg_c, *pg_dst;	/* per-page source/dest
							 * pointers */
	int     longs_this_time;/* # longs to xor in the current iteration */
	char    dst_is_a = 0;

	REMAP_VA(bp, a, pg_a);
	REMAP_VA(bp, b, pg_b);
	REMAP_VA(bp, c, pg_c);
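	/* when dst aliases a, let pg_dst track pg_a here and after every
	 * subsequent remap rather than remapping dst separately */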
	if (a == dst) {
		pg_dst = pg_a;
		dst_is_a = 1;
	} else {
		REMAP_VA(bp, dst, pg_dst);
	}

	/* align dest to cache line.  Can't cross a pg boundary on dst here. */
	while ((((unsigned long) pg_dst) & 0x1f)) {
		*pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
		dst++;
		a++;
		b++;
		c++;
		if (RF_PAGE_ALIGNED(a)) {
			REMAP_VA(bp, a, pg_a);
			if (!pg_a)
				return (EFAULT);
		}
		if (RF_PAGE_ALIGNED(b)) {
			REMAP_VA(bp, b, pg_b);
			if (!pg_b)
				return (EFAULT);
		}
		if (RF_PAGE_ALIGNED(c)) {
			REMAP_VA(bp, c, pg_c);
			if (!pg_c)
				return (EFAULT);
		}
		len--;
	}

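	/* main loop: each pass handles the largest run of longwords that
	 * stays within the current page of all four buffers */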
	while (len > 4) {
		longs_this_time = RF_MIN(len, RF_MIN(RF_BLIP(a), RF_MIN(RF_BLIP(b), RF_MIN(RF_BLIP(c), RF_BLIP(dst)))) >> RF_LONGSHIFT);
		a += longs_this_time;
		b += longs_this_time;
		c += longs_this_time;
		dst += longs_this_time;
		len -= longs_this_time;
		while (longs_this_time >= 4) {
			a0 = pg_a[0];
			longs_this_time -= 4;

			a1 = pg_a[1];
			a2 = pg_a[2];

			a3 = pg_a[3];
			pg_a += 4;

			b0 = pg_b[0];
			b1 = pg_b[1];

			b2 = pg_b[2];
			b3 = pg_b[3];
			/* start dual issue */
			a0 ^= b0;
			b0 = pg_c[0];

			pg_b += 4;
			a1 ^= b1;

			a2 ^= b2;
			a3 ^= b3;

			b1 = pg_c[1];
			a0 ^= b0;

			b2 = pg_c[2];
			a1 ^= b1;

			b3 = pg_c[3];
			a2 ^= b2;

			pg_dst[0] = a0;
			a3 ^= b3;
			pg_dst[1] = a1;
			pg_c += 4;
			pg_dst[2] = a2;
			pg_dst[3] = a3;
			pg_dst += 4;
		}
		while (longs_this_time > 0) {	/* cannot cross any page
						 * boundaries here */
			*pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
			longs_this_time--;
		}

		if (len) {
			if (RF_PAGE_ALIGNED(a)) {
				REMAP_VA(bp, a, pg_a);
				if (!pg_a)
					return (EFAULT);
				if (dst_is_a)
					pg_dst = pg_a;
			}
			if (RF_PAGE_ALIGNED(b)) {
				REMAP_VA(bp, b, pg_b);
				if (!pg_b)
					return (EFAULT);
			}
			if (RF_PAGE_ALIGNED(c)) {
				REMAP_VA(bp, c, pg_c);
				if (!pg_c)
					return (EFAULT);
			}
			if (!dst_is_a)
				if (RF_PAGE_ALIGNED(dst)) {
					REMAP_VA(bp, dst, pg_dst);
					if (!pg_dst)
						return (EFAULT);
				}
		}
	}
	while (len) {
		*pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
		dst++;
		a++;
		b++;
		c++;
		if (RF_PAGE_ALIGNED(a)) {
			REMAP_VA(bp, a, pg_a);
			if (!pg_a)
				return (EFAULT);
			if (dst_is_a)
				pg_dst = pg_a;
		}
		if (RF_PAGE_ALIGNED(b)) {
			REMAP_VA(bp, b, pg_b);
			if (!pg_b)
				return (EFAULT);
		}
		if (RF_PAGE_ALIGNED(c)) {
			REMAP_VA(bp, c, pg_c);
			if (!pg_c)
				return (EFAULT);
		}
		if (!dst_is_a)
			if (RF_PAGE_ALIGNED(dst)) {
				REMAP_VA(bp, dst, pg_dst);
				if (!pg_dst)
					return (EFAULT);
			}
		len--;
	}
	return (0);
}

int
rf_bxor3(dst, a, b, c, len, bp)
	unsigned char *dst;
	unsigned char *a;
	unsigned char *b;
	unsigned char *c;
	unsigned long len;
	void   *bp;
{
	RF_ASSERT(((RF_UL(dst) | RF_UL(a) | RF_UL(b) | RF_UL(c) | len) & 0x7) == 0);

	return (rf_longword_bxor3((unsigned long *) dst, (unsigned long *) a,
		(unsigned long *) b, (unsigned long *) c, len >> RF_LONGSHIFT, bp));
}
893