/*	$NetBSD: rf_reconutil.c,v 1.12 2002/11/19 01:49:42 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/********************************************
 * rf_reconutil.c -- reconstruction utilities
 ********************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_reconutil.c,v 1.12 2002/11/19 01:49:42 oster Exp $");

#include <dev/raidframe/raidframevar.h>

#include "rf_raid.h"
#include "rf_desc.h"
#include "rf_reconutil.h"
#include "rf_reconbuffer.h"
#include "rf_general.h"
#include "rf_decluster.h"
#include "rf_raid5_rotatedspare.h"
#include "rf_interdecluster.h"
#include "rf_chaindecluster.h"

/*******************************************************************
 * allocates/frees the reconstruction control information structures
 *******************************************************************/
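/*
 * Rough usage sketch (not taken from this file): rf_FreeReconControl()
 * below reads raidPtr->reconControl[row], so the reconstruction
 * descriptor path is expected to store the result there and release it
 * when the sweep completes, roughly:
 *
 *	raidPtr->reconControl[frow] =
 *	    rf_MakeReconControl(reconDesc, frow, fcol, srow, scol);
 *	... run the reconstruction ...
 *	rf_FreeReconControl(raidPtr, frow);
 */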
RF_ReconCtrl_t *
rf_MakeReconControl(reconDesc, frow, fcol, srow, scol)
	RF_RaidReconDesc_t *reconDesc;
	RF_RowCol_t frow;	/* failed row and column */
	RF_RowCol_t fcol;
	RF_RowCol_t srow;	/* identifies which spare we're using */
	RF_RowCol_t scol;
{
	RF_Raid_t *raidPtr = reconDesc->raidPtr;
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_ReconUnitCount_t RUsPerPU = layoutPtr->SUsPerPU / layoutPtr->SUsPerRU;
	RF_ReconUnitCount_t numSpareRUs;
	RF_ReconCtrl_t *reconCtrlPtr;
	RF_ReconBuffer_t *rbuf;
	RF_LayoutSW_t *lp;
#if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
	int     retcode;
#endif
	int rc;
	RF_RowCol_t i;

	lp = raidPtr->Layout.map;

	/* make and zero the global reconstruction structure and the per-disk
	 * structure */
	RF_Calloc(reconCtrlPtr, 1, sizeof(RF_ReconCtrl_t), (RF_ReconCtrl_t *));

	/* note: this zeros the perDiskInfo */
	RF_Calloc(reconCtrlPtr->perDiskInfo, raidPtr->numCol,
		  sizeof(RF_PerDiskReconCtrl_t), (RF_PerDiskReconCtrl_t *));
	reconCtrlPtr->reconDesc = reconDesc;
	reconCtrlPtr->fcol = fcol;
	reconCtrlPtr->spareRow = srow;
	reconCtrlPtr->spareCol = scol;
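	/* one parity stripe ID per parity unit: the total number of parity
	 * stripes the reconstruction will sweep */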
	reconCtrlPtr->lastPSID = layoutPtr->numStripe / layoutPtr->SUsPerPU;
	reconCtrlPtr->percentComplete = 0;

	/* initialize each per-disk recon information structure */
	for (i = 0; i < raidPtr->numCol; i++) {
		reconCtrlPtr->perDiskInfo[i].reconCtrl = reconCtrlPtr;
		reconCtrlPtr->perDiskInfo[i].row = frow;
		reconCtrlPtr->perDiskInfo[i].col = i;
		/* make it appear as if we just finished an RU */
		reconCtrlPtr->perDiskInfo[i].curPSID = -1;
		reconCtrlPtr->perDiskInfo[i].ru_count = RUsPerPU - 1;
	}

	/* Get the number of spare units per disk and the sparemap in case
	 * spare is distributed  */

	if (lp->GetNumSpareRUs) {
		numSpareRUs = lp->GetNumSpareRUs(raidPtr);
	} else {
		numSpareRUs = 0;
	}

#if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
	/*
         * Not all distributed sparing archs need dynamic mappings
         */
	if (lp->InstallSpareTable) {
		retcode = rf_InstallSpareTable(raidPtr, frow, fcol);
		if (retcode) {
			RF_PANIC();	/* XXX fix this */
		}
	}
#endif
	/* make the reconstruction map */
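	/* (arguments: RU size in sectors, disk size in sectors, spare RUs
	 * per disk) */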
	reconCtrlPtr->reconMap = rf_MakeReconMap(raidPtr, (int) (layoutPtr->SUsPerRU * layoutPtr->sectorsPerStripeUnit),
	    raidPtr->sectorsPerDisk, numSpareRUs);

	/* make the per-disk reconstruction buffers */
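	/* the failed column is never read during reconstruction, so it gets
	 * no buffer */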
	for (i = 0; i < raidPtr->numCol; i++) {
		reconCtrlPtr->perDiskInfo[i].rbuf = (i == fcol) ? NULL : rf_MakeReconBuffer(raidPtr, frow, i, RF_RBUF_TYPE_EXCLUSIVE);
	}

	/* initialize the event queue */
	rc = rf_mutex_init(&reconCtrlPtr->eq_mutex);
	if (rc) {
		/* XXX deallocate, cleanup */
		rf_print_unable_to_init_mutex(__FILE__, __LINE__, rc);
		return (NULL);
	}
	rc = rf_cond_init(&reconCtrlPtr->eq_cond);
	if (rc) {
		/* XXX deallocate, cleanup */
		rf_print_unable_to_init_cond(__FILE__, __LINE__, rc);
		return (NULL);
	}
	reconCtrlPtr->eventQueue = NULL;
	reconCtrlPtr->eq_count = 0;

	/* make the floating recon buffers and append them to the free list */
	rc = rf_mutex_init(&reconCtrlPtr->rb_mutex);
	if (rc) {
		/* XXX deallocate, cleanup */
		rf_print_unable_to_init_mutex(__FILE__, __LINE__, rc);
		return (NULL);
	}
	reconCtrlPtr->fullBufferList = NULL;
	reconCtrlPtr->priorityList = NULL;
	reconCtrlPtr->floatingRbufs = NULL;
	reconCtrlPtr->committedRbufs = NULL;
	for (i = 0; i < raidPtr->numFloatingReconBufs; i++) {
		rbuf = rf_MakeReconBuffer(raidPtr, frow, fcol,
					  RF_RBUF_TYPE_FLOATING);
		rbuf->next = reconCtrlPtr->floatingRbufs;
		reconCtrlPtr->floatingRbufs = rbuf;
	}

	/* create the parity stripe status table */
	reconCtrlPtr->pssTable = rf_MakeParityStripeStatusTable(raidPtr);

	/* set the initial min head sep counter val */
	reconCtrlPtr->minHeadSepCounter = 0;

	return (reconCtrlPtr);
}

void
rf_FreeReconControl(raidPtr, row)
	RF_Raid_t *raidPtr;
	RF_RowCol_t row;
{
	RF_ReconCtrl_t *reconCtrlPtr = raidPtr->reconControl[row];
	RF_ReconBuffer_t *t;
	RF_ReconUnitNum_t i;

	RF_ASSERT(reconCtrlPtr);
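	/* free the exclusive per-disk buffers; the failed column's entry is
	 * NULL */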
	for (i = 0; i < raidPtr->numCol; i++)
		if (reconCtrlPtr->perDiskInfo[i].rbuf)
			rf_FreeReconBuffer(reconCtrlPtr->perDiskInfo[i].rbuf);
	for (i = 0; i < raidPtr->numFloatingReconBufs; i++) {
		t = reconCtrlPtr->floatingRbufs;
		RF_ASSERT(t);
		reconCtrlPtr->floatingRbufs = t->next;
		rf_FreeReconBuffer(t);
	}
	rf_mutex_destroy(&reconCtrlPtr->rb_mutex);
	rf_mutex_destroy(&reconCtrlPtr->eq_mutex);
	rf_cond_destroy(&reconCtrlPtr->eq_cond);
	rf_FreeReconMap(reconCtrlPtr->reconMap);
	rf_FreeParityStripeStatusTable(raidPtr, reconCtrlPtr->pssTable);
	RF_Free(reconCtrlPtr->perDiskInfo,
		raidPtr->numCol * sizeof(RF_PerDiskReconCtrl_t));
	RF_Free(reconCtrlPtr, sizeof(*reconCtrlPtr));
}


/******************************************************************************
 * computes the default head separation limit
 *****************************************************************************/
RF_HeadSepLimit_t
rf_GetDefaultHeadSepLimit(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_HeadSepLimit_t hsl;
	RF_LayoutSW_t *lp;

	lp = raidPtr->Layout.map;
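	/* a layout that supplies no limit of its own reports -1 */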
	if (lp->GetDefaultHeadSepLimit == NULL)
		return (-1);
	hsl = lp->GetDefaultHeadSepLimit(raidPtr);
	return (hsl);
}


/******************************************************************************
 * computes the default number of floating recon buffers
 *****************************************************************************/
int
rf_GetDefaultNumFloatingReconBuffers(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_LayoutSW_t *lp;
	int     nrb;

	lp = raidPtr->Layout.map;
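	/* fall back to three buffers per column if the layout has no
	 * preference */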
	if (lp->GetDefaultNumFloatingReconBuffers == NULL)
		return (3 * raidPtr->numCol);
	nrb = lp->GetDefaultNumFloatingReconBuffers(raidPtr);
	return (nrb);
}


/******************************************************************************
 * creates and initializes a reconstruction buffer
 *****************************************************************************/
RF_ReconBuffer_t *
rf_MakeReconBuffer(
    RF_Raid_t * raidPtr,
    RF_RowCol_t row,
    RF_RowCol_t col,
    RF_RbufType_t type)
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_ReconBuffer_t *t;
	u_int   recon_buffer_size = rf_RaidAddressToByte(raidPtr, layoutPtr->SUsPerRU * layoutPtr->sectorsPerStripeUnit);

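	/* each buffer holds one full reconstruction unit (SUsPerRU stripe
	 * units) of data */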
	RF_Malloc(t, sizeof(RF_ReconBuffer_t), (RF_ReconBuffer_t *));
	RF_Malloc(t->buffer, recon_buffer_size, (caddr_t));
	t->raidPtr = raidPtr;
	t->row = row;
	t->col = col;
	t->priority = RF_IO_RECON_PRIORITY;
	t->type = type;
	t->pssPtr = NULL;
	t->next = NULL;
	return (t);
}
/******************************************************************************
 * frees a reconstruction buffer
 *****************************************************************************/
void
rf_FreeReconBuffer(rbuf)
	RF_ReconBuffer_t *rbuf;
{
	RF_Raid_t *raidPtr = rbuf->raidPtr;
	u_int   recon_buffer_size;

	recon_buffer_size = rf_RaidAddressToByte(raidPtr, raidPtr->Layout.SUsPerRU * raidPtr->Layout.sectorsPerStripeUnit);

	RF_Free(rbuf->buffer, recon_buffer_size);
	RF_Free(rbuf, sizeof(*rbuf));
}

#if RF_DEBUG_RECON
/******************************************************************************
 * debug only:  sanity check the number of floating recon bufs in use
 *****************************************************************************/
void
rf_CheckFloatingRbufCount(raidPtr, dolock)
	RF_Raid_t *raidPtr;
	int     dolock;
{
	RF_ReconParityStripeStatus_t *p;
	RF_PSStatusHeader_t *pssTable;
	RF_ReconBuffer_t *rbuf;
	int     i, j, sum = 0;
	RF_RowCol_t frow = 0;

	for (i = 0; i < raidPtr->numRow; i++)
		if (raidPtr->reconControl[i]) {
			frow = i;
			break;
		}
	RF_ASSERT(frow >= 0);

	if (dolock)
		RF_LOCK_MUTEX(raidPtr->reconControl[frow]->rb_mutex);
	pssTable = raidPtr->reconControl[frow]->pssTable;

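	/* count floating buffers currently attached to parity stripe status
	 * entries */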
	for (i = 0; i < raidPtr->pssTableSize; i++) {
		RF_LOCK_MUTEX(pssTable[i].mutex);
		for (p = pssTable[i].chain; p; p = p->next) {
			rbuf = (RF_ReconBuffer_t *) p->rbuf;
			if (rbuf && rbuf->type == RF_RBUF_TYPE_FLOATING)
				sum++;

			rbuf = (RF_ReconBuffer_t *) p->writeRbuf;
			if (rbuf && rbuf->type == RF_RBUF_TYPE_FLOATING)
				sum++;

			for (j = 0; j < p->xorBufCount; j++) {
				rbuf = (RF_ReconBuffer_t *) p->rbufsForXor[j];
				RF_ASSERT(rbuf);
				if (rbuf->type == RF_RBUF_TYPE_FLOATING)
					sum++;
			}
		}
		RF_UNLOCK_MUTEX(pssTable[i].mutex);
	}

	for (rbuf = raidPtr->reconControl[frow]->floatingRbufs; rbuf;
	     rbuf = rbuf->next) {
		if (rbuf->type == RF_RBUF_TYPE_FLOATING)
			sum++;
	}
	for (rbuf = raidPtr->reconControl[frow]->committedRbufs; rbuf;
	     rbuf = rbuf->next) {
		if (rbuf->type == RF_RBUF_TYPE_FLOATING)
			sum++;
	}
	for (rbuf = raidPtr->reconControl[frow]->fullBufferList; rbuf;
	     rbuf = rbuf->next) {
		if (rbuf->type == RF_RBUF_TYPE_FLOATING)
			sum++;
	}
	for (rbuf = raidPtr->reconControl[frow]->priorityList; rbuf;
	     rbuf = rbuf->next) {
		if (rbuf->type == RF_RBUF_TYPE_FLOATING)
			sum++;
	}

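	/* every floating buffer should be accounted for: attached to a
	 * parity stripe, on the free list, committed, full, or on the
	 * priority list */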
	RF_ASSERT(sum == raidPtr->numFloatingReconBufs);

	if (dolock)
		RF_UNLOCK_MUTEX(raidPtr->reconControl[frow]->rb_mutex);
}
#endif