/*	$NetBSD: rf_reconutil.c,v 1.6 2002/09/14 17:53:58 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/********************************************
 * rf_reconutil.c -- reconstruction utilities
 ********************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_reconutil.c,v 1.6 2002/09/14 17:53:58 oster Exp $");

#include <dev/raidframe/raidframevar.h>

#include "rf_raid.h"
#include "rf_desc.h"
#include "rf_reconutil.h"
#include "rf_reconbuffer.h"
#include "rf_general.h"
#include "rf_decluster.h"
#include "rf_raid5_rotatedspare.h"
#include "rf_interdecluster.h"
#include "rf_chaindecluster.h"

/*******************************************************************
 * allocates/frees the reconstruction control information structures
 *******************************************************************/
RF_ReconCtrl_t *
rf_MakeReconControl(reconDesc, frow, fcol, srow, scol)
	RF_RaidReconDesc_t *reconDesc;
	RF_RowCol_t frow;	/* failed row and column */
	RF_RowCol_t fcol;
	RF_RowCol_t srow;	/* identifies which spare we're using */
	RF_RowCol_t scol;
{
	RF_Raid_t *raidPtr = reconDesc->raidPtr;
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_ReconUnitCount_t RUsPerPU = layoutPtr->SUsPerPU / layoutPtr->SUsPerRU;
	RF_ReconUnitCount_t numSpareRUs;
	RF_ReconCtrl_t *reconCtrlPtr;
	RF_ReconBuffer_t *rbuf;
	RF_LayoutSW_t *lp;
	int     retcode, rc;
	RF_RowCol_t i;

	lp = raidPtr->Layout.map;

	/* make and zero the global reconstruction structure and the per-disk
	 * structure */
	RF_Calloc(reconCtrlPtr, 1, sizeof(RF_ReconCtrl_t), (RF_ReconCtrl_t *));
	RF_Calloc(reconCtrlPtr->perDiskInfo, raidPtr->numCol, sizeof(RF_PerDiskReconCtrl_t), (RF_PerDiskReconCtrl_t *));	/* this zeros it */
	reconCtrlPtr->reconDesc = reconDesc;
	reconCtrlPtr->fcol = fcol;
	reconCtrlPtr->spareRow = srow;
	reconCtrlPtr->spareCol = scol;
	reconCtrlPtr->lastPSID = layoutPtr->numStripe / layoutPtr->SUsPerPU;
	reconCtrlPtr->percentComplete = 0;

	/* initialize each per-disk recon information structure */
	for (i = 0; i < raidPtr->numCol; i++) {
		reconCtrlPtr->perDiskInfo[i].reconCtrl = reconCtrlPtr;
		reconCtrlPtr->perDiskInfo[i].row = frow;
		reconCtrlPtr->perDiskInfo[i].col = i;
		reconCtrlPtr->perDiskInfo[i].curPSID = -1;	/* make it appear as if
								 * we just finished an
								 * RU */
		reconCtrlPtr->perDiskInfo[i].ru_count = RUsPerPU - 1;
	}
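
	/*
	 * Starting each disk with curPSID == -1 and ru_count == RUsPerPU - 1
	 * mimics the state just after finishing the last RU of a parity
	 * unit, so the per-disk state machine picks up a fresh parity stripe
	 * on its first pass.
	 */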

	/* Get the number of spare units per disk and the spare map, in case
	 * sparing is distributed  */

	if (lp->GetNumSpareRUs) {
		numSpareRUs = lp->GetNumSpareRUs(raidPtr);
	} else {
		numSpareRUs = 0;
	}

	/*
	 * Not all distributed sparing archs need dynamic mappings
	 */
	if (lp->InstallSpareTable) {
		retcode = rf_InstallSpareTable(raidPtr, frow, fcol);
		if (retcode) {
			RF_PANIC();	/* XXX fix this */
		}
	}
	/* make the reconstruction map */
	reconCtrlPtr->reconMap = rf_MakeReconMap(raidPtr, (int) (layoutPtr->SUsPerRU * layoutPtr->sectorsPerStripeUnit),
	    raidPtr->sectorsPerDisk, numSpareRUs);
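	/*
	 * The reconstruction map tracks, per reconstruction unit, which
	 * parts of the failed disk have been rebuilt; it is sized from the
	 * RU size in sectors, the disk size, and the number of spare RUs
	 * found above.
	 */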

	/* make the per-disk reconstruction buffers */
	for (i = 0; i < raidPtr->numCol; i++) {
		reconCtrlPtr->perDiskInfo[i].rbuf = (i == fcol) ? NULL : rf_MakeReconBuffer(raidPtr, frow, i, RF_RBUF_TYPE_EXCLUSIVE);
	}
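
	/*
	 * The failed column gets no exclusive buffer: it is the target of
	 * the reconstruction, not a source of surviving data.
	 */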

	/* initialize the event queue */
	rc = rf_mutex_init(&reconCtrlPtr->eq_mutex);
	if (rc) {
		/* XXX deallocate, cleanup */
		rf_print_unable_to_init_mutex(__FILE__, __LINE__, rc);
		return (NULL);
	}
	rc = rf_cond_init(&reconCtrlPtr->eq_cond);
	if (rc) {
		/* XXX deallocate, cleanup */
		rf_print_unable_to_init_cond(__FILE__, __LINE__, rc);
		return (NULL);
	}
	reconCtrlPtr->eventQueue = NULL;
	reconCtrlPtr->eq_count = 0;

	/* make the floating recon buffers and append them to the free list */
	rc = rf_mutex_init(&reconCtrlPtr->rb_mutex);
	if (rc) {
		/* XXX deallocate, cleanup */
		rf_print_unable_to_init_mutex(__FILE__, __LINE__, rc);
		return (NULL);
	}
	reconCtrlPtr->fullBufferList = NULL;
	reconCtrlPtr->priorityList = NULL;
	reconCtrlPtr->floatingRbufs = NULL;
	reconCtrlPtr->committedRbufs = NULL;
	for (i = 0; i < raidPtr->numFloatingReconBufs; i++) {
		rbuf = rf_MakeReconBuffer(raidPtr, frow, fcol, RF_RBUF_TYPE_FLOATING);
		rbuf->next = reconCtrlPtr->floatingRbufs;
		reconCtrlPtr->floatingRbufs = rbuf;
	}
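
	/*
	 * Unlike the per-disk EXCLUSIVE buffers above, the FLOATING buffers
	 * form a shared pool; rf_CheckFloatingRbufCount() below checks that
	 * none of them is ever lost.
	 */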

	/* create the parity stripe status table */
	reconCtrlPtr->pssTable = rf_MakeParityStripeStatusTable(raidPtr);

	/* set the initial min head sep counter val */
	reconCtrlPtr->minHeadSepCounter = 0;

	return (reconCtrlPtr);
}

void
rf_FreeReconControl(raidPtr, row)
	RF_Raid_t *raidPtr;
	RF_RowCol_t row;
{
	RF_ReconCtrl_t *reconCtrlPtr = raidPtr->reconControl[row];
	RF_ReconBuffer_t *t;
	RF_ReconUnitNum_t i;

	RF_ASSERT(reconCtrlPtr);
	for (i = 0; i < raidPtr->numCol; i++)
		if (reconCtrlPtr->perDiskInfo[i].rbuf)
			rf_FreeReconBuffer(reconCtrlPtr->perDiskInfo[i].rbuf);
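	/*
	 * By this point every floating buffer is expected to be back on the
	 * free list; the assertion below fires if any went missing.
	 */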
	for (i = 0; i < raidPtr->numFloatingReconBufs; i++) {
		t = reconCtrlPtr->floatingRbufs;
		RF_ASSERT(t);
		reconCtrlPtr->floatingRbufs = t->next;
		rf_FreeReconBuffer(t);
	}
	rf_mutex_destroy(&reconCtrlPtr->rb_mutex);
	rf_mutex_destroy(&reconCtrlPtr->eq_mutex);
	rf_cond_destroy(&reconCtrlPtr->eq_cond);
	rf_FreeReconMap(reconCtrlPtr->reconMap);
	rf_FreeParityStripeStatusTable(raidPtr, reconCtrlPtr->pssTable);
	RF_Free(reconCtrlPtr->perDiskInfo, raidPtr->numCol * sizeof(RF_PerDiskReconCtrl_t));
	RF_Free(reconCtrlPtr, sizeof(*reconCtrlPtr));
}


/******************************************************************************
 * computes the default head separation limit
 *****************************************************************************/
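/*
 * Layouts that do not supply a GetDefaultHeadSepLimit hook get -1 here,
 * which the reconstruction code presumably treats as "no head-separation
 * limit".
 */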
RF_HeadSepLimit_t
rf_GetDefaultHeadSepLimit(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_HeadSepLimit_t hsl;
	RF_LayoutSW_t *lp;

	lp = raidPtr->Layout.map;
	if (lp->GetDefaultHeadSepLimit == NULL)
		return (-1);
	hsl = lp->GetDefaultHeadSepLimit(raidPtr);
	return (hsl);
}


/******************************************************************************
 * computes the default number of floating recon buffers
 *****************************************************************************/
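/*
 * Layouts that do not supply a GetDefaultNumFloatingReconBuffers hook fall
 * back to three buffers per column.
 */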
int
rf_GetDefaultNumFloatingReconBuffers(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_LayoutSW_t *lp;
	int     nrb;

	lp = raidPtr->Layout.map;
	if (lp->GetDefaultNumFloatingReconBuffers == NULL)
		return (3 * raidPtr->numCol);
	nrb = lp->GetDefaultNumFloatingReconBuffers(raidPtr);
	return (nrb);
}


/******************************************************************************
 * creates and initializes a reconstruction buffer
 *****************************************************************************/
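/*
 * Each buffer holds one reconstruction unit's worth of data (SUsPerRU
 * stripe units) plus one "arrived" flag per column.
 */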
RF_ReconBuffer_t *
rf_MakeReconBuffer(
    RF_Raid_t * raidPtr,
    RF_RowCol_t row,
    RF_RowCol_t col,
    RF_RbufType_t type)
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_ReconBuffer_t *t;
	u_int   recon_buffer_size = rf_RaidAddressToByte(raidPtr, layoutPtr->SUsPerRU * layoutPtr->sectorsPerStripeUnit);

	RF_Malloc(t, sizeof(RF_ReconBuffer_t), (RF_ReconBuffer_t *));
	RF_Malloc(t->buffer, recon_buffer_size, (caddr_t));
	RF_Malloc(t->arrived, raidPtr->numCol * sizeof(char), (char *));
	t->raidPtr = raidPtr;
	t->row = row;
	t->col = col;
	t->priority = RF_IO_RECON_PRIORITY;
	t->type = type;
	t->pssPtr = NULL;
	t->next = NULL;
	return (t);
}
/******************************************************************************
 * frees a reconstruction buffer
 *****************************************************************************/
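/*
 * recon_buffer_size is recomputed here and must match the value used in
 * rf_MakeReconBuffer() above, so that the sizes passed to RF_Free() are the
 * sizes that were allocated.
 */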
void
rf_FreeReconBuffer(rbuf)
	RF_ReconBuffer_t *rbuf;
{
	RF_Raid_t *raidPtr = rbuf->raidPtr;
	u_int   recon_buffer_size = rf_RaidAddressToByte(raidPtr, raidPtr->Layout.SUsPerRU * raidPtr->Layout.sectorsPerStripeUnit);

	RF_Free(rbuf->arrived, raidPtr->numCol * sizeof(char));
	RF_Free(rbuf->buffer, recon_buffer_size);
	RF_Free(rbuf, sizeof(*rbuf));
}


/******************************************************************************
 * debug only:  sanity check the number of floating recon bufs in use
 *****************************************************************************/
void
rf_CheckFloatingRbufCount(raidPtr, dolock)
	RF_Raid_t *raidPtr;
	int     dolock;
{
	RF_ReconParityStripeStatus_t *p;
	RF_PSStatusHeader_t *pssTable;
	RF_ReconBuffer_t *rbuf;
	int     i, j, sum = 0;
	RF_RowCol_t frow = 0;

	for (i = 0; i < raidPtr->numRow; i++)
		if (raidPtr->reconControl[i]) {
			frow = i;
			break;
		}
	RF_ASSERT(frow >= 0);

	if (dolock)
		RF_LOCK_MUTEX(raidPtr->reconControl[frow]->rb_mutex);
	pssTable = raidPtr->reconControl[frow]->pssTable;

	for (i = 0; i < raidPtr->pssTableSize; i++) {
		RF_LOCK_MUTEX(pssTable[i].mutex);
		for (p = pssTable[i].chain; p; p = p->next) {
			rbuf = (RF_ReconBuffer_t *) p->rbuf;
			if (rbuf && rbuf->type == RF_RBUF_TYPE_FLOATING)
				sum++;

			rbuf = (RF_ReconBuffer_t *) p->writeRbuf;
			if (rbuf && rbuf->type == RF_RBUF_TYPE_FLOATING)
				sum++;

			for (j = 0; j < p->xorBufCount; j++) {
				rbuf = (RF_ReconBuffer_t *) p->rbufsForXor[j];
				RF_ASSERT(rbuf);
				if (rbuf->type == RF_RBUF_TYPE_FLOATING)
					sum++;
			}
		}
		RF_UNLOCK_MUTEX(pssTable[i].mutex);
	}

	for (rbuf = raidPtr->reconControl[frow]->floatingRbufs; rbuf; rbuf = rbuf->next) {
		if (rbuf->type == RF_RBUF_TYPE_FLOATING)
			sum++;
	}
	for (rbuf = raidPtr->reconControl[frow]->committedRbufs; rbuf; rbuf = rbuf->next) {
		if (rbuf->type == RF_RBUF_TYPE_FLOATING)
			sum++;
	}
	for (rbuf = raidPtr->reconControl[frow]->fullBufferList; rbuf; rbuf = rbuf->next) {
		if (rbuf->type == RF_RBUF_TYPE_FLOATING)
			sum++;
	}
	for (rbuf = raidPtr->reconControl[frow]->priorityList; rbuf; rbuf = rbuf->next) {
		if (rbuf->type == RF_RBUF_TYPE_FLOATING)
			sum++;
	}
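	/*
	 * Every floating buffer should have been counted exactly once:
	 * either attached to some parity stripe status entry (via rbuf,
	 * writeRbuf, or rbufsForXor) or sitting on one of the four
	 * recon-control lists walked above.
	 */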

	RF_ASSERT(sum == raidPtr->numFloatingReconBufs);

	if (dolock)
		RF_UNLOCK_MUTEX(raidPtr->reconControl[frow]->rb_mutex);
}