/*	$NetBSD: rf_decluster.c,v 1.3 1999/02/05 00:06:08 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*----------------------------------------------------------------------
 *
 * rf_decluster.c -- code related to the declustered layout
 *
 * Created 10-21-92 (MCH)
 *
 * Nov 93:  adding support for distributed sparing.  This code is a little
 *          complex:  the basic layout used is as follows:
 *          let F = (v-1)/GCD(r,v-1).  The spare space for each set of
 *          F consecutive fulltables is grouped together and placed after
 *          that set of tables.
 *                   +------------------------------+
 *                   |        F fulltables          |
 *                   |        Spare Space           |
 *                   |        F fulltables          |
 *                   |        Spare Space           |
 *                   |            ...               |
 *                   +------------------------------+
 *
 *--------------------------------------------------------------------*/
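/*
 * For illustration only (hypothetical parameters):  with v = 21 disks and
 * r = 5, GCD(r, v-1) = GCD(5, 20) = 5, so F = 20/5 = 4, and the layout
 * repeats as four fulltables followed by the spare space that serves them.
 */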

#include "rf_types.h"
#include "rf_raid.h"
#include "rf_raidframe.h"
#include "rf_configure.h"
#include "rf_decluster.h"
#include "rf_debugMem.h"
#include "rf_utils.h"
#include "rf_alloclist.h"
#include "rf_general.h"
#include "rf_shutdown.h"
#include "rf_sys.h"

extern int rf_copyback_in_progress;	/* debug only */

/* found in rf_kintf.c */
int     rf_GetSpareTableFromDaemon(RF_SparetWait_t * req);

/* configuration code */

int
rf_ConfigureDeclustered(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	int     b, v, k, r, lambda;	/* block design params */
	int     i, j;
	RF_RowCol_t *first_avail_slot;
	RF_StripeCount_t complete_FT_count, numCompleteFullTablesPerDisk;
	RF_DeclusteredConfigInfo_t *info;
	RF_StripeCount_t PUsPerDisk, spareRegionDepthInPUs, numCompleteSpareRegionsPerDisk,
	        extraPUsPerDisk;
	RF_StripeCount_t totSparePUsPerDisk;
	RF_SectorNum_t diskOffsetOfLastFullTableInSUs;
	RF_SectorCount_t SpareSpaceInSUs;
	char   *cfgBuf = (char *) (cfgPtr->layoutSpecific);
	RF_StripeNum_t l, SUID;

	SUID = l = 0;
	numCompleteSpareRegionsPerDisk = 0;

	/* 1. create layout specific structure */
	RF_MallocAndAdd(info, sizeof(RF_DeclusteredConfigInfo_t), (RF_DeclusteredConfigInfo_t *), raidPtr->cleanupList);
	if (info == NULL)
		return (ENOMEM);
	layoutPtr->layoutSpecificInfo = (void *) info;
	info->SpareTable = NULL;

	/* 2. extract parameters from the config structure */
	if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {
		(void) bcopy(cfgBuf, info->sparemap_fname, RF_SPAREMAP_NAME_LEN);
	}
	cfgBuf += RF_SPAREMAP_NAME_LEN;

	b = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	v = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	k = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	r = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	lambda = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	raidPtr->noRotate = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
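	/*
	 * In block-design terms (a sketch, not a spec):  b is the number of
	 * tuples in the design table, v the number of array columns, k the
	 * tuple size (data plus parity, i.e. the stripe width), r the number
	 * of tuples each column appears in, and lambda the number of tuples
	 * shared by any pair of columns.  noRotate, when set, disables the
	 * rotation of parity within each table.
	 */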

	/* the sparemaps are generated assuming that parity is rotated, so we
	 * issue a warning if both distributed sparing and no-rotate are on at
	 * the same time */
	if ((layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) && raidPtr->noRotate) {
		RF_ERRORMSG("Warning:  distributed sparing specified without parity rotation.\n");
	}
	if (raidPtr->numCol != v) {
		RF_ERRORMSG2("RAID: config error: table element count (%d) not equal to no. of cols (%d)\n", v, raidPtr->numCol);
		return (EINVAL);
	}
	/* 3.  set up the values used in the mapping code */
	info->BlocksPerTable = b;
	info->Lambda = lambda;
	info->NumParityReps = info->groupSize = k;
	info->SUsPerTable = b * (k - 1) * layoutPtr->SUsPerPU;	/* b blks, k-1 SUs each */
	info->SUsPerFullTable = k * info->SUsPerTable;	/* rot k times */
	info->PUsPerBlock = k - 1;
	info->SUsPerBlock = info->PUsPerBlock * layoutPtr->SUsPerPU;
	info->TableDepthInPUs = (b * k) / v;
	info->FullTableDepthInPUs = info->TableDepthInPUs * k;	/* k repetitions */

	/* used only in distributed sparing case */
	info->FullTablesPerSpareRegion = (v - 1) / rf_gcd(r, v - 1);	/* (v-1)/gcd fulltables */
	info->TablesPerSpareRegion = k * info->FullTablesPerSpareRegion;
	info->SpareSpaceDepthPerRegionInSUs = (r * info->TablesPerSpareRegion / (v - 1)) * layoutPtr->SUsPerPU;
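	/*
	 * Rationale (sketch):  each table places r PUs on every column, so
	 * when one column fails each of the surviving v-1 columns must
	 * absorb r/(v-1) spare PUs per table, i.e.
	 * r * TablesPerSpareRegion / (v-1) PUs per spare region; the choice
	 * of FullTablesPerSpareRegion above makes that quotient an integer.
	 */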

	/* check to make sure the block design is sufficiently small */
	if ((raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
		if (info->FullTableDepthInPUs * layoutPtr->SUsPerPU + info->SpareSpaceDepthPerRegionInSUs > layoutPtr->stripeUnitsPerDisk) {
			RF_ERRORMSG3("RAID: config error: Full Table depth (%d) + Spare Space (%d) larger than disk size (%d) (BD too big)\n",
			    (int) info->FullTableDepthInPUs,
			    (int) info->SpareSpaceDepthPerRegionInSUs,
			    (int) layoutPtr->stripeUnitsPerDisk);
			return (EINVAL);
		}
	} else {
		if (info->TableDepthInPUs * layoutPtr->SUsPerPU > layoutPtr->stripeUnitsPerDisk) {
			RF_ERRORMSG2("RAID: config error: Table depth (%d) larger than disk size (%d) (BD too big)\n",
			    (int) (info->TableDepthInPUs * layoutPtr->SUsPerPU),
			    (int) layoutPtr->stripeUnitsPerDisk);
			return (EINVAL);
		}
	}


	/* compute the size of each disk, and the number of tables in the last
	 * fulltable (which need not be complete) */
	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {

		PUsPerDisk = layoutPtr->stripeUnitsPerDisk / layoutPtr->SUsPerPU;
		spareRegionDepthInPUs = (info->TablesPerSpareRegion * info->TableDepthInPUs +
		    (info->TablesPerSpareRegion * info->TableDepthInPUs) / (v - 1));
		info->SpareRegionDepthInSUs = spareRegionDepthInPUs * layoutPtr->SUsPerPU;

		numCompleteSpareRegionsPerDisk = PUsPerDisk / spareRegionDepthInPUs;
		info->NumCompleteSRs = numCompleteSpareRegionsPerDisk;
		extraPUsPerDisk = PUsPerDisk % spareRegionDepthInPUs;

		/* assume conservatively that we need the full amount of spare
		 * space in one region in order to provide spares for the
		 * partial spare region at the end of the array.  We set "i"
		 * to the number of tables in the partial spare region.  This
		 * may actually include some fulltables. */
		extraPUsPerDisk -= (info->SpareSpaceDepthPerRegionInSUs / layoutPtr->SUsPerPU);
		if (extraPUsPerDisk <= 0)
			i = 0;
		else
			i = extraPUsPerDisk / info->TableDepthInPUs;

		complete_FT_count = raidPtr->numRow * (numCompleteSpareRegionsPerDisk * (info->TablesPerSpareRegion / k) + i / k);
		info->FullTableLimitSUID = complete_FT_count * info->SUsPerFullTable;
		info->ExtraTablesPerDisk = i % k;

		/* note that in the last spare region, the spare space is
		 * complete even though data/parity space is not */
		totSparePUsPerDisk = (numCompleteSpareRegionsPerDisk + 1) * (info->SpareSpaceDepthPerRegionInSUs / layoutPtr->SUsPerPU);
		info->TotSparePUsPerDisk = totSparePUsPerDisk;

		layoutPtr->stripeUnitsPerDisk =
		    ((complete_FT_count / raidPtr->numRow) * info->FullTableDepthInPUs +	/* data & parity space */
		    info->ExtraTablesPerDisk * info->TableDepthInPUs +
		    totSparePUsPerDisk	/* spare space */
		    ) * layoutPtr->SUsPerPU;
		layoutPtr->dataStripeUnitsPerDisk =
		    (complete_FT_count * info->FullTableDepthInPUs + info->ExtraTablesPerDisk * info->TableDepthInPUs)
		    * layoutPtr->SUsPerPU * (k - 1) / k;
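		/*
		 * The (k - 1) / k factor above discounts the one parity unit
		 * in each k-unit stripe, leaving only user-data stripe units.
		 */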

	} else {
		/* non-dist spare case:  force each disk to contain an
		 * integral number of tables */
		layoutPtr->stripeUnitsPerDisk /= (info->TableDepthInPUs * layoutPtr->SUsPerPU);
		layoutPtr->stripeUnitsPerDisk *= (info->TableDepthInPUs * layoutPtr->SUsPerPU);

		/* compute the number of tables in the last fulltable, which
		 * need not be complete */
		complete_FT_count =
		    ((layoutPtr->stripeUnitsPerDisk / layoutPtr->SUsPerPU) / info->FullTableDepthInPUs) * raidPtr->numRow;

		info->FullTableLimitSUID = complete_FT_count * info->SUsPerFullTable;
		info->ExtraTablesPerDisk =
		    ((layoutPtr->stripeUnitsPerDisk / layoutPtr->SUsPerPU) / info->TableDepthInPUs) % k;
	}

	raidPtr->sectorsPerDisk = layoutPtr->stripeUnitsPerDisk * layoutPtr->sectorsPerStripeUnit;

	/* find the disk offset of the stripe unit where the last fulltable
	 * starts */
	numCompleteFullTablesPerDisk = complete_FT_count / raidPtr->numRow;
	diskOffsetOfLastFullTableInSUs = numCompleteFullTablesPerDisk * info->FullTableDepthInPUs * layoutPtr->SUsPerPU;
	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
		SpareSpaceInSUs = numCompleteSpareRegionsPerDisk * info->SpareSpaceDepthPerRegionInSUs;
		diskOffsetOfLastFullTableInSUs += SpareSpaceInSUs;
		info->DiskOffsetOfLastSpareSpaceChunkInSUs =
		    diskOffsetOfLastFullTableInSUs + info->ExtraTablesPerDisk * info->TableDepthInPUs * layoutPtr->SUsPerPU;
	}
	info->DiskOffsetOfLastFullTableInSUs = diskOffsetOfLastFullTableInSUs;
	info->numCompleteFullTablesPerDisk = numCompleteFullTablesPerDisk;

	/* 4.  create and initialize the lookup tables */
	info->LayoutTable = rf_make_2d_array(b, k, raidPtr->cleanupList);
	if (info->LayoutTable == NULL)
		return (ENOMEM);
	info->OffsetTable = rf_make_2d_array(b, k, raidPtr->cleanupList);
	if (info->OffsetTable == NULL)
		return (ENOMEM);
	info->BlockTable = rf_make_2d_array(info->TableDepthInPUs * layoutPtr->SUsPerPU, raidPtr->numCol, raidPtr->cleanupList);
	if (info->BlockTable == NULL)
		return (ENOMEM);

	first_avail_slot = rf_make_1d_array(v, NULL);
	if (first_avail_slot == NULL)
		return (ENOMEM);

	for (i = 0; i < b; i++)
		for (j = 0; j < k; j++)
			info->LayoutTable[i][j] = *cfgBuf++;

	/* initialize offset table */
	for (i = 0; i < b; i++)
		for (j = 0; j < k; j++) {
			info->OffsetTable[i][j] = first_avail_slot[info->LayoutTable[i][j]];
			first_avail_slot[info->LayoutTable[i][j]]++;
		}
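	/*
	 * OffsetTable[i][j] now records how many earlier tuple entries were
	 * already placed on disk LayoutTable[i][j], i.e. the PU offset
	 * (within one table) at which block i, position j lands on that disk.
	 */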

	/* initialize block table */
	for (SUID = l = 0; l < layoutPtr->SUsPerPU; l++) {
		for (i = 0; i < b; i++) {
			for (j = 0; j < k; j++) {
				info->BlockTable[(info->OffsetTable[i][j] * layoutPtr->SUsPerPU) + l]
				    [info->LayoutTable[i][j]] = SUID;
			}
			SUID++;
		}
	}
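	/*
	 * BlockTable is the inverse of the two tables above:  indexed by
	 * [SU offset within a table on a disk][column], it identifies the
	 * stripe (within that table) whose unit occupies that position.
	 */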

	rf_free_1d_array(first_avail_slot, v);

	/* 5.  set up the remaining redundant-but-useful parameters */

	raidPtr->totalSectors = (k * complete_FT_count + raidPtr->numRow * info->ExtraTablesPerDisk) *
	    info->SUsPerTable * layoutPtr->sectorsPerStripeUnit;
	layoutPtr->numStripe = (raidPtr->totalSectors / layoutPtr->sectorsPerStripeUnit) / (k - 1);

	/* strange evaluation order below to try and minimize overflow
	 * problems */

	layoutPtr->dataSectorsPerStripe = (k - 1) * layoutPtr->sectorsPerStripeUnit;
	layoutPtr->bytesPerStripeUnit = layoutPtr->sectorsPerStripeUnit << raidPtr->logBytesPerSector;
	layoutPtr->numDataCol = k - 1;
	layoutPtr->numParityCol = 1;

	return (0);
}
/* declustering with distributed sparing */
static void rf_ShutdownDeclusteredDS(RF_ThreadArg_t);
static void
rf_ShutdownDeclusteredDS(arg)
	RF_ThreadArg_t arg;
{
	RF_DeclusteredConfigInfo_t *info;
	RF_Raid_t *raidPtr;

	raidPtr = (RF_Raid_t *) arg;
	info = (RF_DeclusteredConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;
	if (info->SpareTable)
		rf_FreeSpareTable(raidPtr);
}

int
rf_ConfigureDeclusteredDS(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	int     rc;

	rc = rf_ConfigureDeclustered(listp, raidPtr, cfgPtr);
	if (rc)
		return (rc);
	rc = rf_ShutdownCreate(listp, rf_ShutdownDeclusteredDS, raidPtr);
	if (rc) {
		RF_ERRORMSG1("Got %d adding shutdown event for DeclusteredDS\n", rc);
		rf_ShutdownDeclusteredDS(raidPtr);
		return (rc);
	}
	return (0);
}

void
rf_MapSectorDeclustered(raidPtr, raidSector, row, col, diskSector, remap)
	RF_Raid_t *raidPtr;
	RF_RaidAddr_t raidSector;
	RF_RowCol_t *row;
	RF_RowCol_t *col;
	RF_SectorNum_t *diskSector;
	int     remap;
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;
	RF_StripeNum_t SUID = raidSector / layoutPtr->sectorsPerStripeUnit;
	RF_StripeNum_t FullTableID, FullTableOffset, TableID, TableOffset;
	RF_StripeNum_t BlockID, BlockOffset, RepIndex;
	RF_StripeCount_t sus_per_fulltable = info->SUsPerFullTable;
	RF_StripeCount_t fulltable_depth = info->FullTableDepthInPUs * layoutPtr->SUsPerPU;
	RF_StripeNum_t base_suid = 0, outSU, SpareRegion = 0, SpareSpace = 0;

	rf_decluster_adjust_params(layoutPtr, &SUID, &sus_per_fulltable, &fulltable_depth, &base_suid);

	FullTableID = SUID / sus_per_fulltable;	/* fulltable ID within array
						 * (across rows) */
	if (raidPtr->numRow == 1)
		*row = 0;	/* avoid a mod and a div in the common case */
	else {
		*row = FullTableID % raidPtr->numRow;
		FullTableID /= raidPtr->numRow;	/* convert to fulltable ID on
						 * this disk */
	}
	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
		SpareRegion = FullTableID / info->FullTablesPerSpareRegion;
		SpareSpace = SpareRegion * info->SpareSpaceDepthPerRegionInSUs;
	}
	FullTableOffset = SUID % sus_per_fulltable;
	TableID = FullTableOffset / info->SUsPerTable;
	TableOffset = FullTableOffset - TableID * info->SUsPerTable;
	BlockID = TableOffset / info->PUsPerBlock;
	BlockOffset = TableOffset - BlockID * info->PUsPerBlock;
	BlockID %= info->BlocksPerTable;
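	/*
	 * With parity rotation enabled (the default), parity occupies tuple
	 * position RepIndex = PUsPerBlock - TableID, so data offsets at or
	 * beyond RepIndex are bumped by one below to skip over it.  With
	 * noRotate, parity always sits in the last position and no
	 * adjustment is needed.
	 */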
	RepIndex = info->PUsPerBlock - TableID;
	if (!raidPtr->noRotate)
		BlockOffset += ((BlockOffset >= RepIndex) ? 1 : 0);
	*col = info->LayoutTable[BlockID][BlockOffset];

	/* remap to distributed spare space if indicated */
	if (remap) {
		RF_ASSERT(raidPtr->Disks[*row][*col].status == rf_ds_reconstructing || raidPtr->Disks[*row][*col].status == rf_ds_dist_spared ||
		    (rf_copyback_in_progress && raidPtr->Disks[*row][*col].status == rf_ds_optimal));
		rf_remap_to_spare_space(layoutPtr, info, *row, FullTableID, TableID, BlockID, (base_suid) ? 1 : 0, SpareRegion, col, &outSU);
	} else {

		outSU = base_suid;
		outSU += FullTableID * fulltable_depth;	/* offs to strt of FT */
		outSU += SpareSpace;	/* skip rsvd spare space */
		outSU += TableID * info->TableDepthInPUs * layoutPtr->SUsPerPU;	/* offs to strt of tble */
		outSU += info->OffsetTable[BlockID][BlockOffset] * layoutPtr->SUsPerPU;	/* offs to the PU */
	}
	outSU += TableOffset / (info->BlocksPerTable * info->PUsPerBlock);	/* offs to the SU within
										 * a PU */

	/* convert SUs to sectors, and, if not aligned to SU boundary, add in
	 * offset to sector.  */
	*diskSector = outSU * layoutPtr->sectorsPerStripeUnit + (raidSector % layoutPtr->sectorsPerStripeUnit);

	RF_ASSERT(*col != -1);
}


/* prototyping this inexplicably causes the compile of the layout table (rf_layout.c) to fail */
void
rf_MapParityDeclustered(
    RF_Raid_t * raidPtr,
    RF_RaidAddr_t raidSector,
    RF_RowCol_t * row,
    RF_RowCol_t * col,
    RF_SectorNum_t * diskSector,
    int remap)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;
	RF_StripeNum_t SUID = raidSector / layoutPtr->sectorsPerStripeUnit;
	RF_StripeNum_t FullTableID, FullTableOffset, TableID, TableOffset;
	RF_StripeNum_t BlockID, BlockOffset, RepIndex;
	RF_StripeCount_t sus_per_fulltable = info->SUsPerFullTable;
	RF_StripeCount_t fulltable_depth = info->FullTableDepthInPUs * layoutPtr->SUsPerPU;
	RF_StripeNum_t base_suid = 0, outSU, SpareRegion = 0, SpareSpace = 0;

	rf_decluster_adjust_params(layoutPtr, &SUID, &sus_per_fulltable, &fulltable_depth, &base_suid);

	/* compute row & (possibly) spare space exactly as before */
	FullTableID = SUID / sus_per_fulltable;
	if (raidPtr->numRow == 1)
		*row = 0;	/* avoid a mod and a div in the common case */
	else {
		*row = FullTableID % raidPtr->numRow;
		FullTableID /= raidPtr->numRow;	/* convert to fulltable ID on
						 * this disk */
	}
	if ((raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
		SpareRegion = FullTableID / info->FullTablesPerSpareRegion;
		SpareSpace = SpareRegion * info->SpareSpaceDepthPerRegionInSUs;
	}
	/* compute BlockID and RepIndex exactly as before */
	FullTableOffset = SUID % sus_per_fulltable;
	TableID = FullTableOffset / info->SUsPerTable;
	TableOffset = FullTableOffset - TableID * info->SUsPerTable;
	/* TableOffset     = FullTableOffset % info->SUsPerTable; */
	/* BlockID         = (TableOffset / info->PUsPerBlock) %
	 * info->BlocksPerTable; */
	BlockID = TableOffset / info->PUsPerBlock;
	/* BlockOffset     = TableOffset % info->PUsPerBlock; */
	BlockOffset = TableOffset - BlockID * info->PUsPerBlock;
	BlockID %= info->BlocksPerTable;

	/* the parity block is in the position indicated by RepIndex */
	RepIndex = (raidPtr->noRotate) ? info->PUsPerBlock : info->PUsPerBlock - TableID;
	*col = info->LayoutTable[BlockID][RepIndex];

	if (remap) {
		RF_ASSERT(raidPtr->Disks[*row][*col].status == rf_ds_reconstructing || raidPtr->Disks[*row][*col].status == rf_ds_dist_spared ||
		    (rf_copyback_in_progress && raidPtr->Disks[*row][*col].status == rf_ds_optimal));
		rf_remap_to_spare_space(layoutPtr, info, *row, FullTableID, TableID, BlockID, (base_suid) ? 1 : 0, SpareRegion, col, &outSU);
	} else {

		/* compute sector as before, except use RepIndex instead of
		 * BlockOffset */
		outSU = base_suid;
		outSU += FullTableID * fulltable_depth;
		outSU += SpareSpace;	/* skip rsvd spare space */
		outSU += TableID * info->TableDepthInPUs * layoutPtr->SUsPerPU;
		outSU += info->OffsetTable[BlockID][RepIndex] * layoutPtr->SUsPerPU;
	}

	outSU += TableOffset / (info->BlocksPerTable * info->PUsPerBlock);
	*diskSector = outSU * layoutPtr->sectorsPerStripeUnit + (raidSector % layoutPtr->sectorsPerStripeUnit);

	RF_ASSERT(*col != -1);
}
/* returns an array of ints identifying the disks that comprise the stripe containing the indicated address.
 * the caller must _never_ attempt to modify this array.
 */
void
rf_IdentifyStripeDeclustered(
    RF_Raid_t * raidPtr,
    RF_RaidAddr_t addr,
    RF_RowCol_t ** diskids,
    RF_RowCol_t * outRow)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;
	RF_StripeCount_t sus_per_fulltable = info->SUsPerFullTable;
	RF_StripeCount_t fulltable_depth = info->FullTableDepthInPUs * layoutPtr->SUsPerPU;
	RF_StripeNum_t base_suid = 0;
	RF_StripeNum_t SUID = rf_RaidAddressToStripeUnitID(layoutPtr, addr);
	RF_StripeNum_t stripeID, FullTableID;
	int     tableOffset;

	rf_decluster_adjust_params(layoutPtr, &SUID, &sus_per_fulltable, &fulltable_depth, &base_suid);
	FullTableID = SUID / sus_per_fulltable;	/* fulltable ID within array
						 * (across rows) */
	*outRow = FullTableID % raidPtr->numRow;
	stripeID = rf_StripeUnitIDToStripeID(layoutPtr, SUID);	/* find stripe offset
								 * into array */
	tableOffset = (stripeID % info->BlocksPerTable);	/* find offset into
								 * block design table */
	*diskids = info->LayoutTable[tableOffset];
}
/* This returns the default head-separation limit, which is measured
 * in "required units for reconstruction".  Each time a disk fetches
 * a unit, it bumps a counter.  The head-sep code prohibits any disk
 * from getting more than headSepLimit counter values ahead of any
 * other.
 *
 * We assume here that the number of floating recon buffers is already
 * set.  There are r stripes to be reconstructed in each table, and so
 * if we have a total of B buffers, we can have at most B/r tables
 * under recon at any one time.  In each table, lambda units are required
 * from each disk, so given B buffers, the head sep limit has to be
 * (lambda*B)/r units.  We subtract one to avoid weird boundary cases.
 *
 * For example, suppose we're given 50 buffers, r=19, and lambda=4 as in
 * the 20.5 design.  There are 19 stripes/table to be reconstructed, so
 * we can have 50/19 tables concurrently under reconstruction, which means
 * we can allow the fastest disk to get 50/19 tables ahead of the slowest
 * disk.  There are lambda "required units" for each disk, so the fastest
 * disk can get 4*50/19 = 10 counter values ahead of the slowest.
 *
 * If numBufsToAccumulate is not 1, we need to limit the head sep further
 * because multiple bufs will be required for each stripe under recon.
 */
RF_HeadSepLimit_t
rf_GetDefaultHeadSepLimitDeclustered(
    RF_Raid_t * raidPtr)
{
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;

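	/*
	 * TableDepthInPUs was computed as (b * k) / v, which for a valid
	 * block design equals r, so the expression below is the
	 * (lambda * B) / r limit described above, further divided by
	 * rf_numBufsToAccumulate.
	 */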
	return (info->Lambda * raidPtr->numFloatingReconBufs / info->TableDepthInPUs / rf_numBufsToAccumulate);
}
/* Returns the default number of recon buffers to use.  The value
 * is somewhat arbitrary...it's intended to be large enough to allow
 * for a reasonably large head-sep limit, but small enough that you
 * don't use up all your system memory with buffers.
 */
int
rf_GetDefaultNumFloatingReconBuffersDeclustered(RF_Raid_t * raidPtr)
{
	return (100 * rf_numBufsToAccumulate);
}
/* Sectors in the last fulltable of the array need to be handled
 * specially since this fulltable can be incomplete.  This function
 * changes the values of certain params to handle this.
 *
 * The idea here is that MapSector et al. figure out which disk the
 * addressed unit lives on by computing the modulus of the unit number
 * with the number of units per fulltable, table, etc.  In the last
 * fulltable, there are fewer units per fulltable, so we need to adjust
 * the number of user data units per fulltable to reflect this.
 *
 * So, we (1) convert the fulltable size and depth parameters to
 * the size of the partial fulltable at the end, (2) compute the
 * disk sector offset where this fulltable starts, and (3) convert
 * the user's stripe unit number from an offset into the array to
 * an offset into the last fulltable.
 */
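/*
 * For illustration (hypothetical numbers):  if the complete fulltables cover
 * SUIDs 0..999 (FullTableLimitSUID == 1000) and an access targets SUID 1023,
 * the code below rebases it to SUID 23 within the trailing partial fulltable,
 * shrinks sus_per_fulltable and fulltable_depth to that fulltable's actual
 * size, and points base_suid at the disk offset where it begins.
 */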
void
rf_decluster_adjust_params(
    RF_RaidLayout_t * layoutPtr,
    RF_StripeNum_t * SUID,
    RF_StripeCount_t * sus_per_fulltable,
    RF_StripeCount_t * fulltable_depth,
    RF_StripeNum_t * base_suid)
{
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;
#if defined(__NetBSD__) && defined(_KERNEL)
	/* Nothing! */
#else
	char    pc = layoutPtr->map->parityConfig;
#endif

	if (*SUID >= info->FullTableLimitSUID) {
		/* new full table size is size of last full table on disk */
		*sus_per_fulltable = info->ExtraTablesPerDisk * info->SUsPerTable;

		/* new full table depth is corresponding depth */
		*fulltable_depth = info->ExtraTablesPerDisk * info->TableDepthInPUs * layoutPtr->SUsPerPU;

		/* set up the new base offset */
		*base_suid = info->DiskOffsetOfLastFullTableInSUs;

		/* convert the user's array address to an offset into the last
		 * fulltable */
		*SUID -= info->FullTableLimitSUID;
	}
}
/*
 * Map a stripe ID to a parity stripe ID.
 * See comment above RaidAddressToParityStripeID in layout.c.
 */
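/*
 * For illustration (hypothetical parameters):  with BlocksPerTable == 5 and
 * SUsPerPU == 2, stripeID 13 yields psID (13/10)*5 + 13%5 == 8 with
 * which_ru (13%10)/5 == 0, and stripeID 18 yields the same psID with
 * which_ru == 1; the two stripes are the two recon units of parity stripe 8.
 */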
void
rf_MapSIDToPSIDDeclustered(
    RF_RaidLayout_t * layoutPtr,
    RF_StripeNum_t stripeID,
    RF_StripeNum_t * psID,
    RF_ReconUnitNum_t * which_ru)
{
	RF_DeclusteredConfigInfo_t *info;

	info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;

	*psID = (stripeID / (layoutPtr->SUsPerPU * info->BlocksPerTable))
	    * info->BlocksPerTable + (stripeID % info->BlocksPerTable);
	*which_ru = (stripeID % (info->BlocksPerTable * layoutPtr->SUsPerPU))
	    / info->BlocksPerTable;
	RF_ASSERT((*which_ru) < layoutPtr->SUsPerPU / layoutPtr->SUsPerRU);
}
/*
 * Called from MapSector and MapParity to retarget an access at the spare unit.
 * Modifies the "col" and "outSU" parameters only.
 */
void
rf_remap_to_spare_space(
    RF_RaidLayout_t * layoutPtr,
    RF_DeclusteredConfigInfo_t * info,
    RF_RowCol_t row,
    RF_StripeNum_t FullTableID,
    RF_StripeNum_t TableID,
    RF_SectorNum_t BlockID,
    RF_StripeNum_t base_suid,
    RF_StripeNum_t SpareRegion,
    RF_RowCol_t * outCol,
    RF_StripeNum_t * outSU)
{
	RF_StripeNum_t ftID, spareTableStartSU, TableInSpareRegion, lastSROffset,
	        which_ft;

	/*
         * note that FullTableID and hence SpareRegion may have gotten
         * tweaked by rf_decluster_adjust_params. We detect this by
         * noticing that base_suid is not 0.
         */
	if (base_suid == 0) {
		ftID = FullTableID;
	} else {
		/*
	         * There may be > 1.0 full tables in the last (i.e. partial)
	         * spare region.  find out which of these we're in.
	         */
		lastSROffset = info->NumCompleteSRs * info->SpareRegionDepthInSUs;
		which_ft = (info->DiskOffsetOfLastFullTableInSUs - lastSROffset) / (info->FullTableDepthInPUs * layoutPtr->SUsPerPU);

		/* compute the actual full table ID */
		ftID = info->DiskOffsetOfLastFullTableInSUs / (info->FullTableDepthInPUs * layoutPtr->SUsPerPU) + which_ft;
		SpareRegion = info->NumCompleteSRs;
	}
	TableInSpareRegion = (ftID * info->NumParityReps + TableID) % info->TablesPerSpareRegion;

	*outCol = info->SpareTable[TableInSpareRegion][BlockID].spareDisk;
	RF_ASSERT(*outCol != -1);

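	/*
	 * The spare area of a complete spare region begins where that
	 * region's data/parity area ends; for the trailing partial region it
	 * begins just after the extra tables at the end of the disk.  The
	 * offset recorded in the spare table is then added to that base.
	 */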
	spareTableStartSU = (SpareRegion == info->NumCompleteSRs) ?
	    info->DiskOffsetOfLastFullTableInSUs + info->ExtraTablesPerDisk * info->TableDepthInPUs * layoutPtr->SUsPerPU :
	    (SpareRegion + 1) * info->SpareRegionDepthInSUs - info->SpareSpaceDepthPerRegionInSUs;
	*outSU = spareTableStartSU + info->SpareTable[TableInSpareRegion][BlockID].spareBlockOffsetInSUs;
	if (*outSU >= layoutPtr->stripeUnitsPerDisk) {
		printf("rf_remap_to_spare_space: invalid remapped disk SU offset %ld\n", (long) *outSU);
	}
}

int
rf_InstallSpareTable(
    RF_Raid_t * raidPtr,
    RF_RowCol_t frow,
    RF_RowCol_t fcol)
{
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;
	RF_SparetWait_t *req;
	int     retcode;

	RF_Malloc(req, sizeof(*req), (RF_SparetWait_t *));
	req->C = raidPtr->numCol;
	req->G = raidPtr->Layout.numDataCol + raidPtr->Layout.numParityCol;
	req->fcol = fcol;
	req->SUsPerPU = raidPtr->Layout.SUsPerPU;
	req->TablesPerSpareRegion = info->TablesPerSpareRegion;
	req->BlocksPerTable = info->BlocksPerTable;
	req->TableDepthInPUs = info->TableDepthInPUs;
	req->SpareSpaceDepthPerRegionInSUs = info->SpareSpaceDepthPerRegionInSUs;

	retcode = rf_GetSpareTableFromDaemon(req);
	RF_ASSERT(!retcode);	/* XXX -- fix this to recover gracefully --
				 * XXX */
	return (retcode);
}
/*
 * Invoked via ioctl to install a spare table in the kernel.
 */
int
rf_SetSpareTable(raidPtr, data)
	RF_Raid_t *raidPtr;
	void   *data;
{
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;
	RF_SpareTableEntry_t **ptrs;
	int     i, retcode;

	/* what we need to copyin is a 2-d array, so first copyin the user
	 * pointers to the rows in the table */
	RF_Malloc(ptrs, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *), (RF_SpareTableEntry_t **));
	retcode = copyin((caddr_t) data, (caddr_t) ptrs, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *));

	if (retcode)
		return (retcode);

	/* now allocate kernel space for the row pointers */
	RF_Malloc(info->SpareTable, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *), (RF_SpareTableEntry_t **));

	/* now allocate kernel space for each row in the table, and copy it in
	 * from user space */
	for (i = 0; i < info->TablesPerSpareRegion; i++) {
		RF_Malloc(info->SpareTable[i], info->BlocksPerTable * sizeof(RF_SpareTableEntry_t), (RF_SpareTableEntry_t *));
		retcode = copyin(ptrs[i], info->SpareTable[i], info->BlocksPerTable * sizeof(RF_SpareTableEntry_t));
		if (retcode) {
			info->SpareTable = NULL;	/* blow off the memory
							 * we've allocated */
			return (retcode);
		}
	}

	/* free up the temporary array we used */
	RF_Free(ptrs, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *));

	return (0);
}

RF_ReconUnitCount_t
rf_GetNumSpareRUsDeclustered(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;

	return (((RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo)->TotSparePUsPerDisk);
}


void
rf_FreeSpareTable(raidPtr)
	RF_Raid_t *raidPtr;
{
	long    i;
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;
	RF_SpareTableEntry_t **table = info->SpareTable;

	for (i = 0; i < info->TablesPerSpareRegion; i++) {
		RF_Free(table[i], info->BlocksPerTable * sizeof(RF_SpareTableEntry_t));
	}
	RF_Free(table, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *));
	info->SpareTable = (RF_SpareTableEntry_t **) NULL;
}
745