/*	$NetBSD: rf_decluster.c,v 1.11 2002/09/23 02:40:08 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*----------------------------------------------------------------------
 *
 * rf_decluster.c -- code related to the declustered layout
 *
 * Created 10-21-92 (MCH)
 *
 * Nov 93:  adding support for distributed sparing.  This code is a little
 *          complex:  the basic layout used is as follows:
 *          let F = (v-1)/GCD(r,v-1).  The spare space for each set of
 *          F consecutive fulltables is grouped together and placed after
 *          that set of tables.
 *                   +------------------------------+
 *                   |        F fulltables          |
 *                   |        Spare Space           |
 *                   |        F fulltables          |
 *                   |        Spare Space           |
 *                   |            ...               |
 *                   +------------------------------+
 *
 *--------------------------------------------------------------------*/
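
/*
 * Worked example (illustrative parameters only, not from any shipped
 * configuration): with v = 21 disks and r = 5, GCD(r, v-1) = GCD(5, 20) = 5,
 * so F = (v-1)/GCD(r,v-1) = 20/5 = 4, i.e. every four consecutive fulltables
 * on a disk are followed by the spare space that serves them.
 */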

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_decluster.c,v 1.11 2002/09/23 02:40:08 oster Exp $");

#include <dev/raidframe/raidframevar.h>

#include "rf_archs.h"
#include "rf_raid.h"
#include "rf_decluster.h"
#include "rf_debugMem.h"
#include "rf_utils.h"
#include "rf_alloclist.h"
#include "rf_general.h"
#include "rf_shutdown.h"


extern int rf_copyback_in_progress;	/* debug only */

/* found in rf_kintf.c */
int     rf_GetSpareTableFromDaemon(RF_SparetWait_t * req);

#if (RF_INCLUDE_PARITY_DECLUSTERING > 0) || (RF_INCLUDE_PARITY_DECLUSTERING_PQ > 0)

/* configuration code */

int
rf_ConfigureDeclustered(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	int     b, v, k, r, lambda;	/* block design params */
	int     i, j;
	RF_RowCol_t *first_avail_slot;
	RF_StripeCount_t complete_FT_count, numCompleteFullTablesPerDisk;
	RF_DeclusteredConfigInfo_t *info;
	RF_StripeCount_t PUsPerDisk, spareRegionDepthInPUs, numCompleteSpareRegionsPerDisk,
	        extraPUsPerDisk;
	RF_StripeCount_t totSparePUsPerDisk;
	RF_SectorNum_t diskOffsetOfLastFullTableInSUs;
	RF_SectorCount_t SpareSpaceInSUs;
	char   *cfgBuf = (char *) (cfgPtr->layoutSpecific);
	RF_StripeNum_t l, SUID;

	SUID = l = 0;
	numCompleteSpareRegionsPerDisk = 0;

	/* 1. create layout specific structure */
	RF_MallocAndAdd(info, sizeof(RF_DeclusteredConfigInfo_t), (RF_DeclusteredConfigInfo_t *), raidPtr->cleanupList);
	if (info == NULL)
		return (ENOMEM);
	layoutPtr->layoutSpecificInfo = (void *) info;
	info->SpareTable = NULL;

	/* 2. extract parameters from the config structure */
	if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {
		(void)memcpy(info->sparemap_fname, cfgBuf, RF_SPAREMAP_NAME_LEN);
	}
	cfgBuf += RF_SPAREMAP_NAME_LEN;

	b = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	v = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	k = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	r = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	lambda = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	raidPtr->noRotate = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
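
	/*
	 * Descriptive sketch (not a struct that exists in the headers) of the
	 * layout-specific config block as consumed above and by the layout
	 * table copy further below:
	 *
	 *   char sparemap_fname[RF_SPAREMAP_NAME_LEN];  (used only with
	 *                                                RF_DISTRIBUTE_SPARE)
	 *   int  b, v, k, r, lambda;                    (block design params)
	 *   int  noRotate;                              (disable parity rotation)
	 *   char layoutTable[b * k];                    (one column index per
	 *                                                block design entry)
	 */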

	/* the sparemaps are generated assuming that parity is rotated, so we
	 * issue a warning if both distributed sparing and no-rotate are on at
	 * the same time */
	if ((layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) && raidPtr->noRotate) {
		RF_ERRORMSG("Warning:  distributed sparing specified without parity rotation.\n");
	}
	if (raidPtr->numCol != v) {
		RF_ERRORMSG2("RAID: config error: table element count (%d) not equal to no. of cols (%d)\n", v, raidPtr->numCol);
		return (EINVAL);
	}
	/* 3.  set up the values used in the mapping code */
	info->BlocksPerTable = b;
	info->Lambda = lambda;
	info->NumParityReps = info->groupSize = k;
	info->SUsPerTable = b * (k - 1) * layoutPtr->SUsPerPU;	/* b blks, k-1 SUs each */
	info->SUsPerFullTable = k * info->SUsPerTable;	/* rot k times */
	info->PUsPerBlock = k - 1;
	info->SUsPerBlock = info->PUsPerBlock * layoutPtr->SUsPerPU;
	info->TableDepthInPUs = (b * k) / v;
	info->FullTableDepthInPUs = info->TableDepthInPUs * k;	/* k repetitions */

	/* used only in distributed sparing case */
	info->FullTablesPerSpareRegion = (v - 1) / rf_gcd(r, v - 1);	/* (v-1)/gcd fulltables */
	info->TablesPerSpareRegion = k * info->FullTablesPerSpareRegion;
	info->SpareSpaceDepthPerRegionInSUs = (r * info->TablesPerSpareRegion / (v - 1)) * layoutPtr->SUsPerPU;
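
	/*
	 * Worked example with illustrative parameters only: the complete
	 * (b=7, v=7, k=3, r=3, lambda=1) block design with SUsPerPU == 1
	 * gives SUsPerTable = 7*2 = 14, SUsPerFullTable = 3*14 = 42,
	 * PUsPerBlock = 2, TableDepthInPUs = 21/7 = 3, FullTableDepthInPUs =
	 * 9, FullTablesPerSpareRegion = 6/gcd(3,6) = 2, TablesPerSpareRegion
	 * = 6, and SpareSpaceDepthPerRegionInSUs = 3*6/6 = 3.
	 */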

	/* check to make sure the block design is sufficiently small */
	if ((raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
		if (info->FullTableDepthInPUs * layoutPtr->SUsPerPU + info->SpareSpaceDepthPerRegionInSUs > layoutPtr->stripeUnitsPerDisk) {
			RF_ERRORMSG3("RAID: config error: Full Table depth (%d) + Spare Space (%d) larger than disk size (%d) (BD too big)\n",
			    (int) info->FullTableDepthInPUs,
			    (int) info->SpareSpaceDepthPerRegionInSUs,
			    (int) layoutPtr->stripeUnitsPerDisk);
			return (EINVAL);
		}
	} else {
		if (info->TableDepthInPUs * layoutPtr->SUsPerPU > layoutPtr->stripeUnitsPerDisk) {
			RF_ERRORMSG2("RAID: config error: Table depth (%d) larger than disk size (%d) (BD too big)\n",
			    (int) (info->TableDepthInPUs * layoutPtr->SUsPerPU),
			    (int) layoutPtr->stripeUnitsPerDisk);
			return (EINVAL);
		}
	}


	/* compute the size of each disk, and the number of tables in the last
	 * fulltable (which need not be complete) */
	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {

		PUsPerDisk = layoutPtr->stripeUnitsPerDisk / layoutPtr->SUsPerPU;
		spareRegionDepthInPUs = (info->TablesPerSpareRegion * info->TableDepthInPUs +
		    (info->TablesPerSpareRegion * info->TableDepthInPUs) / (v - 1));
		info->SpareRegionDepthInSUs = spareRegionDepthInPUs * layoutPtr->SUsPerPU;

		numCompleteSpareRegionsPerDisk = PUsPerDisk / spareRegionDepthInPUs;
		info->NumCompleteSRs = numCompleteSpareRegionsPerDisk;
		extraPUsPerDisk = PUsPerDisk % spareRegionDepthInPUs;

		/* assume conservatively that we need the full amount of spare
		 * space in one region in order to provide spares for the
		 * partial spare region at the end of the array.  We set "i"
		 * to the number of tables in the partial spare region.  This
		 * may actually include some fulltables. */
		extraPUsPerDisk -= (info->SpareSpaceDepthPerRegionInSUs / layoutPtr->SUsPerPU);
		if (extraPUsPerDisk <= 0)
			i = 0;
		else
			i = extraPUsPerDisk / info->TableDepthInPUs;

		complete_FT_count = raidPtr->numRow * (numCompleteSpareRegionsPerDisk * (info->TablesPerSpareRegion / k) + i / k);
		info->FullTableLimitSUID = complete_FT_count * info->SUsPerFullTable;
		info->ExtraTablesPerDisk = i % k;

		/* note that in the last spare region, the spare space is
		 * complete even though data/parity space is not */
		totSparePUsPerDisk = (numCompleteSpareRegionsPerDisk + 1) * (info->SpareSpaceDepthPerRegionInSUs / layoutPtr->SUsPerPU);
		info->TotSparePUsPerDisk = totSparePUsPerDisk;

		layoutPtr->stripeUnitsPerDisk =
		    ((complete_FT_count / raidPtr->numRow) * info->FullTableDepthInPUs +	/* data & parity space */
		    info->ExtraTablesPerDisk * info->TableDepthInPUs +
		    totSparePUsPerDisk	/* spare space */
		    ) * layoutPtr->SUsPerPU;
		layoutPtr->dataStripeUnitsPerDisk =
		    (complete_FT_count * info->FullTableDepthInPUs + info->ExtraTablesPerDisk * info->TableDepthInPUs)
		    * layoutPtr->SUsPerPU * (k - 1) / k;

	} else {
		/* non-dist spare case:  force each disk to contain an
		 * integral number of tables */
		layoutPtr->stripeUnitsPerDisk /= (info->TableDepthInPUs * layoutPtr->SUsPerPU);
		layoutPtr->stripeUnitsPerDisk *= (info->TableDepthInPUs * layoutPtr->SUsPerPU);

		/* compute the number of tables in the last fulltable, which
		 * need not be complete */
		complete_FT_count =
		    ((layoutPtr->stripeUnitsPerDisk / layoutPtr->SUsPerPU) / info->FullTableDepthInPUs) * raidPtr->numRow;

		info->FullTableLimitSUID = complete_FT_count * info->SUsPerFullTable;
		info->ExtraTablesPerDisk =
		    ((layoutPtr->stripeUnitsPerDisk / layoutPtr->SUsPerPU) / info->TableDepthInPUs) % k;
	}
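
	/*
	 * Illustrative numbers for the non-distributed branch above,
	 * continuing the (b=7, v=7, k=3) example with SUsPerPU == 1:
	 * TableDepthInPUs == 3 and FullTableDepthInPUs == 9, so a disk of
	 * 998 stripe units is rounded down to 996, giving 996/9 == 110
	 * complete fulltables per disk and (996/3) % 3 == 2 extra tables.
	 */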

	raidPtr->sectorsPerDisk = layoutPtr->stripeUnitsPerDisk * layoutPtr->sectorsPerStripeUnit;

	/* find the disk offset of the stripe unit where the last fulltable
	 * starts */
	numCompleteFullTablesPerDisk = complete_FT_count / raidPtr->numRow;
	diskOffsetOfLastFullTableInSUs = numCompleteFullTablesPerDisk * info->FullTableDepthInPUs * layoutPtr->SUsPerPU;
	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
		SpareSpaceInSUs = numCompleteSpareRegionsPerDisk * info->SpareSpaceDepthPerRegionInSUs;
		diskOffsetOfLastFullTableInSUs += SpareSpaceInSUs;
		info->DiskOffsetOfLastSpareSpaceChunkInSUs =
		    diskOffsetOfLastFullTableInSUs + info->ExtraTablesPerDisk * info->TableDepthInPUs * layoutPtr->SUsPerPU;
	}
	info->DiskOffsetOfLastFullTableInSUs = diskOffsetOfLastFullTableInSUs;
	info->numCompleteFullTablesPerDisk = numCompleteFullTablesPerDisk;

	/* 4.  create and initialize the lookup tables */
	info->LayoutTable = rf_make_2d_array(b, k, raidPtr->cleanupList);
	if (info->LayoutTable == NULL)
		return (ENOMEM);
	info->OffsetTable = rf_make_2d_array(b, k, raidPtr->cleanupList);
	if (info->OffsetTable == NULL)
		return (ENOMEM);
	info->BlockTable = rf_make_2d_array(info->TableDepthInPUs * layoutPtr->SUsPerPU, raidPtr->numCol, raidPtr->cleanupList);
	if (info->BlockTable == NULL)
		return (ENOMEM);

	first_avail_slot = rf_make_1d_array(v, NULL);
	if (first_avail_slot == NULL)
		return (ENOMEM);

	for (i = 0; i < b; i++)
		for (j = 0; j < k; j++)
			info->LayoutTable[i][j] = *cfgBuf++;

	/* initialize offset table */
	for (i = 0; i < b; i++)
		for (j = 0; j < k; j++) {
			info->OffsetTable[i][j] = first_avail_slot[info->LayoutTable[i][j]];
			first_avail_slot[info->LayoutTable[i][j]]++;
		}

	/* initialize block table */
	for (SUID = l = 0; l < layoutPtr->SUsPerPU; l++) {
		for (i = 0; i < b; i++) {
			for (j = 0; j < k; j++) {
				info->BlockTable[(info->OffsetTable[i][j] * layoutPtr->SUsPerPU) + l]
				    [info->LayoutTable[i][j]] = SUID;
			}
			SUID++;
		}
	}

	rf_free_1d_array(first_avail_slot, v);

	/* 5.  set up the remaining redundant-but-useful parameters */

	raidPtr->totalSectors = (k * complete_FT_count + raidPtr->numRow * info->ExtraTablesPerDisk) *
	    info->SUsPerTable * layoutPtr->sectorsPerStripeUnit;
	layoutPtr->numStripe = (raidPtr->totalSectors / layoutPtr->sectorsPerStripeUnit) / (k - 1);

	/* strange evaluation order below to try and minimize overflow
	 * problems */

	layoutPtr->dataSectorsPerStripe = (k - 1) * layoutPtr->sectorsPerStripeUnit;
	layoutPtr->numDataCol = k - 1;
	layoutPtr->numParityCol = 1;

	return (0);
}
/* declustering with distributed sparing */
static void rf_ShutdownDeclusteredDS(RF_ThreadArg_t);
static void
rf_ShutdownDeclusteredDS(arg)
	RF_ThreadArg_t arg;
{
	RF_DeclusteredConfigInfo_t *info;
	RF_Raid_t *raidPtr;

	raidPtr = (RF_Raid_t *) arg;
	info = (RF_DeclusteredConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;
	if (info->SpareTable)
		rf_FreeSpareTable(raidPtr);
}

int
rf_ConfigureDeclusteredDS(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	int     rc;

	rc = rf_ConfigureDeclustered(listp, raidPtr, cfgPtr);
	if (rc)
		return (rc);
	rc = rf_ShutdownCreate(listp, rf_ShutdownDeclusteredDS, raidPtr);
	if (rc) {
		RF_ERRORMSG1("Got %d adding shutdown event for DeclusteredDS\n", rc);
		rf_ShutdownDeclusteredDS(raidPtr);
		return (rc);
	}
	return (0);
}

void
rf_MapSectorDeclustered(raidPtr, raidSector, row, col, diskSector, remap)
	RF_Raid_t *raidPtr;
	RF_RaidAddr_t raidSector;
	RF_RowCol_t *row;
	RF_RowCol_t *col;
	RF_SectorNum_t *diskSector;
	int     remap;
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;
	RF_StripeNum_t SUID = raidSector / layoutPtr->sectorsPerStripeUnit;
	RF_StripeNum_t FullTableID, FullTableOffset, TableID, TableOffset;
	RF_StripeNum_t BlockID, BlockOffset, RepIndex;
	RF_StripeCount_t sus_per_fulltable = info->SUsPerFullTable;
	RF_StripeCount_t fulltable_depth = info->FullTableDepthInPUs * layoutPtr->SUsPerPU;
	RF_StripeNum_t base_suid = 0, outSU, SpareRegion = 0, SpareSpace = 0;

	rf_decluster_adjust_params(layoutPtr, &SUID, &sus_per_fulltable, &fulltable_depth, &base_suid);

	FullTableID = SUID / sus_per_fulltable;	/* fulltable ID within array
						 * (across rows) */
	if (raidPtr->numRow == 1)
		*row = 0;	/* avoid a mod and a div in the common case */
	else {
		*row = FullTableID % raidPtr->numRow;
		FullTableID /= raidPtr->numRow;	/* convert to fulltable ID on
						 * this disk */
	}
	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
		SpareRegion = FullTableID / info->FullTablesPerSpareRegion;
		SpareSpace = SpareRegion * info->SpareSpaceDepthPerRegionInSUs;
	}
	FullTableOffset = SUID % sus_per_fulltable;
	TableID = FullTableOffset / info->SUsPerTable;
	TableOffset = FullTableOffset - TableID * info->SUsPerTable;
	BlockID = TableOffset / info->PUsPerBlock;
	BlockOffset = TableOffset - BlockID * info->PUsPerBlock;
	BlockID %= info->BlocksPerTable;
	RepIndex = info->PUsPerBlock - TableID;
	if (!raidPtr->noRotate)
		BlockOffset += ((BlockOffset >= RepIndex) ? 1 : 0);
	*col = info->LayoutTable[BlockID][BlockOffset];

	/* remap to distributed spare space if indicated */
	if (remap) {
		RF_ASSERT(raidPtr->Disks[*row][*col].status == rf_ds_reconstructing || raidPtr->Disks[*row][*col].status == rf_ds_dist_spared ||
		    (rf_copyback_in_progress && raidPtr->Disks[*row][*col].status == rf_ds_optimal));
		rf_remap_to_spare_space(layoutPtr, info, *row, FullTableID, TableID, BlockID, (base_suid) ? 1 : 0, SpareRegion, col, &outSU);
	} else {

		outSU = base_suid;
		outSU += FullTableID * fulltable_depth;	/* offs to strt of FT */
		outSU += SpareSpace;	/* skip rsvd spare space */
		outSU += TableID * info->TableDepthInPUs * layoutPtr->SUsPerPU;	/* offs to strt of tble */
		outSU += info->OffsetTable[BlockID][BlockOffset] * layoutPtr->SUsPerPU;	/* offs to the PU */
	}
	outSU += TableOffset / (info->BlocksPerTable * info->PUsPerBlock);	/* offs to the SU within
										 * a PU */

	/* convert SUs to sectors, and, if not aligned to SU boundary, add in
	 * offset to sector.  */
	*diskSector = outSU * layoutPtr->sectorsPerStripeUnit + (raidSector % layoutPtr->sectorsPerStripeUnit);

	RF_ASSERT(*col != -1);
}


/* prototyping this inexplicably causes the compile of the layout table (rf_layout.c) to fail */
void
rf_MapParityDeclustered(
    RF_Raid_t * raidPtr,
    RF_RaidAddr_t raidSector,
    RF_RowCol_t * row,
    RF_RowCol_t * col,
    RF_SectorNum_t * diskSector,
    int remap)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;
	RF_StripeNum_t SUID = raidSector / layoutPtr->sectorsPerStripeUnit;
	RF_StripeNum_t FullTableID, FullTableOffset, TableID, TableOffset;
	RF_StripeNum_t BlockID, BlockOffset, RepIndex;
	RF_StripeCount_t sus_per_fulltable = info->SUsPerFullTable;
	RF_StripeCount_t fulltable_depth = info->FullTableDepthInPUs * layoutPtr->SUsPerPU;
	RF_StripeNum_t base_suid = 0, outSU, SpareRegion = 0, SpareSpace = 0;

	rf_decluster_adjust_params(layoutPtr, &SUID, &sus_per_fulltable, &fulltable_depth, &base_suid);

	/* compute row & (possibly) spare space exactly as before */
	FullTableID = SUID / sus_per_fulltable;
	if (raidPtr->numRow == 1)
		*row = 0;	/* avoid a mod and a div in the common case */
	else {
		*row = FullTableID % raidPtr->numRow;
		FullTableID /= raidPtr->numRow;	/* convert to fulltable ID on
						 * this disk */
	}
	if ((raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
		SpareRegion = FullTableID / info->FullTablesPerSpareRegion;
		SpareSpace = SpareRegion * info->SpareSpaceDepthPerRegionInSUs;
	}
	/* compute BlockID and RepIndex exactly as before */
	FullTableOffset = SUID % sus_per_fulltable;
	TableID = FullTableOffset / info->SUsPerTable;
	TableOffset = FullTableOffset - TableID * info->SUsPerTable;
	/* TableOffset     = FullTableOffset % info->SUsPerTable; */
	/* BlockID         = (TableOffset / info->PUsPerBlock) %
	 * info->BlocksPerTable; */
	BlockID = TableOffset / info->PUsPerBlock;
	/* BlockOffset     = TableOffset % info->PUsPerBlock; */
	BlockOffset = TableOffset - BlockID * info->PUsPerBlock;
	BlockID %= info->BlocksPerTable;

	/* the parity block is in the position indicated by RepIndex */
	RepIndex = (raidPtr->noRotate) ? info->PUsPerBlock : info->PUsPerBlock - TableID;
	*col = info->LayoutTable[BlockID][RepIndex];

	if (remap) {
		RF_ASSERT(raidPtr->Disks[*row][*col].status == rf_ds_reconstructing || raidPtr->Disks[*row][*col].status == rf_ds_dist_spared ||
		    (rf_copyback_in_progress && raidPtr->Disks[*row][*col].status == rf_ds_optimal));
		rf_remap_to_spare_space(layoutPtr, info, *row, FullTableID, TableID, BlockID, (base_suid) ? 1 : 0, SpareRegion, col, &outSU);
	} else {

		/* compute sector as before, except use RepIndex instead of
		 * BlockOffset */
		outSU = base_suid;
		outSU += FullTableID * fulltable_depth;
		outSU += SpareSpace;	/* skip rsvd spare space */
		outSU += TableID * info->TableDepthInPUs * layoutPtr->SUsPerPU;
		outSU += info->OffsetTable[BlockID][RepIndex] * layoutPtr->SUsPerPU;
	}

	outSU += TableOffset / (info->BlocksPerTable * info->PUsPerBlock);
	*diskSector = outSU * layoutPtr->sectorsPerStripeUnit + (raidSector % layoutPtr->sectorsPerStripeUnit);

	RF_ASSERT(*col != -1);
}
/* returns an array of ints identifying the disks that comprise the stripe containing the indicated address.
 * the caller must _never_ attempt to modify this array.
 */
void
rf_IdentifyStripeDeclustered(
    RF_Raid_t * raidPtr,
    RF_RaidAddr_t addr,
    RF_RowCol_t ** diskids,
    RF_RowCol_t * outRow)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;
	RF_StripeCount_t sus_per_fulltable = info->SUsPerFullTable;
	RF_StripeCount_t fulltable_depth = info->FullTableDepthInPUs * layoutPtr->SUsPerPU;
	RF_StripeNum_t base_suid = 0;
	RF_StripeNum_t SUID = rf_RaidAddressToStripeUnitID(layoutPtr, addr);
	RF_StripeNum_t stripeID, FullTableID;
	int     tableOffset;

	rf_decluster_adjust_params(layoutPtr, &SUID, &sus_per_fulltable, &fulltable_depth, &base_suid);
	FullTableID = SUID / sus_per_fulltable;	/* fulltable ID within array
						 * (across rows) */
	*outRow = FullTableID % raidPtr->numRow;
	stripeID = rf_StripeUnitIDToStripeID(layoutPtr, SUID);	/* find stripe offset
								 * into array */
	tableOffset = (stripeID % info->BlocksPerTable);	/* find offset into
								 * block design table */
	*diskids = info->LayoutTable[tableOffset];
}
/* This returns the default head-separation limit, which is measured
 * in "required units for reconstruction".  Each time a disk fetches
 * a unit, it bumps a counter.  The head-sep code prohibits any disk
 * from getting more than headSepLimit counter values ahead of any
 * other.
 *
 * We assume here that the number of floating recon buffers is already
 * set.  There are r stripes to be reconstructed in each table, and so
 * if we have a total of B buffers, we can have at most B/r tables
 * under recon at any one time.  In each table, lambda units are required
 * from each disk, so given B buffers, the head sep limit has to be
 * (lambda*B)/r units.  We subtract one to avoid weird boundary cases.
 *
 * for example, suppose we're given 50 buffers, r=19, and lambda=4 as in
 * the 20.5 design.  There are 19 stripes/table to be reconstructed, so
 * we can have 50/19 tables concurrently under reconstruction, which means
 * we can allow the fastest disk to get 50/19 tables ahead of the slowest
 * disk.  There are lambda "required units" for each disk, so the fastest
 * disk can get 4*50/19 = 10 counter values ahead of the slowest.
 *
 * If numBufsToAccumulate is not 1, we need to limit the head sep further
 * because multiple bufs will be required for each stripe under recon.
 */
RF_HeadSepLimit_t
rf_GetDefaultHeadSepLimitDeclustered(
    RF_Raid_t * raidPtr)
{
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;

	return (info->Lambda * raidPtr->numFloatingReconBufs / info->TableDepthInPUs / rf_numBufsToAccumulate);
}
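
/*
 * Plugging the numbers from the comment above into the expression actually
 * used here: for a block design, TableDepthInPUs == (b*k)/v == r, so with
 * lambda = 4, 50 floating recon buffers, r = 19 and rf_numBufsToAccumulate
 * = 1 this returns 4 * 50 / 19 / 1 = 10.  (No "subtract one" is applied in
 * the code as written.)
 */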
/* returns the default number of recon buffers to use.  The value
 * is somewhat arbitrary...it's intended to be large enough to allow
 * for a reasonably large head-sep limit, but small enough that you
 * don't use up all your system memory with buffers.
 */
int
rf_GetDefaultNumFloatingReconBuffersDeclustered(RF_Raid_t * raidPtr)
{
	return (100 * rf_numBufsToAccumulate);
}
/* sectors in the last fulltable of the array need to be handled
 * specially since this fulltable can be incomplete.  this function
 * changes the values of certain params to handle this.
 *
 * the idea here is that MapSector et al. figure out which disk the
 * addressed unit lives on by reducing the unit number modulo the
 * number of units per fulltable, table, etc.  In the last
 * fulltable, there are fewer units per fulltable, so we need to adjust
 * the number of user data units per fulltable to reflect this.
 *
 * so, we (1) convert the fulltable size and depth parameters to
 * the size of the partial fulltable at the end, (2) compute the
 * disk sector offset where this fulltable starts, and (3) convert
 * the user's stripe unit number from an offset into the array to
 * an offset into the last fulltable.
 */
void
rf_decluster_adjust_params(
    RF_RaidLayout_t * layoutPtr,
    RF_StripeNum_t * SUID,
    RF_StripeCount_t * sus_per_fulltable,
    RF_StripeCount_t * fulltable_depth,
    RF_StripeNum_t * base_suid)
{
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;

	if (*SUID >= info->FullTableLimitSUID) {
		/* new full table size is size of last full table on disk */
		*sus_per_fulltable = info->ExtraTablesPerDisk * info->SUsPerTable;

		/* new full table depth is corresponding depth */
		*fulltable_depth = info->ExtraTablesPerDisk * info->TableDepthInPUs * layoutPtr->SUsPerPU;

		/* set up the new base offset */
		*base_suid = info->DiskOffsetOfLastFullTableInSUs;

		/* convert the user's array address to an offset into the last
		 * fulltable */
		*SUID -= info->FullTableLimitSUID;
	}
}
/*
 * map a stripe ID to a parity stripe ID.
 * See comment above RaidAddressToParityStripeID in rf_layout.c.
 */
void
rf_MapSIDToPSIDDeclustered(
    RF_RaidLayout_t * layoutPtr,
    RF_StripeNum_t stripeID,
    RF_StripeNum_t * psID,
    RF_ReconUnitNum_t * which_ru)
{
	RF_DeclusteredConfigInfo_t *info;

	info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;

	*psID = (stripeID / (layoutPtr->SUsPerPU * info->BlocksPerTable))
	    * info->BlocksPerTable + (stripeID % info->BlocksPerTable);
	*which_ru = (stripeID % (info->BlocksPerTable * layoutPtr->SUsPerPU))
	    / info->BlocksPerTable;
	RF_ASSERT((*which_ru) < layoutPtr->SUsPerPU / layoutPtr->SUsPerRU);
}
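
#if 0
/*
 * Illustrative sketch only (deliberately not compiled, and not part of
 * RAIDframe): a stand-alone check of the SID -> PSID arithmetic above,
 * using made-up values SUsPerPU == 2 and BlocksPerTable == 7.
 */
static void
rf_example_sid_to_psid(void)
{
	int SUsPerPU = 2, BlocksPerTable = 7;
	int stripeID = 9;
	int psID = (stripeID / (SUsPerPU * BlocksPerTable)) * BlocksPerTable +
	    (stripeID % BlocksPerTable);	/* (9/14)*7 + 9%7 == 2 */
	int which_ru = (stripeID % (BlocksPerTable * SUsPerPU)) /
	    BlocksPerTable;			/* (9%14)/7 == 1 */

	printf("psID=%d which_ru=%d\n", psID, which_ru);
}
#endif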
/*
 * Called from MapSector and MapParity to retarget an access at the spare unit.
 * Modifies the "col" and "outSU" parameters only.
 */
void
rf_remap_to_spare_space(
    RF_RaidLayout_t * layoutPtr,
    RF_DeclusteredConfigInfo_t * info,
    RF_RowCol_t row,
    RF_StripeNum_t FullTableID,
    RF_StripeNum_t TableID,
    RF_SectorNum_t BlockID,
    RF_StripeNum_t base_suid,
    RF_StripeNum_t SpareRegion,
    RF_RowCol_t * outCol,
    RF_StripeNum_t * outSU)
{
	RF_StripeNum_t ftID, spareTableStartSU, TableInSpareRegion, lastSROffset,
	        which_ft;

	/*
         * note that FullTableID and hence SpareRegion may have gotten
         * tweaked by rf_decluster_adjust_params. We detect this by
         * noticing that base_suid is not 0.
         */
	if (base_suid == 0) {
		ftID = FullTableID;
	} else {
		/*
	         * There may be > 1.0 full tables in the last (i.e. partial)
	         * spare region.  find out which of these we're in.
	         */
		lastSROffset = info->NumCompleteSRs * info->SpareRegionDepthInSUs;
		which_ft = (info->DiskOffsetOfLastFullTableInSUs - lastSROffset) / (info->FullTableDepthInPUs * layoutPtr->SUsPerPU);

		/* compute the actual full table ID */
		ftID = info->DiskOffsetOfLastFullTableInSUs / (info->FullTableDepthInPUs * layoutPtr->SUsPerPU) + which_ft;
		SpareRegion = info->NumCompleteSRs;
	}
	TableInSpareRegion = (ftID * info->NumParityReps + TableID) % info->TablesPerSpareRegion;

	*outCol = info->SpareTable[TableInSpareRegion][BlockID].spareDisk;
	RF_ASSERT(*outCol != -1);

	spareTableStartSU = (SpareRegion == info->NumCompleteSRs) ?
	    info->DiskOffsetOfLastFullTableInSUs + info->ExtraTablesPerDisk * info->TableDepthInPUs * layoutPtr->SUsPerPU :
	    (SpareRegion + 1) * info->SpareRegionDepthInSUs - info->SpareSpaceDepthPerRegionInSUs;
	*outSU = spareTableStartSU + info->SpareTable[TableInSpareRegion][BlockID].spareBlockOffsetInSUs;
	if (*outSU >= layoutPtr->stripeUnitsPerDisk) {
		printf("rf_remap_to_spare_space: invalid remapped disk SU offset %ld\n", (long) *outSU);
	}
}

#endif /* (RF_INCLUDE_PARITY_DECLUSTERING > 0)  || (RF_INCLUDE_PARITY_DECLUSTERING_PQ > 0) */


int
rf_InstallSpareTable(
    RF_Raid_t * raidPtr,
    RF_RowCol_t frow,
    RF_RowCol_t fcol)
{
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;
	RF_SparetWait_t *req;
	int     retcode;

	RF_Malloc(req, sizeof(*req), (RF_SparetWait_t *));
	req->C = raidPtr->numCol;
	req->G = raidPtr->Layout.numDataCol + raidPtr->Layout.numParityCol;
	req->fcol = fcol;
	req->SUsPerPU = raidPtr->Layout.SUsPerPU;
	req->TablesPerSpareRegion = info->TablesPerSpareRegion;
	req->BlocksPerTable = info->BlocksPerTable;
	req->TableDepthInPUs = info->TableDepthInPUs;
	req->SpareSpaceDepthPerRegionInSUs = info->SpareSpaceDepthPerRegionInSUs;

	retcode = rf_GetSpareTableFromDaemon(req);
	RF_ASSERT(!retcode);	/* XXX -- fix this to recover gracefully --
				 * XXX */
	return (retcode);
}
#if (RF_INCLUDE_PARITY_DECLUSTERING > 0) || (RF_INCLUDE_PARITY_DECLUSTERING_PQ > 0)
/*
 * Invoked via ioctl to install a spare table in the kernel.
 */
int
rf_SetSpareTable(raidPtr, data)
	RF_Raid_t *raidPtr;
	void   *data;
{
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;
	RF_SpareTableEntry_t **ptrs;
	int     i, retcode;

	/* what we need to copyin is a 2-d array, so first copyin the user
	 * pointers to the rows in the table */
	RF_Malloc(ptrs, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *), (RF_SpareTableEntry_t **));
	retcode = copyin((caddr_t) data, (caddr_t) ptrs, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *));

	if (retcode)
		return (retcode);

	/* now allocate kernel space for the row pointers */
	RF_Malloc(info->SpareTable, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *), (RF_SpareTableEntry_t **));

	/* now allocate kernel space for each row in the table, and copy it in
	 * from user space */
	for (i = 0; i < info->TablesPerSpareRegion; i++) {
		RF_Malloc(info->SpareTable[i], info->BlocksPerTable * sizeof(RF_SpareTableEntry_t), (RF_SpareTableEntry_t *));
		retcode = copyin(ptrs[i], info->SpareTable[i], info->BlocksPerTable * sizeof(RF_SpareTableEntry_t));
		if (retcode) {
			info->SpareTable = NULL;	/* blow off the memory
							 * we've allocated */
			return (retcode);
		}
	}

	/* free up the temporary array we used */
	RF_Free(ptrs, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *));

	return (0);
}

RF_ReconUnitCount_t
rf_GetNumSpareRUsDeclustered(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;

	return (((RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo)->TotSparePUsPerDisk);
}
#endif /* (RF_INCLUDE_PARITY_DECLUSTERING > 0)  || (RF_INCLUDE_PARITY_DECLUSTERING_PQ > 0) */

void
rf_FreeSpareTable(raidPtr)
	RF_Raid_t *raidPtr;
{
	long    i;
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;
	RF_SpareTableEntry_t **table = info->SpareTable;

	for (i = 0; i < info->TablesPerSpareRegion; i++) {
		RF_Free(table[i], info->BlocksPerTable * sizeof(RF_SpareTableEntry_t));
	}
	RF_Free(table, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *));
	info->SpareTable = (RF_SpareTableEntry_t **) NULL;
}