/*-
 * Copyright (c) 2011 Alexander Motin <mav@FreeBSD.org>
 * Copyright (c) 2000 - 2008 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <geom/geom.h>
#include "geom/raid/g_raid.h"
#include "g_raid_md_if.h"

static MALLOC_DEFINE(M_MD_PROMISE, "md_promise_data", "GEOM_RAID Promise metadata");

#define	PROMISE_MAX_DISKS	8
#define	PROMISE_MAX_SUBDISKS	2
#define	PROMISE_META_OFFSET	14
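
/*
 * Metadata copies live near the end of the provider: the copy for
 * subdisk n is read and written at byte offset
 *     mediasize - sectorsize * (63 - n * PROMISE_META_OFFSET),
 * i.e. 63 sectors before the end for the first subdisk and 14 sectors
 * closer to the end for each following one.  A minimal illustration of
 * that arithmetic (a sketch for reference only, not used by the driver):
 */
#if 0
static inline off_t
promise_meta_off(off_t mediasize, u_int sectorsize, int subdisk)
{

	return (mediasize - sectorsize * (63 - subdisk * PROMISE_META_OFFSET));
}
#endif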

struct promise_raid_disk {
	uint8_t		flags;			/* Subdisk status. */
#define PROMISE_F_VALID		0x01
#define PROMISE_F_ONLINE	0x02
#define PROMISE_F_ASSIGNED	0x04
#define PROMISE_F_SPARE		0x08
#define PROMISE_F_DUPLICATE	0x10
#define PROMISE_F_REDIR		0x20
#define PROMISE_F_DOWN		0x40
#define PROMISE_F_READY		0x80

	uint8_t		number;			/* Position in a volume. */
	uint8_t		channel;		/* ATA channel number. */
	uint8_t		device;			/* ATA device number. */
	uint64_t	id __packed;		/* Subdisk ID. */
} __packed;

struct promise_raid_conf {
	char		promise_id[24];
#define PROMISE_MAGIC		"Promise Technology, Inc."
#define FREEBSD_MAGIC		"FreeBSD ATA driver RAID "

	uint32_t	dummy_0;
	uint64_t	magic_0;
#define PROMISE_MAGIC0(x)	(((uint64_t)(x.channel) << 48) | \
				((uint64_t)(x.device != 0) << 56))
	uint16_t	magic_1;
	uint32_t	magic_2;
	uint8_t		filler1[470];

	uint32_t	integrity;
#define PROMISE_I_VALID		0x00000080

	struct promise_raid_disk	disk;	/* This subdisk info. */
	uint32_t	disk_offset;		/* Subdisk offset. */
	uint32_t	disk_sectors;		/* Subdisk size. */
	uint32_t	disk_rebuild;		/* Rebuild position. */
	uint16_t	generation;		/* Generation number. */
	uint8_t		status;			/* Volume status. */
#define PROMISE_S_VALID		0x01
#define PROMISE_S_ONLINE	0x02
#define PROMISE_S_INITED	0x04
#define PROMISE_S_READY		0x08
#define PROMISE_S_DEGRADED	0x10
#define PROMISE_S_MARKED	0x20
#define PROMISE_S_MIGRATING	0x40
#define PROMISE_S_FUNCTIONAL	0x80

	uint8_t		type;			/* Volume type. */
#define PROMISE_T_RAID0		0x00
#define PROMISE_T_RAID1		0x01
#define PROMISE_T_RAID3		0x02
#define PROMISE_T_RAID5		0x04
#define PROMISE_T_SPAN		0x08
#define PROMISE_T_JBOD		0x10

	uint8_t		total_disks;		/* Disks in this volume. */
	uint8_t		stripe_shift;		/* Strip size. */
	uint8_t		array_width;		/* Number of RAID0 stripes. */
	uint8_t		array_number;		/* Global volume number. */
	uint32_t	total_sectors;		/* Volume size. */
	uint16_t	cylinders;		/* Volume geometry: C. */
	uint8_t		heads;			/* Volume geometry: H. */
	uint8_t		sectors;		/* Volume geometry: S. */
	uint64_t	volume_id __packed;	/* Volume ID. */
	struct promise_raid_disk	disks[PROMISE_MAX_DISKS];
						/* Subdisks in this volume. */
	char		name[32];		/* Volume label. */

	uint32_t	filler2[8];
	uint32_t	magic_3;	/* Something related to rebuild. */
	uint64_t	rebuild_lba64;	/* Per-volume rebuild position. */
	uint32_t	magic_4;
	uint32_t	magic_5;
	uint32_t	total_sectors_high;
	uint8_t		magic_6;
	uint8_t		sector_size;
	uint16_t	magic_7;
	uint32_t	magic_8[31];
	uint32_t	backup_time;
	uint16_t	magic_9;
	uint32_t	disk_offset_high;
	uint32_t	disk_sectors_high;
	uint32_t	disk_rebuild_high;
	uint16_t	magic_10;
	uint32_t	magic_11[3];
	uint32_t	filler3[284];
	uint32_t	checksum;
} __packed;
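
/*
 * The checksum field is the 512th 32-bit word of the 2048-byte block
 * above: the sum of the preceding 511 words.  A minimal sketch of the
 * calculation used by the read and write paths below:
 */
#if 0
static inline uint32_t
promise_meta_checksum(const struct promise_raid_conf *meta)
{
	const uint32_t *ptr = (const uint32_t *)meta;
	uint32_t checksum;
	int i;

	for (checksum = 0, i = 0; i < 511; i++)
		checksum += ptr[i];
	return (checksum);
}
#endif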

struct g_raid_md_promise_perdisk {
	int		 pd_updated;
	int		 pd_subdisks;
	struct promise_raid_conf	*pd_meta[PROMISE_MAX_SUBDISKS];
};

struct g_raid_md_promise_pervolume {
	struct promise_raid_conf	*pv_meta;
	uint64_t			 pv_id;
	uint16_t			 pv_generation;
	int				 pv_disks_present;
	int				 pv_started;
	struct callout			 pv_start_co;	/* STARTING state timer. */
};

static g_raid_md_create_t g_raid_md_create_promise;
static g_raid_md_taste_t g_raid_md_taste_promise;
static g_raid_md_event_t g_raid_md_event_promise;
static g_raid_md_volume_event_t g_raid_md_volume_event_promise;
static g_raid_md_ctl_t g_raid_md_ctl_promise;
static g_raid_md_write_t g_raid_md_write_promise;
static g_raid_md_fail_disk_t g_raid_md_fail_disk_promise;
static g_raid_md_free_disk_t g_raid_md_free_disk_promise;
static g_raid_md_free_volume_t g_raid_md_free_volume_promise;
static g_raid_md_free_t g_raid_md_free_promise;

static kobj_method_t g_raid_md_promise_methods[] = {
	KOBJMETHOD(g_raid_md_create,	g_raid_md_create_promise),
	KOBJMETHOD(g_raid_md_taste,	g_raid_md_taste_promise),
	KOBJMETHOD(g_raid_md_event,	g_raid_md_event_promise),
	KOBJMETHOD(g_raid_md_volume_event,	g_raid_md_volume_event_promise),
	KOBJMETHOD(g_raid_md_ctl,	g_raid_md_ctl_promise),
	KOBJMETHOD(g_raid_md_write,	g_raid_md_write_promise),
	KOBJMETHOD(g_raid_md_fail_disk,	g_raid_md_fail_disk_promise),
	KOBJMETHOD(g_raid_md_free_disk,	g_raid_md_free_disk_promise),
	KOBJMETHOD(g_raid_md_free_volume,	g_raid_md_free_volume_promise),
	KOBJMETHOD(g_raid_md_free,	g_raid_md_free_promise),
	{ 0, 0 }
};

static struct g_raid_md_class g_raid_md_promise_class = {
	"Promise",
	g_raid_md_promise_methods,
	sizeof(struct g_raid_md_object),
	.mdc_enable = 1,
	.mdc_priority = 100
};

static void
g_raid_md_promise_print(struct promise_raid_conf *meta)
{
	int i;

	if (g_raid_debug < 1)
		return;

	printf("********* ATA Promise Metadata *********\n");
	printf("promise_id          <%.24s>\n", meta->promise_id);
	printf("disk                %02x %02x %02x %02x %016jx\n",
	    meta->disk.flags, meta->disk.number, meta->disk.channel,
	    meta->disk.device, meta->disk.id);
	printf("disk_offset         %u\n", meta->disk_offset);
	printf("disk_sectors        %u\n", meta->disk_sectors);
	printf("disk_rebuild        %u\n", meta->disk_rebuild);
	printf("generation          %u\n", meta->generation);
	printf("status              0x%02x\n", meta->status);
	printf("type                %u\n", meta->type);
	printf("total_disks         %u\n", meta->total_disks);
	printf("stripe_shift        %u\n", meta->stripe_shift);
	printf("array_width         %u\n", meta->array_width);
	printf("array_number        %u\n", meta->array_number);
	printf("total_sectors       %u\n", meta->total_sectors);
	printf("cylinders           %u\n", meta->cylinders);
	printf("heads               %u\n", meta->heads);
	printf("sectors             %u\n", meta->sectors);
	printf("volume_id           0x%016jx\n", meta->volume_id);
	printf("disks:\n");
	for (i = 0; i < PROMISE_MAX_DISKS; i++) {
		printf("                    %02x %02x %02x %02x %016jx\n",
		    meta->disks[i].flags, meta->disks[i].number,
		    meta->disks[i].channel, meta->disks[i].device,
		    meta->disks[i].id);
	}
	printf("name                <%.32s>\n", meta->name);
	printf("magic_3             0x%08x\n", meta->magic_3);
	printf("rebuild_lba64       %ju\n", meta->rebuild_lba64);
	printf("magic_4             0x%08x\n", meta->magic_4);
	printf("magic_5             0x%08x\n", meta->magic_5);
	printf("total_sectors_high  0x%08x\n", meta->total_sectors_high);
	printf("sector_size         %u\n", meta->sector_size);
	printf("backup_time         %d\n", meta->backup_time);
	printf("disk_offset_high    0x%08x\n", meta->disk_offset_high);
	printf("disk_sectors_high   0x%08x\n", meta->disk_sectors_high);
	printf("disk_rebuild_high   0x%08x\n", meta->disk_rebuild_high);
	printf("=================================================\n");
}

static struct promise_raid_conf *
promise_meta_copy(struct promise_raid_conf *meta)
{
	struct promise_raid_conf *nmeta;

	nmeta = malloc(sizeof(*nmeta), M_MD_PROMISE, M_WAITOK);
	memcpy(nmeta, meta, sizeof(*nmeta));
	return (nmeta);
}

static int
promise_meta_find_disk(struct promise_raid_conf *meta, uint64_t id)
{
	int pos;

	for (pos = 0; pos < meta->total_disks; pos++) {
		if (meta->disks[pos].id == id)
			return (pos);
	}
	return (-1);
}

static int
promise_meta_unused_range(struct promise_raid_conf **metaarr, int nsd,
    off_t sectors, off_t *off, off_t *size)
{
	off_t coff, csize, tmp;
	int i, j;

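	/*
	 * Scan the gaps before, between and after the already allocated
	 * subdisk extents and report the largest one.  The last 131072
	 * sectors (64MB at 512b) are excluded up front: they stay
	 * reserved for metadata, matching the size advertised by
	 * promise_meta_write_spare() below.
	 */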
	sectors -= 131072;
	*off = 0;
	*size = 0;
	coff = 0;
	csize = sectors;
	i = 0;
	while (1) {
		for (j = 0; j < nsd; j++) {
			tmp = ((off_t)metaarr[j]->disk_offset_high << 32) +
			    metaarr[j]->disk_offset;
			if (tmp >= coff)
				csize = MIN(csize, tmp - coff);
		}
		if (csize > *size) {
			*off = coff;
			*size = csize;
		}
		if (i >= nsd)
			break;
		coff = ((off_t)metaarr[i]->disk_offset_high << 32) +
		     metaarr[i]->disk_offset +
		    ((off_t)metaarr[i]->disk_sectors_high << 32) +
		     metaarr[i]->disk_sectors;
		csize = sectors - coff;
		i++;
	}
	return ((*size > 0) ? 1 : 0);
}

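/*
 * Translate a subdisk position between metadata order and volume order.
 * For RAID0+1 (RAID1E here) the two orders differ; e.g. with four disks
 * (width = 2) metadata positions 0,1,2,3 map to volume positions
 * 0,2,1,3.  For all other levels the position is used as-is.
 */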
static int
promise_meta_translate_disk(struct g_raid_volume *vol, int md_disk_pos)
{
	int disk_pos, width;

	if (md_disk_pos >= 0 && vol->v_raid_level == G_RAID_VOLUME_RL_RAID1E) {
		width = vol->v_disks_count / 2;
		disk_pos = (md_disk_pos / width) +
		    (md_disk_pos % width) * width;
	} else
		disk_pos = md_disk_pos;
	return (disk_pos);
}

static void
promise_meta_get_name(struct promise_raid_conf *meta, char *buf)
{
	int i;

	strncpy(buf, meta->name, 32);
	buf[32] = 0;
	for (i = 31; i >= 0; i--) {
		if (buf[i] > 0x20)
			break;
		buf[i] = 0;
	}
}

static void
promise_meta_put_name(struct promise_raid_conf *meta, char *buf)
{

	memset(meta->name, 0x20, 32);
	memcpy(meta->name, buf, MIN(strlen(buf), 32));
}

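/*
 * Read up to PROMISE_MAX_SUBDISKS metadata copies from the tail of the
 * provider into the caller-supplied metaarr[].  Returns the number of
 * valid copies found; each one is malloc(9)'ed and owned by the caller
 * from then on.
 */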
static int
promise_meta_read(struct g_consumer *cp, struct promise_raid_conf **metaarr)
{
	struct g_provider *pp;
	struct promise_raid_conf *meta;
	char *buf;
	int error, i, subdisks;
	uint32_t checksum, *ptr;

	pp = cp->provider;
	subdisks = 0;
next:
	/* Read metadata block. */
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize *
	    (63 - subdisks * PROMISE_META_OFFSET),
	    pp->sectorsize * 4, &error);
	if (buf == NULL) {
		G_RAID_DEBUG(1, "Cannot read metadata from %s (error=%d).",
		    pp->name, error);
		return (subdisks);
	}
	meta = (struct promise_raid_conf *)buf;

	/* Check if this is a Promise RAID struct. */
	if (strncmp(meta->promise_id, PROMISE_MAGIC, strlen(PROMISE_MAGIC)) &&
	    strncmp(meta->promise_id, FREEBSD_MAGIC, strlen(FREEBSD_MAGIC))) {
		if (subdisks == 0)
			G_RAID_DEBUG(1,
			    "Promise signature check failed on %s", pp->name);
		g_free(buf);
		return (subdisks);
	}
	meta = malloc(sizeof(*meta), M_MD_PROMISE, M_WAITOK);
	memcpy(meta, buf, MIN(sizeof(*meta), pp->sectorsize * 4));
	g_free(buf);

	/* Check metadata checksum. */
	for (checksum = 0, ptr = (uint32_t *)meta, i = 0; i < 511; i++)
		checksum += *ptr++;
	if (checksum != meta->checksum) {
		G_RAID_DEBUG(1, "Promise checksum check failed on %s", pp->name);
		free(meta, M_MD_PROMISE);
		return (subdisks);
	}

	if ((meta->integrity & PROMISE_I_VALID) == 0) {
		G_RAID_DEBUG(1, "Promise metadata is invalid on %s", pp->name);
		free(meta, M_MD_PROMISE);
		return (subdisks);
	}

	if (meta->total_disks > PROMISE_MAX_DISKS) {
		G_RAID_DEBUG(1, "Wrong number of disks on %s (%d)",
		    pp->name, meta->total_disks);
		free(meta, M_MD_PROMISE);
		return (subdisks);
	}

	/* Remove filler garbage from fields used in newer metadata. */
	if (meta->disk_offset_high == 0x8b8c8d8e &&
	    meta->disk_sectors_high == 0x8788898a &&
	    meta->disk_rebuild_high == 0x83848586) {
		meta->disk_offset_high = 0;
		meta->disk_sectors_high = 0;
		if (meta->disk_rebuild == UINT32_MAX)
			meta->disk_rebuild_high = UINT32_MAX;
		else
			meta->disk_rebuild_high = 0;
		if (meta->total_sectors_high == 0x15161718) {
			meta->total_sectors_high = 0;
			meta->backup_time = 0;
			if (meta->rebuild_lba64 == 0x2122232425262728)
				meta->rebuild_lba64 = UINT64_MAX;
		}
	}
	if (meta->sector_size < 1 || meta->sector_size > 8)
		meta->sector_size = 1;

	/* Save this part and look for next. */
	*metaarr = meta;
	metaarr++;
	subdisks++;
	if (subdisks < PROMISE_MAX_SUBDISKS)
		goto next;

	return (subdisks);
}

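/*
 * Write all PROMISE_MAX_SUBDISKS metadata slots: real records first,
 * then (apparently mimicking the vendor BIOS) at most one fake
 * unassigned record describing the largest unused range, and zeroes
 * for the rest.
 */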
static int
promise_meta_write(struct g_consumer *cp,
    struct promise_raid_conf **metaarr, int nsd)
{
	struct g_provider *pp;
	struct promise_raid_conf *meta;
	char *buf;
	off_t off, size;
	int error, i, subdisk, fake;
	uint32_t checksum, *ptr;

	pp = cp->provider;
	subdisk = 0;
	fake = 0;
next:
	buf = malloc(pp->sectorsize * 4, M_MD_PROMISE, M_WAITOK | M_ZERO);
	meta = NULL;
	if (subdisk < nsd) {
		meta = metaarr[subdisk];
	} else if (!fake && promise_meta_unused_range(metaarr, nsd,
	    cp->provider->mediasize / cp->provider->sectorsize,
	    &off, &size)) {
		/* Optionally add record for unused space. */
		meta = (struct promise_raid_conf *)buf;
		memcpy(&meta->promise_id[0], PROMISE_MAGIC,
		    sizeof(PROMISE_MAGIC) - 1);
		meta->dummy_0 = 0x00020000;
		meta->integrity = PROMISE_I_VALID;
		meta->disk.flags = PROMISE_F_ONLINE | PROMISE_F_VALID;
		meta->disk.number = 0xff;
		arc4rand(&meta->disk.id, sizeof(meta->disk.id), 0);
		meta->disk_offset_high = off >> 32;
		meta->disk_offset = (uint32_t)off;
		meta->disk_sectors_high = size >> 32;
		meta->disk_sectors = (uint32_t)size;
		meta->disk_rebuild_high = UINT32_MAX;
		meta->disk_rebuild = UINT32_MAX;
		fake = 1;
	}
	if (meta != NULL) {
		/* Recalculate checksum in case the metadata has changed. */
		meta->checksum = 0;
		for (checksum = 0, ptr = (uint32_t *)meta, i = 0; i < 511; i++)
			checksum += *ptr++;
		meta->checksum = checksum;
		memcpy(buf, meta, MIN(pp->sectorsize * 4, sizeof(*meta)));
	}
	error = g_write_data(cp, pp->mediasize - pp->sectorsize *
	    (63 - subdisk * PROMISE_META_OFFSET),
	    buf, pp->sectorsize * 4);
	if (error != 0) {
		G_RAID_DEBUG(1, "Cannot write metadata to %s (error=%d).",
		    pp->name, error);
	}
	free(buf, M_MD_PROMISE);

	subdisk++;
	if (subdisk < PROMISE_MAX_SUBDISKS)
		goto next;

	return (error);
}

static int
promise_meta_erase(struct g_consumer *cp)
{
	struct g_provider *pp;
	char *buf;
	int error, subdisk;

	pp = cp->provider;
	buf = malloc(4 * pp->sectorsize, M_MD_PROMISE, M_WAITOK | M_ZERO);
	for (subdisk = 0; subdisk < PROMISE_MAX_SUBDISKS; subdisk++) {
		error = g_write_data(cp, pp->mediasize - pp->sectorsize *
		    (63 - subdisk * PROMISE_META_OFFSET),
		    buf, 4 * pp->sectorsize);
		if (error != 0) {
			G_RAID_DEBUG(1, "Cannot erase metadata on %s (error=%d).",
			    pp->name, error);
		}
	}
	free(buf, M_MD_PROMISE);
	return (error);
}

static int
promise_meta_write_spare(struct g_consumer *cp)
{
	struct promise_raid_conf *meta;
	off_t tmp;
	int error;

	meta = malloc(sizeof(*meta), M_MD_PROMISE, M_WAITOK | M_ZERO);
	memcpy(&meta->promise_id[0], PROMISE_MAGIC, sizeof(PROMISE_MAGIC) - 1);
	meta->dummy_0 = 0x00020000;
	meta->integrity = PROMISE_I_VALID;
	meta->disk.flags = PROMISE_F_SPARE | PROMISE_F_ONLINE | PROMISE_F_VALID;
	meta->disk.number = 0xff;
	arc4rand(&meta->disk.id, sizeof(meta->disk.id), 0);
	tmp = cp->provider->mediasize / cp->provider->sectorsize - 131072;
	meta->disk_sectors_high = tmp >> 32;
	meta->disk_sectors = (uint32_t)tmp;
	meta->disk_rebuild_high = UINT32_MAX;
	meta->disk_rebuild = UINT32_MAX;
	error = promise_meta_write(cp, &meta, 1);
	free(meta, M_MD_PROMISE);
	return (error);
}

static struct g_raid_volume *
g_raid_md_promise_get_volume(struct g_raid_softc *sc, uint64_t id)
{
	struct g_raid_volume	*vol;
	struct g_raid_md_promise_pervolume *pv;

	TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
		pv = vol->v_md_data;
		if (pv->pv_id == id)
			break;
	}
	return (vol);
}

static int
g_raid_md_promise_purge_volumes(struct g_raid_softc *sc)
{
	struct g_raid_volume	*vol, *tvol;
	struct g_raid_md_promise_pervolume *pv;
	int i, res;

	res = 0;
	TAILQ_FOREACH_SAFE(vol, &sc->sc_volumes, v_next, tvol) {
		pv = vol->v_md_data;
		if (!pv->pv_started || vol->v_stopping)
			continue;
		for (i = 0; i < vol->v_disks_count; i++) {
			if (vol->v_subdisks[i].sd_state != G_RAID_SUBDISK_S_NONE)
				break;
		}
		if (i >= vol->v_disks_count) {
			g_raid_destroy_volume(vol);
			res = 1;
		}
	}
	return (res);
}

static int
g_raid_md_promise_purge_disks(struct g_raid_softc *sc)
{
	struct g_raid_disk	*disk, *tdisk;
	struct g_raid_volume	*vol;
	struct g_raid_md_promise_perdisk *pd;
	int i, j, res;

	res = 0;
	TAILQ_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) {
		if (disk->d_state == G_RAID_DISK_S_SPARE)
			continue;
		pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;

		/* Scan for deleted volumes. */
		for (i = 0; i < pd->pd_subdisks; ) {
			vol = g_raid_md_promise_get_volume(sc,
			    pd->pd_meta[i]->volume_id);
			if (vol != NULL && !vol->v_stopping) {
				i++;
				continue;
			}
			free(pd->pd_meta[i], M_MD_PROMISE);
			for (j = i; j < pd->pd_subdisks - 1; j++)
				pd->pd_meta[j] = pd->pd_meta[j + 1];
			pd->pd_meta[pd->pd_subdisks - 1] = NULL;
			pd->pd_subdisks--;
			pd->pd_updated = 1;
		}

		/* If there is no metadata left, erase and delete the disk. */
		if (pd->pd_subdisks == 0) {
			promise_meta_erase(disk->d_consumer);
			g_raid_destroy_disk(disk);
			res = 1;
		}
	}
	return (res);
}

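/*
 * Supported configurations, without "force": RAID0 needs at least two
 * disks, RAID1 exactly two, RAID1E an even count of four, SINGLE
 * exactly one, CONCAT at least two, RAID5 at least three with
 * left-asymmetric layout; never more than PROMISE_MAX_DISKS (8).
 */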
static int
g_raid_md_promise_supported(int level, int qual, int disks, int force)
{

	if (disks > PROMISE_MAX_DISKS)
		return (0);
	switch (level) {
	case G_RAID_VOLUME_RL_RAID0:
		if (disks < 1)
			return (0);
		if (!force && disks < 2)
			return (0);
		break;
	case G_RAID_VOLUME_RL_RAID1:
		if (disks < 1)
			return (0);
		if (!force && (disks != 2))
			return (0);
		break;
	case G_RAID_VOLUME_RL_RAID1E:
		if (disks < 2)
			return (0);
		if (disks % 2 != 0)
			return (0);
		if (!force && (disks != 4))
			return (0);
		break;
	case G_RAID_VOLUME_RL_SINGLE:
		if (disks != 1)
			return (0);
		break;
	case G_RAID_VOLUME_RL_CONCAT:
		if (disks < 2)
			return (0);
		break;
	case G_RAID_VOLUME_RL_RAID5:
		if (disks < 3)
			return (0);
		if (qual != G_RAID_VOLUME_RLQ_R5LA)
			return (0);
		break;
	default:
		return (0);
	}
	if (level != G_RAID_VOLUME_RL_RAID5 && qual != G_RAID_VOLUME_RLQ_NONE)
		return (0);
	return (1);
}

static int
g_raid_md_promise_start_disk(struct g_raid_disk *disk, int sdn,
    struct g_raid_volume *vol)
{
	struct g_raid_softc *sc;
	struct g_raid_subdisk *sd;
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_md_promise_pervolume *pv;
	struct promise_raid_conf *meta;
	off_t eoff, esize, size;
	int disk_pos, md_disk_pos, i, resurrection = 0;

	sc = disk->d_softc;
	pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;

	pv = vol->v_md_data;
	meta = pv->pv_meta;

	if (sdn >= 0) {
		/* Find disk position in metadata by its serial. */
		md_disk_pos = promise_meta_find_disk(meta, pd->pd_meta[sdn]->disk.id);
		/* For RAID0+1 we need to translate order. */
		disk_pos = promise_meta_translate_disk(vol, md_disk_pos);
	} else {
		md_disk_pos = -1;
		disk_pos = -1;
	}
	if (disk_pos < 0) {
		G_RAID_DEBUG1(1, sc, "Disk %s is not part of the volume %s",
		    g_raid_get_diskname(disk), vol->v_name);
		/* Failed stale disk is useless for us. */
		if (sdn >= 0 &&
		    pd->pd_meta[sdn]->disk.flags & PROMISE_F_DOWN) {
			g_raid_change_disk_state(disk, G_RAID_DISK_S_STALE_FAILED);
			return (0);
		}
		/* If we were given a specific metadata subdisk, erase it. */
		if (sdn >= 0) {
			free(pd->pd_meta[sdn], M_MD_PROMISE);
			for (i = sdn; i < pd->pd_subdisks - 1; i++)
				pd->pd_meta[i] = pd->pd_meta[i + 1];
			pd->pd_meta[pd->pd_subdisks - 1] = NULL;
			pd->pd_subdisks--;
		}
		/* If we are in the start process, that's all for now. */
		if (!pv->pv_started)
			goto nofit;
		/*
		 * If we have already started, try to make use of the disk.
		 * Try to replace OFFLINE disks first, then FAILED.
		 */
		promise_meta_unused_range(pd->pd_meta, pd->pd_subdisks,
		    disk->d_consumer->provider->mediasize /
		    disk->d_consumer->provider->sectorsize,
		    &eoff, &esize);
		if (esize == 0) {
			G_RAID_DEBUG1(1, sc, "No free space on disk %s",
			    g_raid_get_diskname(disk));
			goto nofit;
		}
		size = INT64_MAX;
		for (i = 0; i < vol->v_disks_count; i++) {
			sd = &vol->v_subdisks[i];
			if (sd->sd_state != G_RAID_SUBDISK_S_NONE)
				size = sd->sd_size;
			if (sd->sd_state <= G_RAID_SUBDISK_S_FAILED &&
			    (disk_pos < 0 ||
			     vol->v_subdisks[i].sd_state < sd->sd_state))
				disk_pos = i;
		}
		if (disk_pos >= 0 &&
		    vol->v_raid_level != G_RAID_VOLUME_RL_CONCAT &&
		    (off_t)esize * 512 < size) {
			G_RAID_DEBUG1(1, sc, "Disk %s free space "
			    "is too small (%ju < %ju)",
			    g_raid_get_diskname(disk),
			    (off_t)esize * 512, size);
			disk_pos = -1;
		}
		if (disk_pos >= 0) {
			if (vol->v_raid_level != G_RAID_VOLUME_RL_CONCAT)
				esize = size / 512;
			/* For RAID0+1 we need to translate order. */
			md_disk_pos = promise_meta_translate_disk(vol, disk_pos);
		} else {
nofit:
			if (pd->pd_subdisks == 0) {
				g_raid_change_disk_state(disk,
				    G_RAID_DISK_S_SPARE);
			}
			return (0);
		}
		G_RAID_DEBUG1(1, sc, "Disk %s takes pos %d in the volume %s",
		    g_raid_get_diskname(disk), disk_pos, vol->v_name);
		resurrection = 1;
	}

	sd = &vol->v_subdisks[disk_pos];

	if (resurrection && sd->sd_disk != NULL) {
		g_raid_change_disk_state(sd->sd_disk,
		    G_RAID_DISK_S_STALE_FAILED);
		TAILQ_REMOVE(&sd->sd_disk->d_subdisks,
		    sd, sd_next);
	}
	vol->v_subdisks[disk_pos].sd_disk = disk;
	TAILQ_INSERT_TAIL(&disk->d_subdisks, sd, sd_next);

	/* Welcome the new disk. */
	if (resurrection)
		g_raid_change_disk_state(disk, G_RAID_DISK_S_ACTIVE);
	else if (meta->disks[md_disk_pos].flags & PROMISE_F_DOWN)
		g_raid_change_disk_state(disk, G_RAID_DISK_S_FAILED);
	else
		g_raid_change_disk_state(disk, G_RAID_DISK_S_ACTIVE);

	if (resurrection) {
		sd->sd_offset = (off_t)eoff * 512;
		sd->sd_size = (off_t)esize * 512;
	} else {
		sd->sd_offset = (((off_t)pd->pd_meta[sdn]->disk_offset_high
		    << 32) + pd->pd_meta[sdn]->disk_offset) * 512;
		sd->sd_size = (((off_t)pd->pd_meta[sdn]->disk_sectors_high
		    << 32) + pd->pd_meta[sdn]->disk_sectors) * 512;
	}

	if (resurrection) {
		/* Stale disk, almost the same as new. */
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_NEW);
	} else if (meta->disks[md_disk_pos].flags & PROMISE_F_DOWN) {
		/* Failed disk. */
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_FAILED);
	} else if (meta->disks[md_disk_pos].flags & PROMISE_F_REDIR) {
		/* Rebuilding disk. */
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_REBUILD);
		if (pd->pd_meta[sdn]->generation != meta->generation)
			sd->sd_rebuild_pos = 0;
		else {
			sd->sd_rebuild_pos =
			    (((off_t)pd->pd_meta[sdn]->disk_rebuild_high << 32) +
			     pd->pd_meta[sdn]->disk_rebuild) * 512;
		}
	} else if (!(meta->disks[md_disk_pos].flags & PROMISE_F_ONLINE)) {
		/* Rebuilding disk. */
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_NEW);
	} else if (pd->pd_meta[sdn]->generation != meta->generation ||
	    (meta->status & PROMISE_S_MARKED)) {
		/* Stale disk or dirty volume (unclean shutdown). */
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_STALE);
	} else {
		/* Up to date disk. */
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_ACTIVE);
	}
	g_raid_event_send(sd, G_RAID_SUBDISK_E_NEW,
	    G_RAID_EVENT_SUBDISK);

	return (resurrection);
}

static void
g_raid_md_promise_refill(struct g_raid_softc *sc)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct g_raid_disk *disk;
	struct g_raid_md_object *md;
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_md_promise_pervolume *pv;
	int update, updated, i, bad;

	md = sc->sc_md;
restart:
	updated = 0;
	TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
		pv = vol->v_md_data;
		if (!pv->pv_started || vol->v_stopping)
			continue;

		/* Search for subdisk that needs replacement. */
		bad = 0;
		for (i = 0; i < vol->v_disks_count; i++) {
			sd = &vol->v_subdisks[i];
			if (sd->sd_state == G_RAID_SUBDISK_S_NONE ||
			    sd->sd_state == G_RAID_SUBDISK_S_FAILED)
				bad = 1;
		}
		if (!bad)
			continue;

		G_RAID_DEBUG1(1, sc, "Volume %s is not complete, "
		    "trying to refill.", vol->v_name);

		TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
			/* Skip failed. */
			if (disk->d_state < G_RAID_DISK_S_SPARE)
				continue;
			/* Skip already used by this volume. */
			for (i = 0; i < vol->v_disks_count; i++) {
				sd = &vol->v_subdisks[i];
				if (sd->sd_disk == disk)
					break;
			}
			if (i < vol->v_disks_count)
				continue;

			/* Try to use disk if it has empty extents. */
			pd = disk->d_md_data;
			if (pd->pd_subdisks < PROMISE_MAX_SUBDISKS) {
				update =
				    g_raid_md_promise_start_disk(disk, -1, vol);
			} else
				update = 0;
			if (update) {
				updated = 1;
				g_raid_md_write_promise(md, vol, NULL, disk);
				break;
			}
		}
	}
	if (updated)
		goto restart;
}

static void
g_raid_md_promise_start(struct g_raid_volume *vol)
{
	struct g_raid_softc *sc;
	struct g_raid_subdisk *sd;
	struct g_raid_disk *disk;
	struct g_raid_md_object *md;
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_md_promise_pervolume *pv;
	struct promise_raid_conf *meta;
	u_int i;

	sc = vol->v_softc;
	md = sc->sc_md;
	pv = vol->v_md_data;
	meta = pv->pv_meta;

	vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_NONE;
	if (meta->type == PROMISE_T_RAID0)
		vol->v_raid_level = G_RAID_VOLUME_RL_RAID0;
	else if (meta->type == PROMISE_T_RAID1) {
		if (meta->array_width == 1)
			vol->v_raid_level = G_RAID_VOLUME_RL_RAID1;
		else
			vol->v_raid_level = G_RAID_VOLUME_RL_RAID1E;
	} else if (meta->type == PROMISE_T_RAID3)
		vol->v_raid_level = G_RAID_VOLUME_RL_RAID3;
	else if (meta->type == PROMISE_T_RAID5) {
		vol->v_raid_level = G_RAID_VOLUME_RL_RAID5;
		vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_R5LA;
	} else if (meta->type == PROMISE_T_SPAN)
		vol->v_raid_level = G_RAID_VOLUME_RL_CONCAT;
	else if (meta->type == PROMISE_T_JBOD)
		vol->v_raid_level = G_RAID_VOLUME_RL_SINGLE;
	else
		vol->v_raid_level = G_RAID_VOLUME_RL_UNKNOWN;
	vol->v_strip_size = 512 << meta->stripe_shift; //ZZZ
	vol->v_disks_count = meta->total_disks;
	vol->v_mediasize = (off_t)meta->total_sectors * 512; //ZZZ
	if (meta->total_sectors_high < 256) /* If value looks sane. */
		vol->v_mediasize +=
		    ((off_t)meta->total_sectors_high << 32) * 512; //ZZZ
	vol->v_sectorsize = 512 * meta->sector_size;
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		sd->sd_offset = (((off_t)meta->disk_offset_high << 32) +
		    meta->disk_offset) * 512;
		sd->sd_size = (((off_t)meta->disk_sectors_high << 32) +
		    meta->disk_sectors) * 512;
	}
	g_raid_start_volume(vol);

	/* Make all disks found so far take their places. */
	TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
		pd = disk->d_md_data;
		for (i = 0; i < pd->pd_subdisks; i++) {
			if (pd->pd_meta[i]->volume_id == meta->volume_id)
				g_raid_md_promise_start_disk(disk, i, vol);
		}
	}

	pv->pv_started = 1;
	callout_stop(&pv->pv_start_co);
	G_RAID_DEBUG1(0, sc, "Volume started.");
	g_raid_md_write_promise(md, vol, NULL, NULL);

	/* Pick up any STALE/SPARE disks to refill array if needed. */
	g_raid_md_promise_refill(sc);

	g_raid_event_send(vol, G_RAID_VOLUME_E_START, G_RAID_EVENT_VOLUME);
}

static void
g_raid_promise_go(void *arg)
{
	struct g_raid_volume *vol;
	struct g_raid_softc *sc;
	struct g_raid_md_promise_pervolume *pv;

	vol = arg;
	pv = vol->v_md_data;
	sc = vol->v_softc;
	if (!pv->pv_started) {
		G_RAID_DEBUG1(0, sc, "Force volume start due to timeout.");
		g_raid_event_send(vol, G_RAID_VOLUME_E_STARTMD,
		    G_RAID_EVENT_VOLUME);
	}
}

static void
g_raid_md_promise_new_disk(struct g_raid_disk *disk)
{
	struct g_raid_softc *sc;
	struct g_raid_md_object *md;
	struct promise_raid_conf *pdmeta;
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_md_promise_pervolume *pv;
	struct g_raid_volume *vol;
	int i;
	char buf[33];

	sc = disk->d_softc;
	md = sc->sc_md;
	pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;

	if (pd->pd_subdisks == 0) {
		g_raid_change_disk_state(disk, G_RAID_DISK_S_SPARE);
		g_raid_md_promise_refill(sc);
		return;
	}

	for (i = 0; i < pd->pd_subdisks; i++) {
		pdmeta = pd->pd_meta[i];

		/* Look for volume with matching ID. */
		vol = g_raid_md_promise_get_volume(sc, pdmeta->volume_id);
		if (vol == NULL) {
			promise_meta_get_name(pdmeta, buf);
			vol = g_raid_create_volume(sc, buf, pdmeta->array_number);
			pv = malloc(sizeof(*pv), M_MD_PROMISE, M_WAITOK | M_ZERO);
			pv->pv_id = pdmeta->volume_id;
			vol->v_md_data = pv;
			callout_init(&pv->pv_start_co, 1);
			callout_reset(&pv->pv_start_co,
			    g_raid_start_timeout * hz,
			    g_raid_promise_go, vol);
		} else
			pv = vol->v_md_data;

		/* If we haven't started yet, check metadata freshness. */
		if (pv->pv_meta == NULL || !pv->pv_started) {
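			/*
			 * Serial-number arithmetic: casting the 16-bit
			 * generation difference to int16_t keeps the
			 * "newer" test correct across counter wraparound.
			 */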
			if (pv->pv_meta == NULL ||
			    ((int16_t)(pdmeta->generation - pv->pv_generation)) > 0) {
				G_RAID_DEBUG1(1, sc, "Newer disk");
				if (pv->pv_meta != NULL)
					free(pv->pv_meta, M_MD_PROMISE);
				pv->pv_meta = promise_meta_copy(pdmeta);
				pv->pv_generation = pv->pv_meta->generation;
				pv->pv_disks_present = 1;
			} else if (pdmeta->generation == pv->pv_generation) {
				pv->pv_disks_present++;
				G_RAID_DEBUG1(1, sc, "Matching disk (%d of %d up)",
				    pv->pv_disks_present,
				    pv->pv_meta->total_disks);
			} else {
				G_RAID_DEBUG1(1, sc, "Older disk");
			}
		}
	}

	for (i = 0; i < pd->pd_subdisks; i++) {
		pdmeta = pd->pd_meta[i];

		/* Look for volume with matching ID. */
		vol = g_raid_md_promise_get_volume(sc, pdmeta->volume_id);
		if (vol == NULL)
			continue;
		pv = vol->v_md_data;

		if (pv->pv_started) {
			if (g_raid_md_promise_start_disk(disk, i, vol))
				g_raid_md_write_promise(md, vol, NULL, NULL);
		} else {
			/* If we have collected all needed disks, start the array. */
			if (pv->pv_disks_present == pv->pv_meta->total_disks)
				g_raid_md_promise_start(vol);
		}
	}
}

static int
g_raid_md_create_promise(struct g_raid_md_object *md, struct g_class *mp,
    struct g_geom **gp)
{
	struct g_geom *geom;
	struct g_raid_softc *sc;

	/* Search for existing node. */
	LIST_FOREACH(geom, &mp->geom, geom) {
		sc = geom->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_stopping != 0)
			continue;
		if (sc->sc_md->mdo_class != md->mdo_class)
			continue;
		break;
	}
	if (geom != NULL) {
		*gp = geom;
		return (G_RAID_MD_TASTE_EXISTING);
	}

	/* Create new one if not found. */
	sc = g_raid_create_node(mp, "Promise", md);
	if (sc == NULL)
		return (G_RAID_MD_TASTE_FAIL);
	md->mdo_softc = sc;
	*gp = sc->sc_geom;
	return (G_RAID_MD_TASTE_NEW);
}

static int
g_raid_md_taste_promise(struct g_raid_md_object *md, struct g_class *mp,
                              struct g_consumer *cp, struct g_geom **gp)
{
	struct g_consumer *rcp;
	struct g_provider *pp;
	struct g_raid_softc *sc;
	struct g_raid_disk *disk;
	struct promise_raid_conf *meta, *metaarr[4];
	struct g_raid_md_promise_perdisk *pd;
	struct g_geom *geom;
	int i, j, result, len, subdisks;
	char name[16];
	uint16_t vendor;

	G_RAID_DEBUG(1, "Tasting Promise on %s", cp->provider->name);
	pp = cp->provider;

	/* Read metadata from device. */
	meta = NULL;
	g_topology_unlock();
	vendor = 0xffff;
	len = sizeof(vendor);
	if (pp->geom->rank == 1)
		g_io_getattr("GEOM::hba_vendor", cp, &len, &vendor);
	subdisks = promise_meta_read(cp, metaarr);
	g_topology_lock();
	if (subdisks == 0) {
		if (g_raid_aggressive_spare) {
			if (vendor == 0x105a || vendor == 0x1002) {
				G_RAID_DEBUG(1,
				    "No Promise metadata, forcing spare.");
				goto search;
			} else {
				G_RAID_DEBUG(1,
				    "Promise/ATI vendor mismatch "
				    "0x%04x != 0x105a/0x1002",
				    vendor);
			}
		}
		return (G_RAID_MD_TASTE_FAIL);
	}

	/* Metadata valid. Print it. */
	for (i = 0; i < subdisks; i++)
		g_raid_md_promise_print(metaarr[i]);

	/* Purge meaningless (empty/spare) records. */
	for (i = 0; i < subdisks; ) {
		if (metaarr[i]->disk.flags & PROMISE_F_ASSIGNED) {
			i++;
			continue;
		}
		free(metaarr[i], M_MD_PROMISE);
		for (j = i; j < subdisks - 1; j++)
			metaarr[j] = metaarr[j + 1];
		metaarr[subdisks - 1] = NULL;
		subdisks--;
	}

search:
	/* Search for matching node. */
	sc = NULL;
	LIST_FOREACH(geom, &mp->geom, geom) {
		sc = geom->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_stopping != 0)
			continue;
		if (sc->sc_md->mdo_class != md->mdo_class)
			continue;
		break;
	}

	/* Found matching node. */
	if (geom != NULL) {
		G_RAID_DEBUG(1, "Found matching array %s", sc->sc_name);
		result = G_RAID_MD_TASTE_EXISTING;

	} else { /* Not found matching node -- create one. */
		result = G_RAID_MD_TASTE_NEW;
		snprintf(name, sizeof(name), "Promise");
		sc = g_raid_create_node(mp, name, md);
		md->mdo_softc = sc;
		geom = sc->sc_geom;
	}

	/* There is no return after this point, so we close the passed consumer. */
	g_access(cp, -1, 0, 0);

	rcp = g_new_consumer(geom);
	rcp->flags |= G_CF_DIRECT_RECEIVE;
	g_attach(rcp, pp);
	if (g_access(rcp, 1, 1, 1) != 0)
		; //goto fail1;

	g_topology_unlock();
	sx_xlock(&sc->sc_lock);

	pd = malloc(sizeof(*pd), M_MD_PROMISE, M_WAITOK | M_ZERO);
	pd->pd_subdisks = subdisks;
	for (i = 0; i < subdisks; i++)
		pd->pd_meta[i] = metaarr[i];
	disk = g_raid_create_disk(sc);
	disk->d_md_data = (void *)pd;
	disk->d_consumer = rcp;
	rcp->private = disk;

	g_raid_get_disk_info(disk);

	g_raid_md_promise_new_disk(disk);

	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	*gp = geom;
	return (result);
}

static int
g_raid_md_event_promise(struct g_raid_md_object *md,
    struct g_raid_disk *disk, u_int event)
{
	struct g_raid_softc *sc;

	sc = md->mdo_softc;
	if (disk == NULL)
		return (-1);
	switch (event) {
	case G_RAID_DISK_E_DISCONNECTED:
		/* Delete disk. */
		g_raid_change_disk_state(disk, G_RAID_DISK_S_NONE);
		g_raid_destroy_disk(disk);
		g_raid_md_promise_purge_volumes(sc);

		/* Write updated metadata to all disks. */
		g_raid_md_write_promise(md, NULL, NULL, NULL);

		/* Check if anything left. */
		if (g_raid_ndisks(sc, -1) == 0)
			g_raid_destroy_node(sc, 0);
		else
			g_raid_md_promise_refill(sc);
		return (0);
	}
	return (-2);
}

static int
g_raid_md_volume_event_promise(struct g_raid_md_object *md,
    struct g_raid_volume *vol, u_int event)
{
	struct g_raid_md_promise_pervolume *pv;

	pv = (struct g_raid_md_promise_pervolume *)vol->v_md_data;
	switch (event) {
	case G_RAID_VOLUME_E_STARTMD:
		if (!pv->pv_started)
			g_raid_md_promise_start(vol);
		return (0);
	}
	return (-2);
}

static int
g_raid_md_ctl_promise(struct g_raid_md_object *md,
    struct gctl_req *req)
{
	struct g_raid_softc *sc;
	struct g_raid_volume *vol, *vol1;
	struct g_raid_subdisk *sd;
	struct g_raid_disk *disk, *disks[PROMISE_MAX_DISKS];
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_md_promise_pervolume *pv;
	struct g_consumer *cp;
	struct g_provider *pp;
	char arg[16];
	const char *nodename, *verb, *volname, *levelname, *diskname;
	char *tmp;
	int *nargs, *force;
	off_t esize, offs[PROMISE_MAX_DISKS], size, sectorsize, strip;
	intmax_t *sizearg, *striparg;
	int numdisks, i, len, level, qual;
	int error;

	sc = md->mdo_softc;
	verb = gctl_get_param(req, "verb", NULL);
	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	error = 0;
	if (strcmp(verb, "label") == 0) {

		if (*nargs < 4) {
			gctl_error(req, "Invalid number of arguments.");
			return (-1);
		}
		volname = gctl_get_asciiparam(req, "arg1");
		if (volname == NULL) {
			gctl_error(req, "No volume name.");
			return (-2);
		}
		levelname = gctl_get_asciiparam(req, "arg2");
		if (levelname == NULL) {
			gctl_error(req, "No RAID level.");
			return (-3);
		}
		if (strcasecmp(levelname, "RAID5") == 0)
			levelname = "RAID5-LA";
		if (g_raid_volume_str2level(levelname, &level, &qual)) {
			gctl_error(req, "Unknown RAID level '%s'.", levelname);
			return (-4);
		}
		numdisks = *nargs - 3;
		force = gctl_get_paraml(req, "force", sizeof(*force));
		if (!g_raid_md_promise_supported(level, qual, numdisks,
		    force ? *force : 0)) {
			gctl_error(req, "Unsupported RAID level "
			    "(0x%02x/0x%02x), or number of disks (%d).",
			    level, qual, numdisks);
			return (-5);
		}

		/* Search for disks, connect them and probe. */
		size = INT64_MAX;
		sectorsize = 0;
		bzero(disks, sizeof(disks));
		bzero(offs, sizeof(offs));
		for (i = 0; i < numdisks; i++) {
			snprintf(arg, sizeof(arg), "arg%d", i + 3);
			diskname = gctl_get_asciiparam(req, arg);
			if (diskname == NULL) {
				gctl_error(req, "No disk name (%s).", arg);
				error = -6;
				break;
			}
			if (strcmp(diskname, "NONE") == 0)
				continue;

			TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_consumer != NULL &&
				    disk->d_consumer->provider != NULL &&
				    strcmp(disk->d_consumer->provider->name,
				     diskname) == 0)
					break;
			}
			if (disk != NULL) {
				if (disk->d_state != G_RAID_DISK_S_ACTIVE) {
					gctl_error(req, "Disk '%s' is in a "
					    "wrong state (%s).", diskname,
					    g_raid_disk_state2str(disk->d_state));
					error = -7;
					break;
				}
				pd = disk->d_md_data;
				if (pd->pd_subdisks >= PROMISE_MAX_SUBDISKS) {
					gctl_error(req, "Disk '%s' already "
					    "used by %d volumes.",
					    diskname, pd->pd_subdisks);
					error = -7;
					break;
				}
				pp = disk->d_consumer->provider;
				disks[i] = disk;
				promise_meta_unused_range(pd->pd_meta,
				    pd->pd_subdisks,
				    pp->mediasize / pp->sectorsize,
				    &offs[i], &esize);
				size = MIN(size, (off_t)esize * pp->sectorsize);
				sectorsize = MAX(sectorsize, pp->sectorsize);
				continue;
			}

			g_topology_lock();
			cp = g_raid_open_consumer(sc, diskname);
			if (cp == NULL) {
				gctl_error(req, "Can't open disk '%s'.",
				    diskname);
				g_topology_unlock();
				error = -8;
				break;
			}
			pp = cp->provider;
			pd = malloc(sizeof(*pd), M_MD_PROMISE, M_WAITOK | M_ZERO);
			disk = g_raid_create_disk(sc);
			disk->d_md_data = (void *)pd;
			disk->d_consumer = cp;
			disks[i] = disk;
			cp->private = disk;
			g_topology_unlock();

			g_raid_get_disk_info(disk);

			/* Reserve some space for metadata. */
			size = MIN(size, pp->mediasize - 131072llu * pp->sectorsize);
			sectorsize = MAX(sectorsize, pp->sectorsize);
		}
		if (error != 0) {
			for (i = 0; i < numdisks; i++) {
				if (disks[i] != NULL &&
				    disks[i]->d_state == G_RAID_DISK_S_NONE)
					g_raid_destroy_disk(disks[i]);
			}
			return (error);
		}

		if (sectorsize <= 0) {
			gctl_error(req, "Can't get sector size.");
			return (-8);
		}

		/* Handle size argument. */
		len = sizeof(*sizearg);
		sizearg = gctl_get_param(req, "size", &len);
		if (sizearg != NULL && len == sizeof(*sizearg) &&
		    *sizearg > 0) {
			if (*sizearg > size) {
				gctl_error(req, "Size too big %lld > %lld.",
				    (long long)*sizearg, (long long)size);
				return (-9);
			}
			size = *sizearg;
		}

		/* Handle strip argument. */
		strip = 131072;
		len = sizeof(*striparg);
		striparg = gctl_get_param(req, "strip", &len);
		if (striparg != NULL && len == sizeof(*striparg) &&
		    *striparg > 0) {
			if (*striparg < sectorsize) {
				gctl_error(req, "Strip size too small.");
				return (-10);
			}
			if (*striparg % sectorsize != 0) {
				gctl_error(req, "Incorrect strip size.");
				return (-11);
			}
			strip = *striparg;
		}

		/* Round size down to strip or sector. */
		if (level == G_RAID_VOLUME_RL_RAID1 ||
		    level == G_RAID_VOLUME_RL_SINGLE ||
		    level == G_RAID_VOLUME_RL_CONCAT)
			size -= (size % sectorsize);
		else if (level == G_RAID_VOLUME_RL_RAID1E &&
		    (numdisks & 1) != 0)
			size -= (size % (2 * strip));
		else
			size -= (size % strip);
		if (size <= 0) {
			gctl_error(req, "Size too small.");
			return (-13);
		}

		/* We have all we need, create things: volume, ... */
		pv = malloc(sizeof(*pv), M_MD_PROMISE, M_WAITOK | M_ZERO);
		arc4rand(&pv->pv_id, sizeof(pv->pv_id), 0);
		pv->pv_generation = 0;
		pv->pv_started = 1;
		vol = g_raid_create_volume(sc, volname, -1);
		vol->v_md_data = pv;
		vol->v_raid_level = level;
		vol->v_raid_level_qualifier = qual;
		vol->v_strip_size = strip;
		vol->v_disks_count = numdisks;
		if (level == G_RAID_VOLUME_RL_RAID0 ||
		    level == G_RAID_VOLUME_RL_CONCAT ||
		    level == G_RAID_VOLUME_RL_SINGLE)
			vol->v_mediasize = size * numdisks;
		else if (level == G_RAID_VOLUME_RL_RAID1)
			vol->v_mediasize = size;
		else if (level == G_RAID_VOLUME_RL_RAID3 ||
		    level == G_RAID_VOLUME_RL_RAID5)
			vol->v_mediasize = size * (numdisks - 1);
		else { /* RAID1E */
			vol->v_mediasize = ((size * numdisks) / strip / 2) *
			    strip;
		}
		vol->v_sectorsize = sectorsize;
		g_raid_start_volume(vol);

		/* , and subdisks. */
		for (i = 0; i < numdisks; i++) {
			disk = disks[i];
			sd = &vol->v_subdisks[i];
			sd->sd_disk = disk;
			sd->sd_offset = (off_t)offs[i] * 512;
			sd->sd_size = size;
			if (disk == NULL)
				continue;
			TAILQ_INSERT_TAIL(&disk->d_subdisks, sd, sd_next);
			g_raid_change_disk_state(disk,
			    G_RAID_DISK_S_ACTIVE);
			g_raid_change_subdisk_state(sd,
			    G_RAID_SUBDISK_S_ACTIVE);
			g_raid_event_send(sd, G_RAID_SUBDISK_E_NEW,
			    G_RAID_EVENT_SUBDISK);
		}

		/* Write metadata based on created entities. */
		G_RAID_DEBUG1(0, sc, "Array started.");
		g_raid_md_write_promise(md, vol, NULL, NULL);

		/* Pick up any STALE/SPARE disks to refill array if needed. */
		g_raid_md_promise_refill(sc);

		g_raid_event_send(vol, G_RAID_VOLUME_E_START,
		    G_RAID_EVENT_VOLUME);
		return (0);
	}
	if (strcmp(verb, "add") == 0) {

		gctl_error(req, "`add` command is not applicable, "
		    "use `label` instead.");
		return (-99);
	}
	if (strcmp(verb, "delete") == 0) {

		nodename = gctl_get_asciiparam(req, "arg0");
		if (nodename != NULL && strcasecmp(sc->sc_name, nodename) != 0)
			nodename = NULL;

		/* Full node destruction. */
		if (*nargs == 1 && nodename != NULL) {
			/* Check if some volume is still open. */
			force = gctl_get_paraml(req, "force", sizeof(*force));
			if (force != NULL && *force == 0 &&
			    g_raid_nopens(sc) != 0) {
				gctl_error(req, "Some volume is still open.");
				return (-4);
			}

			TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_consumer)
					promise_meta_erase(disk->d_consumer);
			}
			g_raid_destroy_node(sc, 0);
			return (0);
		}

		/* Destroy specified volume. If it was the last one, destroy the node. */
		if (*nargs > 2) {
			gctl_error(req, "Invalid number of arguments.");
			return (-1);
		}
		volname = gctl_get_asciiparam(req,
		    nodename != NULL ? "arg1" : "arg0");
		if (volname == NULL) {
			gctl_error(req, "No volume name.");
			return (-2);
		}

		/* Search for volume. */
		TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
			if (strcmp(vol->v_name, volname) == 0)
				break;
			pp = vol->v_provider;
			if (pp == NULL)
				continue;
			if (strcmp(pp->name, volname) == 0)
				break;
			if (strncmp(pp->name, "raid/", 5) == 0 &&
			    strcmp(pp->name + 5, volname) == 0)
				break;
		}
		if (vol == NULL) {
			i = strtol(volname, &tmp, 10);
			if (verb != volname && tmp[0] == 0) {
				TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
					if (vol->v_global_id == i)
						break;
				}
			}
		}
		if (vol == NULL) {
			gctl_error(req, "Volume '%s' not found.", volname);
			return (-3);
		}

		/* Check if volume is still open. */
		force = gctl_get_paraml(req, "force", sizeof(*force));
		if (force != NULL && *force == 0 &&
		    vol->v_provider_open != 0) {
			gctl_error(req, "Volume is still open.");
			return (-4);
		}

		/* Destroy volume and potentially node. */
		i = 0;
		TAILQ_FOREACH(vol1, &sc->sc_volumes, v_next)
			i++;
		if (i >= 2) {
			g_raid_destroy_volume(vol);
			g_raid_md_promise_purge_disks(sc);
			g_raid_md_write_promise(md, NULL, NULL, NULL);
		} else {
			TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_consumer)
					promise_meta_erase(disk->d_consumer);
			}
			g_raid_destroy_node(sc, 0);
		}
		return (0);
	}
	if (strcmp(verb, "remove") == 0 ||
	    strcmp(verb, "fail") == 0) {
		if (*nargs < 2) {
			gctl_error(req, "Invalid number of arguments.");
			return (-1);
		}
		for (i = 1; i < *nargs; i++) {
			snprintf(arg, sizeof(arg), "arg%d", i);
			diskname = gctl_get_asciiparam(req, arg);
			if (diskname == NULL) {
				gctl_error(req, "No disk name (%s).", arg);
				error = -2;
				break;
			}
			if (strncmp(diskname, "/dev/", 5) == 0)
				diskname += 5;

			TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_consumer != NULL &&
				    disk->d_consumer->provider != NULL &&
				    strcmp(disk->d_consumer->provider->name,
				     diskname) == 0)
					break;
			}
			if (disk == NULL) {
				gctl_error(req, "Disk '%s' not found.",
				    diskname);
				error = -3;
				break;
			}

			if (strcmp(verb, "fail") == 0) {
				g_raid_md_fail_disk_promise(md, NULL, disk);
				continue;
			}

			/* Erase metadata on the deleted disk and destroy it. */
			promise_meta_erase(disk->d_consumer);
			g_raid_destroy_disk(disk);
		}
		g_raid_md_promise_purge_volumes(sc);

		/* Write updated metadata to remaining disks. */
		g_raid_md_write_promise(md, NULL, NULL, NULL);

		/* Check if anything left. */
		if (g_raid_ndisks(sc, -1) == 0)
			g_raid_destroy_node(sc, 0);
		else
			g_raid_md_promise_refill(sc);
		return (error);
	}
	if (strcmp(verb, "insert") == 0) {
		if (*nargs < 2) {
			gctl_error(req, "Invalid number of arguments.");
			return (-1);
		}
		for (i = 1; i < *nargs; i++) {
			/* Get disk name. */
			snprintf(arg, sizeof(arg), "arg%d", i);
			diskname = gctl_get_asciiparam(req, arg);
			if (diskname == NULL) {
				gctl_error(req, "No disk name (%s).", arg);
				error = -3;
				break;
			}

			/* Try to find provider with specified name. */
			g_topology_lock();
			cp = g_raid_open_consumer(sc, diskname);
			if (cp == NULL) {
				gctl_error(req, "Can't open disk '%s'.",
				    diskname);
				g_topology_unlock();
				error = -4;
				break;
			}
			pp = cp->provider;
			g_topology_unlock();

			pd = malloc(sizeof(*pd), M_MD_PROMISE, M_WAITOK | M_ZERO);

			disk = g_raid_create_disk(sc);
			disk->d_consumer = cp;
			disk->d_md_data = (void *)pd;
			cp->private = disk;

			g_raid_get_disk_info(disk);

			/* Welcome the "new" disk. */
			g_raid_change_disk_state(disk, G_RAID_DISK_S_SPARE);
			promise_meta_write_spare(cp);
			g_raid_md_promise_refill(sc);
		}
		return (error);
	}
	return (-100);
}

static int
g_raid_md_write_promise(struct g_raid_md_object *md, struct g_raid_volume *tvol,
    struct g_raid_subdisk *tsd, struct g_raid_disk *tdisk)
{
	struct g_raid_softc *sc;
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct g_raid_disk *disk;
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_md_promise_pervolume *pv;
	struct promise_raid_conf *meta;
	off_t rebuild_lba64;
	int i, j, pos, rebuild;

	sc = md->mdo_softc;

	if (sc->sc_stopping == G_RAID_DESTROY_HARD)
		return (0);

	/* Generate new per-volume metadata for affected volumes. */
	TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
		if (vol->v_stopping)
			continue;

		/* Skip volumes not related to specified targets. */
		if (tvol != NULL && vol != tvol)
			continue;
		if (tsd != NULL && vol != tsd->sd_volume)
			continue;
		if (tdisk != NULL) {
			for (i = 0; i < vol->v_disks_count; i++) {
				if (vol->v_subdisks[i].sd_disk == tdisk)
					break;
			}
			if (i >= vol->v_disks_count)
				continue;
		}

		pv = (struct g_raid_md_promise_pervolume *)vol->v_md_data;
		pv->pv_generation++;

		meta = malloc(sizeof(*meta), M_MD_PROMISE, M_WAITOK | M_ZERO);
		if (pv->pv_meta != NULL)
			memcpy(meta, pv->pv_meta, sizeof(*meta));
		memcpy(meta->promise_id, PROMISE_MAGIC,
		    sizeof(PROMISE_MAGIC) - 1);
		meta->dummy_0 = 0x00020000;
		meta->integrity = PROMISE_I_VALID;

		meta->generation = pv->pv_generation;
		meta->status = PROMISE_S_VALID | PROMISE_S_ONLINE |
		    PROMISE_S_INITED | PROMISE_S_READY;
		if (vol->v_state <= G_RAID_VOLUME_S_DEGRADED)
			meta->status |= PROMISE_S_DEGRADED;
		if (vol->v_dirty)
			meta->status |= PROMISE_S_MARKED; /* XXX: INVENTED! */
		if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID0 ||
		    vol->v_raid_level == G_RAID_VOLUME_RL_SINGLE)
			meta->type = PROMISE_T_RAID0;
		else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 ||
		    vol->v_raid_level == G_RAID_VOLUME_RL_RAID1E)
			meta->type = PROMISE_T_RAID1;
		else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID3)
			meta->type = PROMISE_T_RAID3;
		else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID5)
			meta->type = PROMISE_T_RAID5;
		else if (vol->v_raid_level == G_RAID_VOLUME_RL_CONCAT)
			meta->type = PROMISE_T_SPAN;
		else
			meta->type = PROMISE_T_JBOD;
		meta->total_disks = vol->v_disks_count;
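		/*
		 * Inverse of the "512 << stripe_shift" decoding above:
		 * e.g. a 128kB strip gives ffs(131072 / 1024) = 8.
		 */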
		meta->stripe_shift = ffs(vol->v_strip_size / 1024);
		meta->array_width = vol->v_disks_count;
		if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 ||
		    vol->v_raid_level == G_RAID_VOLUME_RL_RAID1E)
			meta->array_width /= 2;
		meta->array_number = vol->v_global_id;
		meta->total_sectors = vol->v_mediasize / 512;
		meta->total_sectors_high = (vol->v_mediasize / 512) >> 32;
		meta->sector_size = vol->v_sectorsize / 512;
		meta->cylinders = meta->total_sectors / (255 * 63) - 1;
		meta->heads = 254;
		meta->sectors = 63;
		meta->volume_id = pv->pv_id;
		rebuild_lba64 = UINT64_MAX;
		rebuild = 0;
		for (i = 0; i < vol->v_disks_count; i++) {
			sd = &vol->v_subdisks[i];
			/* For RAID0+1 we need to translate order. */
			pos = promise_meta_translate_disk(vol, i);
			meta->disks[pos].flags = PROMISE_F_VALID |
			    PROMISE_F_ASSIGNED;
			if (sd->sd_state == G_RAID_SUBDISK_S_NONE) {
				meta->disks[pos].flags |= 0;
			} else if (sd->sd_state == G_RAID_SUBDISK_S_FAILED) {
				meta->disks[pos].flags |=
				    PROMISE_F_DOWN | PROMISE_F_REDIR;
			} else if (sd->sd_state <= G_RAID_SUBDISK_S_REBUILD) {
				meta->disks[pos].flags |=
				    PROMISE_F_ONLINE | PROMISE_F_REDIR;
				if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD) {
					rebuild_lba64 = MIN(rebuild_lba64,
					    sd->sd_rebuild_pos / 512);
				} else
					rebuild_lba64 = 0;
				rebuild = 1;
			} else {
				meta->disks[pos].flags |= PROMISE_F_ONLINE;
				if (sd->sd_state < G_RAID_SUBDISK_S_ACTIVE) {
					meta->status |= PROMISE_S_MARKED;
					if (sd->sd_state == G_RAID_SUBDISK_S_RESYNC) {
						rebuild_lba64 = MIN(rebuild_lba64,
						    sd->sd_rebuild_pos / 512);
					} else
						rebuild_lba64 = 0;
				}
			}
			if (pv->pv_meta != NULL) {
				meta->disks[pos].id = pv->pv_meta->disks[pos].id;
			} else {
				meta->disks[pos].number = i * 2;
				arc4rand(&meta->disks[pos].id,
				    sizeof(meta->disks[pos].id), 0);
			}
		}
		promise_meta_put_name(meta, vol->v_name);

		/* Try to mimic AMD BIOS rebuild/resync behavior. */
		if (rebuild_lba64 != UINT64_MAX) {
			if (rebuild)
				meta->magic_3 = 0x03040010UL; /* Rebuild? */
			else
				meta->magic_3 = 0x03040008UL; /* Resync? */
			/* Translate from per-disk to per-volume LBA. */
			if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 ||
			    vol->v_raid_level == G_RAID_VOLUME_RL_RAID1E) {
				rebuild_lba64 *= meta->array_width;
			} else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID3 ||
			    vol->v_raid_level == G_RAID_VOLUME_RL_RAID5) {
				rebuild_lba64 *= meta->array_width - 1;
			} else
				rebuild_lba64 = 0;
		} else
			meta->magic_3 = 0x03000000UL;
		meta->rebuild_lba64 = rebuild_lba64;
		meta->magic_4 = 0x04010101UL;

		/* Replace per-volume metadata with new. */
		if (pv->pv_meta != NULL)
			free(pv->pv_meta, M_MD_PROMISE);
		pv->pv_meta = meta;

		/* Copy new metadata to the disks, adding or replacing old. */
		for (i = 0; i < vol->v_disks_count; i++) {
			sd = &vol->v_subdisks[i];
			disk = sd->sd_disk;
			if (disk == NULL)
				continue;
			/* For RAID0+1 we need to translate order. */
			pos = promise_meta_translate_disk(vol, i);
			pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;
			for (j = 0; j < pd->pd_subdisks; j++) {
				if (pd->pd_meta[j]->volume_id == meta->volume_id)
					break;
			}
			if (j == pd->pd_subdisks)
				pd->pd_subdisks++;
			if (pd->pd_meta[j] != NULL)
				free(pd->pd_meta[j], M_MD_PROMISE);
			pd->pd_meta[j] = promise_meta_copy(meta);
			pd->pd_meta[j]->disk = meta->disks[pos];
			pd->pd_meta[j]->disk.number = pos;
			pd->pd_meta[j]->disk_offset_high =
			    (sd->sd_offset / 512) >> 32;
			pd->pd_meta[j]->disk_offset = sd->sd_offset / 512;
			pd->pd_meta[j]->disk_sectors_high =
			    (sd->sd_size / 512) >> 32;
			pd->pd_meta[j]->disk_sectors = sd->sd_size / 512;
			if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD) {
				pd->pd_meta[j]->disk_rebuild_high =
				    (sd->sd_rebuild_pos / 512) >> 32;
				pd->pd_meta[j]->disk_rebuild =
				    sd->sd_rebuild_pos / 512;
			} else if (sd->sd_state < G_RAID_SUBDISK_S_REBUILD) {
				pd->pd_meta[j]->disk_rebuild_high = 0;
				pd->pd_meta[j]->disk_rebuild = 0;
			} else {
				pd->pd_meta[j]->disk_rebuild_high = UINT32_MAX;
				pd->pd_meta[j]->disk_rebuild = UINT32_MAX;
			}
			pd->pd_updated = 1;
		}
	}

	TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
		pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;
		if (disk->d_state != G_RAID_DISK_S_ACTIVE)
			continue;
		if (!pd->pd_updated)
			continue;
		G_RAID_DEBUG(1, "Writing Promise metadata to %s",
		    g_raid_get_diskname(disk));
		for (i = 0; i < pd->pd_subdisks; i++)
			g_raid_md_promise_print(pd->pd_meta[i]);
		promise_meta_write(disk->d_consumer,
		    pd->pd_meta, pd->pd_subdisks);
		pd->pd_updated = 0;
	}

	return (0);
}

static int
g_raid_md_fail_disk_promise(struct g_raid_md_object *md,
    struct g_raid_subdisk *tsd, struct g_raid_disk *tdisk)
{
	struct g_raid_softc *sc;
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_subdisk *sd;
	int i, pos;

	sc = md->mdo_softc;
	pd = (struct g_raid_md_promise_perdisk *)tdisk->d_md_data;

	/* We can't fail a disk that is not a part of the array now. */
	if (tdisk->d_state != G_RAID_DISK_S_ACTIVE)
		return (-1);

	/*
	 * Mark disk as failed in metadata and try to write that metadata
	 * to the disk itself to prevent its later resurrection as STALE.
	 */
	if (pd->pd_subdisks > 0 && tdisk->d_consumer != NULL)
		G_RAID_DEBUG(1, "Writing Promise metadata to %s",
		    g_raid_get_diskname(tdisk));
	for (i = 0; i < pd->pd_subdisks; i++) {
		pd->pd_meta[i]->disk.flags |=
		    PROMISE_F_DOWN | PROMISE_F_REDIR;
		pos = pd->pd_meta[i]->disk.number;
		if (pos >= 0 && pos < PROMISE_MAX_DISKS) {
			pd->pd_meta[i]->disks[pos].flags |=
			    PROMISE_F_DOWN | PROMISE_F_REDIR;
		}
		g_raid_md_promise_print(pd->pd_meta[i]);
	}
	if (tdisk->d_consumer != NULL)
		promise_meta_write(tdisk->d_consumer,
		    pd->pd_meta, pd->pd_subdisks);

	/* Change states. */
	g_raid_change_disk_state(tdisk, G_RAID_DISK_S_FAILED);
	TAILQ_FOREACH(sd, &tdisk->d_subdisks, sd_next) {
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_FAILED);
		g_raid_event_send(sd, G_RAID_SUBDISK_E_FAILED,
		    G_RAID_EVENT_SUBDISK);
	}

	/* Write updated metadata to remaining disks. */
	g_raid_md_write_promise(md, NULL, NULL, tdisk);

	g_raid_md_promise_refill(sc);
	return (0);
}

static int
g_raid_md_free_disk_promise(struct g_raid_md_object *md,
    struct g_raid_disk *disk)
{
	struct g_raid_md_promise_perdisk *pd;
	int i;

	pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;
	for (i = 0; i < pd->pd_subdisks; i++) {
		if (pd->pd_meta[i] != NULL) {
			free(pd->pd_meta[i], M_MD_PROMISE);
			pd->pd_meta[i] = NULL;
		}
	}
	free(pd, M_MD_PROMISE);
	disk->d_md_data = NULL;
	return (0);
}

static int
g_raid_md_free_volume_promise(struct g_raid_md_object *md,
    struct g_raid_volume *vol)
{
	struct g_raid_md_promise_pervolume *pv;

	pv = (struct g_raid_md_promise_pervolume *)vol->v_md_data;
	if (pv && pv->pv_meta != NULL) {
		free(pv->pv_meta, M_MD_PROMISE);
		pv->pv_meta = NULL;
	}
	if (pv && !pv->pv_started) {
		pv->pv_started = 1;
		callout_stop(&pv->pv_start_co);
	}
	free(pv, M_MD_PROMISE);
	vol->v_md_data = NULL;
	return (0);
}

static int
g_raid_md_free_promise(struct g_raid_md_object *md)
{

	return (0);
}

G_RAID_MD_DECLARE(promise, "Promise");