/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_G_RAID_H_
#define	_G_RAID_H_

#include <sys/param.h>
#include <sys/kobj.h>
#include <sys/bio.h>
#include <sys/time.h>
#ifdef _KERNEL
#include <sys/sysctl.h>
#endif

#define	G_RAID_CLASS_NAME	"RAID"

#define	G_RAID_MAGIC		"GEOM::RAID"

#define	G_RAID_VERSION		0

struct g_raid_md_object;
struct g_raid_tr_object;

#define	G_RAID_DEVICE_FLAG_NOAUTOSYNC	0x0000000000000001ULL
#define	G_RAID_DEVICE_FLAG_NOFAILSYNC	0x0000000000000002ULL
#define	G_RAID_DEVICE_FLAG_MASK	(G_RAID_DEVICE_FLAG_NOAUTOSYNC | \
					 G_RAID_DEVICE_FLAG_NOFAILSYNC)

#ifdef _KERNEL
extern u_int g_raid_aggressive_spare;
extern u_int g_raid_debug;
extern int g_raid_enable;
extern int g_raid_read_err_thresh;
extern u_int g_raid_start_timeout;
extern struct g_class g_raid_class;

#define	G_RAID_DEBUG(lvl, fmt, ...)	do {				\
	if (g_raid_debug >= (lvl)) {					\
		if (g_raid_debug > 0) {					\
			printf("GEOM_RAID[%u]: " fmt "\n",		\
			    lvl, ## __VA_ARGS__);			\
		} else {						\
			printf("GEOM_RAID: " fmt "\n",			\
			    ## __VA_ARGS__);				\
		}							\
	}								\
} while (0)
#define	G_RAID_DEBUG1(lvl, sc, fmt, ...)	do {			\
	if (g_raid_debug >= (lvl)) {					\
		if (g_raid_debug > 0) {					\
			printf("GEOM_RAID[%u]: %s: " fmt "\n",		\
			    lvl, (sc)->sc_name, ## __VA_ARGS__);	\
		} else {						\
			printf("GEOM_RAID: %s: " fmt "\n",		\
			    (sc)->sc_name, ## __VA_ARGS__);		\
		}							\
	}								\
} while (0)
#define	G_RAID_LOGREQ(lvl, bp, fmt, ...)	do {			\
	if (g_raid_debug >= (lvl)) {					\
		if (g_raid_debug > 0) {					\
			printf("GEOM_RAID[%u]: " fmt " ",		\
			    lvl, ## __VA_ARGS__);			\
		} else							\
			printf("GEOM_RAID: " fmt " ", ## __VA_ARGS__);	\
		g_print_bio(bp);					\
		printf("\n");						\
	}								\
} while (0)

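/*
 * Illustrative examples (not part of the original header) of how the debug
 * macros above are meant to be used; "sc", "disk", "pp" and the message
 * text are hypothetical:
 *
 *	G_RAID_DEBUG(1, "Tasting provider %s.", pp->name);
 *	G_RAID_DEBUG1(2, sc, "Disk %s state changed.",
 *	    g_raid_get_diskname(disk));
 *	G_RAID_LOGREQ(3, bp, "Request failed (error=%d).", bp->bio_error);
 *
 * With g_raid_debug > 0 the level is printed in brackets; with g_raid_debug
 * set to 0 only level-0 messages are printed, without the bracket.
 */
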
/*
 * Flags we use to distinguish I/O initiated by the TR layer to maintain
 * the volume's characteristics, fix subdisks, extra copies of data, etc.
 *
 * G_RAID_BIO_FLAG_SYNC		I/O to update an extra copy of the data
 *				for RAID volumes that maintain extra data
 *				and need to rebuild that data.
 * G_RAID_BIO_FLAG_REMAP	I/O done to try to provoke a subdisk into
 *				doing some desirable action such as bad
 *				block remapping after we detect a bad part
 *				of the disk.
 * G_RAID_BIO_FLAG_LOCKED	I/O holds a range lock that should be released.
 *
 * and the following meta item:
 * G_RAID_BIO_FLAG_SPECIAL	Any of the I/O flags that need to make it
 *				through the range locking which would
 *				otherwise defer the I/O until after that
 *				range is unlocked.
 */
#define	G_RAID_BIO_FLAG_SYNC		0x01
#define	G_RAID_BIO_FLAG_REMAP		0x02
#define	G_RAID_BIO_FLAG_SPECIAL \
		(G_RAID_BIO_FLAG_SYNC|G_RAID_BIO_FLAG_REMAP)
#define	G_RAID_BIO_FLAG_LOCKED		0x80
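
/*
 * Illustrative sketch (an assumption, not defined in this header): the flags
 * above are expected to be carried per-request on the struct bio (e.g. in
 * bp->bio_cflags), so that range locking can let TR-initiated "special" I/O
 * pass through while ordinary writes into a locked range are deferred.
 * in_locked_range() is a hypothetical helper checking bp against v_locks.
 *
 *	if ((bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL) == 0 &&
 *	    in_locked_range(vol, bp)) {
 *		bioq_insert_tail(&vol->v_locked, bp);
 *		return;
 *	}
 */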

struct g_raid_lock {
	off_t			 l_offset;
	off_t			 l_length;
	void			*l_callback_arg;
	int			 l_pending;
	LIST_ENTRY(g_raid_lock)	 l_next;
};

#define	G_RAID_EVENT_WAIT	0x01
#define	G_RAID_EVENT_VOLUME	0x02
#define	G_RAID_EVENT_SUBDISK	0x04
#define	G_RAID_EVENT_DISK	0x08
#define	G_RAID_EVENT_DONE	0x10
struct g_raid_event {
	void			*e_tgt;
	int			 e_event;
	int			 e_flags;
	int			 e_error;
	TAILQ_ENTRY(g_raid_event) e_next;
};
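
/*
 * Example (a sketch based only on the declarations in this header): the
 * event flags select the target type and whether the caller waits for the
 * worker thread to process the event, e.g. reporting a failed subdisk:
 *
 *	g_raid_event_send(sd, G_RAID_SUBDISK_E_FAILED, G_RAID_EVENT_SUBDISK);
 *
 * Passing G_RAID_EVENT_WAIT additionally asks g_raid_event_send() to wait
 * for the result of processing (see G_RAID_EVENT_DONE above).
 */
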
#define G_RAID_DISK_S_NONE		0x00	/* State is unknown. */
#define G_RAID_DISK_S_OFFLINE		0x01	/* Missing disk placeholder. */
#define G_RAID_DISK_S_DISABLED		0x02	/* Disabled. */
#define G_RAID_DISK_S_FAILED		0x03	/* Failed. */
#define G_RAID_DISK_S_STALE_FAILED	0x04	/* Old failed. */
#define G_RAID_DISK_S_SPARE		0x05	/* Hot-spare. */
#define G_RAID_DISK_S_STALE		0x06	/* Old disk, unused now. */
#define G_RAID_DISK_S_ACTIVE		0x07	/* Operational. */

#define G_RAID_DISK_E_DISCONNECTED	0x01

struct g_raid_disk {
	struct g_raid_softc	*d_softc;	/* Back-pointer to softc. */
	struct g_consumer	*d_consumer;	/* GEOM disk consumer. */
	void			*d_md_data;	/* Disk's metadata storage. */
	struct g_kerneldump	 d_kd;		/* Kernel dumping method/args. */
	int			 d_candelete;	/* BIO_DELETE supported. */
	uint64_t		 d_flags;	/* Additional flags. */
	u_int			 d_state;	/* Disk state. */
	u_int			 d_load;	/* Disk average load. */
	off_t			 d_last_offset;	/* Last head offset. */
	int			 d_read_errs;	/* Count of the read errors. */
	TAILQ_HEAD(, g_raid_subdisk)	 d_subdisks; /* List of subdisks. */
	TAILQ_ENTRY(g_raid_disk)	 d_next;	/* Next disk in the node. */
};

#define G_RAID_SUBDISK_S_NONE		0x00	/* Absent. */
#define G_RAID_SUBDISK_S_FAILED		0x01	/* Failed. */
#define G_RAID_SUBDISK_S_NEW		0x02	/* Blank. */
#define G_RAID_SUBDISK_S_REBUILD	0x03	/* Blank + rebuild. */
#define G_RAID_SUBDISK_S_UNINITIALIZED	0x04	/* Disk of the new volume. */
#define G_RAID_SUBDISK_S_STALE		0x05	/* Dirty. */
#define G_RAID_SUBDISK_S_RESYNC		0x06	/* Dirty + check/repair. */
#define G_RAID_SUBDISK_S_ACTIVE		0x07	/* Usable. */

#define G_RAID_SUBDISK_E_NEW		0x01	/* A new subdisk has arrived. */
#define G_RAID_SUBDISK_E_FAILED		0x02	/* A subdisk failed, but remains in volume. */
#define G_RAID_SUBDISK_E_DISCONNECTED	0x03	/* A subdisk removed from volume. */
#define G_RAID_SUBDISK_E_FIRST_TR_PRIVATE 0x80	/* Transformation module private events. */

#define G_RAID_SUBDISK_POS(sd)						\
    ((sd)->sd_disk ? ((sd)->sd_disk->d_last_offset - (sd)->sd_offset) : 0)
#define G_RAID_SUBDISK_TRACK_SIZE	(1 * 1024 * 1024)
#define G_RAID_SUBDISK_LOAD(sd)						\
    ((sd)->sd_disk ? ((sd)->sd_disk->d_load) : 0)
#define G_RAID_SUBDISK_LOAD_SCALE	256

struct g_raid_subdisk {
	struct g_raid_softc	*sd_softc;	/* Back-pointer to softc. */
	struct g_raid_disk	*sd_disk;	/* Where this subdisk lives. */
	struct g_raid_volume	*sd_volume;	/* Volume this subdisk is a part of. */
	off_t			 sd_offset;	/* Offset on the disk. */
	off_t			 sd_size;	/* Size on the disk. */
	u_int			 sd_pos;	/* Position in volume. */
	u_int			 sd_state;	/* Subdisk state. */
	off_t			 sd_rebuild_pos; /* Rebuild position. */
	int			 sd_recovery;	/* Count of recovery reqs. */
	TAILQ_ENTRY(g_raid_subdisk)	 sd_next; /* Next subdisk on disk. */
};
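
/*
 * Illustrative sketch (an assumption about how the macros above may be used,
 * not code taken from this header): a transformation module balancing reads
 * could prefer the ACTIVE subdisk with the lowest current load:
 *
 *	best = NULL;
 *	for (i = 0; i < vol->v_disks_count; i++) {
 *		sd = &vol->v_subdisks[i];
 *		if (sd->sd_state != G_RAID_SUBDISK_S_ACTIVE)
 *			continue;
 *		if (best == NULL ||
 *		    G_RAID_SUBDISK_LOAD(sd) < G_RAID_SUBDISK_LOAD(best))
 *			best = sd;
 *	}
 */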

#define G_RAID_MAX_SUBDISKS	16
#define G_RAID_MAX_VOLUMENAME	32

#define G_RAID_VOLUME_S_STARTING	0x00
#define G_RAID_VOLUME_S_BROKEN		0x01
#define G_RAID_VOLUME_S_DEGRADED	0x02
#define G_RAID_VOLUME_S_SUBOPTIMAL	0x03
#define G_RAID_VOLUME_S_OPTIMAL		0x04
#define G_RAID_VOLUME_S_UNSUPPORTED	0x05
#define G_RAID_VOLUME_S_STOPPED		0x06

#define G_RAID_VOLUME_S_ALIVE(s)			\
    ((s) == G_RAID_VOLUME_S_DEGRADED ||			\
     (s) == G_RAID_VOLUME_S_SUBOPTIMAL ||		\
     (s) == G_RAID_VOLUME_S_OPTIMAL)

#define G_RAID_VOLUME_E_DOWN		0x00
#define G_RAID_VOLUME_E_UP		0x01
#define G_RAID_VOLUME_E_START		0x10
#define G_RAID_VOLUME_E_STARTMD		0x11

#define G_RAID_VOLUME_RL_RAID0		0x00
#define G_RAID_VOLUME_RL_RAID1		0x01
#define G_RAID_VOLUME_RL_RAID3		0x03
#define G_RAID_VOLUME_RL_RAID4		0x04
#define G_RAID_VOLUME_RL_RAID5		0x05
#define G_RAID_VOLUME_RL_RAID6		0x06
#define G_RAID_VOLUME_RL_RAIDMDF	0x07
#define G_RAID_VOLUME_RL_RAID1E		0x11
#define G_RAID_VOLUME_RL_SINGLE		0x0f
#define G_RAID_VOLUME_RL_CONCAT		0x1f
#define G_RAID_VOLUME_RL_RAID5E		0x15
#define G_RAID_VOLUME_RL_RAID5EE	0x25
#define G_RAID_VOLUME_RL_RAID5R		0x35
#define G_RAID_VOLUME_RL_UNKNOWN	0xff

#define G_RAID_VOLUME_RLQ_NONE		0x00
#define G_RAID_VOLUME_RLQ_R1SM		0x00
#define G_RAID_VOLUME_RLQ_R1MM		0x01
#define G_RAID_VOLUME_RLQ_R3P0		0x00
#define G_RAID_VOLUME_RLQ_R3PN		0x01
#define G_RAID_VOLUME_RLQ_R4P0		0x00
#define G_RAID_VOLUME_RLQ_R4PN		0x01
#define G_RAID_VOLUME_RLQ_R5RA		0x00
#define G_RAID_VOLUME_RLQ_R5RS		0x01
#define G_RAID_VOLUME_RLQ_R5LA		0x02
#define G_RAID_VOLUME_RLQ_R5LS		0x03
#define G_RAID_VOLUME_RLQ_R6RA		0x00
#define G_RAID_VOLUME_RLQ_R6RS		0x01
#define G_RAID_VOLUME_RLQ_R6LA		0x02
#define G_RAID_VOLUME_RLQ_R6LS		0x03
#define G_RAID_VOLUME_RLQ_RMDFRA	0x00
#define G_RAID_VOLUME_RLQ_RMDFRS	0x01
#define G_RAID_VOLUME_RLQ_RMDFLA	0x02
#define G_RAID_VOLUME_RLQ_RMDFLS	0x03
#define G_RAID_VOLUME_RLQ_R1EA		0x00
#define G_RAID_VOLUME_RLQ_R1EO		0x01
#define G_RAID_VOLUME_RLQ_R5ERA		0x00
#define G_RAID_VOLUME_RLQ_R5ERS		0x01
#define G_RAID_VOLUME_RLQ_R5ELA		0x02
#define G_RAID_VOLUME_RLQ_R5ELS		0x03
#define G_RAID_VOLUME_RLQ_R5EERA	0x00
#define G_RAID_VOLUME_RLQ_R5EERS	0x01
#define G_RAID_VOLUME_RLQ_R5EELA	0x02
#define G_RAID_VOLUME_RLQ_R5EELS	0x03
#define G_RAID_VOLUME_RLQ_R5RRA		0x00
#define G_RAID_VOLUME_RLQ_R5RRS		0x01
#define G_RAID_VOLUME_RLQ_R5RLA		0x02
#define G_RAID_VOLUME_RLQ_R5RLS		0x03
#define G_RAID_VOLUME_RLQ_UNKNOWN	0xff

struct g_raid_volume;

struct g_raid_volume {
	struct g_raid_softc	*v_softc;	/* Back-pointer to softc. */
	struct g_provider	*v_provider;	/* GEOM provider. */
	struct g_raid_subdisk	 v_subdisks[G_RAID_MAX_SUBDISKS];
						/* Subdisks of this volume. */
	void			*v_md_data;	/* Volume's metadata storage. */
	struct g_raid_tr_object	*v_tr;		/* Transformation object. */
	char			 v_name[G_RAID_MAX_VOLUMENAME];
						/* Volume name. */
	u_int			 v_state;	/* Volume state. */
	u_int			 v_raid_level;	/* Array RAID level. */
	u_int			 v_raid_level_qualifier; /* RAID level qualifier. */
	u_int			 v_disks_count;	/* Number of disks in array. */
	u_int			 v_mdf_pdisks;	/* Number of parity disks
						   in RAIDMDF array. */
	uint16_t		 v_mdf_polynomial; /* Polynomial for RAIDMDF. */
	uint8_t			 v_mdf_method;	/* Generation method for RAIDMDF. */
	u_int			 v_strip_size;	/* Array strip size. */
	u_int			 v_rotate_parity; /* Rotate RAID5R parity
						   after number of stripes. */
	u_int			 v_sectorsize;	/* Volume sector size. */
	off_t			 v_mediasize;	/* Volume media size. */
	struct bio_queue_head	 v_inflight;	/* In-flight write requests. */
	struct bio_queue_head	 v_locked;	/* Blocked I/O requests. */
	LIST_HEAD(, g_raid_lock) v_locks;	 /* List of locked regions. */
	int			 v_pending_lock; /* Writes to locked region. */
	int			 v_dirty;	/* Volume is DIRTY. */
	struct timeval		 v_last_done;	/* Time of the last I/O. */
	time_t			 v_last_write;	/* Time of the last write. */
	u_int			 v_writes;	/* Number of active writes. */
	struct root_hold_token	*v_rootmount;	/* Root mount delay token. */
	int			 v_starting;	/* Volume is starting. */
	int			 v_stopping;	/* Volume is stopping. */
	int			 v_provider_open; /* Number of opens. */
	int			 v_global_id;	/* Global volume ID (rX). */
	int			 v_read_only;	/* Volume is read-only. */
	TAILQ_ENTRY(g_raid_volume)	 v_next; /* List of volumes entry. */
	LIST_ENTRY(g_raid_volume)	 v_global_next; /* Global list entry. */
};

#define G_RAID_NODE_E_WAKE	0x00
#define G_RAID_NODE_E_START	0x01

struct g_raid_softc {
	struct g_raid_md_object	*sc_md;		/* Metadata object. */
	struct g_geom		*sc_geom;	/* GEOM class instance. */
	uint64_t		 sc_flags;	/* Additional flags. */
	TAILQ_HEAD(, g_raid_volume)	 sc_volumes;	/* List of volumes. */
	TAILQ_HEAD(, g_raid_disk)	 sc_disks;	/* List of disks. */
	struct sx		 sc_lock;	/* Main node lock. */
	struct proc		*sc_worker;	/* Worker process. */
	struct mtx		 sc_queue_mtx;	/* Worker queues lock. */
	TAILQ_HEAD(, g_raid_event) sc_events;	/* Worker events queue. */
	struct bio_queue_head	 sc_queue;	/* Worker I/O queue. */
	int			 sc_stopping;	/* Node is stopping. */
};
#define	sc_name	sc_geom->name

SYSCTL_DECL(_kern_geom_raid);

/*
 * KOBJ parent class of metadata processing modules.
 */
struct g_raid_md_class {
	KOBJ_CLASS_FIELDS;
	int		 mdc_enable;
	int		 mdc_priority;
	LIST_ENTRY(g_raid_md_class) mdc_list;
};

/*
 * KOBJ instance of metadata processing module.
 */
struct g_raid_md_object {
	KOBJ_FIELDS;
	struct g_raid_md_class	*mdo_class;
	struct g_raid_softc	*mdo_softc;	/* Back-pointer to softc. */
};

int g_raid_md_modevent(module_t, int, void *);

#define	G_RAID_MD_DECLARE(name, label)				\
    static moduledata_t g_raid_md_##name##_mod = {		\
	"g_raid_md_" __XSTRING(name),				\
	g_raid_md_modevent,					\
	&g_raid_md_##name##_class				\
    };								\
    DECLARE_MODULE(g_raid_md_##name, g_raid_md_##name##_mod,	\
	SI_SUB_DRIVERS, SI_ORDER_SECOND);			\
    MODULE_DEPEND(g_raid_md_##name, geom_raid, 0, 0, 0);	\
    SYSCTL_NODE(_kern_geom_raid, OID_AUTO, name, CTLFLAG_RD,	\
	NULL, label " metadata module");			\
    SYSCTL_INT(_kern_geom_raid_##name, OID_AUTO, enable,	\
	CTLFLAG_RWTUN, &g_raid_md_##name##_class.mdc_enable, 0,	\
	"Enable " label " metadata format taste")
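
/*
 * Illustrative use (a sketch; the module name "intel" is an example): a
 * metadata module defines its kobj class g_raid_md_<name>_class and then
 * registers everything with a single line:
 *
 *	G_RAID_MD_DECLARE(intel, "Intel");
 *
 * which declares the kernel module, its dependency on geom_raid, and the
 * kern.geom.raid.<name>.enable sysctl/tunable controlling metadata tasting.
 */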

/*
 * KOBJ parent class of data transformation modules.
 */
struct g_raid_tr_class {
	KOBJ_CLASS_FIELDS;
	int		 trc_enable;
	int		 trc_priority;
	int		 trc_accept_unmapped;
	LIST_ENTRY(g_raid_tr_class) trc_list;
};

/*
 * KOBJ instance of data transformation module.
 */
struct g_raid_tr_object {
	KOBJ_FIELDS;
	struct g_raid_tr_class	*tro_class;
	struct g_raid_volume 	*tro_volume;	/* Back-pointer to volume. */
};

int g_raid_tr_modevent(module_t, int, void *);

#define	G_RAID_TR_DECLARE(name, label)				\
    static moduledata_t g_raid_tr_##name##_mod = {		\
	"g_raid_tr_" __XSTRING(name),				\
	g_raid_tr_modevent,					\
	&g_raid_tr_##name##_class				\
    };								\
    DECLARE_MODULE(g_raid_tr_##name, g_raid_tr_##name##_mod,	\
	SI_SUB_DRIVERS, SI_ORDER_FIRST);			\
    MODULE_DEPEND(g_raid_tr_##name, geom_raid, 0, 0, 0);	\
    SYSCTL_NODE(_kern_geom_raid, OID_AUTO, name, CTLFLAG_RD,	\
	NULL, label " transformation module");			\
    SYSCTL_INT(_kern_geom_raid_##name, OID_AUTO, enable,	\
	CTLFLAG_RWTUN, &g_raid_tr_##name##_class.trc_enable, 0,	\
	"Enable " label " transformation module taste")
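
/*
 * Illustrative use (a sketch; the module name "raid1" is an example): a
 * transformation module defines g_raid_tr_<name>_class and registers it:
 *
 *	G_RAID_TR_DECLARE(raid1, "RAID1");
 *
 * which likewise creates the kern.geom.raid.<name>.enable sysctl/tunable.
 */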

const char * g_raid_volume_level2str(int level, int qual);
int g_raid_volume_str2level(const char *str, int *level, int *qual);
const char * g_raid_volume_state2str(int state);
const char * g_raid_subdisk_state2str(int state);
const char * g_raid_disk_state2str(int state);

struct g_raid_softc * g_raid_create_node(struct g_class *mp,
    const char *name, struct g_raid_md_object *md);
int g_raid_create_node_format(const char *format, struct gctl_req *req,
    struct g_geom **gp);
struct g_raid_volume * g_raid_create_volume(struct g_raid_softc *sc,
    const char *name, int id);
struct g_raid_disk * g_raid_create_disk(struct g_raid_softc *sc);
const char * g_raid_get_diskname(struct g_raid_disk *disk);
void g_raid_get_disk_info(struct g_raid_disk *disk);

int g_raid_start_volume(struct g_raid_volume *vol);

int g_raid_destroy_node(struct g_raid_softc *sc, int worker);
int g_raid_destroy_volume(struct g_raid_volume *vol);
int g_raid_destroy_disk(struct g_raid_disk *disk);

void g_raid_iodone(struct bio *bp, int error);
void g_raid_subdisk_iostart(struct g_raid_subdisk *sd, struct bio *bp);
int g_raid_subdisk_kerneldump(struct g_raid_subdisk *sd,
    void *virtual, vm_offset_t physical, off_t offset, size_t length);

struct g_consumer *g_raid_open_consumer(struct g_raid_softc *sc,
    const char *name);
void g_raid_kill_consumer(struct g_raid_softc *sc, struct g_consumer *cp);

void g_raid_report_disk_state(struct g_raid_disk *disk);
void g_raid_change_disk_state(struct g_raid_disk *disk, int state);
void g_raid_change_subdisk_state(struct g_raid_subdisk *sd, int state);
void g_raid_change_volume_state(struct g_raid_volume *vol, int state);

void g_raid_write_metadata(struct g_raid_softc *sc, struct g_raid_volume *vol,
    struct g_raid_subdisk *sd, struct g_raid_disk *disk);
void g_raid_fail_disk(struct g_raid_softc *sc,
    struct g_raid_subdisk *sd, struct g_raid_disk *disk);

void g_raid_tr_flush_common(struct g_raid_tr_object *tr, struct bio *bp);
int g_raid_tr_kerneldump_common(struct g_raid_tr_object *tr,
    void *virtual, vm_offset_t physical, off_t offset, size_t length);

u_int g_raid_ndisks(struct g_raid_softc *sc, int state);
u_int g_raid_nsubdisks(struct g_raid_volume *vol, int state);
u_int g_raid_nopens(struct g_raid_softc *sc);
struct g_raid_subdisk * g_raid_get_subdisk(struct g_raid_volume *vol,
    int state);
#define	G_RAID_DESTROY_SOFT		0
#define	G_RAID_DESTROY_DELAYED		1
#define	G_RAID_DESTROY_HARD		2
int g_raid_destroy(struct g_raid_softc *sc, int how);
int g_raid_event_send(void *arg, int event, int flags);
int g_raid_lock_range(struct g_raid_volume *vol, off_t off, off_t len,
    struct bio *ignore, void *argp);
int g_raid_unlock_range(struct g_raid_volume *vol, off_t off, off_t len);
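
/*
 * Illustrative sketch (an assumption about typical usage, not code from this
 * header): a TR module locks the region it is about to rebuild so that
 * concurrent writes are deferred, then releases it when the copy completes;
 * "trs" stands for a hypothetical per-volume TR state passed as argp:
 *
 *	g_raid_lock_range(sd->sd_volume, offset, length, NULL, trs);
 *	(issue the rebuild I/O with G_RAID_BIO_FLAG_SPECIAL set)
 *	g_raid_unlock_range(sd->sd_volume, offset, length);
 */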

g_ctl_req_t g_raid_ctl;
#endif	/* _KERNEL */

#endif	/* !_G_RAID_H_ */