vdev_geom.c (168404 → 169087)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <geom/geom.h>

/*
 * Virtual device vector for GEOM.
 */

struct g_class zfs_vdev_class = {
	.name = "ZFS::VDEV",
	.version = G_VERSION,
};

DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev);

typedef struct vdev_geom_ctx {
	struct g_consumer *gc_consumer;
	int gc_state;
	struct bio_queue_head gc_queue;
	struct mtx gc_queue_mtx;
} vdev_geom_ctx_t;

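/*
 * Detach the I/O context from the vdev and shut down its worker
 * thread: set gc_state to 1, wake the worker, wait until it
 * acknowledges with gc_state == 2, then free the context.
 */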
static void
vdev_geom_release(vdev_t *vd)
{
	vdev_geom_ctx_t *ctx;

	ctx = vd->vdev_tsd;
	vd->vdev_tsd = NULL;

	mtx_lock(&ctx->gc_queue_mtx);
	ctx->gc_state = 1;
	wakeup_one(&ctx->gc_queue);
	while (ctx->gc_state != 2)
		msleep(&ctx->gc_state, &ctx->gc_queue_mtx, 0, "vgeom:w", 0);
	mtx_unlock(&ctx->gc_queue_mtx);
	mtx_destroy(&ctx->gc_queue_mtx);
	kmem_free(ctx, sizeof(*ctx));
}

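/*
 * GEOM orphan method, called when the underlying provider goes away
 * (e.g. the device is removed).  Releases our access counts, destroys
 * the consumer and marks the vdev as unopenable.
 */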
static void
vdev_geom_orphan(struct g_consumer *cp)
{
	struct g_geom *gp;
	vdev_t *vd;
	int error;

	g_topology_assert();

	vd = cp->private;
	gp = cp->geom;
	error = cp->provider->error;

	ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
	g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	ZFS_LOG(1, "Destroyed consumer to %s.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
	/* Destroy geom if there are no consumers left. */
	if (LIST_EMPTY(&gp->consumer)) {
		ZFS_LOG(1, "Destroyed geom %s.", gp->name);
		g_wither_geom(gp, error);
	}
	vdev_geom_release(vd);
	/* Both methods below work, but each behaves slightly differently. */
#if 0
	vd->vdev_reopen_wanted = 1;
#else
	vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
	vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, vd->vdev_stat.vs_aux);
#endif
}

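/*
 * Attach a consumer to the given provider, reusing our geom and any
 * existing consumer for that provider when possible.  Called with the
 * topology lock held; returns NULL on failure.
 */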
static struct g_consumer *
vdev_geom_attach(struct g_provider *pp, int write)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();

	ZFS_LOG(1, "Attaching to %s.", pp->name);
	/* Do we already have a geom? If not, create one. */
	LIST_FOREACH(gp, &zfs_vdev_class.geom, geom) {
		if (!(gp->flags & G_GEOM_WITHER))
			break;
	}
	if (gp == NULL) {
		gp = g_new_geomf(&zfs_vdev_class, "zfs::vdev");
		gp->orphan = vdev_geom_orphan;
		cp = g_new_consumer(gp);
		if (g_attach(cp, pp) != 0) {
			g_wither_geom(gp, ENXIO);
			return (NULL);
		}
		if (g_access(cp, 1, write, 1) != 0) {
			g_wither_geom(gp, ENXIO);
			return (NULL);
		}
		ZFS_LOG(1, "Created geom and consumer for %s.", pp->name);
	} else {
		/* Check if we are already connected to this provider. */
		LIST_FOREACH(cp, &gp->consumer, consumer) {
			if (cp->provider == pp) {
				ZFS_LOG(1, "Found consumer for %s.", pp->name);
				break;
			}
		}
		if (cp == NULL) {
			cp = g_new_consumer(gp);
			if (g_attach(cp, pp) != 0) {
				g_destroy_consumer(cp);
				return (NULL);
			}
			if (g_access(cp, 1, write, 1) != 0) {
				g_detach(cp);
				g_destroy_consumer(cp);
				return (NULL);
			}
			ZFS_LOG(1, "Created consumer for %s.", pp->name);
		} else {
			if (g_access(cp, 1, cp->acw > 0 ? 0 : write, 1) != 0)
				return (NULL);
			ZFS_LOG(1, "Used existing consumer for %s.", pp->name);
		}
	}
	return (cp);
}

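/*
 * Event handler which undoes vdev_geom_attach(): drops one access
 * reference and destroys the consumer (and the geom itself) on last
 * close.
 */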
static void
vdev_geom_detach(void *arg, int flag __unused)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();
	cp = arg;
	gp = cp->geom;

	ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
	g_access(cp, -1, 0, -1);
	/* Destroy consumer on last close. */
	if (cp->acr == 0 && cp->ace == 0) {
		ZFS_LOG(1, "Destroyed consumer to %s.", cp->provider->name);
		if (cp->acw > 0)
			g_access(cp, 0, -cp->acw, 0);
		g_detach(cp);
		g_destroy_consumer(cp);
	}
	/* Destroy geom if there are no consumers left. */
	if (LIST_EMPTY(&gp->consumer)) {
		ZFS_LOG(1, "Destroyed geom %s.", gp->name);
		g_wither_geom(gp, ENXIO);
	}
}

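/*
 * Per-vdev worker thread.  Takes completed bios off gc_queue, copies
 * their status into the owning zios and pushes the zios through the
 * rest of the pipeline.  Exits when vdev_geom_release() requests it.
 */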
static void
vdev_geom_worker(void *arg)
{
	vdev_geom_ctx_t *ctx;
	zio_t *zio;
	struct bio *bp;

	ctx = arg;
	for (;;) {
		mtx_lock(&ctx->gc_queue_mtx);
		bp = bioq_takefirst(&ctx->gc_queue);
		if (bp == NULL) {
			if (ctx->gc_state == 1) {
				ctx->gc_state = 2;
				wakeup_one(&ctx->gc_state);
				mtx_unlock(&ctx->gc_queue_mtx);
				kthread_exit(0);
			}
			msleep(&ctx->gc_queue, &ctx->gc_queue_mtx,
			    PRIBIO | PDROP, "vgeom:io", 0);
			continue;
		}
		mtx_unlock(&ctx->gc_queue_mtx);
		zio = bp->bio_caller1;
		zio->io_error = bp->bio_error;
		if (bp->bio_cmd == BIO_FLUSH && bp->bio_error == ENOTSUP) {
			vdev_t *vd;

			/*
			 * If we get ENOTSUP, we know that no future
			 * attempts will ever succeed.  In this case we
			 * set a persistent bit so that we don't bother
			 * with the ioctl in the future.
			 */
			vd = zio->io_vd;
			vd->vdev_nowritecache = B_TRUE;
		}
		g_destroy_bio(bp);
		zio_next_stage_async(zio);
	}
}

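/*
 * Open the vdev: look up the GEOM provider named by vdev_path, attach
 * a consumer to it, report the media size and sector-size-derived
 * ashift, and start the worker thread.
 */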
static int
vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *ashift)
{
	vdev_geom_ctx_t *ctx;
	struct g_provider *pp;
	struct g_consumer *cp;
	int owned;

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	if ((owned = mtx_owned(&Giant)))
		mtx_unlock(&Giant);
	g_topology_lock();
	pp = g_provider_by_name(vd->vdev_path + sizeof("/dev/") - 1);
	if (pp == NULL) {
		g_topology_unlock();
		if (owned)
			mtx_lock(&Giant);
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}
	cp = vdev_geom_attach(pp, !!(spa_mode & FWRITE));
	g_topology_unlock();
	if (owned)
		mtx_lock(&Giant);
	if (cp == NULL) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (EACCES);
	}

	/*
	 * Determine the actual size of the device.
	 */
	*psize = pp->mediasize;

	/*
	 * Determine the device's minimum transfer size.
	 */
	*ashift = highbit(MAX(pp->sectorsize, SPA_MINBLOCKSIZE)) - 1;

	/*
	 * Clear the nowritecache bit, so that on a vdev_reopen() we will
	 * try again.
	 */
	vd->vdev_nowritecache = B_FALSE;

	cp->private = vd;

	ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP);
	bioq_init(&ctx->gc_queue);
	mtx_init(&ctx->gc_queue_mtx, "zfs:vdev:geom:queue", NULL, MTX_DEF);
	ctx->gc_consumer = cp;
	ctx->gc_state = 0;

	vd->vdev_tsd = ctx;

	kthread_create(vdev_geom_worker, ctx, NULL, 0, 0, "vdev:worker %s",
	    pp->name);

	return (0);
}

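/*
 * Close the vdev: stop the worker thread, then schedule
 * vdev_geom_detach() to run from the GEOM event thread, which holds
 * the topology lock.
 */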
static void
vdev_geom_close(vdev_t *vd)
{
	vdev_geom_ctx_t *ctx;
	struct g_consumer *cp;

	if ((ctx = vd->vdev_tsd) == NULL)
		return;
	if ((cp = ctx->gc_consumer) == NULL)
		return;
	vdev_geom_release(vd);
	g_post_event(vdev_geom_detach, cp, M_WAITOK, NULL);
}

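/*
 * Completion callback for bios issued in vdev_geom_io_start().  Just
 * queue the finished bio for the worker thread and wake it up.
 */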
static void
vdev_geom_io_intr(struct bio *bp)
{
	vdev_geom_ctx_t *ctx;
	zio_t *zio;

	zio = bp->bio_caller1;
	ctx = zio->io_vd->vdev_tsd;

	mtx_lock(&ctx->gc_queue_mtx);
	bioq_insert_tail(&ctx->gc_queue, bp);
	wakeup_one(&ctx->gc_queue);
	mtx_unlock(&ctx->gc_queue_mtx);
}

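/*
 * Translate a zio into a bio and hand it to our consumer.  The only
 * ioctl supported is DKIOCFLUSHWRITECACHE, which is sent down as a
 * BIO_FLUSH request.
 */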
static void
vdev_geom_io_start(zio_t *zio)
{
	vdev_t *vd;
	vdev_geom_ctx_t *ctx;
	struct g_consumer *cp;
	struct bio *bp;
	int error;

	cp = NULL;

	vd = zio->io_vd;
	ctx = vd->vdev_tsd;
	if (ctx != NULL)
		cp = ctx->gc_consumer;

	if (zio->io_type == ZIO_TYPE_IOCTL) {
		zio_vdev_io_bypass(zio);

		/* XXPOLICY */
		if (vdev_is_dead(vd)) {
			zio->io_error = ENXIO;
			zio_next_stage_async(zio);
			return;
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:
			if (vd->vdev_nowritecache) {
				zio->io_error = ENOTSUP;
				break;
			}
			goto sendreq;
		default:
			zio->io_error = ENOTSUP;
		}

		zio_next_stage_async(zio);
		return;
	}

	if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio) == 0)
		return;

	if ((zio = vdev_queue_io(zio)) == NULL)
		return;

sendreq:
	error = vdev_is_dead(vd) ? ENXIO : vdev_error_inject(vd, zio);
	if (error == 0 && cp == NULL)
		error = ENXIO;
	if (error) {
		zio->io_error = error;
		zio_next_stage_async(zio);
		return;
	}

	bp = g_alloc_bio();
	bp->bio_caller1 = zio;
	switch (zio->io_type) {
	case ZIO_TYPE_READ:
	case ZIO_TYPE_WRITE:
		bp->bio_cmd = zio->io_type == ZIO_TYPE_READ ? BIO_READ :
		    BIO_WRITE;
		bp->bio_data = zio->io_data;
		bp->bio_offset = zio->io_offset;
		bp->bio_length = zio->io_size;
		break;
	case ZIO_TYPE_IOCTL:
		bp->bio_cmd = BIO_FLUSH;
		bp->bio_data = NULL;
		bp->bio_offset = cp->provider->mediasize;
		bp->bio_length = 0;
		break;
	}
	bp->bio_done = vdev_geom_io_intr;

	g_io_request(bp, cp);
}

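/*
 * Completion-side pipeline stage: update the vdev queue and cache,
 * apply any configured error injection, then advance the zio.
 */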
static void
vdev_geom_io_done(zio_t *zio)
{
	vdev_queue_io_done(zio);

	if (zio->io_type == ZIO_TYPE_WRITE)
		vdev_cache_write(zio);

	if (zio_injection_enabled && zio->io_error == 0)
		zio->io_error = zio_handle_device_injection(zio->io_vd, EIO);

	zio_next_stage(zio);
}

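/* Operations vector through which ZFS drives GEOM-backed leaf vdevs. */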
vdev_ops_t vdev_geom_ops = {
	vdev_geom_open,
	vdev_geom_close,
	vdev_default_asize,
	vdev_geom_io_start,
	vdev_geom_io_done,
	NULL,
	VDEV_TYPE_DISK,	/* name of this vdev type */
	B_TRUE		/* leaf vdev */
};