vdev_geom.c (208682) vdev_geom.c (209962)
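The only change between r208682 and r209962 is the write-mode check on line 460 of vdev_geom_open(): the old revision tests the global spa_mode, while the new revision calls spa_mode(vd->vdev_spa), taking the mode from the vdev's own pool. The file is listed once below; both versions of line 460 are shown at the point of the change.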
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
23 * All rights reserved.
24 */
25
26#include <sys/zfs_context.h>
27#include <sys/param.h>
28#include <sys/kernel.h>
29#include <sys/bio.h>
30#include <sys/disk.h>
31#include <sys/spa.h>
32#include <sys/spa_impl.h>
33#include <sys/vdev_impl.h>
34#include <sys/fs/zfs.h>
35#include <sys/zio.h>
36#include <geom/geom.h>
37#include <geom/geom_int.h>
38
39/*
40 * Virtual device vector for GEOM.
41 */
42
43struct g_class zfs_vdev_class = {
44 .name = "ZFS::VDEV",
45 .version = G_VERSION,
46};
47
48DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev);
49
50static void
51vdev_geom_orphan(struct g_consumer *cp)
52{
53 struct g_geom *gp;
54 vdev_t *vd;
55 int error;
56
57 g_topology_assert();
58
59 vd = cp->private;
60 gp = cp->geom;
61 error = cp->provider->error;
62
63 ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
64 if (cp->acr + cp->acw + cp->ace > 0)
65 g_access(cp, -cp->acr, -cp->acw, -cp->ace);
66 ZFS_LOG(1, "Destroyed consumer to %s.", cp->provider->name);
67 g_detach(cp);
68 g_destroy_consumer(cp);
69 /* Destroy geom if there are no consumers left. */
70 if (LIST_EMPTY(&gp->consumer)) {
71 ZFS_LOG(1, "Destroyed geom %s.", gp->name);
72 g_wither_geom(gp, error);
73 }
74 vd->vdev_tsd = NULL;
75 vd->vdev_remove_wanted = B_TRUE;
76 spa_async_request(vd->vdev_spa, SPA_ASYNC_REMOVE);
77}
78
79static struct g_consumer *
80vdev_geom_attach(struct g_provider *pp)
81{
82 struct g_geom *gp;
83 struct g_consumer *cp;
84
85 g_topology_assert();
86
87 ZFS_LOG(1, "Attaching to %s.", pp->name);
88 /* Do we have geom already? No? Create one. */
89 LIST_FOREACH(gp, &zfs_vdev_class.geom, geom) {
90 if (gp->flags & G_GEOM_WITHER)
91 continue;
92 if (strcmp(gp->name, "zfs::vdev") != 0)
93 continue;
94 break;
95 }
96 if (gp == NULL) {
97 gp = g_new_geomf(&zfs_vdev_class, "zfs::vdev");
98 gp->orphan = vdev_geom_orphan;
99 cp = g_new_consumer(gp);
100 if (g_attach(cp, pp) != 0) {
101 g_wither_geom(gp, ENXIO);
102 return (NULL);
103 }
104 if (g_access(cp, 1, 0, 1) != 0) {
105 g_wither_geom(gp, ENXIO);
106 return (NULL);
107 }
108 ZFS_LOG(1, "Created geom and consumer for %s.", pp->name);
109 } else {
110 /* Check if we are already connected to this provider. */
111 LIST_FOREACH(cp, &gp->consumer, consumer) {
112 if (cp->provider == pp) {
113 ZFS_LOG(1, "Found consumer for %s.", pp->name);
114 break;
115 }
116 }
117 if (cp == NULL) {
118 cp = g_new_consumer(gp);
119 if (g_attach(cp, pp) != 0) {
120 g_destroy_consumer(cp);
121 return (NULL);
122 }
123 if (g_access(cp, 1, 0, 1) != 0) {
124 g_detach(cp);
125 g_destroy_consumer(cp);
126 return (NULL);
127 }
128 ZFS_LOG(1, "Created consumer for %s.", pp->name);
129 } else {
130 if (g_access(cp, 1, 0, 1) != 0)
131 return (NULL);
132 ZFS_LOG(1, "Used existing consumer for %s.", pp->name);
133 }
134 }
135 return (cp);
136}
137
138static void
139vdev_geom_detach(void *arg, int flag __unused)
140{
141 struct g_geom *gp;
142 struct g_consumer *cp;
143
144 g_topology_assert();
145 cp = arg;
146 gp = cp->geom;
147
148 ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
149 g_access(cp, -1, 0, -1);
150 /* Destroy consumer on last close. */
151 if (cp->acr == 0 && cp->ace == 0) {
152 ZFS_LOG(1, "Destroyed consumer to %s.", cp->provider->name);
153 if (cp->acw > 0)
154 g_access(cp, 0, -cp->acw, 0);
155 g_detach(cp);
156 g_destroy_consumer(cp);
157 }
158 /* Destroy geom if there are no consumers left. */
159 if (LIST_EMPTY(&gp->consumer)) {
160 ZFS_LOG(1, "Destroyed geom %s.", gp->name);
161 g_wither_geom(gp, ENXIO);
162 }
163}
164
165static uint64_t
166nvlist_get_guid(nvlist_t *list)
167{
168 nvpair_t *elem = NULL;
169 uint64_t value;
170
171 while ((elem = nvlist_next_nvpair(list, elem)) != NULL) {
172 if (nvpair_type(elem) == DATA_TYPE_UINT64 &&
173 strcmp(nvpair_name(elem), "guid") == 0) {
174 VERIFY(nvpair_value_uint64(elem, &value) == 0);
175 return (value);
176 }
177 }
178 return (0);
179}
180
181static int
182vdev_geom_io(struct g_consumer *cp, int cmd, void *data, off_t offset, off_t size)
183{
184 struct bio *bp;
185 u_char *p;
186 off_t off, maxio;
187 int error;
188
189 ASSERT((offset % cp->provider->sectorsize) == 0);
190 ASSERT((size % cp->provider->sectorsize) == 0);
191
192 bp = g_alloc_bio();
193 off = offset;
194 offset += size;
195 p = data;
196 maxio = MAXPHYS - (MAXPHYS % cp->provider->sectorsize);
197 error = 0;
198
199 for (; off < offset; off += maxio, p += maxio, size -= maxio) {
200 bzero(bp, sizeof(*bp));
201 bp->bio_cmd = cmd;
202 bp->bio_done = NULL;
203 bp->bio_offset = off;
204 bp->bio_length = MIN(size, maxio);
205 bp->bio_data = p;
206 g_io_request(bp, cp);
207 error = biowait(bp, "vdev_geom_io");
208 if (error != 0)
209 break;
210 }
211
212 g_destroy_bio(bp);
213 return (error);
214}
215
216static uint64_t
217vdev_geom_read_guid(struct g_consumer *cp)
218{
219 struct g_provider *pp;
220 vdev_label_t *label;
221 char *p, *buf;
222 size_t buflen;
223 uint64_t psize;
224 off_t offset, size;
225 uint64_t guid;
226 int error, l, len, iszvol;
227
228 g_topology_assert_not();
229
230 pp = cp->provider;
231 ZFS_LOG(1, "Reading guid from %s...", pp->name);
232 if (g_getattr("ZFS::iszvol", cp, &iszvol) == 0 && iszvol) {
233 ZFS_LOG(1, "Skipping ZVOL-based provider %s.", pp->name);
234 return (0);
235 }
236
237 psize = pp->mediasize;
238 psize = P2ALIGN(psize, (uint64_t)sizeof(vdev_label_t));
239
240 size = sizeof(*label) + pp->sectorsize -
241 ((sizeof(*label) - 1) % pp->sectorsize) - 1;
242
243 guid = 0;
244 label = kmem_alloc(size, KM_SLEEP);
245 buflen = sizeof(label->vl_vdev_phys.vp_nvlist);
246
247 for (l = 0; l < VDEV_LABELS; l++) {
248 nvlist_t *config = NULL;
249
250 offset = vdev_label_offset(psize, l, 0);
251 if ((offset % pp->sectorsize) != 0)
252 continue;
253
254 if (vdev_geom_io(cp, BIO_READ, label, offset, size) != 0)
255 continue;
256 buf = label->vl_vdev_phys.vp_nvlist;
257
258 if (nvlist_unpack(buf, buflen, &config, 0) != 0)
259 continue;
260
261 guid = nvlist_get_guid(config);
262 nvlist_free(config);
263 if (guid != 0)
264 break;
265 }
266
267 kmem_free(label, size);
268 if (guid != 0)
269 ZFS_LOG(1, "guid for %s is %ju", pp->name, (uintmax_t)guid);
270 return (guid);
271}
272
273struct vdev_geom_find {
274 uint64_t guid;
275 struct g_consumer *cp;
276};
277
278static void
279vdev_geom_taste_orphan(struct g_consumer *cp)
280{
281
282 KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
283 cp->provider->name));
284}
285
286static void
287vdev_geom_attach_by_guid_event(void *arg, int flags __unused)
288{
289 struct vdev_geom_find *ap;
290 struct g_class *mp;
291 struct g_geom *gp, *zgp;
292 struct g_provider *pp;
293 struct g_consumer *zcp;
294 uint64_t guid;
295
296 g_topology_assert();
297
298 ap = arg;
299
300 zgp = g_new_geomf(&zfs_vdev_class, "zfs::vdev::taste");
301 /* This orphan function should be never called. */
302 zgp->orphan = vdev_geom_taste_orphan;
303 zcp = g_new_consumer(zgp);
304
305 LIST_FOREACH(mp, &g_classes, class) {
306 if (mp == &zfs_vdev_class)
307 continue;
308 LIST_FOREACH(gp, &mp->geom, geom) {
309 if (gp->flags & G_GEOM_WITHER)
310 continue;
311 LIST_FOREACH(pp, &gp->provider, provider) {
312 if (pp->flags & G_PF_WITHER)
313 continue;
314 g_attach(zcp, pp);
315 if (g_access(zcp, 1, 0, 0) != 0) {
316 g_detach(zcp);
317 continue;
318 }
319 g_topology_unlock();
320 guid = vdev_geom_read_guid(zcp);
321 g_topology_lock();
322 g_access(zcp, -1, 0, 0);
323 g_detach(zcp);
324 if (guid != ap->guid)
325 continue;
326 ap->cp = vdev_geom_attach(pp);
327 if (ap->cp == NULL) {
328 printf("ZFS WARNING: Unable to attach to %s.\n",
329 pp->name);
330 continue;
331 }
332 goto end;
333 }
334 }
335 }
336 ap->cp = NULL;
337end:
338 g_destroy_consumer(zcp);
339 g_destroy_geom(zgp);
340}
341
342static struct g_consumer *
343vdev_geom_attach_by_guid(uint64_t guid)
344{
345 struct vdev_geom_find *ap;
346 struct g_consumer *cp;
347
348 ap = kmem_zalloc(sizeof(*ap), KM_SLEEP);
349 ap->guid = guid;
350 g_waitfor_event(vdev_geom_attach_by_guid_event, ap, M_WAITOK, NULL);
351 cp = ap->cp;
352 kmem_free(ap, sizeof(*ap));
353 return (cp);
354}
355
356static struct g_consumer *
357vdev_geom_open_by_guid(vdev_t *vd)
358{
359 struct g_consumer *cp;
360 char *buf;
361 size_t len;
362
363 ZFS_LOG(1, "Searching by guid [%ju].", (uintmax_t)vd->vdev_guid);
364 cp = vdev_geom_attach_by_guid(vd->vdev_guid);
365 if (cp != NULL) {
366 len = strlen(cp->provider->name) + strlen("/dev/") + 1;
367 buf = kmem_alloc(len, KM_SLEEP);
368
369 snprintf(buf, len, "/dev/%s", cp->provider->name);
370 spa_strfree(vd->vdev_path);
371 vd->vdev_path = buf;
372
373 ZFS_LOG(1, "Attach by guid [%ju] succeeded, provider %s.",
374 (uintmax_t)vd->vdev_guid, vd->vdev_path);
375 } else {
376 ZFS_LOG(1, "Search by guid [%ju] failed.",
377 (uintmax_t)vd->vdev_guid);
378 }
379
380 return (cp);
381}
382
383static struct g_consumer *
384vdev_geom_open_by_path(vdev_t *vd, int check_guid)
385{
386 struct g_provider *pp;
387 struct g_consumer *cp;
388 uint64_t guid;
389
390 cp = NULL;
391 g_topology_lock();
392 pp = g_provider_by_name(vd->vdev_path + sizeof("/dev/") - 1);
393 if (pp != NULL) {
394 ZFS_LOG(1, "Found provider by name %s.", vd->vdev_path);
395 cp = vdev_geom_attach(pp);
396 if (cp != NULL && check_guid) {
397 g_topology_unlock();
398 guid = vdev_geom_read_guid(cp);
399 g_topology_lock();
400 if (guid != vd->vdev_guid) {
401 vdev_geom_detach(cp, 0);
402 cp = NULL;
403 ZFS_LOG(1, "guid mismatch for provider %s: "
404 "%ju != %ju.", vd->vdev_path,
405 (uintmax_t)vd->vdev_guid, (uintmax_t)guid);
406 } else {
407 ZFS_LOG(1, "guid match for provider %s.",
408 vd->vdev_path);
409 }
410 }
411 }
412 g_topology_unlock();
413
414 return (cp);
415}
416
417static int
418vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *ashift)
419{
420 struct g_provider *pp;
421 struct g_consumer *cp;
422 int error, owned;
423
424 /*
425 * We must have a pathname, and it must be absolute.
426 */
427 if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
428 vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
429 return (EINVAL);
430 }
431
432 vd->vdev_tsd = NULL;
433
434 if ((owned = mtx_owned(&Giant)))
435 mtx_unlock(&Giant);
436 error = 0;
437
438 /*
439 * If we're creating pool, just find GEOM provider by its name
440 * and ignore GUID mismatches.
441 */
442 if (vd->vdev_spa->spa_load_state == SPA_LOAD_NONE)
443 cp = vdev_geom_open_by_path(vd, 0);
444 else {
445 cp = vdev_geom_open_by_path(vd, 1);
446 if (cp == NULL) {
447 /*
448 * The device at vd->vdev_path doesn't have the
449 * expected guid. The disks might have merely
450 * moved around so try all other GEOM providers
451 * to find one with the right guid.
452 */
453 cp = vdev_geom_open_by_guid(vd);
454 }
455 }
456
457 if (cp == NULL) {
458 ZFS_LOG(1, "Provider %s not found.", vd->vdev_path);
459 error = ENOENT;
 460	} else if (cp->acw == 0 && (spa_mode & FWRITE) != 0) {	[208682: deleted]
 460	} else if (cp->acw == 0 && (spa_mode(vd->vdev_spa) & FWRITE) != 0) {	[209962: added]
461 int i;
462
463 g_topology_lock();
464 for (i = 0; i < 5; i++) {
465 error = g_access(cp, 0, 1, 0);
466 if (error == 0)
467 break;
468 g_topology_unlock();
469 tsleep(vd, 0, "vdev", hz / 2);
470 g_topology_lock();
471 }
472 if (error != 0) {
473 printf("ZFS WARNING: Unable to open %s for writing (error=%d).\n",
474 vd->vdev_path, error);
475 vdev_geom_detach(cp, 0);
476 cp = NULL;
477 }
478 g_topology_unlock();
479 }
480 if (owned)
481 mtx_lock(&Giant);
482 if (cp == NULL) {
483 vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
484 return (error);
485 }
486
487 cp->private = vd;
488 vd->vdev_tsd = cp;
489 pp = cp->provider;
490
491 /*
492 * Determine the actual size of the device.
493 */
494 *psize = pp->mediasize;
495
496 /*
497 * Determine the device's minimum transfer size.
498 */
499 *ashift = highbit(MAX(pp->sectorsize, SPA_MINBLOCKSIZE)) - 1;
500
501 /*
502 * Clear the nowritecache bit, so that on a vdev_reopen() we will
503 * try again.
504 */
505 vd->vdev_nowritecache = B_FALSE;
506
507 return (0);
508}
509
510static void
511vdev_geom_close(vdev_t *vd)
512{
513 struct g_consumer *cp;
514
515 cp = vd->vdev_tsd;
516 if (cp == NULL)
517 return;
518 vd->vdev_tsd = NULL;
519 g_post_event(vdev_geom_detach, cp, M_WAITOK, NULL);
520}
521
522static void
523vdev_geom_io_intr(struct bio *bp)
524{
525 zio_t *zio;
526
527 zio = bp->bio_caller1;
528 zio->io_error = bp->bio_error;
529 if (zio->io_error == 0 && bp->bio_resid != 0)
530 zio->io_error = EIO;
531 if (bp->bio_cmd == BIO_FLUSH && bp->bio_error == ENOTSUP) {
532 vdev_t *vd;
533
534 /*
535 * If we get ENOTSUP, we know that no future
536 * attempts will ever succeed. In this case we
537 * set a persistent bit so that we don't bother
538 * with the ioctl in the future.
539 */
540 vd = zio->io_vd;
541 vd->vdev_nowritecache = B_TRUE;
542 }
543 g_destroy_bio(bp);
544 zio_interrupt(zio);
545}
546
547static int
548vdev_geom_io_start(zio_t *zio)
549{
550 vdev_t *vd;
551 struct g_consumer *cp;
552 struct bio *bp;
553 int error;
554
555 vd = zio->io_vd;
556
557 if (zio->io_type == ZIO_TYPE_IOCTL) {
558 /* XXPOLICY */
559 if (!vdev_readable(vd)) {
560 zio->io_error = ENXIO;
561 return (ZIO_PIPELINE_CONTINUE);
562 }
563
564 switch (zio->io_cmd) {
565
566 case DKIOCFLUSHWRITECACHE:
567
568 if (zfs_nocacheflush)
569 break;
570
571 if (vd->vdev_nowritecache) {
572 zio->io_error = ENOTSUP;
573 break;
574 }
575
576 goto sendreq;
577 default:
578 zio->io_error = ENOTSUP;
579 }
580
581 return (ZIO_PIPELINE_CONTINUE);
582 }
583sendreq:
584 cp = vd->vdev_tsd;
585 if (cp == NULL) {
586 zio->io_error = ENXIO;
587 return (ZIO_PIPELINE_CONTINUE);
588 }
589 bp = g_alloc_bio();
590 bp->bio_caller1 = zio;
591 switch (zio->io_type) {
592 case ZIO_TYPE_READ:
593 case ZIO_TYPE_WRITE:
594 bp->bio_cmd = zio->io_type == ZIO_TYPE_READ ? BIO_READ : BIO_WRITE;
595 bp->bio_data = zio->io_data;
596 bp->bio_offset = zio->io_offset;
597 bp->bio_length = zio->io_size;
598 break;
599 case ZIO_TYPE_IOCTL:
600 bp->bio_cmd = BIO_FLUSH;
601 bp->bio_data = NULL;
602 bp->bio_offset = cp->provider->mediasize;
603 bp->bio_length = 0;
604 break;
605 }
606 bp->bio_done = vdev_geom_io_intr;
607
608 g_io_request(bp, cp);
609
610 return (ZIO_PIPELINE_STOP);
611}
612
613static void
614vdev_geom_io_done(zio_t *zio)
615{
616}
617
618vdev_ops_t vdev_geom_ops = {
619 vdev_geom_open,
620 vdev_geom_close,
621 vdev_default_asize,
622 vdev_geom_io_start,
623 vdev_geom_io_done,
624 NULL,
625 VDEV_TYPE_DISK, /* name of this vdev type */
626 B_TRUE /* leaf vdev */
627};
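Below is a minimal sketch of the updated check in isolation, for readers comparing the two forms of line 460. It assumes spa_mode(spa) returns the FREAD/FWRITE open-mode bits of that particular pool (the per-pool accessor replacing the old global spa_mode variable); the helper function and its name are hypothetical, and only the condition itself comes from the diff.

#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <geom/geom.h>

/*
 * Hypothetical helper mirroring the r209962 condition in
 * vdev_geom_open(): request write access from GEOM only when the
 * consumer holds no write count yet and the vdev's pool was opened
 * with FWRITE. spa_mode(vd->vdev_spa) is assumed to report the open
 * mode of this specific pool rather than a single global value.
 */
static int
vdev_geom_wants_write(vdev_t *vd, struct g_consumer *cp)
{
	return (cp->acw == 0 &&
	    (spa_mode(vd->vdev_spa) & FWRITE) != 0);
}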