/*	$NetBSD: mirrored.c,v 1.1.1.2 2009/12/02 00:26:26 haad Exp $	*/

/*
 * Copyright (C) 2003-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "lib.h"
#include "toolcontext.h"
#include "metadata.h"
#include "segtype.h"
#include "display.h"
#include "text_export.h"
#include "text_import.h"
#include "config.h"
#include "defaults.h"
#include "lvm-string.h"
#include "targets.h"
#include "activate.h"
#include "sharedlib.h"
#include "str_list.h"

#ifdef DMEVENTD
#  include "libdevmapper-event.h"
#endif

static int _block_on_error_available = 0;
static unsigned _mirror_attributes = 0;

enum {
	MIRR_DISABLED,
	MIRR_RUNNING,
	MIRR_COMPLETED
};

struct mirror_state {
	uint32_t default_region_size;
};

static const char *_mirrored_name(const struct lv_segment *seg)
{
	return seg->segtype->name;
}

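/* Report the mirror layout: image count, area length, log LV, region size and per-image areas. */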
static void _mirrored_display(const struct lv_segment *seg)
{
	const char *size;
	uint32_t s;

	log_print("  Mirrors\t\t%u", seg->area_count);
	log_print("  Mirror size\t\t%u", seg->area_len);
	if (seg->log_lv)
		log_print("  Mirror log volume\t%s", seg->log_lv->name);

	if (seg->region_size) {
		size = display_size(seg->lv->vg->cmd,
				    (uint64_t) seg->region_size);
		log_print("  Mirror region size\t%s", size);
	}

	log_print("  Mirror original:");
	display_stripe(seg, 0, "    ");
	log_print("  Mirror destinations:");
	for (s = 1; s < seg->area_count; s++)
		display_stripe(seg, s, "    ");
	log_print(" ");
}

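/* Read "mirror_count" from the metadata to obtain the number of mirror areas. */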
static int _mirrored_text_import_area_count(struct config_node *sn, uint32_t *area_count)
{
	if (!get_config_uint32(sn, "mirror_count", area_count)) {
		log_error("Couldn't read 'mirror_count' for "
			  "segment '%s'.", config_parent_name(sn));
		return 0;
	}

	return 1;
}

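/*
 * Import a mirror segment from text metadata: optional "extents_moved"
 * (pvmove) and "region_size" fields, an optional "mirror_log" LV reference,
 * and the mandatory "mirrors" area list.
 */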
static int _mirrored_text_import(struct lv_segment *seg, const struct config_node *sn,
			struct dm_hash_table *pv_hash)
{
	const struct config_node *cn;
	char *logname = NULL;

	if (find_config_node(sn, "extents_moved")) {
		if (get_config_uint32(sn, "extents_moved",
				      &seg->extents_copied))
			seg->status |= PVMOVE;
		else {
			log_error("Couldn't read 'extents_moved' for "
				  "segment %s of logical volume %s.",
				  config_parent_name(sn), seg->lv->name);
			return 0;
		}
	}

	if (find_config_node(sn, "region_size")) {
		if (!get_config_uint32(sn, "region_size",
				      &seg->region_size)) {
			log_error("Couldn't read 'region_size' for "
				  "segment %s of logical volume %s.",
				  config_parent_name(sn), seg->lv->name);
			return 0;
		}
	}

	if ((cn = find_config_node(sn, "mirror_log"))) {
		if (!cn->v || !cn->v->v.str) {
			log_error("Mirror log type must be a string.");
			return 0;
		}
		logname = cn->v->v.str;
		if (!(seg->log_lv = find_lv(seg->lv->vg, logname))) {
			log_error("Unrecognised mirror log in "
				  "segment %s of logical volume %s.",
				  config_parent_name(sn), seg->lv->name);
			return 0;
		}
		seg->log_lv->status |= MIRROR_LOG;
	}

	if (logname && !seg->region_size) {
		log_error("Missing region size for mirror log for "
			  "segment %s of logical volume %s.",
			  config_parent_name(sn), seg->lv->name);
		return 0;
	}

	if (!(cn = find_config_node(sn, "mirrors"))) {
		log_error("Couldn't find mirrors array for "
			  "segment %s of logical volume %s.",
			  config_parent_name(sn), seg->lv->name);
		return 0;
	}

	return text_import_areas(seg, sn, cn, pv_hash, MIRROR_IMAGE);
}

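/* Export the mirror segment fields and area list back to text metadata. */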
static int _mirrored_text_export(const struct lv_segment *seg, struct formatter *f)
{
	outf(f, "mirror_count = %u", seg->area_count);
	if (seg->status & PVMOVE)
		out_size(f, (uint64_t) seg->extents_copied * seg->lv->vg->extent_size,
			 "extents_moved = %" PRIu32, seg->extents_copied);
	if (seg->log_lv)
		outf(f, "mirror_log = \"%s\"", seg->log_lv->name);
	if (seg->region_size)
		outf(f, "region_size = %" PRIu32, seg->region_size);

	return out_areas(f, seg, "mirror");
}

#ifdef DEVMAPPER_SUPPORT
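/* Allocate per-target state and cache the default mirror region size from the configuration. */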
static struct mirror_state *_mirrored_init_target(struct dm_pool *mem,
					 struct cmd_context *cmd)
{
	struct mirror_state *mirr_state;

	if (!(mirr_state = dm_pool_alloc(mem, sizeof(*mirr_state)))) {
		log_error("struct mirr_state allocation failed");
		return NULL;
	}

	mirr_state->default_region_size = 2 *
	    find_config_tree_int(cmd,
			    "activation/mirror_region_size",
			    DEFAULT_MIRROR_REGION_SIZE);

	return mirr_state;
}

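/*
 * Parse a kernel mirror target status line, accumulate the synced/total
 * region counts and classify the result as 0%, 100% or somewhere in between.
 */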
static int _mirrored_target_percent(void **target_state,
				    percent_range_t *percent_range,
				    struct dm_pool *mem,
				    struct cmd_context *cmd,
				    struct lv_segment *seg, char *params,
				    uint64_t *total_numerator,
				    uint64_t *total_denominator)
{
	struct mirror_state *mirr_state;
	uint64_t numerator, denominator;
	unsigned mirror_count, m;
	int used;
	char *pos = params;

	if (!*target_state)
		*target_state = _mirrored_init_target(mem, cmd);

	mirr_state = *target_state;

	/* Status line: <#mirrors> (maj:min)+ <synced>/<total_regions> */
	log_debug("Mirror status: %s", params);

	if (sscanf(pos, "%u %n", &mirror_count, &used) != 1) {
		log_error("Failure parsing mirror status mirror count: %s",
			  params);
		return 0;
	}
	pos += used;

	for (m = 0; m < mirror_count; m++) {
		if (sscanf(pos, "%*x:%*x %n", &used) != 0) {
			log_error("Failure parsing mirror status devices: %s",
				  params);
			return 0;
		}
		pos += used;
	}

	if (sscanf(pos, "%" PRIu64 "/%" PRIu64 "%n", &numerator, &denominator,
		   &used) != 2) {
		log_error("Failure parsing mirror status fraction: %s", params);
		return 0;
	}
	pos += used;

	*total_numerator += numerator;
	*total_denominator += denominator;

	if (seg)
		seg->extents_copied = seg->area_len * numerator / denominator;

	if (numerator == denominator)
		*percent_range = PERCENT_100;
	else if (numerator == 0)
		*percent_range = PERCENT_0;
	else
		*percent_range = PERCENT_0_TO_100;

	return 1;
}

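/*
 * Attach the mirror log to the device-mapper node: clustered when the VG is
 * clustered and activation is not exclusive, a disk log when the segment has
 * a log LV, otherwise a core log identified by the mirror LV's own UUID.
 */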
static int _add_log(struct dev_manager *dm, struct lv_segment *seg,
		    struct dm_tree_node *node, uint32_t area_count, uint32_t region_size)
{
	unsigned clustered = 0;
	char *log_dlid = NULL;
	uint32_t log_flags = 0;

	/*
	 * Use clustered mirror log for non-exclusive activation
	 * in clustered VG.
	 */
	if ((!(seg->lv->status & ACTIVATE_EXCL) &&
	      (vg_is_clustered(seg->lv->vg))))
		clustered = 1;

	if (seg->log_lv) {
		/* If disk log, use its UUID */
		if (!(log_dlid = build_dlid(dm, seg->log_lv->lvid.s, NULL))) {
			log_error("Failed to build uuid for log LV %s.",
				  seg->log_lv->name);
			return 0;
		}
	} else {
		/* If core log, use mirror's UUID and set DM_CORELOG flag */
		if (!(log_dlid = build_dlid(dm, seg->lv->lvid.s, NULL))) {
			log_error("Failed to build uuid for mirror LV %s.",
				  seg->lv->name);
			return 0;
		}
		log_flags |= DM_CORELOG;
	}

	if (mirror_in_sync() && !(seg->status & PVMOVE))
		log_flags |= DM_NOSYNC;

	if (_block_on_error_available && !(seg->status & PVMOVE))
		log_flags |= DM_BLOCK_ON_ERROR;

	return dm_tree_node_add_mirror_target_log(node, region_size, clustered, log_dlid, area_count, log_flags);
}

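/*
 * Build the device-mapper table line for a mirror segment.  Segments that
 * are not actively mirroring (single-area segments, completed or not-yet-
 * started pvmove segments) are emitted as linear targets instead.
 */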
static int _mirrored_add_target_line(struct dev_manager *dm, struct dm_pool *mem,
				struct cmd_context *cmd, void **target_state,
				struct lv_segment *seg,
				struct dm_tree_node *node, uint64_t len,
				uint32_t *pvmove_mirror_count)
{
	struct mirror_state *mirr_state;
	uint32_t area_count = seg->area_count;
	unsigned start_area = 0u;
	int mirror_status = MIRR_RUNNING;
	uint32_t region_size;
	int r;

	if (!*target_state)
		*target_state = _mirrored_init_target(mem, cmd);

	mirr_state = *target_state;

	/*
	 * Mirror segment could have only 1 area temporarily
	 * if the segment is under conversion.
	 */
	if (seg->area_count == 1)
		mirror_status = MIRR_DISABLED;

	/*
	 * For pvmove, only have one mirror segment RUNNING at once.
	 * Segments before this are COMPLETED and use 2nd area.
	 * Segments after this are DISABLED and use 1st area.
	 */
	if (seg->status & PVMOVE) {
		if (seg->extents_copied == seg->area_len) {
			mirror_status = MIRR_COMPLETED;
			start_area = 1;
		} else if ((*pvmove_mirror_count)++) {
			mirror_status = MIRR_DISABLED;
			area_count = 1;
		}
		/* else MIRR_RUNNING */
	}

	if (mirror_status != MIRR_RUNNING) {
		if (!dm_tree_node_add_linear_target(node, len))
			return_0;
		goto done;
	}

	if (!(seg->status & PVMOVE)) {
		if (!seg->region_size) {
			log_error("Missing region size for mirror segment.");
			return 0;
		}
		region_size = seg->region_size;

	} else
		region_size = adjusted_mirror_region_size(seg->lv->vg->extent_size,
							  seg->area_len,
							  mirr_state->default_region_size);

	if (!dm_tree_node_add_mirror_target(node, len))
		return_0;

	if ((r = _add_log(dm, seg, node, area_count, region_size)) <= 0) {
		stack;
		return r;
	}

      done:
	return add_areas_line(dm, seg, node, start_area, area_count);
}

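/*
 * Check whether the kernel mirror target is present and record optional
 * capabilities: block_on_error support and a clustered log module.
 */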
static int _mirrored_target_present(struct cmd_context *cmd,
				    const struct lv_segment *seg,
				    unsigned *attributes)
{
	static int _mirrored_checked = 0;
	static int _mirrored_present = 0;
	uint32_t maj, min, patchlevel;
	unsigned maj2, min2, patchlevel2;
	char vsn[80];

	if (!_mirrored_checked) {
		_mirrored_present = target_present(cmd, "mirror", 1);

		/*
		 * block_on_error available as "block_on_error" log
		 * argument with mirror target >= 1.1 and <= 1.11
		 * or with 1.0 in RHEL4U3 driver >= 4.5
		 *
		 * block_on_error available as "handle_errors" mirror
		 * argument with mirror target >= 1.12.
		 *
		 * libdm-deptree.c is smart enough to handle the differences
		 * between block_on_error and handle_errors for all
		 * mirror target versions >= 1.1
		 */
		/* FIXME Move this into libdevmapper */

		if (target_version("mirror", &maj, &min, &patchlevel) &&
		    maj == 1 &&
		    ((min >= 1) ||
		     (min == 0 && driver_version(vsn, sizeof(vsn)) &&
		      sscanf(vsn, "%u.%u.%u", &maj2, &min2, &patchlevel2) == 3 &&
		      maj2 == 4 && min2 == 5 && patchlevel2 == 0)))	/* RHEL4U3 */
			_block_on_error_available = 1;
	}

	/*
	 * Only check for modules if attributes were requested and no previous
	 * check has been done.
	 * FIXME: Fails incorrectly if cmirror was built into kernel.
	 */
	if (attributes) {
		if (!_mirror_attributes && module_present(cmd, "log-clustered"))
			_mirror_attributes |= MIRROR_LOG_CLUSTERED;
		*attributes = _mirror_attributes;
	}
	_mirrored_checked = 1;

	return _mirrored_present;
}

#ifdef DMEVENTD
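/* Look up the dmeventd mirror monitoring library path from the configuration. */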
static int _get_mirror_dso_path(struct cmd_context *cmd, char **dso)
{
	char *path;
	const char *libpath;

	if (!(path = dm_pool_alloc(cmd->mem, PATH_MAX))) {
		log_error("Failed to allocate dmeventd library path.");
		return 0;
	}

	libpath = find_config_tree_str(cmd, "dmeventd/mirror_library",
				       DEFAULT_DMEVENTD_MIRROR_LIB);

	get_shared_library_path(cmd, libpath, path, PATH_MAX);

	*dso = path;

	return 1;
}

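/* Create a dmeventd handler for the given device name, DSO and event mask. */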
static struct dm_event_handler *_create_dm_event_handler(const char *dmname,
							 const char *dso,
							 enum dm_event_mask mask)
{
	struct dm_event_handler *dmevh;

	if (!(dmevh = dm_event_handler_create()))
		return_NULL;

	if (dm_event_handler_set_dso(dmevh, dso))
		goto fail;

	if (dm_event_handler_set_dev_name(dmevh, dmname))
		goto fail;

	dm_event_handler_set_event_mask(dmevh, mask);
	return dmevh;

fail:
	dm_event_handler_destroy(dmevh);
	return NULL;
}

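/* Ask dmeventd whether this mirror LV is already monitored and whether registration is still pending. */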
static int _target_monitored(struct lv_segment *seg, int *pending)
{
	char *dso, *name;
	struct logical_volume *lv;
	struct volume_group *vg;
	enum dm_event_mask evmask = 0;
	struct dm_event_handler *dmevh;

	lv = seg->lv;
	vg = lv->vg;

	*pending = 0;
	if (!_get_mirror_dso_path(vg->cmd, &dso))
		return_0;

	if (!(name = build_dm_name(vg->cmd->mem, vg->name, lv->name, NULL)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(name, dso, DM_EVENT_ALL_ERRORS)))
		return_0;

	if (dm_event_get_registered_device(dmevh, 0)) {
		dm_event_handler_destroy(dmevh);
		return 0;
	}

	evmask = dm_event_handler_get_event_mask(dmevh);
	if (evmask & DM_EVENT_REGISTRATION_PENDING) {
		*pending = 1;
		evmask &= ~DM_EVENT_REGISTRATION_PENDING;
	}

	dm_event_handler_destroy(dmevh);

	return evmask;
}

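/* Register or unregister the mirror LV with dmeventd for error monitoring. */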
/* FIXME This gets run while suspended and performs banned operations. */
static int _target_set_events(struct lv_segment *seg,
			      int evmask __attribute((unused)), int set)
{
	char *dso, *name;
	struct logical_volume *lv;
	struct volume_group *vg;
	struct dm_event_handler *dmevh;
	int r;

	lv = seg->lv;
	vg = lv->vg;

	if (!_get_mirror_dso_path(vg->cmd, &dso))
		return_0;

	if (!(name = build_dm_name(vg->cmd->mem, vg->name, lv->name, NULL)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(name, dso, DM_EVENT_ALL_ERRORS)))
		return_0;

	r = set ? dm_event_register_handler(dmevh) : dm_event_unregister_handler(dmevh);
	dm_event_handler_destroy(dmevh);
	if (!r)
		return_0;

	log_info("%s %s for events", set ? "Monitored" : "Unmonitored", name);

	return 1;
}

static int _target_monitor_events(struct lv_segment *seg, int events)
{
	return _target_set_events(seg, events, 1);
}

static int _target_unmonitor_events(struct lv_segment *seg, int events)
{
	return _target_set_events(seg, events, 0);
}

#endif /* DMEVENTD */
#endif /* DEVMAPPER_SUPPORT */

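/* List the kernel modules needed to activate this segment: the log LV's modules, "clog" for clustered VGs and "mirror" itself. */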
static int _mirrored_modules_needed(struct dm_pool *mem,
				    const struct lv_segment *seg,
				    struct dm_list *modules)
{
	if (seg->log_lv &&
	    !list_segment_modules(mem, first_seg(seg->log_lv), modules))
		return_0;

	if (vg_is_clustered(seg->lv->vg) &&
	    !str_list_add(mem, modules, "clog")) {
		log_error("cluster log string list allocation failed");
		return 0;
	}

	if (!str_list_add(mem, modules, "mirror")) {
		log_error("mirror string list allocation failed");
		return 0;
	}

	return 1;
}

static void _mirrored_destroy(const struct segment_type *segtype)
{
	dm_free((void *) segtype);
}

static struct segtype_handler _mirrored_ops = {
	.name = _mirrored_name,
	.display = _mirrored_display,
	.text_import_area_count = _mirrored_text_import_area_count,
	.text_import = _mirrored_text_import,
	.text_export = _mirrored_text_export,
#ifdef DEVMAPPER_SUPPORT
	.add_target_line = _mirrored_add_target_line,
	.target_percent = _mirrored_target_percent,
	.target_present = _mirrored_target_present,
#ifdef DMEVENTD
	.target_monitored = _target_monitored,
	.target_monitor_events = _target_monitor_events,
	.target_unmonitor_events = _target_unmonitor_events,
#endif
#endif
	.modules_needed = _mirrored_modules_needed,
	.destroy = _mirrored_destroy,
};

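/* Construct the "mirror" segment type, either built-in or loaded as a shared-object plugin. */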
#ifdef MIRRORED_INTERNAL
struct segment_type *init_mirrored_segtype(struct cmd_context *cmd)
#else				/* Shared */
struct segment_type *init_segtype(struct cmd_context *cmd);
struct segment_type *init_segtype(struct cmd_context *cmd)
#endif
{
	struct segment_type *segtype = dm_malloc(sizeof(*segtype));

	if (!segtype)
		return_NULL;

	segtype->cmd = cmd;
	segtype->ops = &_mirrored_ops;
	segtype->name = "mirror";
	segtype->private = NULL;
	segtype->flags = SEG_AREAS_MIRRORED | SEG_MONITORED;

	log_very_verbose("Initialised segtype: %s", segtype->name);

	return segtype;
}