/* -*- mode: c; c-basic-offset: 8; -*-
 *
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

/* This quorum hack is only here until we transition to some more rational
 * approach that is driven from userspace.  Honest.  No foolin'.
 *
 * Imagine two nodes lose network connectivity to each other but they're still
 * up and operating in every other way.  Presumably a network timeout indicates
 * that a node is broken and should be recovered.  They can't both recover each
 * other and both carry on without serialising their access to the file system.
 * They need to decide who is authoritative.  Now extend that problem to
 * arbitrary groups of nodes losing connectivity between each other.
 *
 * So we declare that a node which has given up on connecting to a majority
 * of nodes who are still heartbeating will fence itself.
 *
 * There are huge opportunities for races here.  After we give up on a node's
 * connection we need to wait long enough to give heartbeat an opportunity
 * to declare the node as truly dead.  We also need to be careful with the
 * race between when we see a node start heartbeating and when we connect
 * to it.
 *
 * So nodes that are in this transition put a hold on the quorum decision
 * with a counter.  As they fall out of this transition they drop the count
 * and if they're the last, they fire off the decision.
 */
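/* In brief: heartbeat and network events arrive through o2quo_hb_up(),
 * o2quo_hb_down(), o2quo_conn_up(), o2quo_conn_err() and
 * o2quo_hb_still_up() below.  A node whose heartbeat and connection state
 * haven't settled yet holds up the decision via qs_hold_bm and qs_holds;
 * o2quo_clear_hold() schedules the pending decision work once the last
 * hold is dropped. */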
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/reboot.h>

#include "heartbeat.h"
#include "nodemanager.h"
#define MLOG_MASK_PREFIX ML_QUORUM
#include "masklog.h"
#include "quorum.h"

static struct o2quo_state {
	spinlock_t		qs_lock;
	struct work_struct	qs_work;
	int			qs_pending;	/* a decision is waiting on holds */
	int			qs_heartbeating;	/* # of heartbeating nodes */
	unsigned long		qs_hb_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int			qs_connected;	/* # of connected nodes */
	unsigned long		qs_conn_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int			qs_holds;	/* nodes holding up the decision */
	unsigned long		qs_hold_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
} o2quo_state;

/* this is horribly heavy-handed.  It should instead flip the file
 * system RO and call some userspace script. */
static void o2quo_fence_self(void)
{
	/* panic spins with interrupts enabled.  with preempt
	 * threads can still schedule, etc, etc */
	o2hb_stop_all_regions();

	printk("ocfs2 is very sorry to be fencing this system by restarting\n");
	emergency_restart();
}

/* Indicate that a timeout occurred on a heartbeat region write. The
 * other nodes in the cluster may consider us dead at that time so we
 * want to "fence" ourselves so that we don't scribble on the disk
 * after they think they've recovered us. This can't solve all
 * problems related to writeout after recovery but this hack can at
 * least close some of those gaps. When we have real fencing, this can
 * go away as our node would be fenced externally before other nodes
 * begin recovery. */
void o2quo_disk_timeout(void)
{
	o2quo_fence_self();
}

static void o2quo_make_decision(struct work_struct *work)
{
	int quorum;
	int lowest_hb, lowest_reachable = 0, fence = 0;
	struct o2quo_state *qs = &o2quo_state;

	spin_lock(&qs->qs_lock);

	lowest_hb = find_first_bit(qs->qs_hb_bm, O2NM_MAX_NODES);
	if (lowest_hb != O2NM_MAX_NODES)
		lowest_reachable = test_bit(lowest_hb, qs->qs_conn_bm);

	mlog(0, "heartbeating: %d, connected: %d, "
	     "lowest: %d (%sreachable)\n", qs->qs_heartbeating,
	     qs->qs_connected, lowest_hb, lowest_reachable ? "" : "un");

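	/* a node that isn't heartbeating itself, or is the only node left
	 * heartbeating, has no quorum decision to make */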
	if (!test_bit(o2nm_this_node(), qs->qs_hb_bm) ||
	    qs->qs_heartbeating == 1)
		goto out;

	if (qs->qs_heartbeating & 1) {
		/* the odd numbered cluster case is straightforward --
		 * if we can't talk to the majority we're hosed */
		quorum = (qs->qs_heartbeating + 1)/2;
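		/* e.g. with 5 heartbeating nodes we need connections to
		 * (5 + 1) / 2 = 3 of them to stay up */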
		if (qs->qs_connected < quorum) {
			mlog(ML_ERROR, "fencing this node because it is "
			     "only connected to %u nodes and %u is needed "
			     "to make a quorum out of %u heartbeating nodes\n",
			     qs->qs_connected, quorum,
			     qs->qs_heartbeating);
			fence = 1;
		}
	} else {
		/* the even numbered cluster adds the possibility of each half
		 * of the cluster being able to talk amongst themselves... in
		 * that case we're hosed if we can't talk to the group that has
		 * the lowest numbered node */
		quorum = qs->qs_heartbeating / 2;
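		/* e.g. with 4 heartbeating nodes we need 4 / 2 = 2
		 * connections, and at exactly 2 we must also be able to
		 * reach the lowest numbered heartbeating node */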
		if (qs->qs_connected < quorum) {
			mlog(ML_ERROR, "fencing this node because it is "
			     "only connected to %u nodes and %u is needed "
			     "to make a quorum out of %u heartbeating nodes\n",
			     qs->qs_connected, quorum,
			     qs->qs_heartbeating);
			fence = 1;
		} else if ((qs->qs_connected == quorum) &&
			   !lowest_reachable) {
			mlog(ML_ERROR, "fencing this node because it is "
			     "connected to a half-quorum of %u out of %u "
			     "nodes which doesn't include the lowest active "
			     "node %u\n", quorum, qs->qs_heartbeating,
			     lowest_hb);
			fence = 1;
		}
	}

out:
	spin_unlock(&qs->qs_lock);
	if (fence)
		o2quo_fence_self();
}

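/* a hold is a per-node reason to delay the quorum decision; each node
 * contributes at most one hold, tracked in qs_hold_bm */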
static void o2quo_set_hold(struct o2quo_state *qs, u8 node)
{
	assert_spin_locked(&qs->qs_lock);

	if (!test_and_set_bit(node, qs->qs_hold_bm)) {
		qs->qs_holds++;
		mlog_bug_on_msg(qs->qs_holds == O2NM_MAX_NODES,
			        "node %u\n", node);
		mlog(0, "node %u, %d total\n", node, qs->qs_holds);
	}
}

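/* drop a node's hold; the last hold to drop schedules any pending
 * quorum decision */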
static void o2quo_clear_hold(struct o2quo_state *qs, u8 node)
{
	assert_spin_locked(&qs->qs_lock);

	if (test_and_clear_bit(node, qs->qs_hold_bm)) {
		mlog(0, "node %u, %d total\n", node, qs->qs_holds - 1);
		if (--qs->qs_holds == 0) {
			if (qs->qs_pending) {
				qs->qs_pending = 0;
				schedule_work(&qs->qs_work);
			}
		}
		mlog_bug_on_msg(qs->qs_holds < 0, "node %u, holds %d\n",
				node, qs->qs_holds);
	}
}

/* as a node comes up we delay the quorum decision until we know the fate of
 * the connection.  the hold will be dropped in conn_up or hb_down.  it might
 * be perpetuated by conn_err until hb_down.  if we already have a conn, we
 * might be dropping a hold that conn_up got. */
void o2quo_hb_up(u8 node)
{
	struct o2quo_state *qs = &o2quo_state;

	spin_lock(&qs->qs_lock);

	qs->qs_heartbeating++;
	mlog_bug_on_msg(qs->qs_heartbeating == O2NM_MAX_NODES,
		        "node %u\n", node);
	mlog_bug_on_msg(test_bit(node, qs->qs_hb_bm), "node %u\n", node);
	set_bit(node, qs->qs_hb_bm);

	mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);

	if (!test_bit(node, qs->qs_conn_bm))
		o2quo_set_hold(qs, node);
	else
		o2quo_clear_hold(qs, node);

	spin_unlock(&qs->qs_lock);
}

/* hb going down releases any holds we might have had due to this node from
 * conn_up, conn_err, or hb_up */
void o2quo_hb_down(u8 node)
{
	struct o2quo_state *qs = &o2quo_state;

	spin_lock(&qs->qs_lock);

	qs->qs_heartbeating--;
	mlog_bug_on_msg(qs->qs_heartbeating < 0,
			"node %u, %d heartbeating\n",
			node, qs->qs_heartbeating);
	mlog_bug_on_msg(!test_bit(node, qs->qs_hb_bm), "node %u\n", node);
	clear_bit(node, qs->qs_hb_bm);

	mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);

	o2quo_clear_hold(qs, node);

	spin_unlock(&qs->qs_lock);
}

/* this tells us that we've decided that the node is still heartbeating
 * even though we've lost its conn.  it must only be called after conn_err
 * and indicates that we must now make a quorum decision in the future,
 * though we might be doing so after waiting for holds to drain.  Here
 * we'll be dropping the hold from conn_err. */
void o2quo_hb_still_up(u8 node)
{
	struct o2quo_state *qs = &o2quo_state;

	spin_lock(&qs->qs_lock);

	mlog(0, "node %u\n", node);

	qs->qs_pending = 1;
	o2quo_clear_hold(qs, node);

	spin_unlock(&qs->qs_lock);
}

/* This is analogous to hb_up.  as a node's connection comes up we delay the
 * quorum decision until we see it heartbeating.  the hold will be dropped in
 * hb_up or hb_down.  it might be perpetuated by conn_err until hb_down.  if
 * it's already heartbeating we might be dropping a hold that conn_up got. */
void o2quo_conn_up(u8 node)
{
	struct o2quo_state *qs = &o2quo_state;

	spin_lock(&qs->qs_lock);

	qs->qs_connected++;
	mlog_bug_on_msg(qs->qs_connected == O2NM_MAX_NODES,
		        "node %u\n", node);
	mlog_bug_on_msg(test_bit(node, qs->qs_conn_bm), "node %u\n", node);
	set_bit(node, qs->qs_conn_bm);

	mlog(0, "node %u, %d total\n", node, qs->qs_connected);

	if (!test_bit(node, qs->qs_hb_bm))
		o2quo_set_hold(qs, node);
	else
		o2quo_clear_hold(qs, node);

	spin_unlock(&qs->qs_lock);
}

/* we've decided that we won't ever be connecting to the node again.  if it's
 * still heartbeating we grab a hold that will delay decisions until either the
 * node stops heartbeating from hb_down or the caller decides that the node is
 * still up and calls still_up */
void o2quo_conn_err(u8 node)
{
	struct o2quo_state *qs = &o2quo_state;

	spin_lock(&qs->qs_lock);

	if (test_bit(node, qs->qs_conn_bm)) {
		qs->qs_connected--;
		mlog_bug_on_msg(qs->qs_connected < 0,
				"node %u, connected %d\n",
				node, qs->qs_connected);

		clear_bit(node, qs->qs_conn_bm);
	}

	mlog(0, "node %u, %d total\n", node, qs->qs_connected);

	if (test_bit(node, qs->qs_hb_bm))
		o2quo_set_hold(qs, node);

	spin_unlock(&qs->qs_lock);
}

void o2quo_init(void)
{
	struct o2quo_state *qs = &o2quo_state;

	spin_lock_init(&qs->qs_lock);
	INIT_WORK(&qs->qs_work, o2quo_make_decision);
}

void o2quo_exit(void)
{
	flush_scheduled_work();
}