/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009 Isilon Inc http://www.isilon.com/
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/**
 * @file
 *
 * fail(9) Facility.
 *
 * @ingroup failpoint_private
 */
/**
 * @defgroup failpoint fail(9) Facility
 *
 * Failpoints allow for injecting fake errors into running code on the fly,
 * without modifying code or recompiling with flags.  Failpoints are always
 * present, and are very efficient when disabled.  Failpoints are described
 * in man fail(9).
 */
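/**
 * For illustration, a sketch of typical use, modeled on the test fail
 * point at the bottom of this file (do_foo is a hypothetical function;
 * see fail(9) for the full interface):
 *
 *	int
 *	do_foo(void)
 *	{
 *		KFAIL_POINT_RETURN(DEBUG_FP, do_foo);
 *		return (0);
 *	}
 *
 * From userland, make 5% of calls fail with error 5:
 *
 *	sysctl debug.fail_point.do_foo='5%return(5)'
 */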
/**
 * @defgroup failpoint_private Private fail(9) Implementation functions
 *
 * Private implementations for the actual failpoint code.
 *
 * @ingroup failpoint
 */
/**
 * @addtogroup failpoint_private
 * @{
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_stack.h"

#include <sys/ctype.h>
#include <sys/errno.h>
#include <sys/fail.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/types.h>

#include <machine/atomic.h>
#include <machine/stdarg.h>

#ifdef ILOG_DEFINE_FOR_FILE
ILOG_DEFINE_FOR_FILE(L_ISI_FAIL_POINT, L_ILOG, fail_point);
#endif

static MALLOC_DEFINE(M_FAIL_POINT, "Fail Points", "fail points system");
#define fp_free(ptr) free(ptr, M_FAIL_POINT)
#define fp_malloc(size, flags) malloc((size), M_FAIL_POINT, (flags))
#define fs_free(ptr) fp_free(ptr)
#define fs_malloc() fp_malloc(sizeof(struct fail_point_setting), \
    M_WAITOK | M_ZERO)
/**
 * These define the wchans that are used for sleeping and pausing,
 * respectively.  They are chosen arbitrarily, but must be distinct
 * per fail point and between the sleep and pause cases.
 */
#define FP_SLEEP_CHANNEL(fp) (void*)(fp)
#define FP_PAUSE_CHANNEL(fp) __DEVOLATILE(void*, &fp->fp_setting)

/**
 * Don't allow more than this many entries in a fail point set by sysctl.
 * The 99.99...% case is to have 1 entry.  I can't imagine having this many
 * entries, so it should not limit us.  Saves on re-mallocs while holding
 * a non-sleepable lock.
 */
#define FP_MAX_ENTRY_COUNT 20

/* Used to drain sbufs to the sysctl output */
int fail_sysctl_drain_func(void *, const char *, int);

/* Head of tailq of struct fail_point_entry */
TAILQ_HEAD(fail_point_entry_queue, fail_point_entry);

/**
 * fp entries garbage list; outstanding entries are cleaned up in the
 * garbage collector
 */
STAILQ_HEAD(fail_point_setting_garbage, fail_point_setting);
static struct fail_point_setting_garbage fp_setting_garbage =
        STAILQ_HEAD_INITIALIZER(fp_setting_garbage);
static struct mtx mtx_garbage_list;
MTX_SYSINIT(mtx_garbage_list, &mtx_garbage_list, "fail point garbage mtx",
        MTX_SPIN);

static struct sx sx_fp_set;
SX_SYSINIT(sx_fp_set, &sx_fp_set, "fail point set sx");

/**
 * Failpoint types.
 * Don't change these without changing fail_type_strings in fail.c.
 * @ingroup failpoint_private
 */
enum fail_point_t {
	FAIL_POINT_OFF,		/**< don't fail */
	FAIL_POINT_PANIC,	/**< panic */
	FAIL_POINT_RETURN,	/**< return an errorcode */
	FAIL_POINT_BREAK,	/**< break into the debugger */
	FAIL_POINT_PRINT,	/**< print a message */
	FAIL_POINT_SLEEP,	/**< sleep for some msecs */
	FAIL_POINT_PAUSE,	/**< sleep until failpoint is set to off */
	FAIL_POINT_YIELD,	/**< yield the cpu */
	FAIL_POINT_DELAY,	/**< busy wait the cpu */
	FAIL_POINT_NUMTYPES,
	FAIL_POINT_INVALID = -1
};

static struct {
	const char *name;
	int	nmlen;
} fail_type_strings[] = {
#define	FP_TYPE_NM_LEN(s)	{ s, sizeof(s) - 1 }
	[FAIL_POINT_OFF] =	FP_TYPE_NM_LEN("off"),
	[FAIL_POINT_PANIC] =	FP_TYPE_NM_LEN("panic"),
	[FAIL_POINT_RETURN] =	FP_TYPE_NM_LEN("return"),
	[FAIL_POINT_BREAK] =	FP_TYPE_NM_LEN("break"),
	[FAIL_POINT_PRINT] =	FP_TYPE_NM_LEN("print"),
	[FAIL_POINT_SLEEP] =	FP_TYPE_NM_LEN("sleep"),
	[FAIL_POINT_PAUSE] =	FP_TYPE_NM_LEN("pause"),
	[FAIL_POINT_YIELD] =	FP_TYPE_NM_LEN("yield"),
	[FAIL_POINT_DELAY] =	FP_TYPE_NM_LEN("delay"),
};

#define FE_COUNT_UNTRACKED (INT_MIN)

/**
 * Internal structure tracking a single term of a complete failpoint.
 * @ingroup failpoint_private
 */
struct fail_point_entry {
	volatile bool	fe_stale;
	enum fail_point_t	fe_type;	/**< type of entry */
	int		fe_arg;		/**< argument to type (e.g. return value) */
	int		fe_prob;	/**< likelihood of firing in millionths */
	int32_t		fe_count;	/**< number of times to fire, FE_COUNT_UNTRACKED means unlimited */
	pid_t		fe_pid;		/**< only fail for this process */
	struct fail_point	*fe_parent;	/**< backpointer to fp */
	TAILQ_ENTRY(fail_point_entry)	fe_entries; /**< next entry ptr */
};

struct fail_point_setting {
	STAILQ_ENTRY(fail_point_setting) fs_garbage_link;
	struct fail_point_entry_queue fp_entry_queue;
	struct fail_point * fs_parent;
	struct mtx feq_mtx; /* Gives fail_point_pause something to do.  */
};

/**
 * Definitions expressing probability one (100%)
 */
enum {
	PROB_MAX = 1000000,	/* probability between zero and this number */
	PROB_DIGITS = 6		/* number of zeros in the above number */
};
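/* For example, a 25% firing probability is stored as fe_prob == 250000. */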

/* Get a ref on an fp's fp_setting */
static inline struct fail_point_setting *fail_point_setting_get_ref(
        struct fail_point *fp);
/* Release a ref on an fp_setting */
static inline void fail_point_setting_release_ref(struct fail_point *fp);
/* Allocate and initialize a struct fail_point_setting */
static struct fail_point_setting *fail_point_setting_new(struct
        fail_point *);
/* Free a struct fail_point_setting */
static void fail_point_setting_destroy(struct fail_point_setting *fp_setting);
/* Allocate and initialize a struct fail_point_entry */
static struct fail_point_entry *fail_point_entry_new(struct
        fail_point_setting *);
/* Free a struct fail_point_entry */
static void fail_point_entry_destroy(struct fail_point_entry *fp_entry);
/* Append fp setting to garbage list */
static inline void fail_point_setting_garbage_append(
        struct fail_point_setting *fp_setting);
/* Swap fp's setting with fp_setting_new */
static inline struct fail_point_setting *
        fail_point_swap_settings(struct fail_point *fp,
        struct fail_point_setting *fp_setting_new);
/* Free up any zero-ref setting in the garbage queue */
static void fail_point_garbage_collect(void);
/* If this fail point's setting is empty, swap it out to NULL. */
static inline void fail_point_eval_swap_out(struct fail_point *fp,
        struct fail_point_setting *fp_setting);

bool
fail_point_is_off(struct fail_point *fp)
{
	bool return_val;
	struct fail_point_setting *fp_setting;
	struct fail_point_entry *ent;

	return_val = true;

	fp_setting = fail_point_setting_get_ref(fp);
	if (fp_setting != NULL) {
		TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue,
		    fe_entries) {
			if (!ent->fe_stale) {
				return_val = false;
				break;
			}
		}
	}
	fail_point_setting_release_ref(fp);

	return (return_val);
}

/* Allocate and initialize a struct fail_point_setting */
static struct fail_point_setting *
fail_point_setting_new(struct fail_point *fp)
{
	struct fail_point_setting *fs_new;

	fs_new = fs_malloc();
	fs_new->fs_parent = fp;
	TAILQ_INIT(&fs_new->fp_entry_queue);
	mtx_init(&fs_new->feq_mtx, "fail point entries", NULL, MTX_SPIN);

	fail_point_setting_garbage_append(fs_new);

	return (fs_new);
}

/* Free a struct fail_point_setting */
static void
fail_point_setting_destroy(struct fail_point_setting *fp_setting)
{
	struct fail_point_entry *ent;

	while (!TAILQ_EMPTY(&fp_setting->fp_entry_queue)) {
		ent = TAILQ_FIRST(&fp_setting->fp_entry_queue);
		TAILQ_REMOVE(&fp_setting->fp_entry_queue, ent, fe_entries);
		fail_point_entry_destroy(ent);
	}

	fs_free(fp_setting);
}

/* Allocate and initialize a struct fail_point_entry */
static struct fail_point_entry *
fail_point_entry_new(struct fail_point_setting *fp_setting)
{
	struct fail_point_entry *fp_entry;

	fp_entry = fp_malloc(sizeof(struct fail_point_entry),
	        M_WAITOK | M_ZERO);
	fp_entry->fe_parent = fp_setting->fs_parent;
	fp_entry->fe_prob = PROB_MAX;
	fp_entry->fe_pid = NO_PID;
	fp_entry->fe_count = FE_COUNT_UNTRACKED;
	TAILQ_INSERT_TAIL(&fp_setting->fp_entry_queue, fp_entry,
	        fe_entries);

	return (fp_entry);
}

/* Free a struct fail_point_entry */
static void
fail_point_entry_destroy(struct fail_point_entry *fp_entry)
{

	fp_free(fp_entry);
}

/* Get a ref on an fp's fp_setting */
static inline struct fail_point_setting *
fail_point_setting_get_ref(struct fail_point *fp)
{
	struct fail_point_setting *fp_setting;

	/* Invariant: if we have a ref, our pointer to fp_setting is safe */
	atomic_add_acq_32(&fp->fp_ref_cnt, 1);
	fp_setting = fp->fp_setting;

	return (fp_setting);
}

/* Release a ref on an fp_setting */
static inline void
fail_point_setting_release_ref(struct fail_point *fp)
{

	KASSERT(fp->fp_ref_cnt > 0, ("Attempting to deref w/no refs"));
	atomic_subtract_rel_32(&fp->fp_ref_cnt, 1);
}

/* Append fp entries to fp garbage list */
static inline void
fail_point_setting_garbage_append(struct fail_point_setting *fp_setting)
{

	mtx_lock_spin(&mtx_garbage_list);
	STAILQ_INSERT_TAIL(&fp_setting_garbage, fp_setting,
	        fs_garbage_link);
	mtx_unlock_spin(&mtx_garbage_list);
}

/* Swap fp's entries with fp_setting_new */
static struct fail_point_setting *
fail_point_swap_settings(struct fail_point *fp,
        struct fail_point_setting *fp_setting_new)
{
	struct fail_point_setting *fp_setting_old;

	fp_setting_old = fp->fp_setting;
	fp->fp_setting = fp_setting_new;

	return (fp_setting_old);
}

static inline void
fail_point_eval_swap_out(struct fail_point *fp,
        struct fail_point_setting *fp_setting)
{

	/* We may have already been swapped out and replaced; ignore. */
	if (fp->fp_setting == fp_setting)
		fail_point_swap_settings(fp, NULL);
}

/* Free up any zero-ref entries in the garbage queue */
static void
fail_point_garbage_collect(void)
{
	struct fail_point_setting *fs_current, *fs_next;
	struct fail_point_setting_garbage fp_ents_free_list;

	/*
	 * Transfer the settings to be freed to fp_ents_free_list while
	 * holding the spin mutex, then free them after we drop the lock.
	 * This avoids triggering witness due to sleepable mutexes in the
	 * memory allocator.
	 */
	STAILQ_INIT(&fp_ents_free_list);

	mtx_lock_spin(&mtx_garbage_list);
	STAILQ_FOREACH_SAFE(fs_current, &fp_setting_garbage, fs_garbage_link,
	    fs_next) {
		if (fs_current->fs_parent->fp_setting != fs_current &&
		        fs_current->fs_parent->fp_ref_cnt == 0) {
			STAILQ_REMOVE(&fp_setting_garbage, fs_current,
			        fail_point_setting, fs_garbage_link);
			STAILQ_INSERT_HEAD(&fp_ents_free_list, fs_current,
			        fs_garbage_link);
		}
	}
	mtx_unlock_spin(&mtx_garbage_list);

	STAILQ_FOREACH_SAFE(fs_current, &fp_ents_free_list, fs_garbage_link,
	        fs_next)
		fail_point_setting_destroy(fs_current);
}

/* Drain out all refs from this fail point */
static inline void
fail_point_drain(struct fail_point *fp, int expected_ref)
{
	struct fail_point_setting *entries;

	entries = fail_point_swap_settings(fp, NULL);
	/*
	 * We have unpaused all threads, so we will wait no longer than
	 * the time taken by the longest remaining sleep, or the length
	 * of a long-running code block.
	 */
	while (fp->fp_ref_cnt > expected_ref) {
		wakeup(FP_PAUSE_CHANNEL(fp));
		tsleep(&fp, PWAIT, "fail_point_drain", hz / 100);
	}
	fail_point_swap_settings(fp, entries);
}

static inline void
fail_point_pause(struct fail_point *fp, enum fail_point_return_code *pret,
        struct mtx *mtx_sleep)
{

	if (fp->fp_pre_sleep_fn)
		fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

	msleep_spin(FP_PAUSE_CHANNEL(fp), mtx_sleep, "failpt", 0);

	if (fp->fp_post_sleep_fn)
		fp->fp_post_sleep_fn(fp->fp_post_sleep_arg);
}

static inline void
fail_point_sleep(struct fail_point *fp, int msecs,
        enum fail_point_return_code *pret)
{
	int timo;

	/* Convert from millisecs to ticks, rounding up */
	timo = howmany((int64_t)msecs * hz, 1000L);
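	/*
	 * For example, with hz = 100 a 15ms request becomes 2 ticks
	 * (howmany rounds up) rather than truncating down to 1.
	 */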

	if (timo > 0) {
		if (!(fp->fp_flags & FAIL_POINT_USE_TIMEOUT_PATH)) {
			if (fp->fp_pre_sleep_fn)
				fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

			tsleep(FP_SLEEP_CHANNEL(fp), PWAIT, "failpt", timo);

			if (fp->fp_post_sleep_fn)
				fp->fp_post_sleep_fn(fp->fp_post_sleep_arg);
		} else {
			if (fp->fp_pre_sleep_fn)
				fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

			timeout(fp->fp_post_sleep_fn, fp->fp_post_sleep_arg,
			    timo);
			*pret = FAIL_POINT_RC_QUEUED;
		}
	}
}

static char *parse_fail_point(struct fail_point_setting *, char *);
static char *parse_term(struct fail_point_setting *, char *);
static char *parse_number(int *out_units, int *out_decimal, char *);
static char *parse_type(struct fail_point_entry *, char *);

/**
 * Initialize a fail_point.  The name is formed in a printf-like fashion
 * from "fmt" and subsequent arguments.  This function is generally used
 * for custom failpoints located at odd places in the sysctl tree, and is
 * not explicitly needed for standard in-line-declared failpoints.
 *
 * @ingroup failpoint
 */
void
fail_point_init(struct fail_point *fp, const char *fmt, ...)
{
	va_list ap;
	char *name;
	int n;

	fp->fp_setting = NULL;
	fp->fp_flags = 0;

	/* Figure out the size of the name. */
	va_start(ap, fmt);
	n = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);

	/* Allocate the name and fill it in. */
	name = fp_malloc(n + 1, M_WAITOK);
	if (name != NULL) {
		va_start(ap, fmt);
		vsnprintf(name, n + 1, fmt, ap);
		va_end(ap);
	}
	fp->fp_name = name;
	fp->fp_location = "";
	fp->fp_flags |= FAIL_POINT_DYNAMIC_NAME;
	fp->fp_pre_sleep_fn = NULL;
	fp->fp_pre_sleep_arg = NULL;
	fp->fp_post_sleep_fn = NULL;
	fp->fp_post_sleep_arg = NULL;
}

/**
 * Free the resources held by a fail_point, and wake any paused threads.
 * Thou shalt not allow threads to hit this fail point after you enter this
 * function, nor shall you call this multiple times for a given fp.
 * @ingroup failpoint
 */
void
fail_point_destroy(struct fail_point *fp)
{

	fail_point_drain(fp, 0);

	if ((fp->fp_flags & FAIL_POINT_DYNAMIC_NAME) != 0) {
		fp_free(__DECONST(void *, fp->fp_name));
		fp->fp_name = NULL;
	}
	fp->fp_flags = 0;

	sx_xlock(&sx_fp_set);
	fail_point_garbage_collect();
	sx_xunlock(&sx_fp_set);
}

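/*
 * A sketch of the caller's side: in practice the evaluation below is
 * reached through the fail_point_eval() fast-path wrapper and the
 * KFAIL_POINT_* macros in fail.h rather than called by hand:
 *
 *	int error;
 *
 *	if (fail_point_eval_nontrivial(&fp, &error) == FAIL_POINT_RC_RETURN)
 *		return (error);
 */
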
/**
 * This does the real work of evaluating a fail point.  If the fail point
 * tells us to return a value, this function returns FAIL_POINT_RC_RETURN
 * and fills in 'return_value' (return_value is allowed to be null).  If the
 * fail point tells us to panic, we never return.  Otherwise we just return
 * FAIL_POINT_RC_CONTINUE after doing some work, which means "keep going".
 */
enum fail_point_return_code
fail_point_eval_nontrivial(struct fail_point *fp, int *return_value)
{
	bool execute = false;
	struct fail_point_entry *ent;
	struct fail_point_setting *fp_setting;
	enum fail_point_return_code ret;
	int cont;
	int count;
	int msecs;
	int usecs;

	ret = FAIL_POINT_RC_CONTINUE;
	cont = 0; /* don't continue by default */

	fp_setting = fail_point_setting_get_ref(fp);
	if (fp_setting == NULL)
		goto abort;

	TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue, fe_entries) {
		if (ent->fe_stale)
			continue;

		if (ent->fe_prob < PROB_MAX &&
		    ent->fe_prob < random() % PROB_MAX)
			continue;

		if (ent->fe_pid != NO_PID && ent->fe_pid != curproc->p_pid)
			continue;

		if (ent->fe_count != FE_COUNT_UNTRACKED) {
			count = ent->fe_count;
			while (count > 0) {
				if (atomic_cmpset_32(&ent->fe_count,
				    count, count - 1)) {
					count--;
					execute = true;
					break;
				}
				count = ent->fe_count;
			}
			if (!execute)
				/* We lost the race; consider the entry stale and bail now */
				continue;
			if (count == 0)
				ent->fe_stale = true;
		}

		switch (ent->fe_type) {
		case FAIL_POINT_PANIC:
			panic("fail point %s panicking", fp->fp_name);
			/* NOTREACHED */

		case FAIL_POINT_RETURN:
			if (return_value != NULL)
				*return_value = ent->fe_arg;
			ret = FAIL_POINT_RC_RETURN;
			break;

		case FAIL_POINT_BREAK:
			printf("fail point %s breaking to debugger\n",
			        fp->fp_name);
			breakpoint();
			break;

		case FAIL_POINT_PRINT:
			printf("fail point %s executing\n", fp->fp_name);
			cont = ent->fe_arg;
			break;

		case FAIL_POINT_SLEEP:
			msecs = ent->fe_arg;
			if (msecs)
				fail_point_sleep(fp, msecs, &ret);
			break;

		case FAIL_POINT_PAUSE:
			/*
			 * Pausing is inherently strange with multiple
			 * entries given our design: some entries could be
			 * unreachable, e.g. in "pause->return" the return
			 * entry can never be reached.  The sysctl layer
			 * actually truncates all entries after a pause for
			 * this reason.
			 */
			mtx_lock_spin(&fp_setting->feq_mtx);
			fail_point_pause(fp, &ret, &fp_setting->feq_mtx);
			mtx_unlock_spin(&fp_setting->feq_mtx);
			break;

		case FAIL_POINT_YIELD:
			kern_yield(PRI_UNCHANGED);
			break;

		case FAIL_POINT_DELAY:
			usecs = ent->fe_arg;
			DELAY(usecs);
			break;

		default:
			break;
		}

		if (cont == 0)
			break;
	}

	if (fail_point_is_off(fp))
		fail_point_eval_swap_out(fp, fp_setting);

abort:
	fail_point_setting_release_ref(fp);

	return (ret);
}

/**
 * Translate internal fail_point structure into human-readable text.
 */
static void
fail_point_get(struct fail_point *fp, struct sbuf *sb,
        bool verbose)
{
	struct fail_point_entry *ent;
	struct fail_point_setting *fp_setting;
	struct fail_point_entry *fp_entry_cpy;
	int cnt_sleeping;
	int idx;
	int printed_entry_count;

	cnt_sleeping = 0;
	idx = 0;
	printed_entry_count = 0;

	fp_entry_cpy = fp_malloc(sizeof(struct fail_point_entry) *
	        (FP_MAX_ENTRY_COUNT + 1), M_WAITOK);

	fp_setting = fail_point_setting_get_ref(fp);

	if (fp_setting != NULL) {
		TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue, fe_entries) {
			if (ent->fe_stale)
				continue;

			KASSERT(printed_entry_count < FP_MAX_ENTRY_COUNT,
			        ("FP entry list larger than allowed"));

			fp_entry_cpy[printed_entry_count] = *ent;
			++printed_entry_count;
		}
	}
	fail_point_setting_release_ref(fp);

	/* This is our equivalent of a NULL terminator */
	fp_entry_cpy[printed_entry_count].fe_type = FAIL_POINT_INVALID;

	while (idx < printed_entry_count) {
		ent = &fp_entry_cpy[idx];
		++idx;
		if (ent->fe_prob < PROB_MAX) {
			int decimal = ent->fe_prob % (PROB_MAX / 100);
			int units = ent->fe_prob / (PROB_MAX / 100);
			sbuf_printf(sb, "%d", units);
			if (decimal) {
				int digits = PROB_DIGITS - 2;
				while (!(decimal % 10)) {
					digits--;
					decimal /= 10;
				}
				sbuf_printf(sb, ".%0*d", digits, decimal);
			}
			sbuf_printf(sb, "%%");
		}
		if (ent->fe_count >= 0)
			sbuf_printf(sb, "%d*", ent->fe_count);
		sbuf_printf(sb, "%s", fail_type_strings[ent->fe_type].name);
		if (ent->fe_arg)
			sbuf_printf(sb, "(%d)", ent->fe_arg);
		if (ent->fe_pid != NO_PID)
			sbuf_printf(sb, "[pid %d]", ent->fe_pid);
		/*
		 * ent points into our local copy, so check the copy's
		 * count rather than following the original queue's links,
		 * which may reference stale or freed entries.
		 */
		if (idx < printed_entry_count)
			sbuf_printf(sb, "->");
	}
	if (!printed_entry_count)
		sbuf_printf(sb, "off");

	fp_free(fp_entry_cpy);
	if (verbose) {
#ifdef STACK
		/*
		 * Print the stacks of any sleeping threads.  queue=0 is
		 * the wait queue index used by msleep when sending our
		 * threads to sleep.
		 */
		sbuf_printf(sb, "\nsleeping_thread_stacks = {\n");
		sleepq_sbuf_print_stacks(sb, FP_SLEEP_CHANNEL(fp), 0,
		        &cnt_sleeping);

		sbuf_printf(sb, "},\n");
#endif
		sbuf_printf(sb, "sleeping_thread_count = %d,\n",
		        cnt_sleeping);

#ifdef STACK
		sbuf_printf(sb, "paused_thread_stacks = {\n");
		sleepq_sbuf_print_stacks(sb, FP_PAUSE_CHANNEL(fp), 0,
		        &cnt_sleeping);

		sbuf_printf(sb, "},\n");
#endif
		sbuf_printf(sb, "paused_thread_count = %d\n",
		        cnt_sleeping);
	}
}

/**
 * Set an internal fail_point structure from a human-readable failpoint string
 * in a lock-safe manner.
 */
static int
fail_point_set(struct fail_point *fp, char *buf)
{
	struct fail_point_entry *ent, *ent_next;
	struct fail_point_setting *entries;
	bool should_wake_paused;
	bool should_truncate;
	int error;

	error = 0;
	should_wake_paused = false;
	should_truncate = false;

	/* Parse new entries. */
	/**
	 * ref protects our new malloc'd stuff from being garbage collected
	 * before we link it.
	 */
	fail_point_setting_get_ref(fp);
	entries = fail_point_setting_new(fp);
	if (parse_fail_point(entries, buf) == NULL) {
		STAILQ_REMOVE(&fp_setting_garbage, entries,
		        fail_point_setting, fs_garbage_link);
		fail_point_setting_destroy(entries);
		error = EINVAL;
		goto end;
	}

	/*
	 * Prune the parsed entry list in place.
	 * Get rid of useless zero-probability entries and entries with a
	 * hit count of 0.
	 * If 'off' is present and it has no hit count set, then all entries
	 * after it are discarded since they are unreachable.
	 */
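	/*
	 * For example, setting "off->return(5)" leaves the queue empty:
	 * the untracked "off" entry is itself dropped and the unreachable
	 * return term is truncated, so the fail point ends up off.
	 */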
	TAILQ_FOREACH_SAFE(ent, &entries->fp_entry_queue, fe_entries, ent_next) {
		if (ent->fe_prob == 0 || ent->fe_count == 0) {
			printf("Discarding entry which cannot execute %s\n",
			        fail_type_strings[ent->fe_type].name);
			TAILQ_REMOVE(&entries->fp_entry_queue, ent,
			        fe_entries);
			fp_free(ent);
			continue;
		} else if (should_truncate) {
			printf("Discarding unreachable entry %s\n",
			        fail_type_strings[ent->fe_type].name);
			TAILQ_REMOVE(&entries->fp_entry_queue, ent,
			        fe_entries);
			fp_free(ent);
			continue;
		}

		if (ent->fe_type == FAIL_POINT_OFF) {
			should_wake_paused = true;
			if (ent->fe_count == FE_COUNT_UNTRACKED) {
				should_truncate = true;
				TAILQ_REMOVE(&entries->fp_entry_queue, ent,
				        fe_entries);
				fp_free(ent);
			}
		} else if (ent->fe_type == FAIL_POINT_PAUSE) {
			should_truncate = true;
		} else if (ent->fe_type == FAIL_POINT_SLEEP && (fp->fp_flags &
		        FAIL_POINT_NONSLEEPABLE)) {
			/**
			 * If this fail point is annotated as being in a
			 * non-sleepable ctx, convert sleep to delay and
			 * convert the msec argument to usecs.
			 */
			printf("Sleep call request on fail point in "
			        "non-sleepable context; using delay instead "
			        "of sleep\n");
			ent->fe_type = FAIL_POINT_DELAY;
			ent->fe_arg *= 1000;
		}
	}

	if (TAILQ_EMPTY(&entries->fp_entry_queue)) {
		entries = fail_point_swap_settings(fp, NULL);
		if (entries != NULL)
			wakeup(FP_PAUSE_CHANNEL(fp));
	} else {
		if (should_wake_paused)
			wakeup(FP_PAUSE_CHANNEL(fp));
		fail_point_swap_settings(fp, entries);
	}

end:
#ifdef IWARNING
	if (error)
		IWARNING("Failed to set %s %s to %s",
		    fp->fp_name, fp->fp_location, buf);
	else
		INOTICE("Set %s %s to %s",
		    fp->fp_name, fp->fp_location, buf);
#endif /* IWARNING */

	fail_point_setting_release_ref(fp);
	return (error);
}

#define MAX_FAIL_POINT_BUF	1023

/**
 * Handle kernel failpoint set/get.
 */
int
fail_point_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct fail_point *fp;
	char *buf;
	struct sbuf sb, *sb_check;
	int error;

	buf = NULL;
	error = 0;
	fp = arg1;

	sb_check = sbuf_new(&sb, NULL, 1024, SBUF_AUTOEXTEND);
	if (sb_check != &sb)
		return (ENOMEM);

	sbuf_set_drain(&sb, (sbuf_drain_func *)fail_sysctl_drain_func, req);

	/* Setting */
	/**
	 * Lock protects any new entries from being garbage collected before we
	 * can link them to the fail point.
	 */
	sx_xlock(&sx_fp_set);
	if (req->newptr) {
		if (req->newlen > MAX_FAIL_POINT_BUF) {
			error = EINVAL;
			/* "out" does not unlock; drop the lock here. */
			sx_xunlock(&sx_fp_set);
			goto out;
		}

		buf = fp_malloc(req->newlen + 1, M_WAITOK);

		error = SYSCTL_IN(req, buf, req->newlen);
		if (error) {
			sx_xunlock(&sx_fp_set);
			goto out;
		}
		buf[req->newlen] = '\0';

		error = fail_point_set(fp, buf);
	}

	fail_point_garbage_collect();
	sx_xunlock(&sx_fp_set);

	/* Retrieving. */
	fail_point_get(fp, &sb, false);

out:
	sbuf_finish(&sb);
	sbuf_delete(&sb);

	if (buf)
		fp_free(buf);

	return (error);
}

int
fail_point_sysctl_status(SYSCTL_HANDLER_ARGS)
{
	struct fail_point *fp;
	struct sbuf sb, *sb_check;

	fp = arg1;

	sb_check = sbuf_new(&sb, NULL, 1024, SBUF_AUTOEXTEND);
	if (sb_check != &sb)
		return (ENOMEM);

	sbuf_set_drain(&sb, (sbuf_drain_func *)fail_sysctl_drain_func, req);

	/* Retrieving. */
	fail_point_get(fp, &sb, true);

	sbuf_finish(&sb);
	sbuf_delete(&sb);

	/**
	 * Lock protects any new entries from being garbage collected before we
	 * can link them to the fail point.
	 */
	sx_xlock(&sx_fp_set);
	fail_point_garbage_collect();
	sx_xunlock(&sx_fp_set);

	return (0);
}

int
fail_sysctl_drain_func(void *sysctl_args, const char *buf, int len)
{
	struct sysctl_req *sa;
	int error;

	sa = sysctl_args;

	error = SYSCTL_OUT(sa, buf, len);

	if (error == ENOMEM)
		return (-1);
	else
		return (len);
}

/**
 * Internal helper function to translate a human-readable failpoint string
 * into an internally-parsable fail_point structure.
 */
static char *
parse_fail_point(struct fail_point_setting *ents, char *p)
{
	/*  <fail_point> ::
	 *      <term> ( "->" <term> )*
	 */
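	/*
	 * For example, "1.5%return(5)->sleep(100)" parses as two terms
	 * chained with "->" (a sketch; the term syntax is given in
	 * parse_term() below).
	 */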
	uint8_t term_count;

	term_count = 1;

	p = parse_term(ents, p);
	if (p == NULL)
		return (NULL);

	while (*p != '\0') {
		term_count++;
		if (p[0] != '-' || p[1] != '>' ||
		        (p = parse_term(ents, p+2)) == NULL ||
		        term_count > FP_MAX_ENTRY_COUNT)
			return (NULL);
	}
	return (p);
}

/**
 * Internal helper function to parse an individual term from a failpoint.
 */
static char *
parse_term(struct fail_point_setting *ents, char *p)
{
	struct fail_point_entry *ent;

	ent = fail_point_entry_new(ents);

	/*
	 * <term> ::
	 *     ( (<float> "%") | (<integer> "*" ) )*
	 *     <type>
	 *     [ "(" <integer> ")" ]
	 *     [ "[pid " <integer> "]" ]
	 */
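	/*
	 * Examples of valid terms, per the grammar above:
	 *     "off", "5*break", "0.01%panic", "sleep(50)[pid 42]".
	 */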

	/* ( (<float> "%") | (<integer> "*" ) )* */
	while (isdigit(*p) || *p == '.') {
		int units, decimal;

		p = parse_number(&units, &decimal, p);
		if (p == NULL)
			return (NULL);

		if (*p == '%') {
			if (units > 100) /* prevent overflow early */
				units = 100;
			ent->fe_prob = units * (PROB_MAX / 100) + decimal;
			if (ent->fe_prob > PROB_MAX)
				ent->fe_prob = PROB_MAX;
		} else if (*p == '*') {
			if (!units || units < 0 || decimal)
				return (NULL);
			ent->fe_count = units;
		} else
			return (NULL);
		p++;
	}

	/* <type> */
	p = parse_type(ent, p);
	if (p == NULL)
		return (NULL);
	if (*p == '\0')
		return (p);

	/* [ "(" <integer> ")" ] */
	if (*p != '(')
		return (p);
	p++;
	if (!isdigit(*p) && *p != '-')
		return (NULL);
	ent->fe_arg = strtol(p, &p, 0);
	if (*p++ != ')')
		return (NULL);

	/* [ "[pid " <integer> "]" ] */
#define PID_STRING "[pid "
	if (strncmp(p, PID_STRING, sizeof(PID_STRING) - 1) != 0)
		return (p);
	p += sizeof(PID_STRING) - 1;
	if (!isdigit(*p))
		return (NULL);
	ent->fe_pid = strtol(p, &p, 0);
	if (*p++ != ']')
		return (NULL);

	return (p);
}

/**
 * Internal helper function to parse a numeric for a failpoint term.
 */
static char *
parse_number(int *out_units, int *out_decimal, char *p)
{
	char *old_p;

	/**
	 *  <number> ::
	 *      <integer> [ "." <integer> ] |
	 *      "." <integer>
	 */
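	/*
	 * For example, "2.5" yields *out_units == 2 and *out_decimal ==
	 * 5000 (the fractional part expressed in PROB_MAX / 100 units),
	 * so a "2.5%" term stores fe_prob = 2 * 10000 + 5000 == 25000.
	 */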

	/* whole part */
	old_p = p;
	*out_units = strtol(p, &p, 10);
	if (p == old_p && *p != '.')
		return (NULL);

	/* fractional part */
	*out_decimal = 0;
	if (*p == '.') {
		int digits = 0;
		p++;
		while (isdigit(*p)) {
			int digit = *p - '0';
			if (digits < PROB_DIGITS - 2)
				*out_decimal = *out_decimal * 10 + digit;
			else if (digits == PROB_DIGITS - 2 && digit >= 5)
				(*out_decimal)++;
			digits++;
			p++;
		}
		if (!digits) /* need at least one digit after '.' */
			return (NULL);
		while (digits++ < PROB_DIGITS - 2) /* add implicit zeros */
			*out_decimal *= 10;
	}

	return (p); /* success */
}

/**
 * Internal helper function to parse an individual type for a failpoint term.
 */
static char *
parse_type(struct fail_point_entry *ent, char *beg)
{
	enum fail_point_t type;
	int len;

	for (type = FAIL_POINT_OFF; type < FAIL_POINT_NUMTYPES; type++) {
		len = fail_type_strings[type].nmlen;
		if (strncmp(fail_type_strings[type].name, beg, len) == 0) {
			ent->fe_type = type;
			return (beg + len);
		}
	}
	return (NULL);
}

/* The fail point sysctl tree. */
SYSCTL_NODE(_debug, OID_AUTO, fail_point, CTLFLAG_RW, 0, "fail points");

/* Debugging/testing stuff for fail point */
static int
sysctl_test_fail_point(SYSCTL_HANDLER_ARGS)
{

	KFAIL_POINT_RETURN(DEBUG_FP, test_fail_point);
	return (0);
}
SYSCTL_OID(_debug_fail_point, OID_AUTO, test_trigger_fail_point,
        CTLTYPE_STRING | CTLFLAG_RD, NULL, 0, sysctl_test_fail_point, "A",
        "Trigger test fail points");
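
/*
 * The test fail point above can be exercised from userland, e.g. (a
 * sketch following the fail(9) manual page):
 *
 *	sysctl debug.fail_point.test_fail_point='2%return(5)'
 *	sysctl debug.fail_point.test_trigger_fail_point
 */

/** @} */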
1127