/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and functions used to maintain
 * lock_object structures.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_lock.c 243046 2012-11-15 00:51:57Z jeff $");

#include "opt_ddb.h"
#include "opt_mprof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/cpufunc.h>

CTASSERT(LOCK_CLASS_MAX == 15);

struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
	&lock_class_mtx_spin,
	&lock_class_mtx_sleep,
	&lock_class_sx,
	&lock_class_rm,
	&lock_class_rw,
	&lock_class_lockmgr,
};

void
lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
    const char *type, int flags)
{
	int i;

	/* Check for double-init and zero object. */
	KASSERT(!lock_initalized(lock), ("lock \"%s\" %p already initialized",
	    name, lock));

	/* Look up lock class to find its index. */
	for (i = 0; i < LOCK_CLASS_MAX; i++)
		if (lock_classes[i] == class) {
			lock->lo_flags = i << LO_CLASSSHIFT;
			break;
		}
	KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));

	/* Initialize the lock object. */
	lock->lo_name = name;
	lock->lo_flags |= flags | LO_INITIALIZED;
	LOCK_LOG_INIT(lock, 0);
	WITNESS_INIT(lock, (type != NULL) ? type : name);
}
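
/*
 * Example (illustrative, not part of this file): lock class front-ends
 * call lock_init() on their embedded lock_object.  mtx_init(), for
 * instance, ends up doing roughly:
 *
 *	lock_init(&m->lock_object, &lock_class_mtx_sleep, name, type,
 *	    flags);
 */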

void
lock_destroy(struct lock_object *lock)
{

	KASSERT(lock_initalized(lock), ("lock %p is not initialized", lock));
	WITNESS_DESTROY(lock);
	LOCK_LOG_DESTROY(lock, 0);
	lock->lo_flags &= ~LO_INITIALIZED;
}

#ifdef DDB
DB_SHOW_COMMAND(lock, db_show_lock)
{
	struct lock_object *lock;
	struct lock_class *class;

	if (!have_addr)
		return;
	lock = (struct lock_object *)addr;
	if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) {
		db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock));
		return;
	}
	class = LOCK_CLASS(lock);
	db_printf(" class: %s\n", class->lc_name);
	db_printf(" name: %s\n", lock->lo_name);
	class->lc_ddb_show(lock);
}
#endif
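
/*
 * From DDB, the command above is invoked with a lock address, e.g.
 * (address and output illustrative):
 *
 *	db> show lock 0xffffffff80c7a6f0
 *	 class: sleep mutex
 *	 name: Giant
 */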

#ifdef LOCK_PROFILING

/*
 * One object per-thread for each lock the thread owns.  Tracks individual
 * lock instances.
 */
struct lock_profile_object {
	LIST_ENTRY(lock_profile_object) lpo_link;
	struct lock_object *lpo_obj;
	const char	*lpo_file;
	int		lpo_line;
	uint16_t	lpo_ref;
	uint16_t	lpo_cnt;
	uint64_t	lpo_acqtime;
	uint64_t	lpo_waittime;
	u_int		lpo_contest_locking;
};

/*
 * One lock_prof for each (file, line, lock object) triple.
 */
struct lock_prof {
	SLIST_ENTRY(lock_prof) link;
	struct lock_class *class;
	const char	*file;
	const char	*name;
	int		line;
	int		ticks;
	uintmax_t	cnt_wait_max;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_wait;
	uintmax_t	cnt_cur;
	uintmax_t	cnt_contest_locking;
};

SLIST_HEAD(lphead, lock_prof);

#define	LPROF_HASH_SIZE		4096
#define	LPROF_HASH_MASK		(LPROF_HASH_SIZE - 1)
#define	LPROF_CACHE_SIZE	4096

/*
 * Array of objects and profs for each type of object for each cpu.  Spinlocks
 * are handled separately because a thread may be preempted and acquire a
 * spinlock while in the lock profiling code of a non-spinlock.  In this way
 * we only need a critical section to protect the per-cpu lists.
 */
struct lock_prof_type {
	struct lphead		lpt_lpalloc;
	struct lpohead		lpt_lpoalloc;
	struct lphead		lpt_hash[LPROF_HASH_SIZE];
	struct lock_prof	lpt_prof[LPROF_CACHE_SIZE];
	struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE];
};

struct lock_prof_cpu {
	struct lock_prof_type	lpc_types[2]; /* One for spin, one for other. */
};
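
/*
 * As used below, lpc_types[0] holds profiling state for sleepable lock
 * classes and lpc_types[1] for spin locks; the index is derived from
 * testing LC_SPINLOCK in the lock class flags.
 */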

struct lock_prof_cpu *lp_cpu[MAXCPU];

volatile int lock_prof_enable = 0;
static volatile int lock_prof_resetting;

#define LPROF_SBUF_SIZE		256

static int lock_prof_rejected;
static int lock_prof_skipspin;
static int lock_prof_skipcount;

#ifndef USE_CPU_NANOSECONDS
uint64_t
nanoseconds(void)
{
	struct bintime bt;
	uint64_t ns;

	binuptime(&bt);
	/* From bintime2timespec */
	ns = bt.sec * (uint64_t)1000000000;
	ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
	return (ns);
}
#endif
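
/*
 * The conversion above follows bintime2timespec(): bt.frac is a 64-bit
 * binary fraction of a second, so the exact value is frac * 10^9 / 2^64.
 * Keeping only the top 32 fraction bits keeps the multiplication within
 * 64 bits.  For example, with frac = 2^63 (half a second):
 *
 *	(10^9 * (frac >> 32)) >> 32 = (10^9 * 2^31) / 2^32 = 500000000 ns
 */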

static void
lock_prof_init_type(struct lock_prof_type *type)
{
	int i;

	SLIST_INIT(&type->lpt_lpalloc);
	LIST_INIT(&type->lpt_lpoalloc);
	for (i = 0; i < LPROF_CACHE_SIZE; i++) {
		SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i],
		    link);
		LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i],
		    lpo_link);
	}
}

static void
lock_prof_init(void *arg)
{
	int cpu;

	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lp_cpu[cpu] = malloc(sizeof(*lp_cpu[cpu]), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[0]);
		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[1]);
	}
}
SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);

static void
lock_prof_reset_wait(void)
{

	/*
	 * Spin relinquishing our cpu so that quiesce_all_cpus may
	 * complete.
	 */
	while (lock_prof_resetting)
		sched_relinquish(curthread);
}

static void
lock_prof_reset(void)
{
	struct lock_prof_cpu *lpc;
	int enabled, i, cpu;

	/*
	 * We not only race with acquiring and releasing locks but also
	 * with thread exit.  To be certain that threads exit without
	 * valid head pointers, they must see resetting set before
	 * enabled is cleared.  Otherwise a thread could observe
	 * profiling disabled, skip removing a lock from its per-thread
	 * list, and not wait for the reset below to remove it either.
	 */
	atomic_store_rel_int(&lock_prof_resetting, 1);
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	quiesce_all_cpus("profreset", 0);
	/*
	 * Some objects may have migrated between CPUs.  Clear all links
	 * before we zero the structures.  Some items may still be linked
	 * into per-thread lists as well.
	 */
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lpc = lp_cpu[cpu];
		for (i = 0; i < LPROF_CACHE_SIZE; i++) {
			LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link);
			LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link);
		}
	}
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lpc = lp_cpu[cpu];
		bzero(lpc, sizeof(*lpc));
		lock_prof_init_type(&lpc->lpc_types[0]);
		lock_prof_init_type(&lpc->lpc_types[1]);
	}
	atomic_store_rel_int(&lock_prof_resetting, 0);
	lock_prof_enable = enabled;
}
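
/*
 * The ordering above pairs with lock_profile_thread_exit() and
 * lock_profile_release_lock() below: resetting is published (with a
 * release store) before enable is cleared, so a thread that observes
 * profiling disabled together with resetting set may safely leave the
 * cleanup of its per-thread list to the reset.
 */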

static void
lock_prof_output(struct lock_prof *lp, struct sbuf *sb)
{
	const char *p;

	/* Skip any leading "../" components in the file name. */
	for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3)
		continue;
	sbuf_printf(sb,
	    "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n",
	    lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000,
	    lp->cnt_wait / 1000, lp->cnt_cur,
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_tot / (lp->cnt_cur * 1000),
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_wait / (lp->cnt_cur * 1000),
	    (uintmax_t)0, lp->cnt_contest_locking,
	    p, lp->line, lp->class->lc_name, lp->name);
}

static void
lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash,
    int spin, int t)
{
	struct lock_prof_type *type;
	struct lock_prof *l;
	int cpu;

	dst->file = match->file;
	dst->line = match->line;
	dst->class = match->class;
	dst->name = match->name;

	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (lp_cpu[cpu] == NULL)
			continue;
		type = &lp_cpu[cpu]->lpc_types[spin];
		SLIST_FOREACH(l, &type->lpt_hash[hash], link) {
			if (l->ticks == t)
				continue;
			if (l->file != match->file || l->line != match->line ||
			    l->name != match->name)
				continue;
			l->ticks = t;
			if (l->cnt_max > dst->cnt_max)
				dst->cnt_max = l->cnt_max;
			if (l->cnt_wait_max > dst->cnt_wait_max)
				dst->cnt_wait_max = l->cnt_wait_max;
			dst->cnt_tot += l->cnt_tot;
			dst->cnt_wait += l->cnt_wait;
			dst->cnt_cur += l->cnt_cur;
			dst->cnt_contest_locking += l->cnt_contest_locking;
		}
	}
}

static void
lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin,
    int t)
{
	struct lock_prof *l;
	int i;

	for (i = 0; i < LPROF_HASH_SIZE; ++i) {
		SLIST_FOREACH(l, &type->lpt_hash[i], link) {
			struct lock_prof lp = {};

			if (l->ticks == t)
				continue;
			lock_prof_sum(l, &lp, i, spin, t);
			lock_prof_output(&lp, sb);
		}
	}
}

static int
dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, cpu, t;
	int enabled;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, LPROF_SBUF_SIZE, req);
	sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n",
	    "max", "wait_max", "total", "wait_total", "count", "avg",
	    "wait_avg", "cnt_hold", "cnt_lock", "name");
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	quiesce_all_cpus("profstat", 0);
	t = ticks;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (lp_cpu[cpu] == NULL)
			continue;
		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t);
		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t);
	}
	lock_prof_enable = enabled;

	error = sbuf_finish(sb);
	/* Output a trailing NUL. */
	if (error == 0)
		error = SYSCTL_OUT(req, "", 1);
	sbuf_delete(sb);
	return (error);
}

static int
enable_lock_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = lock_prof_enable;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == lock_prof_enable)
		return (0);
	if (v == 1)
		lock_prof_reset();
	lock_prof_enable = !!v;

	return (0);
}

static int
reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	lock_prof_reset();

	return (0);
}

static struct lock_prof *
lock_profile_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	const char *unknown = "(unknown)";
	struct lock_prof_type *type;
	struct lock_prof *lp;
	struct lphead *head;
	const char *p;
	u_int hash;

	p = file;
	if (p == NULL || *p == '\0')
		p = unknown;
	hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line;
	hash &= LPROF_HASH_MASK;
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	head = &type->lpt_hash[hash];
	SLIST_FOREACH(lp, head, link) {
		if (lp->line == line && lp->file == p &&
		    lp->name == lo->lo_name)
			return (lp);
	}
	lp = SLIST_FIRST(&type->lpt_lpalloc);
	if (lp == NULL) {
		lock_prof_rejected++;
		return (lp);
	}
	SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link);
	lp->file = p;
	lp->line = line;
	lp->class = LOCK_CLASS(lo);
	lp->name = lo->lo_name;
	SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link);
	return (lp);
}
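
/*
 * Note that both the hash and the match above use the name and file
 * string pointers themselves, not their contents.  Entries are thus
 * keyed on pointer identity, which is cheap and is sufficient in
 * practice because __FILE__ and lock names are stable strings for the
 * lifetime of the profile.
 */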

static struct lock_profile_object *
lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lpohead *head;

	head = &curthread->td_lprof[spin];
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo && l->lpo_file == file &&
		    l->lpo_line == line)
			return (l);
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	l = LIST_FIRST(&type->lpt_lpoalloc);
	if (l == NULL) {
		lock_prof_rejected++;
		return (NULL);
	}
	LIST_REMOVE(l, lpo_link);
	l->lpo_obj = lo;
	l->lpo_file = file;
	l->lpo_line = line;
	l->lpo_cnt = 0;
	LIST_INSERT_HEAD(head, l, lpo_link);

	return (l);
}

void
lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
    uint64_t waittime, const char *file, int line)
{
	static int lock_prof_count;
	struct lock_profile_object *l;
	int spin;

	if (SCHEDULER_STOPPED())
		return;

	/* don't reset the timer when/if recursing */
	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
		return;
	if (lock_prof_skipcount &&
	    (++lock_prof_count % lock_prof_skipcount) != 0)
		return;
	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
	if (spin && lock_prof_skipspin == 1)
		return;
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0)
		goto out;
	l = lock_profile_object_lookup(lo, spin, file, line);
	if (l == NULL)
		goto out;
	l->lpo_cnt++;
	if (++l->lpo_ref > 1)
		goto out;
	l->lpo_contest_locking = contested;
	l->lpo_acqtime = nanoseconds();
	if (waittime && (l->lpo_acqtime > waittime))
		l->lpo_waittime = l->lpo_acqtime - waittime;
	else
		l->lpo_waittime = 0;
out:
	critical_exit();
}
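
/*
 * Usage sketch (illustrative; the real call sites live in the lock
 * implementations, e.g. the mutex code).  On a contended acquire:
 *
 *	int contested = 0;
 *	uint64_t waittime = 0;
 *
 *	lock_profile_obtain_lock_failed(&m->lock_object, &contested,
 *	    &waittime);
 *
 * then, once the lock is finally held:
 *
 *	lock_profile_obtain_lock_success(&m->lock_object, contested,
 *	    waittime, file, line);
 *
 * and on release:
 *
 *	lock_profile_release_lock(&m->lock_object);
 */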

void
lock_profile_thread_exit(struct thread *td)
{
#ifdef INVARIANTS
	struct lock_profile_object *l;

	MPASS(curthread->td_critnest == 0);
#endif
	/*
	 * If lock profiling was disabled we have to wait for reset to
	 * clear our pointers before we can exit safely.
	 */
	lock_prof_reset_wait();
#ifdef INVARIANTS
	LIST_FOREACH(l, &td->td_lprof[0], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
	LIST_FOREACH(l, &td->td_lprof[1], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
#endif
	MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL);
	MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL);
}

void
lock_profile_release_lock(struct lock_object *lo)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lock_prof *lp;
	uint64_t curtime, holdtime;
	struct lpohead *head;
	int spin;

	if (SCHEDULER_STOPPED())
		return;
	if (lo->lo_flags & LO_NOPROFILE)
		return;
	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
	head = &curthread->td_lprof[spin];
	if (LIST_FIRST(head) == NULL)
		return;
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0 && lock_prof_resetting == 1)
		goto out;
	/*
	 * If lock profiling is not enabled we still want to remove the
	 * lpo from our queue.
	 */
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo)
			break;
	if (l == NULL)
		goto out;
	if (--l->lpo_ref > 0)
		goto out;
	lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line);
	if (lp == NULL)
		goto release;
	curtime = nanoseconds();
	if (curtime < l->lpo_acqtime)
		goto release;
	holdtime = curtime - l->lpo_acqtime;

	/*
	 * Record if the lock has been held longer now than ever
	 * before.
	 */
	if (holdtime > lp->cnt_max)
		lp->cnt_max = holdtime;
	if (l->lpo_waittime > lp->cnt_wait_max)
		lp->cnt_wait_max = l->lpo_waittime;
	lp->cnt_tot += holdtime;
	lp->cnt_wait += l->lpo_waittime;
	lp->cnt_contest_locking += l->lpo_contest_locking;
	lp->cnt_cur += l->lpo_cnt;
release:
	LIST_REMOVE(l, lpo_link);
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link);
out:
	critical_exit();
}

static SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging");
static SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL,
    "lock profiling");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW,
    &lock_prof_skipspin, 0, "Skip profiling on spinlocks.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW,
    &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &lock_prof_rejected, 0, "Number of rejected profiling records");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_lock_prof, "I", "Enable lock profiling");
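
/*
 * From userland the knobs above are driven with sysctl(8); a typical
 * profiling session (illustrative):
 *
 *	sysctl debug.lock.prof.enable=1
 *	(run the workload of interest)
 *	sysctl debug.lock.prof.enable=0
 *	sysctl debug.lock.prof.stats
 *	sysctl debug.lock.prof.reset=1
 */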

#endif
649