npf_bpf_comp.c revision 1.10
/*	$NetBSD: npf_bpf_comp.c,v 1.10 2016/12/27 22:35:33 rmind Exp $	*/

/*-
 * Copyright (c) 2010-2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * BPF byte-code generation for NPF rules.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: npf_bpf_comp.c,v 1.10 2016/12/27 22:35:33 rmind Exp $");

#include <stdlib.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <inttypes.h>
#include <err.h>
#include <assert.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#define	__FAVOR_BSD
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp6.h>

#include <net/bpf.h>

#include "npfctl.h"

/*
 * Note: clear X_EQ_L4OFF when the X register is invalidated, i.e. when it
 * stores something other than the L4 header offset.  Generally, this is
 * whenever BPF_LDX is used.
 */
#define	FETCHED_L3		0x01
#define	CHECKED_L4		0x02
#define	X_EQ_L4OFF		0x04

struct npf_bpf {
	/*
	 * BPF program code, the allocated length (in bytes), the number
	 * of logical blocks and the flags.
	 */
	struct bpf_program	prog;
	size_t			alen;
	u_int			nblocks;
	sa_family_t		af;
	uint32_t		flags;

	/* The current group offset and block number. */
	bool			ingroup;
	u_int			goff;
	u_int			gblock;

	/* BPF marks, allocated length and the real length. */
	uint32_t *		marks;
	size_t			malen;
	size_t			mlen;
};

/*
 * NPF success and failure values to be returned from BPF.
 */
#define	NPF_BPF_SUCCESS		((u_int)-1)
#define	NPF_BPF_FAILURE		0

/*
 * Magic value to indicate the failure path, which is fixed up on completion.
 * Note: this is the longest jump offset in BPF, since the offset is one byte.
 */
#define	JUMP_MAGIC		0xff

/* Reduce re-allocations by expanding in 64 byte blocks. */
#define	ALLOC_MASK		(64 - 1)
#define	ALLOC_ROUND(x)		(((x) + ALLOC_MASK) & ~ALLOC_MASK)
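/* For example, ALLOC_ROUND(1) == 64 and ALLOC_ROUND(65) == 128. */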

#ifndef IPV6_VERSION
#define	IPV6_VERSION		0x60
#endif

npf_bpf_t *
npfctl_bpf_create(void)
{
	return ecalloc(1, sizeof(npf_bpf_t));
}

static void
fixup_jumps(npf_bpf_t *ctx, u_int start, u_int end, bool swap)
{
	struct bpf_program *bp = &ctx->prog;

	for (u_int i = start; i < end; i++) {
		struct bpf_insn *insn = &bp->bf_insns[i];
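		/*
		 * A BPF conditional jump transfers control to (pc + 1 + k),
		 * so an offset of (end - i) targets the instruction just
		 * past 'end'.
		 */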
		const u_int fail_off = end - i;

		if (fail_off >= JUMP_MAGIC) {
			errx(EXIT_FAILURE, "BPF generation error: "
			    "the number of instructions is over the limit");
		}
		if (BPF_CLASS(insn->code) != BPF_JMP) {
			continue;
		}
		if (swap) {
			uint8_t jt = insn->jt;
			insn->jt = insn->jf;
			insn->jf = jt;
		}
		if (insn->jt == JUMP_MAGIC)
			insn->jt = fail_off;
		if (insn->jf == JUMP_MAGIC)
			insn->jf = fail_off;
	}
}

static void
add_insns(npf_bpf_t *ctx, struct bpf_insn *insns, size_t count)
{
	struct bpf_program *bp = &ctx->prog;
	size_t offset, len, reqlen;

	/* Note: bf_len is the count of instructions. */
	offset = bp->bf_len * sizeof(struct bpf_insn);
	len = count * sizeof(struct bpf_insn);

	/* Ensure the memory buffer is large enough for the program. */
	reqlen = ALLOC_ROUND(offset + len);
	if (reqlen > ctx->alen) {
		bp->bf_insns = erealloc(bp->bf_insns, reqlen);
		ctx->alen = reqlen;
	}

	/* Add the code block. */
	memcpy((uint8_t *)bp->bf_insns + offset, insns, len);
	bp->bf_len += count;
}

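/*
 * done_raw_block: append a mark record describing the block just generated.
 * Each record is laid out as { mark-key, argument-count, arguments... }
 * in 32-bit words.
 */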
static void
done_raw_block(npf_bpf_t *ctx, const uint32_t *m, size_t len)
{
	size_t reqlen, nargs = m[1];

	if ((len / sizeof(uint32_t) - 2) != nargs) {
		errx(EXIT_FAILURE, "invalid BPF block description");
	}
	reqlen = ALLOC_ROUND(ctx->mlen + len);
	if (reqlen > ctx->malen) {
		ctx->marks = erealloc(ctx->marks, reqlen);
		ctx->malen = reqlen;
	}
	memcpy((uint8_t *)ctx->marks + ctx->mlen, m, len);
	ctx->mlen += len;
}

static void
done_block(npf_bpf_t *ctx, const uint32_t *m, size_t len)
{
	done_raw_block(ctx, m, len);
	ctx->nblocks++;
}

struct bpf_program *
npfctl_bpf_complete(npf_bpf_t *ctx)
{
	struct bpf_program *bp = &ctx->prog;
	const u_int retoff = bp->bf_len;

	/* No instructions (optimised out). */
	if (!bp->bf_len)
		return NULL;

	/* Add the return fragment (success and failure paths). */
	struct bpf_insn insns_ret[] = {
		BPF_STMT(BPF_RET+BPF_K, NPF_BPF_SUCCESS),
		BPF_STMT(BPF_RET+BPF_K, NPF_BPF_FAILURE),
	};
	add_insns(ctx, insns_ret, __arraycount(insns_ret));

	/* Fixup all jumps to the main failure path. */
	fixup_jumps(ctx, 0, retoff, false);

	return &ctx->prog;
}

const void *
npfctl_bpf_bmarks(npf_bpf_t *ctx, size_t *len)
{
	*len = ctx->mlen;
	return ctx->marks;
}

void
npfctl_bpf_destroy(npf_bpf_t *ctx)
{
	free(ctx->prog.bf_insns);
	free(ctx->marks);
	free(ctx);
}

/*
 * npfctl_bpf_group: begin a logical group.  It merely uses logical
 * disjunction (OR) for the comparisons within the group.
 */
void
npfctl_bpf_group(npf_bpf_t *ctx)
{
	struct bpf_program *bp = &ctx->prog;

	assert(ctx->goff == 0);
	assert(ctx->gblock == 0);

	ctx->goff = bp->bf_len;
	ctx->gblock = ctx->nblocks;
	ctx->ingroup = true;
}

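/*
 * npfctl_bpf_endgroup: end a logical group, optionally inverting it.
 * For example, an inverted group of a single comparison compiles to:
 *
 *	<comparison>; on a match, jump to the failure return below
 *	jump +1 (the fall-through path, taken when nothing matched)
 *	ret failure
 */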
void
npfctl_bpf_endgroup(npf_bpf_t *ctx, bool invert)
{
	struct bpf_program *bp = &ctx->prog;
	const size_t curoff = bp->bf_len;

	/* If there are no blocks or only one - nothing to do. */
	if (!invert && (ctx->nblocks - ctx->gblock) <= 1) {
		ctx->goff = ctx->gblock = 0;
		return;
	}

	/*
	 * If inverting, then prepend a jump over the failure return below,
	 * so that the fall-through (no match) path skips it.  On a match,
	 * the swapped jumps will land on the failure return instead.
	 */
	if (invert) {
		struct bpf_insn insns_ret[] = {
			BPF_STMT(BPF_JMP+BPF_JA, 1),
		};
		add_insns(ctx, insns_ret, __arraycount(insns_ret));
	}

	/*
	 * Append a failure return as the fall-through path, i.e. taken
	 * if there is no match within the group.
	 */
	struct bpf_insn insns_ret[] = {
		BPF_STMT(BPF_RET+BPF_K, NPF_BPF_FAILURE),
	};
	add_insns(ctx, insns_ret, __arraycount(insns_ret));

	/*
	 * Adjust the jump offsets: on a match, jump outside the group,
	 * i.e. to the current offset.  Otherwise, jump to the next
	 * instruction, which leads to the fall-through code above if
	 * none of the blocks matches.
	 */
	fixup_jumps(ctx, ctx->goff, curoff, true);
	ctx->goff = ctx->gblock = 0;
}

static void
fetch_l3(npf_bpf_t *ctx, sa_family_t af, u_int flags)
{
	u_int ver;

	switch (af) {
	case AF_INET:
		ver = IPVERSION;
		break;
	case AF_INET6:
		ver = IPV6_VERSION >> 4;
		break;
	case AF_UNSPEC:
		ver = 0;
		break;
	default:
		abort();
	}

	/*
	 * The memory store is populated with:
	 * - BPF_MW_IPVER: IP version (4 or 6).
	 * - BPF_MW_L4OFF: L4 header offset.
	 * - BPF_MW_L4PROTO: L4 protocol.
	 */
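	/*
	 * Fetch if not done yet, or re-check if a specific address family
	 * is now requested after an earlier AF_UNSPEC check.
	 */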
	if ((ctx->flags & FETCHED_L3) == 0 || (af && ctx->af == 0)) {
		const uint8_t jt = ver ? 0 : JUMP_MAGIC;
		const uint8_t jf = ver ? JUMP_MAGIC : 0;
		bool ingroup = ctx->ingroup;

		/*
		 * An L3 block cannot be inserted in the middle of a group
		 * (in fact, it never is).  End the group here and restart
		 * it after the check.
		 */
		if (ingroup) {
			assert(ctx->nblocks == ctx->gblock);
			npfctl_bpf_endgroup(ctx, false);
		}

		/*
		 * A <- IP version; A == expected-version?
		 * If no particular version specified, check for non-zero.
		 */
		struct bpf_insn insns_af[] = {
			BPF_STMT(BPF_LD+BPF_W+BPF_MEM, BPF_MW_IPVER),
			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, ver, jt, jf),
		};
		add_insns(ctx, insns_af, __arraycount(insns_af));
		ctx->flags |= FETCHED_L3;
		ctx->af = af;

		if (af) {
			uint32_t mwords[] = { BM_IPVER, 1, af };
			done_raw_block(ctx, mwords, sizeof(mwords));
		}
		if (ingroup) {
			npfctl_bpf_group(ctx);
		}

	} else if (af && af != ctx->af) {
		errx(EXIT_FAILURE, "address family mismatch");
	}

	if ((flags & X_EQ_L4OFF) != 0 && (ctx->flags & X_EQ_L4OFF) == 0) {
		/* X <- IP header length */
		struct bpf_insn insns_hlen[] = {
			BPF_STMT(BPF_LDX+BPF_MEM, BPF_MW_L4OFF),
		};
		add_insns(ctx, insns_hlen, __arraycount(insns_hlen));
		ctx->flags |= X_EQ_L4OFF;
	}
}

/*
 * npfctl_bpf_proto: code block to match IP version and L4 protocol.
 */
void
npfctl_bpf_proto(npf_bpf_t *ctx, sa_family_t af, int proto)
{
	assert(af != AF_UNSPEC || proto != -1);

	/* Note: fails if IP version does not match. */
	fetch_l3(ctx, af, 0);
	if (proto == -1) {
		return;
	}

	struct bpf_insn insns_proto[] = {
		/* A <- L4 protocol; A == expected-protocol? */
		BPF_STMT(BPF_LD+BPF_W+BPF_MEM, BPF_MW_L4PROTO),
		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, proto, 0, JUMP_MAGIC),
	};
	add_insns(ctx, insns_proto, __arraycount(insns_proto));

	uint32_t mwords[] = { BM_PROTO, 1, proto };
	done_block(ctx, mwords, sizeof(mwords));
	ctx->flags |= CHECKED_L4;
}

/*
 * npfctl_bpf_cidr: code block to match IPv4 or IPv6 CIDR.
 *
 * => IP address shall be in the network byte order.
 */
void
npfctl_bpf_cidr(npf_bpf_t *ctx, u_int opts, sa_family_t af,
    const npf_addr_t *addr, const npf_netmask_t mask)
{
	const uint32_t *awords = (const uint32_t *)addr;
	u_int nwords, length, maxmask, off;

	assert(((opts & MATCH_SRC) != 0) ^ ((opts & MATCH_DST) != 0));
	assert((mask && mask <= NPF_MAX_NETMASK) || mask == NPF_NO_NETMASK);

	switch (af) {
	case AF_INET:
		maxmask = 32;
		off = (opts & MATCH_SRC) ?
		    offsetof(struct ip, ip_src) :
		    offsetof(struct ip, ip_dst);
		nwords = sizeof(struct in_addr) / sizeof(uint32_t);
		break;
	case AF_INET6:
		maxmask = 128;
		off = (opts & MATCH_SRC) ?
		    offsetof(struct ip6_hdr, ip6_src) :
		    offsetof(struct ip6_hdr, ip6_dst);
		nwords = sizeof(struct in6_addr) / sizeof(uint32_t);
		break;
	default:
		abort();
	}

	/* Ensure address family. */
	fetch_l3(ctx, af, 0);

	length = (mask == NPF_NO_NETMASK) ? maxmask : mask;

	/* CAUTION: BPF operates in host byte-order. */
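	/*
	 * Compare one 32-bit word at a time.  For example, an IPv6 /48
	 * compares the first word in full, the second word under the
	 * mask 0xffff0000 and skips the remaining two words.
	 */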
	for (u_int i = 0; i < nwords; i++) {
		const u_int woff = i * sizeof(uint32_t);
		uint32_t word = ntohl(awords[i]);
		uint32_t wordmask;

		if (length >= 32) {
			/* The mask is a full word - do not apply it. */
			wordmask = 0;
			length -= 32;
		} else if (length) {
			wordmask = 0xffffffff << (32 - length);
			length = 0;
		} else {
			/* The mask became zero - skip the rest. */
			break;
		}

		/* A <- IP address (or one word of it) */
		struct bpf_insn insns_ip[] = {
			BPF_STMT(BPF_LD+BPF_W+BPF_ABS, off + woff),
		};
		add_insns(ctx, insns_ip, __arraycount(insns_ip));

		/* A <- (A & MASK) */
		if (wordmask) {
			struct bpf_insn insns_mask[] = {
				BPF_STMT(BPF_ALU+BPF_AND+BPF_K, wordmask),
			};
			add_insns(ctx, insns_mask, __arraycount(insns_mask));
		}

		/* A == expected-IP-word ? */
		struct bpf_insn insns_cmp[] = {
			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, word, 0, JUMP_MAGIC),
		};
		add_insns(ctx, insns_cmp, __arraycount(insns_cmp));
	}

	uint32_t mwords[] = {
		(opts & MATCH_SRC) ? BM_SRC_CIDR : BM_DST_CIDR, 6,
		af, mask, awords[0], awords[1], awords[2], awords[3],
	};
	done_block(ctx, mwords, sizeof(mwords));
}

/*
 * npfctl_bpf_ports: code block to match TCP/UDP port range.
 *
 * => Port numbers shall be in the network byte order.
 */
void
npfctl_bpf_ports(npf_bpf_t *ctx, u_int opts, in_port_t from, in_port_t to)
{
	const u_int sport_off = offsetof(struct udphdr, uh_sport);
	const u_int dport_off = offsetof(struct udphdr, uh_dport);
	u_int off;

	/* TCP and UDP port offsets are the same. */
	assert(sport_off == offsetof(struct tcphdr, th_sport));
	assert(dport_off == offsetof(struct tcphdr, th_dport));
	assert(ctx->flags & CHECKED_L4);

	assert(((opts & MATCH_SRC) != 0) ^ ((opts & MATCH_DST) != 0));
	off = (opts & MATCH_SRC) ? sport_off : dport_off;

	/* X <- IP header length */
	fetch_l3(ctx, AF_UNSPEC, X_EQ_L4OFF);

	struct bpf_insn insns_fetch[] = {
		/* A <- port */
		BPF_STMT(BPF_LD+BPF_H+BPF_IND, off),
	};
	add_insns(ctx, insns_fetch, __arraycount(insns_fetch));

	/* CAUTION: BPF operates in host byte-order. */
	from = ntohs(from);
	to = ntohs(to);

	if (from == to) {
		/* Single port case. */
		struct bpf_insn insns_port[] = {
			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, from, 0, JUMP_MAGIC),
		};
		add_insns(ctx, insns_port, __arraycount(insns_port));
	} else {
		/* Port range case. */
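		/* Fail if the port is below 'from'; then fail if above 'to'. */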
		struct bpf_insn insns_range[] = {
			BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, from, 0, JUMP_MAGIC),
			BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, to, JUMP_MAGIC, 0),
		};
		add_insns(ctx, insns_range, __arraycount(insns_range));
	}

	uint32_t mwords[] = {
		opts & MATCH_SRC ? BM_SRC_PORTS : BM_DST_PORTS, 2, from, to
	};
	done_block(ctx, mwords, sizeof(mwords));
}

/*
 * npfctl_bpf_tcpfl: code block to match TCP flags.
 */
void
npfctl_bpf_tcpfl(npf_bpf_t *ctx, uint8_t tf, uint8_t tf_mask, bool checktcp)
{
	const u_int tcpfl_off = offsetof(struct tcphdr, th_flags);
	const bool usingmask = tf_mask != tf;

	/* X <- IP header length */
	fetch_l3(ctx, AF_UNSPEC, X_EQ_L4OFF);
	if (checktcp) {
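		/*
		 * If not TCP, jump over the flag fetch, the optional
		 * mask and the comparison below.
		 */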
		const u_int jf = usingmask ? 3 : 2;
		assert(ctx->ingroup == false);

		/* A <- L4 protocol; A == TCP?  If not, jump out. */
		struct bpf_insn insns_tcp[] = {
			BPF_STMT(BPF_LD+BPF_W+BPF_MEM, BPF_MW_L4PROTO),
			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, IPPROTO_TCP, 0, jf),
		};
		add_insns(ctx, insns_tcp, __arraycount(insns_tcp));
	} else {
		assert(ctx->flags & CHECKED_L4);
	}

	struct bpf_insn insns_tf[] = {
		/* A <- TCP flags */
		BPF_STMT(BPF_LD+BPF_B+BPF_IND, tcpfl_off),
	};
	add_insns(ctx, insns_tf, __arraycount(insns_tf));

	if (usingmask) {
		/* A <- (A & mask) */
		struct bpf_insn insns_mask[] = {
			BPF_STMT(BPF_ALU+BPF_AND+BPF_K, tf_mask),
		};
		add_insns(ctx, insns_mask, __arraycount(insns_mask));
	}

	struct bpf_insn insns_cmp[] = {
		/* A == expected-TCP-flags? */
		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, tf, 0, JUMP_MAGIC),
	};
	add_insns(ctx, insns_cmp, __arraycount(insns_cmp));

	if (!checktcp) {
		uint32_t mwords[] = { BM_TCPFL, 2, tf, tf_mask };
		done_block(ctx, mwords, sizeof(mwords));
	}
}

/*
 * npfctl_bpf_icmp: code block to match the ICMP type and/or code.
 * Note: suitable for both ICMPv4 and ICMPv6.
 */
void
npfctl_bpf_icmp(npf_bpf_t *ctx, int type, int code)
{
	const u_int type_off = offsetof(struct icmp, icmp_type);
	const u_int code_off = offsetof(struct icmp, icmp_code);

	assert(ctx->flags & CHECKED_L4);
	assert(offsetof(struct icmp6_hdr, icmp6_type) == type_off);
	assert(offsetof(struct icmp6_hdr, icmp6_code) == code_off);
	assert(type != -1 || code != -1);

	/* X <- IP header length */
	fetch_l3(ctx, AF_UNSPEC, X_EQ_L4OFF);

	if (type != -1) {
		struct bpf_insn insns_type[] = {
			BPF_STMT(BPF_LD+BPF_B+BPF_IND, type_off),
			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, type, 0, JUMP_MAGIC),
		};
		add_insns(ctx, insns_type, __arraycount(insns_type));

		uint32_t mwords[] = { BM_ICMP_TYPE, 1, type };
		done_block(ctx, mwords, sizeof(mwords));
	}

	if (code != -1) {
		struct bpf_insn insns_code[] = {
			BPF_STMT(BPF_LD+BPF_B+BPF_IND, code_off),
			BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, code, 0, JUMP_MAGIC),
		};
		add_insns(ctx, insns_code, __arraycount(insns_code));

		uint32_t mwords[] = { BM_ICMP_CODE, 1, code };
		done_block(ctx, mwords, sizeof(mwords));
	}
}

#define	SRC_FLAG_BIT	(1U << 31)

/*
 * npfctl_bpf_table: code block to match source/destination IP address
 * against NPF table specified by ID.
 */
void
npfctl_bpf_table(npf_bpf_t *ctx, u_int opts, u_int tid)
{
	const bool src = (opts & MATCH_SRC) != 0;

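	/*
	 * A <- table ID, with the source/destination flag in the top bit.
	 * The NPF_COP_TABLE coprocessor call performs the lookup; a zero
	 * result (no match) takes the failure path.
	 */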
	struct bpf_insn insns_table[] = {
		BPF_STMT(BPF_LD+BPF_IMM, (src ? SRC_FLAG_BIT : 0) | tid),
		BPF_STMT(BPF_MISC+BPF_COP, NPF_COP_TABLE),
		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, 0, JUMP_MAGIC, 0),
	};
	add_insns(ctx, insns_table, __arraycount(insns_table));

	uint32_t mwords[] = { src ? BM_SRC_TABLE : BM_DST_TABLE, 1, tid };
	done_block(ctx, mwords, sizeof(mwords));
}
