/*
 * Copyright (c) 2009-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <libkern/libkern.h>
#include <mach/mach_types.h>
#include <mach/task.h>
#include <sys/proc_internal.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <kern/locks.h>
#include <sys/queue.h>
#include <kern/vm_pressure.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <kern/assert.h>
#include <kern/task.h>
#include <vm/vm_pageout.h>
#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif

/*
 * This value is the threshold that a process must meet to be considered for scavenging.
 */
#define VM_PRESSURE_MINIMUM_RSIZE		10	/* MB */

#define VM_PRESSURE_NOTIFY_WAIT_PERIOD		10000	/* milliseconds */
void vm_pressure_klist_lock(void);
void vm_pressure_klist_unlock(void);

static void vm_dispatch_memory_pressure(void);
void vm_reset_active_list(void);

#if CONFIG_MEMORYSTATUS
static kern_return_t vm_try_pressure_candidates(boolean_t target_foreground_process);
#endif

static lck_mtx_t vm_pressure_klist_mutex;

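/*
 * Knotes waiting for a pressure notification live on vm_pressure_klist.
 * Once a knote has been notified it is parked on vm_pressure_klist_dormant
 * until vm_reset_active_list() moves it back. Both lists are protected by
 * vm_pressure_klist_mutex.
 */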
struct klist vm_pressure_klist;
struct klist vm_pressure_klist_dormant;

#if DEBUG
#define VM_PRESSURE_DEBUG(cond, format, ...)         \
do {                                                 \
	if (cond) { printf(format, ##__VA_ARGS__); } \
} while(0)
#else
#define VM_PRESSURE_DEBUG(cond, format, ...)
#endif

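/* One-time initialization of the pressure klist mutex. */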
void vm_pressure_init(lck_grp_t *grp, lck_attr_t *attr) {
	lck_mtx_init(&vm_pressure_klist_mutex, grp, attr);
}

void vm_pressure_klist_lock(void) {
	lck_mtx_lock(&vm_pressure_klist_mutex);
}

void vm_pressure_klist_unlock(void) {
	lck_mtx_unlock(&vm_pressure_klist_mutex);
}

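/*
 * Attach a knote to the active pressure list. Returns ENOTSUP if the knote
 * was not registered with NOTE_VM_PRESSURE.
 */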
int vm_knote_register(struct knote *kn) {
	int rv = 0;

	vm_pressure_klist_lock();

	if (kn->kn_sfflags & NOTE_VM_PRESSURE) {
		KNOTE_ATTACH(&vm_pressure_klist, kn);
	} else {
		rv = ENOTSUP;
	}

	vm_pressure_klist_unlock();

	return rv;
}

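/*
 * Detach a knote from whichever pressure list (active or dormant) it is on.
 */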
void vm_knote_unregister(struct knote *kn) {
	struct knote *kn_temp;

	vm_pressure_klist_lock();

	VM_PRESSURE_DEBUG(0, "[vm_pressure] process %d cancelling pressure notification\n", kn->kn_kq->kq_p->p_pid);

	SLIST_FOREACH(kn_temp, &vm_pressure_klist, kn_selnext) {
		if (kn_temp == kn) {
			KNOTE_DETACH(&vm_pressure_klist, kn);
			vm_pressure_klist_unlock();
			return;
		}
	}

	SLIST_FOREACH(kn_temp, &vm_pressure_klist_dormant, kn_selnext) {
		if (kn_temp == kn) {
			KNOTE_DETACH(&vm_pressure_klist_dormant, kn);
			vm_pressure_klist_unlock();
			return;
		}
	}

	vm_pressure_klist_unlock();
}

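/*
 * Remove an exiting process's knote from the active or dormant list.
 */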
void vm_pressure_proc_cleanup(proc_t p)
{
	struct knote *kn = NULL;

	vm_pressure_klist_lock();

	VM_PRESSURE_DEBUG(0, "[vm_pressure] process %d exiting pressure notification\n", p->p_pid);

	SLIST_FOREACH(kn, &vm_pressure_klist, kn_selnext) {
		if (kn->kn_kq->kq_p == p) {
			KNOTE_DETACH(&vm_pressure_klist, kn);
			vm_pressure_klist_unlock();
			return;
		}
	}

	SLIST_FOREACH(kn, &vm_pressure_klist_dormant, kn_selnext) {
		if (kn->kn_kq->kq_p == p) {
			KNOTE_DETACH(&vm_pressure_klist_dormant, kn);
			vm_pressure_klist_unlock();
			return;
		}
	}

	vm_pressure_klist_unlock();
}

/*
 * Used by the vm_pressure_thread, which is
 * signalled from within vm_pageout_scan().
 */
void consider_vm_pressure_events(void)
{
	vm_dispatch_memory_pressure();
}

#if CONFIG_MEMORYSTATUS

/* Jetsam-aware version. Called with the klist lock held. */

struct knote *vm_find_knote_from_pid(pid_t, struct klist *);

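/*
 * Walk 'list' for a knote registered by 'pid'. Returns NULL if none is
 * found. Caller must hold the klist lock.
 */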
struct knote *vm_find_knote_from_pid(pid_t pid, struct klist *list) {
	struct knote *kn = NULL;

	SLIST_FOREACH(kn, list, kn_selnext) {
		struct proc *p;
		pid_t current_pid;

		p = kn->kn_kq->kq_p;
		current_pid = p->p_pid;

		if (current_pid == pid) {
			break;
		}
	}

	return kn;
}

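/*
 * Deliver a pressure notification directly to 'pid', whether its knote is
 * on the active or the dormant list. 'locked' indicates whether the caller
 * already holds the klist lock.
 */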
int vm_dispatch_pressure_note_to_pid(pid_t pid, boolean_t locked) {
	int ret = EINVAL;
	struct knote *kn;

	VM_PRESSURE_DEBUG(1, "vm_dispatch_pressure_note_to_pid(): pid %d\n", pid);

	if (!locked) {
		vm_pressure_klist_lock();
	}

	/*
	 * Because we're specifically targeting a process here, we don't care
	 * if a warning has already been sent and it's moved to the dormant
	 * list; check that too.
	 */
	kn = vm_find_knote_from_pid(pid, &vm_pressure_klist);
	if (kn) {
		KNOTE(&vm_pressure_klist, pid);
		ret = 0;
	} else {
		kn = vm_find_knote_from_pid(pid, &vm_pressure_klist_dormant);
		if (kn) {
			KNOTE(&vm_pressure_klist_dormant, pid);
			ret = 0;
		}
	}

	if (!locked) {
		vm_pressure_klist_unlock();
	}

	return ret;
}

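/*
 * Notify every foreground process that registered for pressure events,
 * moving each notified knote to the dormant list.
 */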
void vm_find_pressure_foreground_candidates(void)
{
	struct knote *kn, *kn_tmp;
	struct klist dispatch_klist = { NULL };

	vm_pressure_klist_lock();
	proc_list_lock();

	/* Find the foreground processes. */
	SLIST_FOREACH_SAFE(kn, &vm_pressure_klist, kn_selnext, kn_tmp) {
		proc_t p = kn->kn_kq->kq_p;

		if (memorystatus_is_foreground_locked(p)) {
			KNOTE_DETACH(&vm_pressure_klist, kn);
			KNOTE_ATTACH(&dispatch_klist, kn);
		}
	}

	SLIST_FOREACH_SAFE(kn, &vm_pressure_klist_dormant, kn_selnext, kn_tmp) {
		proc_t p = kn->kn_kq->kq_p;

		if (memorystatus_is_foreground_locked(p)) {
			KNOTE_DETACH(&vm_pressure_klist_dormant, kn);
			KNOTE_ATTACH(&dispatch_klist, kn);
		}
	}

	proc_list_unlock();

	/* Dispatch pressure notifications accordingly. */
	SLIST_FOREACH_SAFE(kn, &dispatch_klist, kn_selnext, kn_tmp) {
		proc_t p = kn->kn_kq->kq_p;

		proc_list_lock();
		if (p != proc_ref_locked(p)) {
			proc_list_unlock();
			KNOTE_DETACH(&dispatch_klist, kn);
			KNOTE_ATTACH(&vm_pressure_klist_dormant, kn);
			continue;
		}
		proc_list_unlock();

		VM_PRESSURE_DEBUG(1, "[vm_pressure] sending event to pid %d\n", kn->kn_kq->kq_p->p_pid);
		KNOTE(&dispatch_klist, p->p_pid);
		KNOTE_DETACH(&dispatch_klist, kn);
		KNOTE_ATTACH(&vm_pressure_klist_dormant, kn);
		microuptime(&p->vm_pressure_last_notify_tstamp);
		memorystatus_send_pressure_note(p->p_pid);
		proc_rele(p);
	}

	vm_pressure_klist_unlock();
}

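/*
 * Pick the single largest eligible background process (resident size of at
 * least VM_PRESSURE_MINIMUM_RSIZE MB, not notified within the last
 * VM_PRESSURE_NOTIFY_WAIT_PERIOD ms) and send it a pressure notification.
 */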
void vm_find_pressure_candidate(void)
{
	struct knote *kn = NULL, *kn_max = NULL;
	unsigned int resident_max = 0;
	pid_t target_pid = -1;
	struct klist dispatch_klist = { NULL };
	struct timeval curr_tstamp = {0, 0};
	int elapsed_msecs = 0;
	proc_t target_proc = PROC_NULL;
	kern_return_t kr = KERN_SUCCESS;

	microuptime(&curr_tstamp);

	vm_pressure_klist_lock();

	SLIST_FOREACH(kn, &vm_pressure_klist, kn_selnext) {
		struct mach_task_basic_info	basic_info;
		mach_msg_type_number_t		size = MACH_TASK_BASIC_INFO_COUNT;
		unsigned int			resident_size = 0;
		proc_t				p = PROC_NULL;
		struct task*			t = TASK_NULL;
		struct timeval			elapsed_tstamp;

		p = kn->kn_kq->kq_p;
		proc_list_lock();
		if (p != proc_ref_locked(p)) {
			p = PROC_NULL;
			proc_list_unlock();
			continue;
		}
		proc_list_unlock();

		t = (struct task *)(p->task);

		/*
		 * Subtract on a per-iteration copy; timevalsub() modifies its
		 * first argument in place, which would corrupt curr_tstamp for
		 * the remaining candidates.
		 */
		elapsed_tstamp = curr_tstamp;
		timevalsub(&elapsed_tstamp, &p->vm_pressure_last_notify_tstamp);
		elapsed_msecs = elapsed_tstamp.tv_sec * 1000 + elapsed_tstamp.tv_usec / 1000;

		if (elapsed_msecs < VM_PRESSURE_NOTIFY_WAIT_PERIOD) {
			proc_rele(p);
			continue;
		}

		if (!memorystatus_bg_pressure_eligible(p)) {
			VM_PRESSURE_DEBUG(1, "[vm_pressure] skipping process %d\n", p->p_pid);
			proc_rele(p);
			continue;
		}

		if ((kr = task_info(t, MACH_TASK_BASIC_INFO, (task_info_t)(&basic_info), &size)) != KERN_SUCCESS) {
			VM_PRESSURE_DEBUG(1, "[vm_pressure] task_info for pid %d failed\n", p->p_pid);
			proc_rele(p);
			continue;
		}

		/*
		 * We don't want a small process to block large processes from
		 * being notified again. <rdar://problem/7955532>
		 */
		resident_size = (basic_info.resident_size)/(1024 * 1024);
		if (resident_size >= VM_PRESSURE_MINIMUM_RSIZE) {
			if (resident_size > resident_max) {
				resident_max = resident_size;
				kn_max = kn;
				target_pid = p->p_pid;
				target_proc = p;
			}
		} else {
			/* There was no candidate with enough resident memory to scavenge. */
			VM_PRESSURE_DEBUG(1, "[vm_pressure] threshold failed for pid %d with %u resident...\n", p->p_pid, resident_size);
		}
		proc_rele(p);
	}

	if (kn_max == NULL || target_pid == -1) {
		VM_PRESSURE_DEBUG(1, "[vm_pressure] - no target found!\n");
		goto exit;
	}

	VM_DEBUG_EVENT(vm_pageout_scan, VM_PRESSURE_EVENT, DBG_FUNC_NONE, target_pid, resident_max, 0, 0);
	VM_PRESSURE_DEBUG(1, "[vm_pressure] sending event to pid %d with %u resident\n", kn_max->kn_kq->kq_p->p_pid, resident_max);

	KNOTE_DETACH(&vm_pressure_klist, kn_max);

	target_proc = proc_find(target_pid);
	if (target_proc != PROC_NULL) {
		KNOTE_ATTACH(&dispatch_klist, kn_max);
		KNOTE(&dispatch_klist, target_pid);
		KNOTE_ATTACH(&vm_pressure_klist_dormant, kn_max);
		memorystatus_send_pressure_note(target_pid);
		microuptime(&target_proc->vm_pressure_last_notify_tstamp);
		proc_rele(target_proc);
	}

exit:
	vm_pressure_klist_unlock();
}
#endif /* CONFIG_MEMORYSTATUS */

struct knote *
vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int level, boolean_t target_foreground_process);

kern_return_t vm_pressure_notification_without_levels(boolean_t target_foreground_process);
kern_return_t vm_pressure_notify_dispatch_vm_clients(boolean_t target_foreground_process);

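/*
 * Try to notify a registered client. If the active list is empty, recharge
 * it from the dormant list first. Returns KERN_SUCCESS if a candidate was
 * notified, KERN_FAILURE otherwise.
 */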
kern_return_t
vm_pressure_notify_dispatch_vm_clients(boolean_t target_foreground_process)
{
	vm_pressure_klist_lock();

	if (SLIST_EMPTY(&vm_pressure_klist)) {
		vm_reset_active_list();
	}

	if (!SLIST_EMPTY(&vm_pressure_klist)) {

		VM_PRESSURE_DEBUG(1, "[vm_pressure] vm_pressure_notify_dispatch_vm_clients\n");

		if (KERN_SUCCESS == vm_try_pressure_candidates(target_foreground_process)) {
			vm_pressure_klist_unlock();
			return KERN_SUCCESS;
		}
	}

	VM_PRESSURE_DEBUG(1, "[vm_pressure] could not find suitable event candidate\n");

	vm_pressure_klist_unlock();

	return KERN_FAILURE;
}

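/* Entry point from the VM pressure thread; defers to memorystatus. */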
static void vm_dispatch_memory_pressure(void)
{
	memorystatus_update_vm_pressure(FALSE);
}

extern vm_pressure_level_t
convert_internal_pressure_level_to_dispatch_level(vm_pressure_level_t);

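/*
 * Scan 'candidate_list' and return the best knote to notify for the given
 * pressure 'level' (-1 means level-free, based on resident size alone).
 * On rising pressure we prefer the least important task; on falling or
 * normal pressure, the most important one; ties go to the larger resident
 * footprint. Called with the klist lock held; does not detach the knote.
 */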
struct knote *
vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int level, boolean_t target_foreground_process)
{
	struct knote	*kn = NULL, *kn_max = NULL;
	unsigned int	resident_max = 0;
	struct timeval	curr_tstamp = {0, 0};
	int		elapsed_msecs = 0;
	int		selected_task_importance = 0;
	static int	pressure_snapshot = -1;
	boolean_t	pressure_increase = FALSE;

	if (level != -1) {

		if (pressure_snapshot == -1) {
			/*
			 * Initial snapshot.
			 */
			pressure_snapshot = level;
			pressure_increase = TRUE;
		} else {

			if (level >= pressure_snapshot) {
				pressure_increase = TRUE;
			} else {
				pressure_increase = FALSE;
			}

			pressure_snapshot = level;
		}
	}

	if ((level > 0) && (pressure_increase == TRUE)) {
		/*
		 * We'll start by considering the largest
		 * unimportant task in our list.
		 */
		selected_task_importance = INT_MAX;
	} else {
		/*
		 * We'll start by considering the largest
		 * important task in our list.
		 */
		selected_task_importance = 0;
	}

	microuptime(&curr_tstamp);

	SLIST_FOREACH(kn, candidate_list, kn_selnext) {

		unsigned int		resident_size = 0;
		proc_t			p = PROC_NULL;
		struct task*		t = TASK_NULL;
		int			curr_task_importance = 0;
		boolean_t		consider_knote = FALSE;
		struct timeval		elapsed_tstamp;

		p = kn->kn_kq->kq_p;
		proc_list_lock();
		if (p != proc_ref_locked(p)) {
			p = PROC_NULL;
			proc_list_unlock();
			continue;
		}
		proc_list_unlock();

#if CONFIG_MEMORYSTATUS
		if (target_foreground_process == TRUE && !memorystatus_is_foreground_locked(p)) {
			/*
			 * Skip processes not marked foreground.
			 */
			proc_rele(p);
			continue;
		}
#endif /* CONFIG_MEMORYSTATUS */

		t = (struct task *)(p->task);

		/* As above, subtract on a copy so curr_tstamp stays valid for later candidates. */
		elapsed_tstamp = curr_tstamp;
		timevalsub(&elapsed_tstamp, &p->vm_pressure_last_notify_tstamp);
		elapsed_msecs = elapsed_tstamp.tv_sec * 1000 + elapsed_tstamp.tv_usec / 1000;

		if ((level == -1) && (elapsed_msecs < VM_PRESSURE_NOTIFY_WAIT_PERIOD)) {
			proc_rele(p);
			continue;
		}

		if (level != -1) {
			/*
			 * For the level-based notifications, check whether this knote is
			 * registered for the current level.
			 */
			vm_pressure_level_t dispatch_level = convert_internal_pressure_level_to_dispatch_level(level);

			if ((kn->kn_sfflags & dispatch_level) == 0) {
				proc_rele(p);
				continue;
			}
		}

#if CONFIG_MEMORYSTATUS
		if (target_foreground_process == FALSE && !memorystatus_bg_pressure_eligible(p)) {
			VM_PRESSURE_DEBUG(1, "[vm_pressure] skipping process %d\n", p->p_pid);
			proc_rele(p);
			continue;
		}
#endif /* CONFIG_MEMORYSTATUS */

		curr_task_importance = task_importance_estimate(t);

		/*
		 * We don't want a small process to block large processes from
		 * being notified again. <rdar://problem/7955532>
		 */
		resident_size = (get_task_phys_footprint(t))/(1024*1024ULL);	/* MB */

		if (resident_size >= VM_PRESSURE_MINIMUM_RSIZE) {

			if (level > 0) {
				/*
				 * Warning or Critical Pressure.
				 */
				if (pressure_increase) {
					if ((curr_task_importance < selected_task_importance) ||
					    ((curr_task_importance == selected_task_importance) && (resident_size > resident_max))) {

						/*
						 * We have found a candidate process which is:
						 * a) at a lower importance than the current selected process
						 * OR
						 * b) has importance equal to that of the current selected process but is larger
						 */

						if (task_has_been_notified(t, level) == FALSE) {
							consider_knote = TRUE;
						}
					}
				} else {
					if ((curr_task_importance > selected_task_importance) ||
					    ((curr_task_importance == selected_task_importance) && (resident_size > resident_max))) {

						/*
						 * We have found a candidate process which is:
						 * a) at a higher importance than the current selected process
						 * OR
						 * b) has importance equal to that of the current selected process but is larger
						 */

						if (task_has_been_notified(t, level) == FALSE) {
							consider_knote = TRUE;
						}
					}
				}
			} else if (level == 0) {
				/*
				 * Pressure back to normal.
				 */
				if ((curr_task_importance > selected_task_importance) ||
				    ((curr_task_importance == selected_task_importance) && (resident_size > resident_max))) {

					if ((task_has_been_notified(t, kVMPressureWarning) == TRUE) || (task_has_been_notified(t, kVMPressureCritical) == TRUE)) {
						consider_knote = TRUE;
					}
				}
			} else if (level == -1) {

				/*
				 * Simple (importance and level)-free behavior based solely on RSIZE.
				 */
				if (resident_size > resident_max) {
					consider_knote = TRUE;
				}
			}

			if (consider_knote) {
				resident_max = resident_size;
				kn_max = kn;
				selected_task_importance = curr_task_importance;
				consider_knote = FALSE; /* reset for the next candidate */
			}
		} else {
			/* There was no candidate with enough resident memory to scavenge. */
			VM_PRESSURE_DEBUG(0, "[vm_pressure] threshold failed for pid %d with %u resident...\n", p->p_pid, resident_size);
		}
		proc_rele(p);
	}

	if (kn_max) {
		VM_PRESSURE_DEBUG(1, "[vm_pressure] sending event to pid %d with %u resident\n", kn_max->kn_kq->kq_p->p_pid, resident_max);
	}

	return kn_max;
}

/*
 * vm_pressure_klist_lock is held for this routine.
 */
kern_return_t vm_pressure_notification_without_levels(boolean_t target_foreground_process)
{
	struct knote *kn_max = NULL;
	pid_t target_pid = -1;
	struct klist dispatch_klist = { NULL };
	proc_t target_proc = PROC_NULL;
	struct klist *candidate_list = NULL;

	candidate_list = &vm_pressure_klist;

	kn_max = vm_pressure_select_optimal_candidate_to_notify(candidate_list, -1, target_foreground_process);

	if (kn_max == NULL) {
		if (target_foreground_process) {
			/*
			 * Doesn't matter if the process had been notified earlier on.
			 * This is a very specific request. Deliver it.
			 */
			candidate_list = &vm_pressure_klist_dormant;
			kn_max = vm_pressure_select_optimal_candidate_to_notify(candidate_list, -1, target_foreground_process);
		}

		if (kn_max == NULL) {
			return KERN_FAILURE;
		}
	}

	target_proc = kn_max->kn_kq->kq_p;

	KNOTE_DETACH(candidate_list, kn_max);

	if (target_proc != PROC_NULL) {

		target_pid = target_proc->p_pid;

		memoryshot(VM_PRESSURE_EVENT, DBG_FUNC_NONE);

		KNOTE_ATTACH(&dispatch_klist, kn_max);
		KNOTE(&dispatch_klist, target_pid);
		KNOTE_ATTACH(&vm_pressure_klist_dormant, kn_max);

#if CONFIG_MEMORYSTATUS
		memorystatus_send_pressure_note(target_pid);
#endif /* CONFIG_MEMORYSTATUS */

		microuptime(&target_proc->vm_pressure_last_notify_tstamp);
	}

	return KERN_SUCCESS;
}

static kern_return_t vm_try_pressure_candidates(boolean_t target_foreground_process)
{
	/*
	 * This takes care of candidates that use NOTE_VM_PRESSURE.
	 * It's a notification without indication of the level
	 * of memory pressure.
	 */
	return (vm_pressure_notification_without_levels(target_foreground_process));
}

/*
 * Remove all elements from the dormant list and place them on the active list.
 * Called with klist lock held.
 */
void vm_reset_active_list(void) {
	/* Re-charge the main list from the dormant list if possible */
	if (!SLIST_EMPTY(&vm_pressure_klist_dormant)) {
		struct knote *kn;

		VM_PRESSURE_DEBUG(1, "[vm_pressure] recharging main list from dormant list\n");

		while (!SLIST_EMPTY(&vm_pressure_klist_dormant)) {
			kn = SLIST_FIRST(&vm_pressure_klist_dormant);
			SLIST_REMOVE_HEAD(&vm_pressure_klist_dormant, kn_selnext);
			SLIST_INSERT_HEAD(&vm_pressure_klist, kn, kn_selnext);
		}
	}
}