Deleted Added
sdiff udiff text old ( 121235 ) new ( 122780 )
full compact
1/*-
2 * Copyright (c) 1982, 1986 The Regents of the University of California.
3 * Copyright (c) 1989, 1990 William Jolitz
4 * Copyright (c) 1994 John Dyson
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * the Systems Programming Group of the University of Utah Computer
9 * Science Department, and William Jolitz.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the University of
22 * California, Berkeley and its contributors.
23 * 4. Neither the name of the University nor the names of its contributors
24 * may be used to endorse or promote products derived from this software
25 * without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
40 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
41 */
42
43#include <sys/cdefs.h>
44__FBSDID("$FreeBSD: head/sys/i386/i386/vm_machdep.c 121235 2003-10-19 00:57:10Z davidxu $");
45
46#include "opt_npx.h"
47#ifdef PC98
48#include "opt_pc98.h"
49#endif
50#include "opt_reset.h"
51#include "opt_isa.h"
52#include "opt_kstack_pages.h"
53
54#include <sys/param.h>
55#include <sys/systm.h>
56#include <sys/malloc.h>
57#include <sys/proc.h>
58#include <sys/kse.h>
59#include <sys/bio.h>
60#include <sys/buf.h>
61#include <sys/vnode.h>
62#include <sys/vmmeter.h>
63#include <sys/kernel.h>
64#include <sys/ktr.h>
65#include <sys/mbuf.h>
66#include <sys/mutex.h>
67#include <sys/smp.h>
68#include <sys/socketvar.h>
69#include <sys/sysctl.h>
70#include <sys/unistd.h>
71
72#include <machine/cpu.h>
73#include <machine/md_var.h>
74#include <machine/pcb.h>
75#include <machine/pcb_ext.h>
76#include <machine/vm86.h>
77
78#include <vm/vm.h>
79#include <vm/vm_param.h>
80#include <sys/lock.h>
81#include <vm/vm_kern.h>
82#include <vm/vm_page.h>
83#include <vm/vm_map.h>
84#include <vm/vm_extern.h>
85
86#include <sys/user.h>
87
88#ifdef PC98
89#include <pc98/pc98/pc98.h>
90#else
91#include <i386/isa/isa.h>
92#endif
93
/* Low-level machine reset helpers (see cpu_reset() below). */
static void cpu_reset_real(void);
#ifdef SMP
static void cpu_reset_proxy(void);
static u_int cpu_reset_proxyid;			/* cpuid that requested reset */
static volatile u_int cpu_reset_proxy_active;	/* handshake flag with BSP */
#endif
static void sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)

/*
 * Expanded sf_freelist head.  Really an SLIST_HEAD() in disguise, with the
 * sf_freelist head protected by the sf_lock mutex.
 */
static struct {
	SLIST_HEAD(, sf_buf) sf_head;	/* free sf_bufs; protected by sf_lock */
	struct mtx sf_lock;
} sf_freelist;

/* Number of threads sleeping in sf_buf_alloc(); protected by sf_lock. */
static u_int sf_buf_alloc_want;

/* User code/data segment selectors, defined in machdep.c. */
extern int _ucodesel, _udatasel;
115
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(td1, p2, td2, flags)
	register struct thread *td1;
	register struct proc *p2;
	struct thread *td2;
	int flags;
{
	register struct proc *p1;
	struct pcb *pcb2;
	struct mdproc *mdp2;
#ifdef DEV_NPX
	register_t savecrit;
#endif

	p1 = td1->td_proc;
	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/*
			 * Unshare the user LDT: rfork(2) without RFMEM
			 * needs a private copy if it is currently shared.
			 */
			struct mdproc *mdp1 = &p1->p_md;
			struct proc_ldt *pldt = mdp1->md_ldt;
			if (pldt && pldt->ldt_refcnt > 1) {
				pldt = user_ldt_alloc(mdp1, pldt->ldt_len);
				if (pldt == NULL)
					panic("could not copy LDT");
				mdp1->md_ldt = pldt;
				set_user_ldt(mdp1);
				user_ldt_free(td1);
			}
		}
		return;
	}

	/*
	 * Ensure that p1's pcb is up to date: capture the live %gs and
	 * flush any FPU state owned by td1 into its pcb (with interrupts
	 * disabled so the save cannot be torn by an npx interrupt) before
	 * the pcb is copied below.
	 */
#ifdef DEV_NPX
	if (td1 == curthread)
		td1->td_pcb->pcb_gs = rgs();
	savecrit = intr_disable();
	if (PCPU_GET(fpcurthread) == td1)
		npxsave(&td1->td_pcb->pcb_save);
	intr_restore(savecrit);
#endif

	/* Point the pcb to the top of the stack */
	pcb2 = (struct pcb *)(td2->td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	td2->td_pcb = pcb2;

	/* Copy p1's pcb */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Point mdproc and then copy over td1's contents */
	mdp2 = &p2->p_md;
	bcopy(&p1->p_md, mdp2, sizeof(*mdp2));

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall. This copies most of the user mode register values.
	 * The -16 is so we can expand the trapframe if we go to vm86.
	 */
	td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb - 16) - 1;
	bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));

	td2->td_frame->tf_eax = 0;		/* Child returns zero */
	td2->td_frame->tf_eflags &= ~PSL_C;	/* success */
	td2->td_frame->tf_edx = 1;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
#ifdef PAE
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdpt);
#else
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdir);
#endif
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* fork_trampoline argument */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *);
	pcb2->pcb_ebx = (int)td2;		/* fork_trampoline argument */
	pcb2->pcb_eip = (int)fork_trampoline;
	pcb2->pcb_psl = PSL_KERNEL;		/* ints disabled */
	pcb2->pcb_gs = rgs();
	/*-
	 * pcb2->pcb_dr*: cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */

	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = 0;

	/* Copy the LDT, if necessary. */
	mtx_lock_spin(&sched_lock);
	if (mdp2->md_ldt != 0) {
		if (flags & RFMEM) {
			/* Shared address space: just bump the refcount. */
			mdp2->md_ldt->ldt_refcnt++;
		} else {
			mdp2->md_ldt = user_ldt_alloc(mdp2,
			    mdp2->md_ldt->ldt_len);
			if (mdp2->md_ldt == NULL)
				panic("could not copy LDT");
		}
	}
	mtx_unlock_spin(&sched_lock);

	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user-mode.
	 */
}
242
243/*
244 * Intercept the return address from a freshly forked process that has NOT
245 * been scheduled yet.
246 *
247 * This is needed to make kernel threads stay in kernel mode.
248 */
249void
250cpu_set_fork_handler(td, func, arg)
251 struct thread *td;
252 void (*func)(void *);
253 void *arg;
254{
255 /*
256 * Note that the trap frame follows the args, so the function
257 * is really called like this: func(arg, frame);
258 */
259 td->td_pcb->pcb_esi = (int) func; /* function */
260 td->td_pcb->pcb_ebx = (int) arg; /* first arg */
261}
262
263void
264cpu_exit(struct thread *td)
265{
266 struct mdproc *mdp;
267 struct pcb *pcb = td->td_pcb;
268
269
270 /* Reset pc->pcb_gs and %gs before possibly invalidating it. */
271 mdp = &td->td_proc->p_md;
272 if (mdp->md_ldt) {
273 td->td_pcb->pcb_gs = _udatasel;
274 load_gs(_udatasel);
275 user_ldt_free(td);
276 }
277 if (pcb->pcb_flags & PCB_DBREGS) {
278 /* disable all hardware breakpoints */
279 reset_dbregs();
280 pcb->pcb_flags &= ~PCB_DBREGS;
281 }
282}
283
284void
285cpu_thread_exit(struct thread *td)
286{
287 struct pcb *pcb = td->td_pcb;
288#ifdef DEV_NPX
289 if (td == PCPU_GET(fpcurthread))
290 npxdrop();
291#endif
292 if (pcb->pcb_flags & PCB_DBREGS) {
293 /* disable all hardware breakpoints */
294 reset_dbregs();
295 pcb->pcb_flags &= ~PCB_DBREGS;
296 }
297}
298
299void
300cpu_thread_clean(struct thread *td)
301{
302 struct pcb *pcb;
303
304 pcb = td->td_pcb;
305 if (pcb->pcb_ext != 0) {
306 /* XXXKSE XXXSMP not SMP SAFE.. what locks do we have? */
307 /* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
308 /*
309 * XXX do we need to move the TSS off the allocated pages
310 * before freeing them? (not done here)
311 */
312 mtx_lock(&Giant);
313 kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext,
314 ctob(IOPAGES + 1));
315 mtx_unlock(&Giant);
316 pcb->pcb_ext = 0;
317 }
318}
319
/*
 * Machine-dependent hook run when a thread's kernel stack is swapped
 * back in.  Nothing to do on i386.
 */
void
cpu_thread_swapin(struct thread *td)
{
}
324
/*
 * Machine-dependent hook run when a thread's kernel stack is swapped
 * out.  Nothing to do on i386.
 */
void
cpu_thread_swapout(struct thread *td)
{
}
329
/* Machine-dependent scheduler-exit hook; nothing to do on i386. */
void
cpu_sched_exit(struct thread *td)
{
}
335
/*
 * Lay out the machine-dependent parts of a new thread: the pcb lives at
 * the top of the kernel stack and the trap frame sits just below it.
 */
void
cpu_thread_setup(struct thread *td)
{

	td->td_pcb =
	    (struct pcb *)(td->td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	/* The -16 leaves room to expand the trapframe if we go to vm86. */
	td->td_frame = (struct trapframe *)((caddr_t)td->td_pcb - 16) - 1;
	td->td_pcb->pcb_ext = NULL;
}
345
/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall.  Put enough state in the new thread's PCB to get it to go back
 * userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that are from other
 * sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct pcb *pcb2;

	/* Point the pcb to the top of the stack. */
	pcb2 = td->td_pcb;

	/*
	 * Copy the upcall pcb.  This loads kernel regs.
	 * Those not loaded individually below get their default
	 * values here.
	 *
	 * XXXKSE It might be a good idea to simply skip this as
	 * the values of the other registers may be unimportant.
	 * This would remove any requirement for knowing the KSE
	 * at this time (see the matching comment below for
	 * more analysis) (need a good safe default).
	 */
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
	/* Clear pending FPU trap/init state; the new thread starts clean. */
	pcb2->pcb_flags &= ~(PCB_NPXTRAP|PCB_NPXINITDONE);

	/*
	 * Create a new fresh stack for the new thread.
	 * The -16 is so we can expand the trapframe if we go to vm86.
	 * Don't forget to set this stack value into whatever supplies
	 * the address for the fault handlers.
	 * The contexts are filled in at the time we actually DO the
	 * upcall as only then do we know which KSE we got.
	 */
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
#ifdef PAE
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(td->td_proc->p_vmspace)->pm_pdpt);
#else
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(td->td_proc->p_vmspace)->pm_pdir);
#endif
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;		    /* trampoline arg */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td->td_frame - sizeof(void *); /* trampoline arg */
	pcb2->pcb_ebx = (int)td;			    /* trampoline arg */
	pcb2->pcb_eip = (int)fork_trampoline;
	pcb2->pcb_psl &= ~(PSL_I);	/* interrupts must be disabled */
	pcb2->pcb_gs = rgs();
	/*
	 * If we didn't copy the pcb, we'd need to do the following registers:
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.  XXXKSE ???
	 * pcb2->pcb_ext:	cleared below.
	 */
	pcb2->pcb_ext = NULL;
}
413
/*
 * Set that machine state for performing an upcall that has to
 * be done in thread_userret() so that those upcalls generated
 * in thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
{

	/*
	 * Do any extra cleaning that needs to be done.
	 * The thread may have optional components
	 * that are not present in a fresh thread.
	 * This may be a recycled thread so make it look
	 * as though it's newly allocated.
	 */
	cpu_thread_clean(td);

	/*
	 * Set the trap frame to point at the beginning of the uts
	 * function.  The -16 keeps room above the stack pointer (same
	 * slack used elsewhere for vm86 expansion).
	 */
	td->td_frame->tf_esp =
	    (int)ku->ku_stack.ss_sp + ku->ku_stack.ss_size - 16;
	td->td_frame->tf_eip = (int)ku->ku_func;

	/*
	 * Pass the address of the mailbox for this kse to the uts
	 * function as a parameter on the stack.  suword() writes to
	 * the (user-space) upcall stack.
	 */
	suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
	    (int)ku->ku_mailbox);
}
447
448/*
449 * Convert kernel VA to physical address
450 */
451vm_paddr_t
452kvtop(void *addr)
453{
454 vm_paddr_t pa;
455
456 pa = pmap_kextract((vm_offset_t)addr);
457 if (pa == 0)
458 panic("kvtop: zero page frame");
459 return (pa);
460}
461
462/*
463 * Force reset the processor by invalidating the entire address space!
464 */
465
#ifdef SMP
/*
 * Runs on the BSP after a non-boot CPU restarted it to perform a reset
 * (see cpu_reset()).  Acknowledge the handshake, wait for the requester
 * to see the acknowledgement, stop the requesting CPU, and then do the
 * real reset from CPU #0.
 */
static void
cpu_reset_proxy()
{

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		;	/* Wait for other cpu to see that we've started */
	stop_cpus((1<<cpu_reset_proxyid));
	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	DELAY(1000000);
	cpu_reset_real();
}
#endif
480
/*
 * Reset the machine.  On SMP the actual reset must happen on the BSP
 * (CPU #0): if called on another CPU, restart the (stopped) BSP with
 * cpu_reset_proxy() as its restart function and hand off to it.
 */
void
cpu_reset()
{
#ifdef SMP
	if (smp_active == 0) {
		/* Other CPUs were never started; reset directly. */
		cpu_reset_real();
		/* NOTREACHED */
	} else {

		u_int map;
		int cnt;
		printf("cpu_reset called on cpu#%d\n", PCPU_GET(cpuid));

		/* CPUs that are still running and not yet stopped. */
		map = PCPU_GET(other_cpus) & ~ stopped_cpus;

		if (map != 0) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);		/* Stop all other CPUs */
		}

		if (PCPU_GET(cpuid) == 0) {
			DELAY(1000000);
			cpu_reset_real();
			/* NOTREACHED */
		} else {
			/* We are not BSP (CPU #0) */

			cpu_reset_proxyid = PCPU_GET(cpuid);
			cpustop_restartfunc = cpu_reset_proxy;
			cpu_reset_proxy_active = 0;
			printf("cpu_reset: Restarting BSP\n");
			started_cpus = (1<<0);		/* Restart CPU #0 */

			/* Spin until the proxy announces it is running. */
			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000)
				cnt++;	/* Wait for BSP to announce restart */
			if (cpu_reset_proxy_active == 0)
				printf("cpu_reset: Failed to restart BSP\n");
			enable_intr();
			/* Release the proxy; it will stop this CPU. */
			cpu_reset_proxy_active = 2;

			while (1);
			/* NOTREACHED */
		}
	}
#else
	cpu_reset_real();
#endif
}
530
/*
 * Lowest-level machine reset.  Tries the platform reset mechanism first
 * and, failing that, forces a triple fault by unmapping the entire
 * address space.  Never returns.
 */
static void
cpu_reset_real()
{

#ifdef PC98
	/*
	 * Attempt to do a CPU reset via CPU reset port.
	 */
	disable_intr();
	if ((inb(0x35) & 0xa0) != 0xa0) {
		outb(0x37, 0x0f);		/* SHUT0 = 0. */
		outb(0x37, 0x0b);		/* SHUT1 = 0. */
	}
	outb(0xf0, 0x00);		/* Reset. */
#else
	/*
	 * Attempt to do a CPU reset via the keyboard controller,
	 * do not turn off the GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */

#if !defined(BROKEN_KEYBOARD_RESET)
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	printf("Keyboard reset did not work, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */
#endif
#endif /* PC98 */
	/* force a shutdown by unmapping entire address space ! */
	bzero((caddr_t)PTD, NBPTD);

	/* "good night, sweet prince .... <THUNK!>" */
	invltlb();
	/* NOTREACHED */
	while(1);
}
567
568/*
569 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
570 */
571static void
572sf_buf_init(void *arg)
573{
574 struct sf_buf *sf_bufs;
575 vm_offset_t sf_base;
576 int i;
577
578 mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", NULL, MTX_DEF);
579 mtx_lock(&sf_freelist.sf_lock);
580 SLIST_INIT(&sf_freelist.sf_head);
581 sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
582 sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
583 M_NOWAIT | M_ZERO);
584 for (i = 0; i < nsfbufs; i++) {
585 sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
586 SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list);
587 }
588 sf_buf_alloc_want = 0;
589 mtx_unlock(&sf_freelist.sf_lock);
590}
591
/*
 * Get an sf_buf from the freelist.  Will block if none are available.
 * May return NULL if the sleep is interrupted by a signal (msleep is
 * called with PCATCH); callers must handle that.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m)
{
	struct sf_buf *sf;
	int error;

	mtx_lock(&sf_freelist.sf_lock);
	while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
		sf_buf_alloc_want++;
		error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH,
		    "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			break;
	}
	/* sf may still be NULL here if the msleep above was interrupted. */
	if (sf != NULL) {
		SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list);
		sf->m = m;
		/* Map the page at this sf_buf's reserved KVA slot. */
		pmap_qenter(sf->kva, &sf->m, 1);
	}
	mtx_unlock(&sf_freelist.sf_lock);
	return (sf);
}
622
/*
 * Detach mapped page and release resources back to the system.
 * Installed as the ext_free callback for sendfile mbufs: unmaps the
 * page, drops the wiring, and returns the sf_buf to the freelist,
 * waking one sleeper in sf_buf_alloc() if any.
 */
void
sf_buf_free(void *addr, void *args)
{
	struct sf_buf *sf;
	struct vm_page *m;

	sf = args;
	pmap_qremove((vm_offset_t)addr, 1);
	m = sf->m;
	vm_page_lock_queues();
	vm_page_unwire(m, 0);
	/*
	 * Check for the object going away on us. This can
	 * happen since we don't hold a reference to it.
	 * If so, we're responsible for freeing the page.
	 */
	if (m->wire_count == 0 && m->object == NULL)
		vm_page_free(m);
	vm_page_unlock_queues();
	sf->m = NULL;
	mtx_lock(&sf_freelist.sf_lock);
	SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
	if (sf_buf_alloc_want > 0)
		wakeup_one(&sf_freelist);
	mtx_unlock(&sf_freelist.sf_lock);
}
652
/*
 * Software interrupt handler for queued VM system processing.
 * Currently only drains pending busdma work.
 */
void
swi_vm(void *dummy)
{
	if (busdma_swi_pending != 0)
		busdma_swi();
}
662
663/*
664 * Tell whether this address is in some physical memory region.
665 * Currently used by the kernel coredump code in order to avoid
666 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
667 * or other unpredictable behaviour.
668 */
669
670int
671is_physical_memory(addr)
672 vm_offset_t addr;
673{
674
675#ifdef DEV_ISA
676 /* The ISA ``memory hole''. */
677 if (addr >= 0xa0000 && addr < 0x100000)
678 return 0;
679#endif
680
681 /*
682 * stuff other tests for known memory-mapped devices (PCI?)
683 * here
684 */
685
686 return 1;
687}