/* BEGIN LICENSE BLOCK
 * Version: CMPL 1.1
 *
 * The contents of this file are subject to the Cisco-style Mozilla Public
 * License Version 1.1 (the "License"); you may not use this file except
 * in compliance with the License.  You may obtain a copy of the License
 * at www.eclipse-clp.org/license.
 *
 * Software distributed under the License is distributed on an "AS IS"
 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.  See
 * the License for the specific language governing rights and limitations
 * under the License.
 *
 * The Original Code is  The ECLiPSe Constraint Logic Programming System.
 * The Initial Developer of the Original Code is  Cisco Systems, Inc.
 * Portions created by the Initial Developer are
 * Copyright (C) 1989-2006 Cisco Systems, Inc.  All Rights Reserved.
 *
 * Contributor(s):
 *
 * END LICENSE BLOCK */

/*
 * SEPIA C SOURCE MODULE
 *
 * VERSION	$Id: emu_c_env.c,v 1.9 2012/12/09 22:53:12 jschimpf Exp $
 */

/*
 * IDENTIFICATION		emu_c_env.c
 *
 * DESCRIPTION			This file contains auxiliary C functions for
 *				the emulator.
 */

#include "config.h"
#include "sepia.h"
#include "types.h"
#include "error.h"
#include "embed.h"
#include "mem.h"
#include "ec_io.h"
#include "dict.h"
#include "emu_export.h"
#include "module.h"
#include "debug.h"
#include "opcode.h"

extern int		*interrupt_handler_flags_;
extern pri		**interrupt_handler_;
extern dident		*interrupt_name_;
extern jmp_buf reset;
extern pword		*p_meta_arity_;
extern void		msg_nopoll();
extern void		ec_init_globvars(void);
extern int		ec_init_postponed(void);

#define Bind_Named(pwn, pw)\
			 Trail_Tag_If_Needed_Gb(pwn); \
			 (pwn)->val.ptr = (pw);\
			 (pwn)->tag.kernel = TREF;

#define DELAY_SLOT		1	/* 'suspend' attribute slot */
#define CONSTRAINED_OFF		2	/* 'constrained' list */
/* If you change the above, update ic.c as well. */



/*------------------------------------------
 * the various entry points to the emulator
 *------------------------------------------*/

/*
 * What are the setjmp/longjmps good for?
 * In order to allow exit_block/1 from inside an interrupt emulator, we
 * map it onto the longjmp feature of C. Every emulator call is preceded
 * by a setjmp(), which catches longjmps that are executed while this
 * emulator invocation is active. When a longjmp is caught, we call the
 * emulator again and let it execute an exit_block/1. If the exit_block/1
 * is not caught inside this recursion level, the emulator exits with PTHROW
 * and we have to continue exiting in older emulator invocations by doing
 * another longjmp.
 */
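
/*
 * Schematic sketch of the mechanism (illustrative only; the real code
 * is in emulc() and query_emulc() below):
 *
 *	jump = setjmp(interrupt_buf);		-- precedes every emulator run
 *	if (jump == PTHROW)
 *	    PP = do_exit_block_code_;		-- re-enter, run exit_block/1
 *	result = _emul_trampoline();		-- execute abstract machine code
 *	if (result == PTHROW)			-- not caught at this level
 *	    longjmp(*g_emu_.it_buf, PTHROW);	-- continue in older invocation
 */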

extern vmcode	eval_code_[],
		recurs_code_[],
		slave_code_[],
		boot_code_[],
		it_code_[],
		it_block_code_[],
		*do_exit_block_code_;

extern vmcode	stop_fail_code_[],
		slave_fail_code_[],
		it_fail_code_[];

extern vmcode	*fail_code_,
		*bip_error_code_;

extern st_handle_t eng_root_branch;

/*
 * If we have reinitialised or restored the machine state,
 * we must make sure that the FakedOverflow condition is
 * in the corresponding state.
 */
void
re_fake_overflow(void)
{
    Disable_Int();
    if (MU ||
    	(EVENT_FLAGS && g_emu_.nesting_level == 1 && !PO) ||
	InterruptsPending)
    {
	if (g_emu_.nesting_level > 1) {
	    Interrupt_Fake_Overflow;	/* maybe we are in an interrupt */
	} else {
	    Fake_Overflow;
	}
    }
    else
    {
	Reset_Faked_Overflow;
    }
    Enable_Int();
}

#define EMU_INIT_LD	1
#define EMU_INIT_WL	2
#define EMU_INIT_GV	4

static void
save_vm_status(vmcode *fail_code, int options)
{
    register pword *pw1;
    register control_ptr b_aux;
    register uint32 i;
    extern vmcode fail_return_env_0_[];

    /*
     * Build the invocation frame
     *
     * We leave space for one inline frame (the biggest frame with constant size)
     * on top of the control stack to prevent overwriting useful information
     * in interrupt emulators. Thus we don't have to mask interrupts when
     * building small control frames.
     * We push a dummy return address onto the local stack because
     * the GC relies on the sp-entries in control frames pointing to
     * valid return addresses.
     */

    /* push a dummy return address (needed in the GC) */
    SP = (pword *) (((vmcode **) SP) - 1);
    *((vmcode **) SP) = &fail_return_env_0_[1];

    i = VM_FLAGS;
    Disable_Int()			/* will be reset in ..._emulc() */
    B.args += SAFE_B_AREA;		/* leave some free space */
    b_aux.args = B.args;

    b_aux.invoc->tg_before = TG;	/* for restoring TG after exiting */

    b_aux.invoc->wl = TAGGED_WL;
    b_aux.invoc->wp = WP;
    b_aux.invoc->wp_stamp = WP_STAMP;
    if (options & EMU_INIT_WL)
    {
	/* wl_init() must be done between saving tg_before and tg */
	/* it saves WL, LD, WP */
	Make_Struct(&TAGGED_WL, wl_init());
	/* don't update timestamp, WP must look "old" */
	WP = PRIORITY_MAIN;
    }

#ifdef NEW_ORACLE
    b_aux.invoc->oracle = TO;
    b_aux.invoc->followed_oracle = FO;
    b_aux.invoc->pending_oracle = PO;
    FO = PO = (char *) 0;
    TO = (pword *) 0;
    /* no oracles in recursive emulators! */
    if (g_emu_.nesting_level == 0  &&  VM_FLAGS & ORACLES_ENABLED)
    {
	O_Push(1, O_PAR_ORACLE);	/* also inits TO */
    }
#endif

    b_aux.invoc->global_variable = g_emu_.global_variable;
    b_aux.invoc->postponed_list = PostponedList;
    if (options & EMU_INIT_GV)
    {
	ec_init_globvars();

	ec_init_postponed();

	/* no need to save/restore POSTED: ignored in nested engines */

    	b_aux.invoc->trace_data = g_emu_.trace_data;
	Make_Integer(&TAGGED_TD, 0);
	FCULPRIT = -1;
	/* FTRACE = NULL; */
    }

    b_aux.invoc->eb = EB;
    b_aux.invoc->sp = EB = SP;
    b_aux.invoc->gb = GB;
    b_aux.invoc->tg = GB = TG;		/* for retry from this frame */
    Push_Witness;			/* must be first new thing on global */
    b_aux.invoc->tt = TT;
    b_aux.invoc->e = E;
    b_aux.invoc->flags = i;
    b_aux.invoc->it_buf = g_emu_.it_buf;
    b_aux.invoc->nesting_level = g_emu_.nesting_level;
    b_aux.invoc->pp = PP;
    b_aux.invoc->mu = MU;
    b_aux.invoc->sv = SV;
    b_aux.invoc->ld = LD;
    b_aux.invoc->de = DE;
    b_aux.invoc->ppb = PPB;
#ifdef PB_MAINTAINED
    b_aux.invoc->pb = PB;
#endif
    b_aux.invoc->node = eng_root_branch;
    Get_Bip_Error(b_aux.invoc->global_bip_error);
    b_aux.invoc->gctg = GCTG;
    GCTG = TG;
    Save_Tg_Soft_Lim(b_aux.invoc->tg_soft_lim);
    b_aux.invoc->parser_env = PARSENV;

    pw1 = &A[0];
    b_aux.invoc->arg_0 = *pw1++;
    b_aux.invoc += 1;
    /* don't save any arguments for the initial frame to make invocation
     * frames identical size for all parallel engines */
    if (g_emu_.nesting_level > 0)
    {
	for(i = 1; i < NARGREGS; i++) {
	    if(pw1->tag.kernel != TEND) {
		*(b_aux.args)++ = *pw1++;
	    } else break;
	}
    }

    b_aux.top->backtrack = fail_code;
    b_aux.top->frame.invoc = B.invoc;
    B.top = b_aux.top + 1;
#ifdef PB_MAINTAINED
    PB =
#endif
    PPB = B.args;

    /*
     * Do some initialisation common to all recursive emulator invocations
     */

    g_emu_.nesting_level++;

    DE = MU = SV = (pword *) 0;

#ifdef OC
    OCB = (pword *) 0;
#endif

    re_fake_overflow();

    Restore_Tg_Soft_Lim(TG + TG_SEG)

    Set_Bip_Error(0);
}


/*
 * Idea for event handling: have the EventPending check here in the loop
 * and dispatch to the next predicate, handler or predicate continuation,
 * which all correspond to a C function entry point.
 * This returns PSUCCEED, PFAIL or PTHROW (the throw argument is in A1)
 */
static int
_emul_trampoline(void)
{
    extern func_ptr ec_emulate(void);
    continuation_t continuation = ec_emulate;
    do
    {
	continuation = (continuation_t) (*continuation)();
    } while (continuation);
    return A[0].val.nint;
}

static void
_start_goal(value v_goal, type t_goal, value v_mod, type t_mod)
{
    A[1].val.all = v_goal.all;
    A[1].tag.all = t_goal.all;
    A[2].val.all = v_mod.all;
    A[2].tag.all = t_mod.all;
}


/*
 * This is a wrapper around the emulator _emul_trampoline()
 * which catches the longjmps.
 * This procedure must be called with interrupts disabled (Disable_Int)!
 */
static int
emulc(void)
{
    jmp_buf	interrupt_buf;
    int jump;

    /*
     * (re)initialise the machine
     */

    jump = setjmp(interrupt_buf);

    switch(jump)
    {
    case PFAIL:
	/* We get here when a C++ external wants to fail */
    	PP = fail_code_;
	break;
    case PTHROW:
	/* We get here when a recursive emulator throws or
	 * an external called Exit_Block() (e.g. on stack overflow)
	 */
	PP = do_exit_block_code_;
	/* In case we're within a Disable_Exit() section,
	 * we must clear the NO_EXIT flag on reentry to *this*
	 * emulator!
	 */
	VM_FLAGS &= ~NO_EXIT;
	/* in case we aborted in polling mode */
	msg_nopoll();
	break;
    case 0:
	/* We are in the first call */
	g_emu_.it_buf = (jmp_buf *) interrupt_buf; /* clean: &interrupt_buf */
	Enable_Int();		/* not earlier, since it may call a
				 * recursive emulator that throws */
	break;
    default:
    	/* We get here when a C++ external wants to raise an error */
    	PP = bip_error_code_;
	break;

    }
    return _emul_trampoline();
}

/*
 * This emulator untrails and pops all stacks before returning.
 * It should be used when it is known that no variable that is
 * older than this emulator can be bound (like for file queries).
 */

int
main_emulc_noexit(value v_goal, type t_goal, value v_mod, type t_mod)
{
    save_vm_status(&stop_fail_code_[0], EMU_INIT_LD|EMU_INIT_WL);
    PP = &eval_code_[0];
    _start_goal(v_goal, t_goal, v_mod, t_mod);
    return emulc();
}

int
query_emulc_noexit(value v_goal, type t_goal, value v_mod, type t_mod)
{
    int		result;
    save_vm_status(&stop_fail_code_[0], EMU_INIT_LD|EMU_INIT_WL);
    PP = &eval_code_[0];
    _start_goal(v_goal, t_goal, v_mod, t_mod);
    result = emulc();
    while (result == PYIELD)
    {
	Make_Atom(&A[1], in_dict("Nested emulator yielded", 0));
	Make_Integer(&A[2], RESUME_CONT);
    	result = restart_emulc();
    }
    return result;
}

int
query_emulc(value v_goal, type t_goal, value v_mod, type t_mod)
{
    int		result;

    result = query_emulc_noexit(v_goal, t_goal, v_mod, t_mod);

    if (result == PTHROW)
	longjmp(*g_emu_.it_buf, PTHROW);
    return result;
}

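/*
 * Usage sketch (illustrative only): running an atomic goal from C through
 * query_emulc(), in the style of delayed_exit() further below.
 * 'garbage_collect' stands for any zero-arity predicate known in the
 * kernel module.
 *
 *	pword goal, mod;
 *	goal.val.did = in_dict("garbage_collect", 0);
 *	goal.tag.kernel = TDICT;
 *	mod.val.did = d_.kernel_sepia;
 *	mod.tag.kernel = ModuleTag(d_.kernel_sepia);
 *	result = query_emulc(goal.val, goal.tag, mod.val, mod.tag);
 */
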
int
slave_emulc(void)
{
    int		result;

    save_vm_status(&slave_fail_code_[0], EMU_INIT_LD|EMU_INIT_WL);
    PP = &slave_code_[0];

    result = emulc();
    while (result == PYIELD)
    {
	Make_Atom(&A[1], in_dict("Nested emulator yielded", 0));
	Make_Integer(&A[2], RESUME_CONT);
    	result = restart_emulc();
    }

    if (result == PTHROW)
	longjmp(*g_emu_.it_buf, PTHROW);
    return result;
}

int
restart_emulc(void)
{
    Disable_Int();
    return emulc();
}


/*
 * This emulator is to be used if the recursive emulator may bind
 * outside variables or leave something useful on the global stack
 */

int
sub_emulc_noexit(value v_goal, type t_goal, value v_mod, type t_mod)
{
    int result;
    save_vm_status(&stop_fail_code_[0], 0);
    PP = &recurs_code_[0];

    _start_goal(v_goal, t_goal, v_mod, t_mod);
    result = emulc();
    while (result == PYIELD)
    {
	Make_Atom(&A[1], in_dict("Nested emulator yielded", 0));
	Make_Integer(&A[2], RESUME_CONT);
    	result = restart_emulc();
    }
    return result;
}

int
sub_emulc(value v_goal, type t_goal, value v_mod, type t_mod)
{
    int		result;

    result = sub_emulc_noexit(v_goal, t_goal, v_mod, t_mod);

    if (result == PTHROW)
	longjmp(*g_emu_.it_buf, PTHROW);
    return result;
}

/*
 * For booting: the 1st argument is the bootfile name
 */

int
boot_emulc(value v_file, type t_file, value v_mod, type t_mod)
{
    int		result;
    save_vm_status(&stop_fail_code_[0], EMU_INIT_LD);
    PP = &boot_code_[0];
    _start_goal(v_file, t_file, v_mod, t_mod);
    result = emulc();
    while (result == PYIELD)
    {
	Make_Atom(&A[1], in_dict("Nested emulator yielded", 0));
	Make_Integer(&A[2], RESUME_CONT);
    	result = restart_emulc();
    }
    return result;
}


/*
 * make an exit_block with the given exit tag
 */

int
return_throw(value v_tag, type t_tag)
{
    A[1].val.all = v_tag.all;
    A[1].tag.all = t_tag.all;
    return PTHROW;
}

int
longjmp_throw(value v_tag, type t_tag)
{
    A[1].val.all = v_tag.all;
    A[1].tag.all = t_tag.all;
    longjmp(*g_emu_.it_buf, PTHROW);
}


int
delayed_exit(void)
{
    pword goal, mod;
    goal.val.did = d_.exit_postponed;
    goal.tag.kernel = TDICT;
    mod.val.did = d_.kernel_sepia;
    mod.tag.kernel = ModuleTag(d_.kernel_sepia);
    return query_emulc(goal.val, goal.tag, mod.val, mod.tag); /* will do a longjmp */
}


/*
 * Interrupt emulator:
 *	the 1st argument is the signal number
 * When the exit_block protection is active,
 * the handler is called inside a block/3
 */

int
it_emulc(value v_sig, type t_sig)
{
    int		result;

    /* no handler set, don't bother starting an emulator */
    if (interrupt_handler_flags_[v_sig.nint] != IH_HANDLE_ASYNC)
    	return PSUCCEED;

    save_vm_status(&it_fail_code_[0], EMU_INIT_LD|EMU_INIT_GV);

    PARSENV = (void_ptr) 0;

    if (VM_FLAGS & NO_EXIT) {
	PP = &it_block_code_[0];
    } else {
	PP = &it_code_[0];
    }

    /* in case we interrupted in polling mode */
    msg_nopoll();
    A[1].val.all = v_sig.all;
    A[1].tag.all = t_sig.all;
    result = emulc();
    while (result == PYIELD)
    {
	Make_Atom(&A[1], in_dict("Nested emulator yielded", 0));
	Make_Integer(&A[2], RESUME_CONT);
    	result = restart_emulc();
    }
    return result;
}


/*------------------------------------------
 * Synchronous event handling
 *------------------------------------------*/

#ifdef DEBUG_EVENT_Q
#define event_q_assert(ex) {						\
    if (!(ex)) {							\
	(void) p_fprintf(current_err_, "Assertion Failed at ");		\
	(void) p_fprintf(current_err_, "file \"%s\"", __FILE__);	\
	(void) p_fprintf(current_err_, " line %d\n", __LINE__);		\
	(void) ec_panic("Assertion Failed", "Event queue");		\
    }									\
}
#else
#define event_q_assert(ex)
#endif

static pword volatile posted_events_[MAX_STATIC_EVENT_SLOTS];
static int volatile first_posted_ = 0;
static int volatile next_posted_ = 0;

#define IsEmptyDynamicEventQueue()			\
	(g_emu_.dyn_event_q.free_event_slots == 	\
	 g_emu_.dyn_event_q.total_event_slots)

#define IsEmptyStaticEventQueue()			\
	(first_posted_ == next_posted_)

#ifdef PRINTAM
void
print_static_queued_events(void)
{
    int i;

    Disable_Int();
    i = first_posted_;
    p_fprintf(current_err_, "Static event queue:");
    while (i != next_posted_)
    {
	p_fprintf(current_err_, " %d:%x", posted_events_[i].tag.kernel, posted_events_[i].val.ptr);
	i = (i + 1) % MAX_STATIC_EVENT_SLOTS;
    }
    ec_newline(current_err_);
    Enable_Int();
}

void
print_dynamic_queued_events(void)
{
    dyn_event_q_slot_t *slot;
    uword cnt = 0, total;

    Disable_Int();
    slot = g_emu_.dyn_event_q.prehead->next; /* get */
    total = g_emu_.dyn_event_q.total_event_slots - g_emu_.dyn_event_q.free_event_slots;
    p_fprintf(current_err_, "Dynamic event queue: Total: %" W_MOD "d Free: %" W_MOD "d:",
	g_emu_.dyn_event_q.total_event_slots, g_emu_.dyn_event_q.free_event_slots);
    for( cnt = 0; cnt < total; cnt++, slot = slot->next )
    {
	p_fprintf(current_err_, " %d:%x", slot->event_data.tag.kernel, slot->event_data.val.ptr);
    }
    ec_newline(current_err_);
    Enable_Int();
}
#endif

static int
_post_event_static(pword event, int no_duplicates)
{
    int i;

    Check_Integer(event.tag);

    Disable_Int();

    if (no_duplicates)
    {
	/* if this event is already posted, don't do it again */
	for (i = first_posted_; i != next_posted_; i = (i + 1) % MAX_STATIC_EVENT_SLOTS)
	{
	    if (posted_events_[i].tag.all == event.tag.all
		&& posted_events_[i].val.all == event.val.all)
	    {
		Enable_Int();
		Succeed_;
	    }
	}
    }

    i = (next_posted_ + 1) % MAX_STATIC_EVENT_SLOTS;

    if (i == first_posted_)
    {
	Enable_Int();
	Bip_Error(RANGE_ERROR);	/* queue full */
    }

    posted_events_[next_posted_] = event;
    next_posted_ = i;		/* enter in queue */
    EVENT_FLAGS |= EVENT_POSTED|DEL_IRQ_POSTED;
    Interrupt_Fake_Overflow; 	/* Served in signal handler */
    Enable_Int();

    Succeed_;
}

static int
_post_event_dynamic(pword event, int no_duplicates)
{
    extern t_ext_type heap_event_tid;

    if (IsHandle(event.tag))
    {
	Check_Type(event.val.ptr->tag, TEXTERN);
	if (ExternalClass(event.val.ptr) != &heap_event_tid) {
	    Bip_Error(TYPE_ERROR);
	}
	if (!(ExternalData(event.val.ptr))) {
	    Bip_Error(STALE_HANDLE);
	}

	/* If the event is disabled, don't post it to the queue */
	if (!((t_heap_event *)ExternalData(event.val.ptr))->enabled) {
	    Succeed_;
	}

	/* Don't put the handle in the queue! */
	event.tag.kernel = TPTR;
	event.val.wptr = heap_event_tid.copy(ExternalData(event.val.ptr));
    }
    else if (IsTag(event.tag.kernel, TPTR))
    {
	/* Assume it's a TPTR to a t_heap_event (we use this when posting
	 * an event that was stored in a stream descriptor).
	 * As above, if the event is disabled, don't post it to the queue.
	 */
	if (!((t_heap_event *)event.val.ptr)->enabled) {
	    Succeed_;
	}
	event.val.wptr = heap_event_tid.copy(event.val.wptr);
    }
    else if (!IsAtom(event.tag))
    {
	Error_If_Ref(event.tag);
	Bip_Error(TYPE_ERROR);
    }

    /* Events are either atoms or handles (anonymous).
     * Such events go to the dynamic event queue.
     */

    Disable_Int();

    if (no_duplicates)
    {
	uword cnt, total;
	/* if this event is already posted, don't do it again */
	dyn_event_q_slot_t *slot = g_emu_.dyn_event_q.prehead->next; /* get */

	total = g_emu_.dyn_event_q.total_event_slots - g_emu_.dyn_event_q.free_event_slots;
	for( cnt = 0; cnt < total; cnt++, slot = slot->next )
	{
	    if (slot->event_data.tag.all == event.tag.all
	     && slot->event_data.val.all == event.val.all)
	    {
		/* If the anonymous event handle reference count was bumped
		 * (via the copy ready for queue insertion) decrement it again!
		 */
		if (IsTag(event.tag.kernel, TPTR))
		{
		    heap_event_tid.free(event.val.wptr);
		}
		Enable_Int();
		Succeed_;
	    }
	}
    }

    /* Is the queue full? */
    if (g_emu_.dyn_event_q.free_event_slots != 0)
    {
	/* No! */
	g_emu_.dyn_event_q.free_event_slots--;
    }
    else
    {
	/* Yes! */
	dyn_event_q_slot_t *slot;

	event_q_assert(g_emu_.dyn_event_q.prehead ==
		       g_emu_.dyn_event_q.tail); /* put == get */

	if ((slot = (dyn_event_q_slot_t *)hp_alloc_size(sizeof(dyn_event_q_slot_t))) == NULL)
	{
	    Enable_Int();
	    Bip_Error(RANGE_ERROR); /* not enough memory - queue full */
	}
	slot->next = g_emu_.dyn_event_q.tail->next;
	g_emu_.dyn_event_q.tail->next = slot;
	g_emu_.dyn_event_q.total_event_slots++;
	g_emu_.dyn_event_q.prehead = g_emu_.dyn_event_q.prehead->next; /* reflect insertion */
    }

    g_emu_.dyn_event_q.tail = g_emu_.dyn_event_q.tail->next; /* update tail and put */
    g_emu_.dyn_event_q.tail->event_data = event; /* delayed set of old put */
    EVENT_FLAGS |= EVENT_POSTED;
    Fake_Overflow; /* Not served in signal handler */
    Enable_Int();

    Succeed_;
}

int
ec_post_event_unique(pword event)
{
    return _post_event_dynamic(event, 1);
}

int Winapi
ec_post_event(pword event)
{
    return _post_event_dynamic(event, 0);
}

int Winapi
ec_post_event_string(const char *event)
{
    pword pw;
    Make_Atom(&pw, in_dict((char *) event, 0));
    return _post_event_dynamic(pw, 0);
}

int Winapi
ec_post_event_int(int event)
{
    pword pw;
    Make_Integer(&pw, event);
    return _post_event_static(pw, 0);
}
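
/*
 * Usage sketch (illustrative only): posting events from C code. The atom
 * form goes to the dynamic queue and is handled synchronously; the integer
 * form is meant for interrupt numbers and goes to the static queue.
 * "my_event" is a hypothetical event name.
 *
 *	(void) ec_post_event_string("my_event");
 *	(void) ec_post_event_int(interrupt_number);
 */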

void
next_posted_event(pword *out)
{
    int n;

    /* Execute all static event queue entries before dynamic queue
     * entries. The assumption here is that it is acceptable to ignore
     * the precise posting order when asynchronously-posted events are
     * interleaved with other (i.e. synchronously-posted) events.
     * In addition, the dynamic event queue is assumed to be serviced
     * eventually, so starvation is unlikely and not problematic.
     */

    Disable_Int();

    if ((n = next_urgent_event()) != -1)
    {
	Make_Integer(out, n);
    }
    else
    {
	/* Service the dynamic event queue */
	if (!IsEmptyDynamicEventQueue())
	{
	    g_emu_.dyn_event_q.prehead =
		g_emu_.dyn_event_q.prehead->next; /* get = get->next */
	    *out = g_emu_.dyn_event_q.prehead->event_data; /* delayed update of get */
	    g_emu_.dyn_event_q.free_event_slots++;
	}
	else
	{
	    /* The queues were empty although the flag was set: shouldn't happen */
	    ec_panic("Bogus event queue notification", "next_posted_event()");
	}
    }

    /* If either queue still contains events, fake the overflow to handle the next one */
    if (IsEmptyStaticEventQueue() &&
	IsEmptyDynamicEventQueue())
    {
	event_q_assert(g_emu_.dyn_event_q.prehead ==
		       g_emu_.dyn_event_q.tail); /* put == get */
	EVENT_FLAGS &= ~EVENT_POSTED;
	event_q_assert(!(EVENT_FLAGS & DEL_IRQ_POSTED));
    }
    else
    {
	event_q_assert(EVENT_FLAGS & EVENT_POSTED);
	Fake_Overflow;
    }

    Enable_Int();
}

/*
 * The following is a hack to allow aborting looping unifications:
 * It is invoked within a possibly infinite emulator loop iff the
 * DEL_IRQ_POSTED flag is set (Poll_Interrupts macro).
 * We pick the delayed async irqs out of the event queue and
 * return them.
 * If the event is an asynchronously-posted but synchronously-executed
 * event, we move it to the dynamic event queue and
 * seek the next urgent event. EVENT_FLAGS are adjusted and,
 * if no urgent events are left, -1 is returned.
 */

int
next_urgent_event(void)
{
    Disable_Int();

    while (!IsEmptyStaticEventQueue())
    {
	int n = posted_events_[first_posted_].val.nint;
	event_q_assert(!IsTag(posted_events_[first_posted_].tag.kernel, TEND));
	event_q_assert(IsInteger(posted_events_[first_posted_].tag));
	/* Remove element from queue */
	first_posted_ = (first_posted_ + 1) % MAX_STATIC_EVENT_SLOTS;
	if (interrupt_handler_flags_[n] == IH_POST_EVENT)
	{
	    /* Post the atom to the dynamic event queue for synchronous
	     * execution.
	     */
	    pword event;
	    Make_Atom(&event, interrupt_name_[n]);
	    if (_post_event_dynamic(event, 0) != PSUCCEED)
		(void) write(2, "\nEvent queue overflow - signal lost\n", 36);
	}
	else
	{
	    event_q_assert(interrupt_handler_flags_[n] == IH_HANDLE_ASYNC
	    		|| interrupt_handler_flags_[n] == IH_THROW
	    		|| interrupt_handler_flags_[n] == IH_ABORT);
	    if (IsEmptyStaticEventQueue())
	    {
    		EVENT_FLAGS &= ~DEL_IRQ_POSTED;
		if (IsEmptyDynamicEventQueue())
		{
		    EVENT_FLAGS &= ~EVENT_POSTED;
		}
	    }
	    Enable_Int();
	    return n;
	}
    }
    EVENT_FLAGS &= ~DEL_IRQ_POSTED; /* In case it got set in the meantime */

    Enable_Int();
    return -1;
}


/*
 * Remove a disabled event from the dynamic event queue
 */

void
purge_disabled_dynamic_events(t_heap_event *event)
{
    dyn_event_q_slot_t *slot, *prev;
    uword cnt = 0, total;
    pword *pevent;

    Disable_Int();

    total = g_emu_.dyn_event_q.total_event_slots - g_emu_.dyn_event_q.free_event_slots;

    if ( total == 0 ) {
	Enable_Int();
	return;
    }

    prev = g_emu_.dyn_event_q.prehead;
    slot = prev->next; /* get */

    /* Process all slots but the tail */
    for( cnt = 1; cnt < total; cnt++ )
    {
	pevent = &slot->event_data;

	if (IsTag(pevent->tag.kernel, TPTR) && pevent->val.wptr == (uword*)event)
	{
	    g_emu_.dyn_event_q.free_event_slots++;
	    prev->next = slot->next;
	    slot->next = g_emu_.dyn_event_q.tail->next; /* insert before put */
	    g_emu_.dyn_event_q.tail->next = slot; /* update put */
	    ExternalClass(pevent->val.ptr)->free(ExternalData(pevent->val.ptr));
	    slot = prev->next;
	    continue;
	}

	prev = slot;
	slot = slot->next;
    }

    /* Special-case tail element removal. This also handles the case
     * where the circular list is full - in either case simply rewind
     * the tail pointer.
     */
    event_q_assert(slot == g_emu_.dyn_event_q.tail);
    pevent = &slot->event_data;
    if (IsTag(pevent->tag.kernel, TPTR) && pevent->val.wptr == (uword*)event)
    {
	g_emu_.dyn_event_q.free_event_slots++;
	g_emu_.dyn_event_q.tail = prev;
	ExternalClass(pevent->val.ptr)->free(ExternalData(pevent->val.ptr));
    }

    /* If both the static and the dynamic event queue are
     * now empty, clear the flags
     */
    if (IsEmptyDynamicEventQueue() &&
	IsEmptyStaticEventQueue())
    {
	EVENT_FLAGS &= ~EVENT_POSTED;
	event_q_assert(!(EVENT_FLAGS & DEL_IRQ_POSTED));
    }

    Enable_Int();
}


/*
 * Initialise dynamic event queue
 */

void
ec_init_dynamic_event_queue(void)
{
    int cnt;

    Disable_Int();

    if ((g_emu_.dyn_event_q.prehead =
	(dyn_event_q_slot_t *)hp_alloc_size(sizeof(dyn_event_q_slot_t))) == NULL)
    {
	ec_panic(MEMORY_P, "emu_init()");
    }

    g_emu_.dyn_event_q.tail = g_emu_.dyn_event_q.prehead;

    for(cnt = 0; cnt < MIN_DYNAMIC_EVENT_SLOTS - 1; cnt++)
    {
	if ((g_emu_.dyn_event_q.tail->next =
	    (dyn_event_q_slot_t *)hp_alloc_size(sizeof(dyn_event_q_slot_t))) == NULL)
	{
	    ec_panic(MEMORY_P, "emu_init()");
	}
	g_emu_.dyn_event_q.tail = g_emu_.dyn_event_q.tail->next;
    }

    /* Link tail to head to complete circular list creation */
    g_emu_.dyn_event_q.tail->next = g_emu_.dyn_event_q.prehead;

    /* Set tail insertion point */
    /* Empty queue condition:
     * IsEmptyDynamicEventQueue(). In addition, when queue is empty
     * or full: tail->next (put) == prehead->next (get)
     */
    g_emu_.dyn_event_q.tail = g_emu_.dyn_event_q.prehead;

    /* Dynamic queue is initially empty */
    g_emu_.dyn_event_q.total_event_slots =
		g_emu_.dyn_event_q.free_event_slots = MIN_DYNAMIC_EVENT_SLOTS;

    Enable_Int();
}


/* Shrink the dynamic event queue, keeping at least
 * MIN_DYNAMIC_EVENT_SLOTS free slots.
 * Used during GC.
 */

void
trim_dynamic_event_queue(void)
{
    Disable_Int();

    if (g_emu_.dyn_event_q.free_event_slots > MIN_DYNAMIC_EVENT_SLOTS)
    {
	dyn_event_q_slot_t *slot = g_emu_.dyn_event_q.tail->next; /* put */
	uword new_free_slots =	g_emu_.dyn_event_q.free_event_slots /
					DYNAMIC_EVENT_Q_SHRINK_FACTOR;
	if (new_free_slots < MIN_DYNAMIC_EVENT_SLOTS) {
	    new_free_slots = MIN_DYNAMIC_EVENT_SLOTS;
	}

	if (GlobalFlags & GC_VERBOSE) {
	    p_fprintf(log_output_,
		      "shrink dynamic event queue from Total: %" W_MOD "u"
		      " Free: %" W_MOD "u to Total: %" W_MOD "u Free: %" W_MOD "u (elements)\n",
		      g_emu_.dyn_event_q.total_event_slots,
		      g_emu_.dyn_event_q.free_event_slots,
		      g_emu_.dyn_event_q.total_event_slots -
		      (g_emu_.dyn_event_q.free_event_slots - new_free_slots), new_free_slots);
	    ec_flush(log_output_);
	}

	for ( ; g_emu_.dyn_event_q.free_event_slots > new_free_slots
	      ; g_emu_.dyn_event_q.free_event_slots--,
		g_emu_.dyn_event_q.total_event_slots-- )
	{
	    g_emu_.dyn_event_q.tail->next = slot->next;
	    hp_free_size((generic_ptr)slot, sizeof(dyn_event_q_slot_t));
	    slot = g_emu_.dyn_event_q.tail->next;
	}
    }

    Enable_Int();
}



/*----------------------------------------------
 * Auxiliary functions for the emulator
 *----------------------------------------------*/

/*
 * UNIFY		var		nonvar		any
 *
 * with var:		ec_unify()
 *
 * with nonvar:		Bind_Var()	ec_unify()
 *
 * with any:		ec_unify()	ec_unify()	ec_unify()
 */

/*
 * ec_unify() -- copy of the general unifier, callable from C code
 *
 * Note that Occur_Check_Boundary(0) is done after return from the builtin.
 */

int
ec_unify_(value v1, type t1,
	value v2, type t2,
	pword **list)		/* list of unified metaterms */
{
    register long arity;
    register pword *pw1, *pw2;

    /* In Request_Unify it may happen that the tag is REF/NAME but
       it has already been bound by a previous Request */
    if (IsRef(t1))
    {
	pw1 = v1.ptr;
	Dereference_(pw1);
	t1.all = pw1->tag.all;
	v1.all = pw1->val.all;
    }
    if (IsRef(t2))
    {
	pw2 = v2.ptr;
	Dereference_(pw2);
	t2.all = pw2->tag.all;
	v2.all = pw2->val.all;
    }

    for (;;)
    {
	if(IsVar(t1))
	{
	    if(IsVar(t2)) 		/* both are free:	*/
	    {
		if (v1.ptr < v2.ptr)
		    if (v1.ptr < TG)
		    {
			Trail_If_Needed(v2.ptr);
			v2.ptr->val.ptr = v1.ptr;
		    }
		    else
		    {
			Trail_If_Needed_Eb(v1.ptr);
			v1.ptr->val.ptr = v2.ptr;
		    }
		else if (v1.ptr > v2.ptr)
		    if (v2.ptr < TG)
		    {
			Trail_If_Needed(v1.ptr);
			v1.ptr->val.ptr = v2.ptr;
		    }
		    else
		    {
			Trail_If_Needed_Eb(v2.ptr);
			v2.ptr->val.ptr = v1.ptr;
		    }
		else
		    ;		/* succeed */
	    }
	    else 			/* only t1 is free */
	    {
		Occur_Check_Read(v1.ptr, v2, t2, return PFAIL)
		if (IsRef(t2)) {
		    Trail_If_Needed(v1.ptr);
		    v1.ptr->val.ptr = v2.ptr->val.ptr;
		} else {
		    Bind_(v1.ptr, v2.all, t2.all)
		}
	    }
	    return PSUCCEED;
	}
	else if (IsVar(t2))		/* only t2 is free */
	{
	    Occur_Check_Read(v2.ptr, v1, t1, return PFAIL)
	    if (IsRef(t1)) {
		Trail_If_Needed(v2.ptr);
		v2.ptr->val.ptr = v1.ptr->val.ptr;
	    } else {
		Bind_(v2.ptr, v1.all, t1.all)
	    }
	    return PSUCCEED;
	}
	else if (IsRef(t1))		/* t1 is a nonstandard variable */
	{
	    pword aux_pw;
	    Occur_Check_Read(v1.ptr, v2, t2, return PFAIL)
	    aux_pw.val.all = v2.all;
	    aux_pw.tag.all = t2.all;
	    return bind_c(v1.ptr, &aux_pw, list);
	}
	else if (IsRef(t2))		/* t2 is a nonstandard variable */
	{
	    pword aux_pw;
	    Occur_Check_Read(v2.ptr, v1, t1, return PFAIL)
	    aux_pw.val.all = v1.all;
	    aux_pw.tag.all = t1.all;
	    return bind_c(v2.ptr, &aux_pw, list);
	}
	/* two non-variables */
	else if (TagType(t1) != TagType(t2))
	{
	    return PFAIL;
	}
	else if (IsSimple(t1))
	{
	    if (SimpleEq(t1.kernel, v1, v2))
		return PSUCCEED;
	    else
		return PFAIL;
	}
	else if (IsList(t1))
	{
	    arity = 2;
	}
	else if (IsStructure(t1))
	{
	    if (v1.ptr->val.did != v2.ptr->val.did)
		return PFAIL;
	    if ((arity = DidArity(v1.ptr->val.did)) == 0)
		return PSUCCEED;
	    v1.ptr++;
	    v2.ptr++;
	}
	else if (IsString(t1))
	{
	    Compare_Strings(v1, v2, arity)
	    if (arity >= 0)
		return PFAIL;
	    else
		return PSUCCEED;
	}
	else
	{
#ifdef PRINTAM
	    if (!(TagType(t1) >= 0 && TagType(t1) <= NTYPES))
	    {
		p_fprintf(current_err_, "ec_unify(): unknown tag (%x) encountered\n",
			t1.kernel);
		return PFAIL;
	    }
#endif
	    return tag_desc[TagType(t1)].equal(v1.ptr, v2.ptr) ? PSUCCEED : PFAIL;
	}

	Poll_Interrupts();	/* because we might be looping */

	/* arity > 0 */
	for (;;)
	{
	    pw1 = v1.ptr++;
	    pw2 = v2.ptr++;
	    Dereference_(pw1);
	    Dereference_(pw2);
	    if (--arity == 0)
		break;
	    if (ec_unify_(pw1->val, pw1->tag, pw2->val, pw2->tag, list) == PFAIL)
		return PFAIL;
	}
	v1.all = pw1->val.all;
	t1.all = pw1->tag.all;
	v2.all = pw2->val.all;
	t2.all = pw2->tag.all;
    }
}
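
/*
 * Usage sketch (illustrative only): unifying two dereferenced pwords from
 * C code. The 'list' argument collects the metaterms that get bound; the
 * emulator passes &MU so that meta_unify events are raised (see bind_c()
 * below).
 *
 *	pword *pw1, *pw2;	-- both assumed to be dereferenced
 *	if (ec_unify_(pw1->val, pw1->tag, pw2->val, pw2->tag, &MU) == PFAIL)
 *	    return PFAIL;
 */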


int
deep_suspend(value val, type tag,
	int position,		/* must be > 0 */
	pword *susp,		/* must be dereferenced */
	int slot)
{
    register int arity;
    register pword *arg_i;
    int		res;

    for (;;)
    {
	if (IsRef(tag))
	{
	    return insert_suspension(val.ptr, position, susp, slot);
	}
	else if (IsList(tag))
	    arity = 2;
	else if (IsStructure(tag))
	{
	    arity = DidArity(val.ptr->val.did);
	    val.ptr++;
	}
	else
	    return PSUCCEED;

	for(;arity > 1; arity--)
	{
	    arg_i = val.ptr++;
	    Dereference_(arg_i);
	    if (IsRef(arg_i->tag))
		res = insert_suspension(arg_i, position, susp, slot);
	    else
		res = deep_suspend(arg_i->val, arg_i->tag, position,
				susp, slot);
	    if (res != PSUCCEED)
		return res;
	}
	arg_i = val.ptr;		/* tail recursion */
	Dereference_(arg_i);
	val.all = arg_i->val.all;
	tag.all = arg_i->tag.all;
    }
}


pword *
add_attribute(word tv, pword *va, word ta, int slot)
{
    register pword *s, *t;

    s = TG;
    TG += 2 + p_meta_arity_->val.nint + 1;
    s[0].val.ptr = s;		/* metaterm */
    s[0].tag.kernel = TagNameField(tv) | RefTag(TMETA);
    s[1].val.ptr = s + 2;
    s[1].tag.kernel = TCOMP;
    s[2].val.did = in_dict("meta", (int) p_meta_arity_->val.nint);
    s[2].tag.kernel = TDICT;
    for (t = &s[3]; t < TG; t++)
    {
	t->val.ptr = t;
	t->tag.kernel = TREF;
    }
    s[slot+2].val.ptr = va;
    s[slot+2].tag.kernel = ta;
    Check_Gc
    return s;
}
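
/*
 * Resulting global stack layout (illustrative sketch of the code above),
 * with N = p_meta_arity_->val.nint:
 *
 *	s[0]	TMETA, self-reference		the attributed variable
 *	s[1]	TCOMP -> s+2			link to the attribute vector
 *	s[2]	TDICT meta/N			attribute vector functor
 *	s[3..]	TREF, self-references		one slot per attribute;
 *						s[slot+2] is set to (va,ta)
 */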

/*
 * Create the attribute for the suspend extension.
 * The first slot is a difference list, the others are normal lists.
 */
static pword *
_suspension_attribute(pword *susp, int position)
{
    register pword	*t, *d, *s;
    register int	i;
    register int	arity = DidArity(d_.suspend_attr);

    if (position > arity) {
	position = 1;
    }

    t = TG;
    Push_Struct_Frame(d_.suspend_attr);
    d = TG;
    Push_Struct_Frame(d_.minus);
    s = TG;
    Push_List_Frame();

    s->val.ptr = susp;		/* list element */
    s->tag.kernel = TSUSP;
    Make_Struct(t+1, d);
    if (position == 1)
    {
	Make_List(d+1, s);	/* singleton dlist */
	Make_Ref(d+2, s+1);
	Make_Var(s+1);

	for(i=2; i<=arity; i++)
	{
	    Make_Nil(t+i);
	}
    }
    else
    {
	Make_Var(d+1);		/* empty dlist */
	Make_Ref(d+2, d+1);

	for(i=2; i<=arity; i++)
	{
	    if (i == position) {
		Make_List(t+i, s);
		Make_Nil(s+1);
	    } else {
		Make_Nil(t+i);
	    }
	}
    }
    return t;
}

int
insert_suspension(pword *var,
	int position,		/* must be > 0 */
	pword *susp,		/* must be dereferenced */
	int slot)
{
    register pword *s, *t;
    int			i;

    if (IsMeta(var->tag)) {		/* already a metaterm */

	t = MetaTerm(var)->val.ptr + slot;	/* find the dlist to insert */
	Dereference_(t);
	if (IsRef(t->tag)) {
	    if (slot != DELAY_SLOT)
		return ATTR_FORMAT;
	    s = _suspension_attribute(susp, position);
	    if (!s)
		return RANGE_ERROR;
	    Bind_Var(t->val, t->tag, s, TCOMP);
	    return PSUCCEED;
	} else if (!IsStructure(t->tag))
	    return ATTR_FORMAT;
	t = t->val.ptr;
	if ((DidArity(t->val.did)) < position) {
	    if (slot != DELAY_SLOT)
		return RANGE_ERROR;
	    position = 1;		/* force to the 1st list */
	}

	return ec_enter_suspension(t+position, susp);
    }
    else if (IsRef(var->tag)) {
	if (slot != DELAY_SLOT)
	    return ATTR_FORMAT;
	t = _suspension_attribute(susp, position);
	if (!t)
	    return RANGE_ERROR;
	s = add_attribute(var->tag.kernel, t, (word) TCOMP, slot);
	Bind_Var(var->val, var->tag, s, TREF);
    }
    Check_Gc;
    return PSUCCEED;
}

int
ec_enter_suspension(pword *t, pword *susp)
{
    register pword *s, *head;
    pword	   *dlp;

    dlp = t;
    Dereference_(t);
    s = TG;
    TG += 2;			/* make a list cell */
    s[0].val.ptr = susp;
    s[0].tag.kernel = TSUSP;
    if (IsRef(t->tag)) {	/* first insert */
	s[1].tag.kernel = TNIL;
	Bind_Var(t->val, t->tag, &s[0], TLIST);
    } else {
	if (IsStructure(t->tag)) {		/* it already exists */
	    t = t->val.ptr;
	    if (t->val.did != d_.minus)		/* check the functor */
		return ATTR_FORMAT;
	    head = ++t;
	    Dereference_(head);
	} else if (IsList(t->tag) || IsNil(t->tag)) {
	    /* not a difference list */
	    head = t;
	    t = dlp;
	} else
	    return ATTR_FORMAT;

	/*
	 * dlp is the (undereferenced) difference list pointer (if any)
	 * t is the (undereferenced) list pointer
	 * head is the (dereferenced) list pointer
	 */

	/*
	 * Incomplete garbage collection: get rid of woken
	 * suspensions at the beginning of the list.
	 */
	while (IsList(head->tag))
	{
	    register pword *psusp = head->val.ptr;
	    Dereference_(psusp);
	    if (!IsTag(psusp->tag.kernel, TSUSP))
		return ATTR_FORMAT;
	    if (!SuspDead(psusp->val.ptr))
		break;
	    head = head->val.ptr + 1;
	    Dereference_(head);
	}

	/* head now points to the rest of the old suspension list */

	if (IsList(head->tag) || IsNil(head->tag)) {
	    s[1] = *head;
	    /* t may be TREF, TLIST or TNIL */
	    if (t < GB || !ISPointer(t->tag.kernel) || t->val.ptr < GB)
	    {
		Trail_Pword(t);
	    }
	    t->tag.kernel = TLIST;
	    t->val.ptr = s;
	} else if (!IsRef(head->tag))
	    return ATTR_FORMAT;
	else {				/* empty dlist, replace it */
	    value v;
	    s[1].val.ptr = &s[1];
	    s[1].tag.kernel = TREF;
	    TG += 3;
	    s[2].val.did = d_.minus;	/* new difference list header */
	    s[2].tag.kernel = TDICT;
	    s[3].val.ptr = s;
	    s[3].tag.kernel = TLIST;
	    s[4].val.ptr = &s[1];
	    s[4].tag.kernel = TREF;
	    v.ptr = &s[2];
	    (void) ec_assign(dlp, v, tcomp);
	}
    }
    Check_Gc;
    return PSUCCEED;
}

int
notify_constrained(pword *pvar)
{
    pword	*p;

    if (!IsMeta(pvar->tag)) {
	Succeed_
    }
    p = MetaTerm(pvar->val.ptr);
    p = p->val.ptr + DELAY_SLOT;
    Dereference_(p);
    if (!IsStructure(p->tag)) {
	Succeed_
    }
    return ec_schedule_susps(p->val.ptr + CONSTRAINED_OFF);
}

/*
 * Pick up the first woken goal with priority higher than prio,
 * remove it from its list and set WP to the priority
 */
pword *
first_woken(register int prio)
{
    register int	i;
    register pword	*p = WL;
    register pword	*s;
    register pword	*t;
    register pword	*u;

    if (p == (pword *) 0)
	return 0;
    if (prio > WLMaxPrio(p))
	prio = WLMaxPrio(p) + 1;
    p = WLFirst(p) - 1;
    for (i = 1; i < prio; i++) {
	t = ++p;		/* no references allowed */
	if (IsList(t->tag)) {
	    for (;;) {
		t = t->val.ptr;
		s = t++;
		Dereference_(s);
		Dereference_(t);
		if (IsSusp(s->tag)) {
		    u = s->val.ptr;
		    if (!SuspDead(u))
			break;
		} else
		    p_fprintf(current_err_, "*** woken list %d is corrupted\n", i);
		if (IsNil(t->tag)) {
		    s = 0;
		    break;
		}
	    }
	    /* replace the list head */
	    if (p->val.ptr < GB) {
		Trail_Pword(p);
	    }
	    if (IsList(t->tag))
		p->val.ptr = t->val.ptr;
	    else
	    {
		/* Use a timestamp (which happens to look like a [])
		 * to terminate the list */
		Make_Stamp(p);
	    }
	    if (s) {
		Set_WP(SuspRunPrio(s))
		return s;
	    }
	}
    }
    return 0;
}

/*
 * Initialize the WL structure
 */
pword *
wl_init(void)
{
    pword	*p = TG;
    int	i;

    Push_Struct_Frame(d_.woken);
    *WLPrevious(p) = TAGGED_WL;
    Make_Integer(WLPreviousWP(p), WP);
    Make_Susp(WLPreviousLD(p), LD);
    for (i=WL_FIRST; i <= WL_ARITY; i++)
	p[i].tag.kernel = TNIL;
    return p;
}

/*
 * binding routine for non-standard variables
 *
 * receives:
 * 	pw1	a non-standard variable
 *		(ie. IsRef(pw1) && !IsVar(pw1))
 *	pw2	a general term, but not a (standard) free variable
 *		(ie. !IsVar(pw2))
 *
 * binds the non-standard variable pw1 to the term referenced by pw2
 */

int
bind_c(register pword *pw1, register pword *pw2, register pword **list)
{
    switch(TagType(pw1 -> tag))
    {
    case TNAME:			/* a named variable */
	pw1 = pw1->val.ptr;
	switch(TagType(pw2->tag))
	{
	case TNAME:
	    pw2 = pw2->val.ptr;
	    if (pw1 < pw2)
	    {
		Bind_Named(pw2, pw1);
	    }
	    else if (pw1 > pw2)
	    {
		Bind_Named(pw1, pw2);
	    }
	    break;

	case TMETA:
	    pw2 = pw2->val.ptr;
	    if (pw2 > pw1) /* we bind the "wrong" direction, copy the name */
	    {
		Trail_Tag_If_Needed_Gb(pw2)
		pw2->tag.kernel = TagNameField(pw1->tag.kernel) | RefTag(TMETA);
	    }
	    Bind_Named(pw1, pw2);
	    break;

	case TUNIV:
	    pw2 = pw2->val.ptr;
	    Bind_Named(pw1, pw2);
	    break;

	default:
	    Trail_Tag_If_Needed_Gb(pw1);
	    *pw1 = *pw2;
	}
	return PSUCCEED;

    case TMETA:
    {
	pw1 = pw1->val.ptr;
	switch(TagType(pw2->tag))
	{
	case TNAME:
	    pw2 = pw2->val.ptr;
	    if (pw1 > pw2) /* we bind the "wrong" direction, copy the name */
	    {
		Trail_Tag_If_Needed_Gb(pw1)
		pw1->tag.kernel = TagNameField(pw2->tag.kernel) | RefTag(TMETA);
	    }
	    Bind_Named(pw2, pw1);
	    return PSUCCEED;

	case TUNIV:
	    return PFAIL;

	case TMETA:
	    pw2 = pw2->val.ptr;
	    if (pw1 > pw2)
	    {
		Trail_Tag_If_Needed_Gb(pw1)
		pw1->tag.kernel = TREF;
		pw1->val.all = pw2->val.all;
	    }
	    else if (pw1 < pw2)
	    {
		Trail_Tag_If_Needed_Gb(pw2)
		pw2->tag.kernel = TREF;
		pw2->val.all = pw1->val.all;
		pw1 = pw2;
	    }
	    else
		return PSUCCEED;
	    break;

	default:
	    Trail_Tag_If_Needed_Gb(pw1)
	    *pw1 = *pw2;
	}

	pw2 = TG;
	TG += 2;
	Check_Gc;
	pw2[0].val.ptr = pw1;
	pw2[0].tag.kernel = TLIST;
	if (*list) {
	    pw2[1].val.ptr = *list;
	    pw2[1].tag.kernel = TLIST;
	} else {
	    pw2[1].tag.kernel = TNIL;
	    if (list == &MU) {
		Fake_Overflow;
	    }
	}
	*list = pw2;
	return PSUCCEED;
    }

    case TUNIV:
	/* TUNIV variables are universally quantified variables,
	 * so any attempt to constrain them must fail! */
	switch(TagType(pw2->tag))
	{
	case TNAME:
	    pw1 = pw1->val.ptr;
	    pw2 = pw2->val.ptr;
	    Bind_Named(pw2, pw1);
	    return PSUCCEED;
	case TUNIV:
	    if (pw1->val.ptr == pw2->val.ptr)
		return PSUCCEED;
	    /* else */
	default:
	    return PFAIL;
	}

/*
 * EXTENSION SLOT HERE
 */

    default:
	p_fprintf(current_err_, "bind_c(): unknown tag (%x) encountered\n",
		pw1->tag.kernel);
	return PFAIL;
    }
}


/*
 * Instantiate a metaterm without triggering meta_unification events
 */

int
meta_bind(pword *pvar, value v, type t)
{
    if (IsVar(t) && v.ptr >= TG)	/* local -> meta */
    {
	Trail_If_Needed_Eb(v.ptr)
	v.ptr->val.ptr = pvar;
    }
    else				/* bind the metaterm pvar */
    {
	Trail_Tag_If_Needed_Gb(pvar)
	pvar->tag.all = t.all;
	pvar->val.all = v.all;
    }
    Succeed_;
}


/*
 * ec_assign() - destructive assignment to a pword in the global stack
 *
 * Used to implement setarg/3 and the like.
 * Assigning to a variable is not allowed, in order to reduce the
 * confusing side effects caused by this facility [the check has been removed].
 * Originally, we had the additional restriction that the new value
 * of the pword should not be a variable either, to avoid multiple
 * references to the modified location. However, this proved too
 * restrictive for the applications, e.g. in difference lists.
 *
 * This solution should be optimal. Some thoughts about this problem:
 * To optimize space reuse and trailing, we need to know the age of
 * a binding. A binding is always younger than the bound location and
 * also younger than the binding value.
 * If the old binding was already done in the current choicepoint
 * segment (NewValue), we do not have to trail the update.
 * When the value we bind to is in the current choicepoint segment, we
 * can use it as the indicator of the binding age. If it is older, or
 * if we bind to a constant (which has no age), we create an intermediate
 * cell on top of the stack, so that we can later use its address to
 * determine the binding age.
 */

int				/* returns PSUCCEED */
ec_assign(
    	register pword *argpw,	/* location to be modified */
	value v, type t)	/* the new value and tag */
{
#ifdef PRINTAM
    if (!(TG_ORIG <= argpw && argpw < TG) &&
    	!((void_ptr)&ec_.m <= (void_ptr)argpw &&
	  (void_ptr)argpw < (void_ptr)&ec_.m + sizeof(struct machine)))
    {
	pword *argpw1 = argpw;
	p_fprintf(current_output_, "INTERNAL ERROR: ec_assign of heap term: ");
	Dereference_(argpw1)
	writeq_term(argpw1->val.all, argpw1->tag.all);
	ec_newline(current_output_);
    }
#endif
    if (IsVar(t) && v.ptr > TG)	/* globalize local variables */
    {
	register pword *new = TG++;
	Check_Gc;
	new->val.ptr = new;
	new->tag.kernel = TREF;
	Trail_If_Needed(v.ptr)
	v.ptr = v.ptr->val.ptr = new;
    }

    if (!NewLocation(argpw))		/* not completely deterministic */
    {
	if (!NewValue(v, t))		/* binding age will not be implicit */
	{
	    register pword *new = TG++; /* create an intermediate cell */
	    Check_Gc;
	    new->val.all = v.all;
	    new->tag.all = t.all;
	    v.ptr = new;
	    t.kernel = TREF;
	}
	if (!NewValue(argpw->val, argpw->tag))
	{
					/* old binding wasn't in this sgmt */
	    Trail_Pword(argpw);		/* don't "optimize" this (bug #609) */
	}
    }
    argpw->tag.all = t.all;
    argpw->val.all = v.all;
    Succeed_;
}
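
/*
 * Usage sketch (illustrative only): a setarg/3-style update from C.
 * 'pstruct' is assumed to be a dereferenced pword with tag TCOMP, 'n' a
 * valid argument index (1..arity), and vnew/tnew the new value and tag.
 *
 *	pword *argpw = pstruct->val.ptr + n;	-- the n-th argument slot
 *	return ec_assign(argpw, vnew, tnew);
 */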


/*
 * pword *ec_nonground(val,tag)
 *
 * Check if a term is nonground. Returns a pointer to the first
 * variable encountered, otherwise NULL.
 */

pword *
ec_nonground(value val, type tag)	/* expects a dereferenced argument */
{
    register int arity;
    register pword *arg_i;

    for (;;)
    {
	if (IsRef(tag))
	    return val.ptr;
	else if (IsList(tag))
	    arity = 2;
	else if (IsStructure(tag))
	{
	    arity = DidArity(val.ptr->val.did);
	    val.ptr++;
	}
	else
	    return (pword *) 0;

	for(;arity > 1; arity--)
	{
	    register pword *pvar;
	    arg_i = val.ptr++;
	    Dereference_(arg_i);
	    if ((pvar = ec_nonground(arg_i->val, arg_i->tag)))
		return pvar;
	}
	arg_i = val.ptr;		/* tail recursion */
	Dereference_(arg_i);
	val.all = arg_i->val.all;
	tag.all = arg_i->tag.all;
    }
}

/*---------------------------------------------
 * Cut across PB
 *---------------------------------------------*/

#ifdef PB_MAINTAINED

int
cut_across_pb(pword *old_b)	/* old_b < PB */
{
    do
    {
	PB = BPar(PB)->ppb;
    } while (old_b < PB);
    if (old_b < PPB) {
	PPB = PB;
	do
	    PPB = BPar(PPB)->ppb;
	while (old_b < PPB);
	return cut_public();
    }
    return 1;
}

#endif

/*---------------------------------------------
 * Trailing/Untrailing
 *---------------------------------------------*/

/*
 * This function extends the Untrail_Variables() macro.
 * It is called when the trail is neither address nor tag nor value trail.
 *
 * Untrail the extended trail frame that trail_ptr points to.
 * The frame must be popped by the caller!
 *
 * This function (when called during failure) relies on TG/GB having
 * their pre-failure values!
 */

void
untrail_ext(pword **trail_ptr, int undo_context)
{
    switch(TrailedEtype(*trail_ptr))
    {

    case TRAIL_UNDO:
	/* call undo function */
	(* (void(*)(pword*,word*,int,int)) (trail_ptr[TRAIL_UNDO_FUNCT])) (
		trail_ptr[TRAIL_UNDO_ADDRESS],
		(word*) (trail_ptr + TRAIL_UNDO_SIMPLE_HEADER_SIZE),
		TrailedEsize(trail_ptr[TRAIL_UNDO_FLAGS]) - TRAIL_UNDO_SIMPLE_HEADER_SIZE,
		undo_context
	    );
	break;

    case TRAIL_UNDO_STAMPED:
	/*
	 * First reset the timestamp.
	 * This is not done in the GC because the stamp location may already
	 * be marked. The only consequence of this is that the stamp keeps
	 * an extra witness alive.
	 */
	if (undo_context == UNDO_FAIL)
	{
	    trail_ptr[TRAIL_UNDO_STAMP_ADDRESS]->val.ptr = trail_ptr[TRAIL_UNDO_OLDSTAMP];

	    /* do nothing if the trail is redundant according to timestamp */
	    if (!OldStamp(trail_ptr[TRAIL_UNDO_STAMP_ADDRESS]))
		return;
	}
	/* then call undo function */
	(* (void(*)(pword*,word*,int,int)) (trail_ptr[TRAIL_UNDO_FUNCT])) (
		trail_ptr[TRAIL_UNDO_ADDRESS],
		(word*) (trail_ptr + TRAIL_UNDO_STAMPED_HEADER_SIZE),
		TrailedEsize(trail_ptr[TRAIL_UNDO_FLAGS]) - TRAIL_UNDO_STAMPED_HEADER_SIZE,
		undo_context
	    );
	break;

/* EXTENSION SLOT HERE */

    }
}


/*
 * _untrail_cut_action()
 * called only by untrail_ext() during untrailing
 */
static void
_untrail_cut_action(pword *action_frame)
{
    if (action_frame == LCA)
    {
	do_cut_action();
    }
    /* else the action has already been executed by a cut */
}


/*
 * do_cut_action() is called at cut time or during untrailing
 * The LCA register is a pointer to a cut action frame with the format:
 *
 *	TDICT	arg/3				don't care functor
 *	TCOMP	<ptr to next (older) action>	chain of cut actions
 *	TINT	<address of C action function>
 *	TAG	VAL				argument for the action
 */
void
do_cut_action(void)
{
    /* call the action function */
    (* (void(*)(value,type)) (LCA[2].val.ptr)) (LCA[3].val, LCA[3].tag);

    /* advance the LCA register */
    if (IsStructure(LCA[1].tag))
	LCA = LCA[1].val.ptr;
    else
	LCA = (pword *) 0;
}


/*
 * schedule_cut_fail_action(function, v, t)
 *
 * create a cut-action frame on the global stack and a corresponding
 * undo-frame on the trail.
 * The cut-action frame is linked into the global list of cut-action frames,
 * starting with the LCA register.
 */
void
schedule_cut_fail_action(
	void	(*function)(value, type),
	value	v,
	type	t)
{
    pword *action_frame = TG;

    TG += 4;
    Check_Gc;
    action_frame[0].val.did = d_.arg;	/* just any arity 3 functor ... */
    action_frame[0].tag.kernel = TDICT;
    action_frame[1].val.ptr = LCA;
    if (LCA)
	action_frame[1].tag.kernel = TCOMP;
    else
	action_frame[1].tag.kernel = TNIL;
    action_frame[2].val.ptr = (pword *) function;
    action_frame[2].tag.kernel = TINT;
    action_frame[3].val.all = v.all;
    action_frame[3].tag.all = t.all;

    Trail_Undo(action_frame, _untrail_cut_action);
    LCA = action_frame;
}
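
/*
 * Usage sketch (illustrative only): an external that registers a cleanup
 * action to run both on cut and on failure. _my_cleanup is a hypothetical
 * function with the required (value,type) signature.
 *
 *	static void _my_cleanup(value v, type t) { ... release v ... }
 *	...
 *	schedule_cut_fail_action(_my_cleanup, v_item, t_item);
 */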

/*
 * C function interfaces for use in extensions
 */

void trail_undo(pword *pw, void (*function) (pword *))
{
    Trail_Undo(pw, function);
}


/*
 * The function to create an (optionally time-stamped) undo trail:
 *
 * void ec_trail_undo(
 *	function,	address of untrail function
 *	pitem,		address of related item, or NULL
 *			(pointer to pword on heap, or anything elsewhere)
 *	pstamp,		address of time stamp (we only trail if it is old)
 *			or NULL for non-timestamped trail
 *	pdata,		pointer to untrail data or NULL
 *	data_size,	size of untrail data in words (0..2^23)
 *	data_type	TRAILED_PWORD or TRAILED_WORD32
 *   )
 *
 * The untrail function will later be called as follows:
 *
 * void undo(
 *	pitem,		address of related item
 *	pdata,		pointer to untrail data
 *	data_size,	size of untrail data in words
 *	undo_context	UNDO_FAIL or UNDO_GC
 * )
 */
2062
2063void
2064ec_trail_undo(
2065	void	(*function)(pword*,word*,int,int),
2066	pword	*pitem,
2067	pword	*pstamp,
2068	word	*pdata,
2069	int	data_size,
2070	int	data_type)
2071{
2072    int i;
2073    uword *traildata = (uword *)TT - data_size;
2074
2075    /* Disable_Exit macro guards against interruption by an
2076     * asynchronous abort leaving a partially complete trail
2077     * entry on the top of the stack
2078     */

    if (pstamp)
    {
	if (!OldStamp(pstamp))	/* trail redundant? */
	    return;

	Disable_Exit();

	TT = (pword **) (traildata - TRAIL_UNDO_STAMPED_HEADER_SIZE);
	Check_Trail_Ov
	TT[TRAIL_UNDO_FLAGS] = (pword *)
    		( TrailedEsizeField(TRAIL_UNDO_STAMPED_HEADER_SIZE + data_size)
		| TrailedEtypeField(TRAIL_UNDO_STAMPED)
		| TRAIL_EXT | (data_type & TRAILED_TYPE_MASK));
	TT[TRAIL_UNDO_STAMP_ADDRESS] = pstamp;
	TT[TRAIL_UNDO_OLDSTAMP] = ISPointer(pstamp->tag.kernel) ? pstamp->val.ptr : 0;
	Make_Stamp(pstamp);
    }
    else
    {
	Disable_Exit();

	TT = (pword **) (traildata - TRAIL_UNDO_SIMPLE_HEADER_SIZE);
	Check_Trail_Ov
	TT[TRAIL_UNDO_FLAGS] = (pword *)
    		( TrailedEsizeField(TRAIL_UNDO_SIMPLE_HEADER_SIZE + data_size)
		| TrailedEtypeField(TRAIL_UNDO)
		| TRAIL_EXT | (data_type & TRAILED_TYPE_MASK));
    }

    TT[TRAIL_UNDO_ADDRESS] = pitem;
    *((void (**)(pword*,word*,int,int)) (TT+TRAIL_UNDO_FUNCT)) = function;

    /* copy the untrail data onto the trail */
    for(i=0; i<data_size; ++i)
    {
	traildata[i] = ((uword *) pdata)[i];
    }

    Enable_Exit();
}
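
/*
 * Illustrative sketch (not part of this module): an extension that
 * destructively updates a word in a heap object and trails the old
 * value so that backtracking restores it.  The names _cnt_undo, obj
 * and stamp are hypothetical; stamp is assumed to be a time-stamp
 * pword refreshed via Make_Stamp() for each new choicepoint segment.
 *
 *	static void _cnt_undo(pword *item, word *data, int size, int undo_context)
 *	{
 *	    *(word *) item = data[0];
 *	}
 *
 *	word old = obj->counter;
 *	ec_trail_undo(_cnt_undo, (pword *) &obj->counter, &stamp,
 *			&old, 1, TRAILED_WORD32);
 *	obj->counter++;
 */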


/*
 * Trail the n_pwords pwords starting at pw + offset_pwords.
 */
void ec_trail_pwords(pword *pw, int offset_pwords, int n_pwords)
{
    Trail_Pwords(pw, offset_pwords, n_pwords);
}
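
/*
 * E.g. (sketch) to save arguments 1 and 2 of a structure before
 * destructively overwriting them, where s points to the functor pword:
 *
 *	ec_trail_pwords(s, 1, 2);
 *	s[1] = ...;
 *	s[2] = ...;
 */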


void disable_exit(void)
{
    Disable_Exit();
}

void enable_exit(void)
{
    Enable_Exit();
}

#define GlobalRef(ref)		((ref) < TG && (ref) >= TG_ORIG)
#define LocalRef(ref)		((ref) < SP_ORIG && (ref) >= SP)
#define TrailRef(ref)		((pword**)(ref) < TT_ORIG && (pword**)(ref) >= TT)
#define MachineRef(ref)		((word*)(&ec_) <= (word*)(ref) && (word*)(ref) < (word*)(&ec_ + 1))

/*
 * This function checks thoroughly that the pointer is a valid local
 * or global reference.
 */
int
check_pword(pword *ref)
{
    int		arity;

    if (!(GlobalRef(ref) || LocalRef(ref)
    	|| TrailRef(ref) || address_in_heap(&global_heap, ref)
	|| MachineRef(ref)))
	return 0;
    /* Now we can test the contents */
    switch (TagType(ref->tag))
    {
    case TLIST:
	if (!(GlobalRef(ref->val.ptr) || address_in_heap(&global_heap, ref->val.ptr)))
	    return 0;
	return check_pword(ref->val.ptr) && check_pword(ref->val.ptr+1);

    case TCOMP:
	ref = ref->val.ptr;
	if (!(GlobalRef(ref) || address_in_heap(&global_heap, ref)))
	    return 0;
	if (bitfield_did((word) DidBitField(ref->val.did)) != ref->val.did)
	    return 0;
	arity = DidArity(ref->val.did);
	for (ref++; arity; arity--, ref++)
	    if (!check_pword(ref))
		return 0;
	return 1;

    case TSTRG:
    case TBIG:
#ifndef UNBOXED_DOUBLES
    case TDBL:
#endif
    case TIVL:
	if (!(GlobalRef(ref->val.ptr) || address_in_heap(&global_heap, ref->val.ptr)))
	    return 0;
	return TagType(ref->val.ptr->tag) == TBUFFER;

    case TRAT:
	if (!(GlobalRef(ref->val.ptr) || address_in_heap(&global_heap, ref->val.ptr)))
	    return 0;
	return TagType(ref->val.ptr->tag) == TBIG;

    case TSUSP:
	ref = ref->val.ptr;
	if (!GlobalRef(ref))
	    return 0;
	return TagType(ref->tag) == TDE &&
		(ref->val.ptr == 0 || GlobalRef(ref->val.ptr));

    case TNIL:
    case TINT:
#ifdef UNBOXED_DOUBLES
    case TDBL:
#endif
	return 1;

    case TDICT:
	return bitfield_did((word) DidBitField(ref->val.did)) == ref->val.did;

    case TVAR_TAG:
	if (ref->val.ptr != ref)
	    return check_pword(ref->val.ptr);
	return 1;

    case TNAME:
	if (ref->val.ptr != ref)
	    return check_pword(ref->val.ptr);
	return (IsNamed(ref->tag.kernel) &&
	    address_in_heap(&global_heap, (pword *) TagDid(ref->tag.kernel)));

    case TMETA:
	if (ref->val.ptr != ref)
	    return check_pword(ref->val.ptr);
	return check_pword(ref->val.ptr + 1);

    default:
	return 0;
    }
}
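
/*
 * check_pword() is mainly useful in development assertions or when
 * called interactively from a C debugger, e.g. (sketch, assuming
 * <assert.h> is included):
 *
 *	assert(check_pword(&A[1]));
 */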

#ifdef PRINTAM
/*---------------------------------------
 * Debugging support
 *---------------------------------------*/

/*
 * Check that a single pword looks well-formed;
 * print an error message otherwise.
 */
void
check_arg(pword *pw)
{
    switch (TagType(pw->tag))
    {
    case TCOMP:
	if (SameTypeC(pw->val.ptr->tag, TDICT))
	    return;
	break;
    case TLIST:
	return;
    case TSUSP:
	if (pw->val.ptr < TG && pw->val.ptr >= TG_ORIG)
	    return;
	break;
    case THANDLE:
	if (pw->val.ptr < TG && pw->val.ptr >= TG_ORIG
	 && SameTypeC(pw->val.ptr[0].tag, TEXTERN)
	 && SameTypeC(pw->val.ptr[1].tag, TPTR))
	    return;
	break;
    case TIVL:
    case TBIG:
    case TSTRG:
#ifndef UNBOXED_DOUBLES
    case TDBL:
#endif
	if (SameTypeC(pw->val.ptr->tag, TBUFFER))
	    return;
	break;
    case TRAT:
	if (SameTypeC(pw->val.ptr[0].tag, TBIG) &&
	    SameTypeC(pw->val.ptr[1].tag, TBIG))
	    return;
	break;
    case TNIL:
    case TINT:
    case TDICT:
#ifdef UNBOXED_DOUBLES
    case TDBL:
#endif
	return;
    case TVAR_TAG:
	return;
    case TNAME:
    case TMETA:
    case TUNIV:
	if (pw->val.ptr < TG && pw->val.ptr >= TG_ORIG)
	    return;
	break;
    }
    p_fprintf(current_err_,
	"INTERNAL ERROR: illegal pword encountered: val=%x tag=%x\n",
	pw->val.all, pw->tag.all);
    ec_flush(current_err_);
}


#define InGlobal(p)  ((p) >= min && (p) < max)
#define InHeap(p)  (address_in_heap(&global_heap, (generic_ptr) p))

void check_global1(pword *min, pword *max);

void
check_global(void)
{
    check_global1(TG_ORIG, TG);
}

void
check_global2(pword *max)
{
    check_global1(TG_ORIG, max);
}

void
check_global1(register pword *min, register pword *max)
{
    register pword *pw = min;
    extern pword    woken_susp_;

    if (g_emu_.nesting_level > 1)
	return;

    while (pw < max)
    {
	switch (TagType(pw->tag))
	{
	case TVAR_TAG:
	case TNAME:
	case TMETA:
	case TUNIV:
	    if (!IsRef(pw->tag))
		goto _problem_;
	    if (!InGlobal(pw->val.ptr))
		goto _problem_;
	    pw++;
	    break;

	case TCOMP:
	    /*
	    if (pw->val.ptr && !InGlobal(pw->val.ptr) && !IsPersistent(pw->tag))
		goto _problem_;
	    */
	    if (pw->val.ptr &&
	    	(!IsAtom(pw->val.ptr->tag) || DidArity(pw->val.ptr->val.did) == 0))
		goto _problem_;
	    pw++;
	    break;

	case TSTRG:
	case TBIG:
#ifndef UNBOXED_DOUBLES
	case TDBL:
#endif
	case TIVL:
	    /*
	    if (!InGlobal(pw->val.ptr) && !IsPersistent(pw->tag)) goto _problem_;
	    */
	    if (DifferTypeC(pw->val.ptr->tag,TBUFFER)) goto _problem_;
	    pw++;
	    break;

	case TRAT:
	    if (!InGlobal(pw->val.ptr) && !IsPersistent(pw->tag)) goto _problem_;
	    if (DifferTypeC(pw->val.ptr[0].tag, TBIG) ||
		DifferTypeC(pw->val.ptr[1].tag, TBIG)) goto _problem_;
	    pw++;
	    break;

	case TSUSP:
	    if (!InGlobal(pw->val.ptr) && pw->val.ptr != &woken_susp_) goto _problem_;
	    if (DifferTypeC(pw->val.ptr->tag,TDE)) goto _problem_;
	    pw++;
	    break;

	case TLIST:
	    if (!InGlobal(pw->val.ptr) && !IsPersistent(pw->tag)) goto _problem_;
	    pw++;
	    break;

	case THANDLE:
	    if (!InGlobal(pw->val.ptr)) goto _problem_;
	    if (DifferTypeC(pw->val.ptr[0].tag, TEXTERN) ||
		DifferTypeC(pw->val.ptr[1].tag, TPTR)) goto _problem_;
	    pw++;
	    break;

	case TNIL:
	case TINT:
	case TDICT:
#ifdef UNBOXED_DOUBLES
	case TDBL:
#endif
	    pw++;
	    break;

	case TBUFFER:
	    pw += BufferPwords(pw);
	    break;

	case TEXTERN:
	    pw += 2;
	    break;

	case TDE:
	    pw += SUSP_SIZE - 2;
	    break;

	default:
	    goto _problem_;
	}
    }
    return;
_problem_:
    p_fprintf(current_err_,
	"INTERNAL ERROR: illegal pword encountered at 0x%x: val=0x%x tag=0x%x\n",
	pw, pw->val.all, pw->tag.all);
    ec_flush(current_err_);
    return;
}

/*
 * Search the trail for entries referring to addr
 * (intended to be called from a C debugger).
 */
void
find_in_trail(pword *addr)
{
    pword **tr = TT;
    pword *trailed_item;
    word	i;

    while(tr < TT_ORIG)
    {
	switch((((word) *tr) & 3))
	{
	case TRAIL_ADDRESS:
	    trailed_item = *tr;
	    break;
	case TRAIL_TAG:
	    trailed_item = *(tr+1);
	    break;
	case TRAIL_MULT:
	    i = (word) *tr;
	    trailed_item = (pword *)((uword *)(*(tr+1)) + TrailedOffset(i));
	    break;
	case TRAIL_EXT:
	    trailed_item = 0;	/* no single trailed item to match */
	    break;
	}
	if (trailed_item == addr)
	{
	    p_fprintf(current_err_,
		"Trail entry found for 0x%x at 0x%x\n", trailed_item, tr);
	    ec_flush(current_err_);
	}
	End_Of_Frame(tr, tr);
    }
}


void check_trail2(int print, pword **ttptr, pword **ttend, pword *min_tg_when_failing);

void
check_trail(void)
{
    extern vmcode par_fail_code_[];
    control_ptr fp;
    pword **tt = TT;
    pword *tg = TG;
    int print = 0;

    for(fp.args = B.args;;fp.args = BPrev(fp.args))
    {
	if (BPrev(fp.args) == (pword *) (fp.top - 1))
	{
	    /* small if-then-else choicepoint */
	}
	else
	{
	    check_trail2(print, tt, BChp(fp.args)->tt, tg);
	    tt = BChp(fp.args)->tt;
	    tg = BChp(fp.args)->tg;
	    break;
	}

	if (IsInterruptFrame(BTop(fp.args)) || IsRecursionFrame(BTop(fp.args)))
	    break;
    }
    if (print) p_fprintf(current_err_, "BOTTOM\n");
    if (print) ec_flush(current_err_);
}

void
check_trail1(int print)
{
    check_trail2(print, TT, TT_ORIG, TG);
}

void
check_trail2(int print, pword **ttptr, pword **ttend, pword *min_tg_when_failing)
{
    word ctr;
    pword *pw;
    while(ttptr < ttend) {
	if (print) p_fprintf(current_err_, "TT=0x%08x: ", ttptr);
	switch((((word) *ttptr) & 3)) {
	case TRAIL_ADDRESS:
	    pw = *ttptr++;
	    if (print) p_fprintf(current_err_, "ADDRESS 0x%08x\n", pw);
	    if (min_tg_when_failing <= pw && pw < (pword*)TT)
		emu_break();
	    break;
	case TRAIL_TAG:
	    pw = *(ttptr+1);
	    if (print) p_fprintf(current_err_, "TAG     0x%08x 0x%08x\n", pw, TrailedTag(*ttptr));
	    if (min_tg_when_failing <= pw && pw < (pword*)TT)
		emu_break();
	    ttptr += 2;
	    break;
	case TRAIL_MULT:
	    ctr = (word) *ttptr++;
	    pw = *ttptr++;
	    ctr = TrailedNumber(ctr);
	    if (print) p_fprintf(current_err_, "MULT    0x%08x %d\n", pw, ctr);
	    if (min_tg_when_failing <= pw && pw < (pword*)TT)
		emu_break();
#if 0
	    if (!check_pword(pw) && !(
		    pw == &POSTED_LAST
		||
		    IsTag(pw->tag.kernel, TDE)
	    	))
		emu_break();
#endif
	    do {	/* skip the trailed data words */
		ttptr++;
	    } while (ctr--);
	    break;
	case TRAIL_EXT:
	    switch(TrailedEtype(*ttptr)) {
	    case TRAIL_UNDO:
		if (print) p_fprintf(current_err_, "UNDO    0x%08x\n", ttptr[TRAIL_UNDO_ADDRESS]);
		break;
	    case TRAIL_UNDO_STAMPED:
		if (print) p_fprintf(current_err_, "UNDO_ST 0x%08x\n", ttptr[TRAIL_UNDO_ADDRESS]);
#if 0
		if (ttptr[TRAIL_UNDO_OLDSTAMP] >= min_tg_when_failing)
		{
		    p_fprintf(current_err_, "UNDO_ST redundant 0x%08x\n", ttptr[TRAIL_UNDO_OLDSTAMP]);
		    ec_flush(current_err_);
		}
#endif
		if (TrailedType(*ttptr) == TRAILED_PWORD)
		{
		    word n_pwords = (TrailedEsize(*ttptr) - TRAIL_UNDO_STAMPED_HEADER_SIZE)/2;
		    pw = (pword *) (ttptr + TRAIL_UNDO_STAMPED_HEADER_SIZE);
		    for(; n_pwords > 0; --n_pwords, ++pw)
		    {
			if (ISPointer(pw->tag.kernel))
			{
			    if (min_tg_when_failing <= pw->val.ptr && pw->val.ptr < (pword*)TT)
				emu_break();
			    if (IsString(pw->tag) && !IsTag(pw->val.ptr->tag.kernel, TBUFFER))
				emu_break();
			}
		    }
		}
		break;
	    }
	    ttptr += TrailedEsize(*ttptr);
	    break;
	}
    }
    if (print) p_fprintf(current_err_, "TT=0x%08x: STOP\n", ttptr);
    if (print) ec_flush(current_err_);
}

#endif /* PRINTAM */
