1/* BEGIN LICENSE BLOCK 2 * Version: CMPL 1.1 3 * 4 * The contents of this file are subject to the Cisco-style Mozilla Public 5 * License Version 1.1 (the "License"); you may not use this file except 6 * in compliance with the License. You may obtain a copy of the License 7 * at www.eclipse-clp.org/license. 8 * 9 * Software distributed under the License is distributed on an "AS IS" 10 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See 11 * the License for the specific language governing rights and limitations 12 * under the License. 13 * 14 * The Original Code is The ECLiPSe Constraint Logic Programming System. 15 * The Initial Developer of the Original Code is Cisco Systems, Inc. 16 * Portions created by the Initial Developer are 17 * Copyright (C) 1989-2006 Cisco Systems, Inc. All Rights Reserved. 18 * 19 * Contributor(s): 20 * 21 * END LICENSE BLOCK */ 22 23/* 24 * VERSION $Id: code.c,v 1.18 2013/04/29 01:02:10 jschimpf Exp $ 25 */ 26 27/******************************************************************** 28 * 29 * 30 * File code.c 31 * 32 * This file is intended for the initialization of fixed, handcoded 33 * sequences of abstract machine code. 
 *
 ***********************************************************************/

#include "config.h"
#include "sepia.h"
#include "types.h"
#include "embed.h"
#include "mem.h"
#include "error.h"
#include "dict.h"
#include "emu_export.h"
#include "opcode.h"
#include "gencode.h"
#include "debug.h"
#include "module.h"
#include "database.h"

/* global definition */

/*
 * NOTE(review): the following macros expand in a context where the
 * variables `pd' (pri *), `tm' (type, the sepia_kernel module tag) and
 * `pricode' (pri_code_t) are declared by the enclosing function.
 * They leave the procedure descriptor of the defined predicate in `pd'.
 */

/* Define `d' as a kernel-module-local procedure whose code is `ccode' */
#define Local_Kernel_Proc(d, flag, ccode) \
	pd = local_procedure(d, d_.kernel_sepia, tm, PRI_CREATE); \
	pd->flags |= SYSTEM|flag; \
	pricode.vmc = ccode; \
	pri_define_code(pd, VMCODE, pricode);

/* Define `d' as a procedure exported from the kernel module */
#define Exported_Kernel_Proc(d, flag, ccode) \
	pd = export_procedure(d, d_.kernel_sepia, tm); \
	pd->flags |= SYSTEM|flag; \
	pricode.vmc = ccode; \
	pri_define_code(pd, VMCODE, pricode);

/* Emit a Get_variableNAML instruction: allocate an environment of
 * `size' slots and move argument register A[arg] into local Y[var]
 * (see the "Goal -> Yn" usage comments in code_init below) */
#define Store_Var_Alloc(size, arg, var) \
	Store_4( \
	    Get_variableNAML, \
	    Esize(size), \
	    Address(arg), \
	    Esize(var))

/* Look up (or create a forward reference to) a kernel predicate */
#define KernelPri(d) \
	visible_procedure(d, d_.kernel_sepia, tm, PRI_CREATE|PRI_REFER)


/*
 * CAUTION: only static code that is never redefined may use
 * an array to hold the code. Otherwise the system would
 * try to free the code space to the code heap on recompilation.
 *
 * The first dummy procedure is there to fool the profiler:
 * All code fragments which do not belong to a particular procedure
 * account for this dummy procedure (assuming the C compiler allocates
 * all the following arrays consecutively).
 */

/*
 * Statically allocated code arrays (sizes in vmcode words, chosen to
 * fit the instruction sequences stored into them by code_init below).
 */

vmcode	dummy_procedure_code_[PROC_PREFIX_SIZE+3];	/* should be the first */
vmcode	fail_return_env_0_[3];
vmcode	eval_code_[15];
vmcode	slave_code_[2];
vmcode	slave_fail_code_[25];
vmcode	restore_code_[3];
vmcode	restore_debug_code_[3];
vmcode	trace_exit_code_[3];
vmcode	return_code_[2];
vmcode	it_code_[20];
vmcode	it_block_code_[21];
vmcode	recurs_code_[15];
vmcode	boot_code_[16];
vmcode	fail_code_[2];

/*
 * Special backtrack codes that are used to identify certain frames
 * on the control stack. They may not be used for other purposes.
 */

vmcode	it_fail_code_[3];		/* interrupt emulator invocation frame */
vmcode	stop_fail_code_[3];		/* recursive emulator invocation frame */
vmcode	exception_fail_code_[3];	/* exception frame */
vmcode	catch_unint_fail_code_[11];	/* catch frame with events deferred */
vmcode	external_fail_code_[2];		/* choicepoint of backtracking external */
vmcode	soft_cut_code_[2];		/* softly cut choice point */
vmcode	gc_fail_code_[2];		/* gc dummy choicepoint */
vmcode	par_fail_code_[2];		/* parallel choicepoint */

/*
 * code arrays for static procedures with proper header.
 * They are used instead of heap-allocated space only when the code
 * is referenced by direct pointers other than the one in the pri.
 */

vmcode	syserror_code_[PROC_PREFIX_SIZE+13];
vmcode	true_code_[PROC_PREFIX_SIZE+2];
vmcode	cut_to_code_[PROC_PREFIX_SIZE+4];
vmcode	comma_body_code_[PROC_PREFIX_SIZE+31];
vmcode	semic_body_code_[PROC_PREFIX_SIZE+20];
vmcode	cond_body_code_[PROC_PREFIX_SIZE+36];
vmcode	cond3_body_code_[PROC_PREFIX_SIZE+51];
vmcode	softcut5_body_code_[PROC_PREFIX_SIZE+52];
vmcode	call2_code_[PROC_PREFIX_SIZE+11];
vmcode	call_with_cut_code_[PROC_PREFIX_SIZE+3];
vmcode	call_at_code_[PROC_PREFIX_SIZE+5];
vmcode	gc_code_[PROC_PREFIX_SIZE+8];
vmcode	exit_block_code_[PROC_PREFIX_SIZE+9];
vmcode	wake_code_[PROC_PREFIX_SIZE+5];
vmcode	idle_code_[PROC_PREFIX_SIZE+4];
vmcode	fork_code_[PROC_PREFIX_SIZE+49];
vmcode	wb_code_[PROC_PREFIX_SIZE+15];
vmcode	head_match_code_[PROC_PREFIX_SIZE+15];

/*
 * These are pointers into the arrays above
 * (set by code_init to entry points inside the code sequences)
 */

vmcode	*bip_error_code_,
	*auto_gc_code_,
	*catch_fail_code_,
	*do_exit_block_code_,
	*sync_it_code_,
	*do_idle_code_,
	*idle_ret_code_,
	*fork_unify_code_,
	*meta_exit_simple_code_,
	*meta_last_exit_simple_code_,
	*prolog_error_code_,
	*wb_fail_code_,
	*do_call_code_;


/*
 * Procedure descriptors of built-ins that other parts of the
 * system need to reference directly.
 */
pri	*true_proc_,
	*arity_proc_,
	*softcut_proc_,
	*cut_to_proc_,
	*identical_proc_,
	*not_identical_proc_,
	*inequality_proc_,
	*not_ident_list_proc_,
	*minus_proc_,
	*add_proc_,
	*sub_proc_,
	*mul_proc_,
	*quot_proc_,
	*div_proc_,
	*rem_proc_,
	*fdiv_proc_,
	*mod_proc_,
	*and_proc_,
	*or_proc_,
	*xor_proc_,
	*bitnot_proc_,
	*lt_proc3_,
	*le_proc3_,
	*gt_proc3_,
	*ge_proc3_,
	*eq_proc3_,
	*ne_proc3_,
	*arg_proc_,
	*make_suspension_proc_,
	*cut_to_stamp_proc_,
	*fail_proc_;


/*
 * make_function_bip()
 * make_test_bip()
 *
 * Create descriptor and code stubs for those built-ins that are implemented
 * by a single abstract machine instruction.
The code sequence is only used 197 * for metacalling and waking. Other calls are inlined by the compiler. 198 */ 199 200pri * 201make_function_bip(dident did1, int opc, uint32 flags, uint32 mode, int argdesc, int store_desc) 202{ 203 vmcode *code; 204 type tm; 205 pri_code_t pricode; 206 pri *pd; 207 word i, arity = DidArity(did1); 208 word result_arg = 0; 209 unsigned currdesc = argdesc; 210 Allocate_Default_Procedure(arity+7, did1); 211 Exported_Kernel_Proc(did1, flags|EXTERN|ARGFLEXWAM|DEBUG_DB|DEBUG_DF, code); 212 PriMode(pd) = mode; 213 Store_i(opc); 214 for(i=1; i<=arity; ++i) { 215 if ((currdesc & 3) == 1) { 216 result_arg = i; 217 Store_d(Address(arity+1)); 218 } else { 219 Store_d(Address(i)); 220 } 221 currdesc >>= 2; 222 } 223 if (store_desc) { 224 Store_d(argdesc); 225 } 226 /* 227 * The previous instruction leaves the function result in argument 228 * register A[arity+1], which then needs to be unified with A[result_arg]. 229 */ 230 if (result_arg) { 231 Store_3(Get_valueAMAM,Address(result_arg),Address(arity+1)) 232 } 233 Store_i(Retd_nowake); /* because inlined calls don't wake either */ 234 Store_i(Code_end); 235 return pd; 236} 237 238pri * 239make_test_bip(dident did1, int opc, uint32 flags, uint32 mode, int argdesc, int vis) 240{ 241 vmcode *code; 242 type tm; 243 pri_code_t pricode; 244 pri *pd; 245 word i, arity = DidArity(did1); 246 Allocate_Default_Procedure(arity+4, did1); 247 if (vis == EXPORT) { 248 Exported_Kernel_Proc(did1, flags|EXTERN|ARGFLEXWAM|DEBUG_DB|DEBUG_DF, code); 249 } else { 250 Local_Kernel_Proc(did1, flags|EXTERN|ARGFLEXWAM|DEBUG_DB|DEBUG_DF, code); 251 } 252 PriMode(pd) = mode; 253 Store_i(opc); 254 for(i=1; i<=arity; ++i) { 255 Store_d(Address(i)); 256 } 257 if (argdesc >= 0) { 258 Store_d(argdesc); 259 } 260 Store_i(Retd_nowake); /* because inlined calls don't wake either */ 261 Store_i(Code_end); 262 return pd; 263} 264 265 266/* 267 * Create an exported predicate call_/N, N >= 3 268 */ 269int 270ec_create_call_n(dident 
call_did) 271{ 272 vmcode *code; 273 pri_code_t pricode; 274 pri *pd; 275 type tm; 276 tm.kernel = ModuleTag(d_.kernel_sepia); 277 int i = DidArity(call_did); 278 Allocate_Default_Procedure(8, call_did); 279 Exported_Kernel_Proc(call_did, ARGFIXEDWAM|DEBUG_TRMETA, code); 280 pd->flags &= ~DEBUG_TR; /*untraceable*/ 281 Store_3(MoveAMAM, Address(i), Address(i+1)) 282 Store_2(SavecutAM, Address(i+2)) 283 Store_2(Meta_jmp,i-2) 284 Store_i(Code_end) 285 return PSUCCEED; 286} 287 288 289 290vmcode * 291allocate_code_block(word size, uword btablepos, uword link, uword bid, uword fid, uword btype, uword cid) 292{ 293 vmcode *code; 294 295 code = (vmcode *) hg_alloc(((int)size + PROC_PREFIX_SIZE) * sizeof(vmcode)); 296 Make_Prefix(link, btablepos, size, bid, fid, btype, cid) 297 return code; 298} 299 300 301reclaim_ground_structure(vmcode *code_header) 302{ 303 extern void free_heapterm(); 304 305 free_heapterm(ProcStruct(CodeStart(code_header))); 306 hg_free((generic_ptr) code_header); 307} 308 309 310/* 311 * Initialisation of code that is defined on the WAM level. 312 * Code arrays and pointers in private memory have to be initialised always. 313 * Heap-allocated code and PRIs only if (flags & INIT_SHARED). 314 */ 315 316void 317code_init(int flags) 318{ 319 extern dident d_call_susp_; 320 321 dident did1; 322 register vmcode *code; 323 vmcode *aux, *aux1; 324 register pri *pd; 325 type tm; 326 pri_code_t pricode; 327 328 tm.kernel = ModuleTag(d_.kernel_sepia); 329 /* 330 * dummy procedure 331 * Its code should precede all procedureless code fragments 332 * so that the profiler accounts them for this procedure. 
333 * (it can also be used for other purposes) 334 */ 335 code = &dummy_procedure_code_[0]; 336 Make_Default_Prefix(d_.dummy_call); 337 if (flags & INIT_SHARED) 338 { 339 Local_Kernel_Proc(d_.dummy_call, ARGFIXEDWAM | DEBUG_DB, code); 340 } 341 Store_2(Undefined, pd) 342 Store_i(Code_end) 343 344 if (flags & INIT_SHARED) 345 { 346 347/* 348 * The debugger needs the procedure descriptor of (;)/2, that's why 349 * we have a prelimiary definition here. It's overwritten in kernel.pl 350 */ 351 pd = global_procedure(d_.comma, d_.kernel_sepia, tm); 352 pd->flags |= SYSTEM|TOOL; 353 pd = global_procedure(d_.semicolon, d_.kernel_sepia, tm); 354 pd->flags |= SYSTEM|TOOL; 355 pd = global_procedure(d_.cond, d_.kernel_sepia, tm); 356 pd->flags |= SYSTEM|TOOL; 357 pd = local_procedure(d_.softcut, d_.kernel_sepia, tm, PRI_CREATE); 358 pd->flags |= SYSTEM|TOOL; 359 360 } 361 362 363/* 364 * Definition of call_(Goal, Module), the body of call/1 365 */ 366 did1 = in_dict("untraced_call", 2); 367 code = &call2_code_[0]; 368 Make_Default_Prefix(did1); 369 if (flags & INIT_SHARED) 370 { 371 Exported_Kernel_Proc(did1, ARGFIXEDWAM|DEBUG_TRMETA, code); 372 pd->flags &= ~DEBUG_TR; /*untraceable*/ 373 Exported_Kernel_Proc(d_.call_body, ARGFIXEDWAM|DEBUG_TRMETA, code); 374 pd->flags &= ~DEBUG_TR; /*untraceable*/ 375 Exported_Kernel_Proc(in_dict("trace_body",2), ARGFIXEDWAM|DEBUG_ST|DEBUG_SP|DEBUG_TRMETA, code); 376 Exported_Kernel_Proc(in_dict("debug_body",2), ARGFIXEDWAM|DEBUG_ST|DEBUG_TRMETA, code); 377 } 378 Store_3(MoveAMAM, Address(2), Address(3)) 379 Store_2(SavecutAM, Address(4)) 380 Store_2(Meta_jmp,0) 381 Store_i(Code_end) /* not really, see below */ 382/* 383 * The following code is dynamically inserted (by the Metacall instruction) 384 * after a metacalled builtin. 385 * It generates the EXIT_PORT for the builtin and pops its arguments 386 * together with the dummy environment. 387 * It is in the code block of call/2 (for the profiler). 
388 */ 389 meta_exit_simple_code_ = code; 390 Store_i(Exitd_nowake); /* Do not wake here (like compiled sequence) */ 391 meta_last_exit_simple_code_ = code; 392 Store_i(Exitd); /* Do wake */ 393 Store_i(Code_end); 394 395/* 396 * call_with_cut(Goal,CallerModule,LookupModule,SaveCut) 397 */ 398 did1 = in_dict("call_with_cut", 4); 399 code = &call_with_cut_code_[0]; 400 Make_Default_Prefix(did1); 401 if (flags & INIT_SHARED) 402 { 403 Exported_Kernel_Proc(did1, ARGFIXEDWAM|DEBUG_DF, code); 404 } 405 Store_2(Meta_jmp,0) /* (Goal,CallerMod,LookupMod,Cut) */ 406 Store_i(Code_end) 407 408/* 409 * @(Goal,CallerModule,LookupModule) - the tool body of @/2 410 */ 411 did1 = in_dict("@", 3); 412 code = &call_at_code_[0]; 413 Make_Default_Prefix(did1); 414 if (flags & INIT_SHARED) 415 { 416 Exported_Kernel_Proc(did1, ARGFIXEDWAM|DEBUG_DB|DEBUG_DF, code); 417 } 418 do_call_code_ = code; 419 Store_2(SavecutAM, Address(4)) 420 Store_2(Meta_jmp,0) /* (Goal,CallerMod,LookupMod,Cut) */ 421 Store_i(Code_end) 422 423/* 424 * :@(LookupModule,Goal,CallerModule) - the tool body of :/2 425 */ 426 did1 = in_dict(":@", 3); 427 Allocate_Default_Procedure(4, did1); 428 Exported_Kernel_Proc(did1, ARGFIXEDWAM|DEBUG_DB|DEBUG_DF, code); 429 Store_2(SavecutAM, Address(4)) 430 Store_i(Explicit_jmp) /* (LookupMod,Goal,CallerMod,Cut) */ 431 Store_i(Code_end) 432 433/* 434 * wake/0 435 * Call all woken lists whose priority is higher than WP 436 */ 437 code = &wake_code_[0]; 438 Make_Default_Prefix(d_.wake); 439 if (flags & INIT_SHARED) 440 { 441 Exported_Kernel_Proc(d_.wake, ARGFIXEDWAM, code); 442 } 443 Store_2(Wake_init, Esize(1)) 444 Store_i(Wake) 445 Store_i(Exit) 446 Store_i(Code_end) 447 448/* 449 * Goal1 , Goal2 450 * ','(Goal1, Goal2, Module, Cut) :- 451 * call(Goal1, Module, Module, Cut), 452 * call(Goal2, Module, Module, Cut). 
453 */ 454 did1 = in_dict(",",4); 455 code = &comma_body_code_[0]; 456 Make_Default_Prefix(did1); 457 if (flags & INIT_SHARED) 458 { 459 Local_Kernel_Proc(did1, ARGFIXEDWAM, code); 460 } 461 Store_Var_Alloc(3, 2, 3); /* Goal2 -> Y3 */ 462 Store_3(MoveAML, Address(3), Esize(2)) /* Module -> Y2 */ 463 Store_3(MoveAML, Address(4), Esize(1)) /* Cut -> Y1 */ 464 Store_3(MoveAMAM, Address(3), Address(2)) 465 Store_2(Metacall,Esize(3)) 466 Store_3(MoveLAM, Esize(3), Address(1)) 467 Store_3(MoveLAM, Esize(2), Address(2)) 468 Store_3(MoveAMAM, Address(2), Address(3)) 469 Store_3(MoveLAM, Esize(1), Address(4)) 470 Store_i(Deallocate) 471 Store_2(Meta_jmp,0) 472 Store_i(Code_end) 473 474/* 475 * Goal1 -> Goal2 476 * '->'(Goal1, Goal2, Module, Cut) :- 477 * call(Goal1, Module, Module, []). 478 * !, 479 * call(Goal2, Module, Module, Cut). 480 */ 481 did1 = in_dict("->",4); 482 code = &cond_body_code_[0]; 483 Make_Default_Prefix(did1); 484 if (flags & INIT_SHARED) 485 { 486 Local_Kernel_Proc(did1, ARGFIXEDWAM, code); 487 } 488 Store_Var_Alloc(4, 2, 4); /* Goal2 -> Y4 */ 489 Store_3(MoveAML, Address(3), Esize(3)) /* Module -> Y3 */ 490 Store_3(MoveAML, Address(4), Esize(2)) /* Cut -> Y2 */ 491 Store_i(Savecut) 492 Store_3(MoveAMAM, Address(3), Address(2)) 493 Store_2(SavecutAM, Address(4)) 494 Store_2(Metacall,Esize(4)) 495 Store_2(Cut, Esize(4)) 496 Store_3(MoveLAM, Esize(4), Address(1)) 497 Store_3(MoveLAM, Esize(3), Address(2)) 498 Store_3(MoveAMAM, Address(2), Address(3)) 499 Store_3(MoveLAM, Esize(2), Address(4)) 500 Store_i(Deallocate) 501 Store_2(Meta_jmp,0) 502 Store_i(Code_end) 503 504/* 505 * Goal1 ; Goal2 506 * ;(Goal1, Goal2, Module, Cut) :- 507 * call(Goal1, Module, Module, Cut). 508 * ;(Goal1, Goal2, Module, Cut) :- 509 * call(Goal2, Module, Module, Cut). 
510 */ 511 did1 = in_dict(";",4); 512 code = &semic_body_code_[0]; 513 Make_Default_Prefix(did1); 514 if (flags & INIT_SHARED) 515 { 516 Local_Kernel_Proc(did1, ARGFIXEDWAM, code); 517 } 518 Store_3(Try_me_else, NO_PORT, 4) 519 aux = code++; 520 Store_3(MoveAMAM, Address(3), Address(2)) 521 Store_2(Meta_jmp,0) 522 *(vmcode**)aux = code; 523 Store_2(Trust_me, NEXT_PORT) 524 Store_3(MoveAMAM, Address(2), Address(1)) 525 Store_3(MoveAMAM, Address(3), Address(2)) 526 Store_2(Meta_jmp,0) 527 Store_i(Code_end); 528 529/* 530 * Goal1 -> Goal2 ; Goal3 531 * ;(Goal1, Goal2, Module, Cut, Goal3) :- 532 * call(Goal1, Module, Module, []). 533 * !, 534 * call(Goal2, Module, Module, Cut). 535 * ;(Goal1, Goal2, Module, Cut, Goal3) :- 536 * call(Goal3, Module, Module, Cut). 537 */ 538 did1 = in_dict(";", 5); 539 code = &cond3_body_code_[0]; 540 Make_Default_Prefix(did1); 541 if (flags & INIT_SHARED) 542 { 543 Local_Kernel_Proc(did1, ARGFIXEDWAM, code); 544 } 545 Store_3(Try_me_else, NO_PORT, 5) 546 aux = code++; 547 Store_Var_Alloc(4, 2, 4); /* Goal2 -> Y4 */ 548 Store_3(MoveAML, Address(4), Esize(3)) /* Cut -> Y3 */ 549 Store_3(MoveAML, Address(3), Esize(2)) /* Module -> Y2 */ 550 Store_i(Savecut) 551 Store_3(MoveAMAM, Address(3), Address(2)) 552 Store_2(SavecutAM, Address(4)) 553 Store_2(Metacall,Esize(4)) 554 Store_2(Cut, Esize(4)) 555 Store_3(MoveLAM, Esize(4), Address(1)) 556 Store_3(MoveLAM, Esize(2), Address(2)) 557 Store_3(MoveAMAM, Address(2), Address(3)) 558 Store_3(MoveLAM, Esize(3), Address(4)) 559 Store_i(Deallocate) 560 Store_2(Meta_jmp,0) 561 *(vmcode**)aux = code; 562 Store_2(Trust_me, NEXT_PORT) 563 Store_3(MoveAMAM, Address(5), Address(1)) 564 Store_3(MoveAMAM, Address(3), Address(2)) 565 Store_2(Meta_jmp,0) 566 Store_i(Code_end); 567 568 569/* 570 * Goal1 *-> Goal2 ; Goal3 571 * softcut(Goal1, Goal2, Module, Cut, Goal3) :- 572 * call(Goal1, Module, Module, []). 573 * softcut, 574 * call(Goal2, Module, Module, Cut). 
575 * softcut(Goal1, Goal2, Module, Cut, Goal3) :- 576 * call(Goal3, Module, Module, Cut). 577 */ 578 did1 = in_dict("softcut", 5); 579 code = &softcut5_body_code_[0]; 580 Make_Default_Prefix(did1); 581 if (flags & INIT_SHARED) 582 { 583 Local_Kernel_Proc(did1, ARGFIXEDWAM, code); 584 } 585 Store_3(Try_me_else, NO_PORT, 5) 586 aux = code++; 587 Store_Var_Alloc(4, 2, 4); /* Goal2 -> Y4 */ 588 Store_3(MoveAML, Address(4), Esize(3)) /* Cut -> Y3 */ 589 Store_3(MoveAML, Address(3), Esize(2)) /* Module -> Y2 */ 590 Store_2(SavecutL, Esize(1)) 591 Store_3(MoveAMAM, Address(3), Address(2)) 592 Store_2(SavecutAM, Address(4)) 593 Store_2(Metacall,Esize(4)) 594 Store_2(SoftcutL, Esize(1)) 595 Store_3(MoveLAM, Esize(4), Address(1)) 596 Store_3(MoveLAM, Esize(2), Address(2)) 597 Store_3(MoveAMAM, Address(2), Address(3)) 598 Store_3(MoveLAM, Esize(3), Address(4)) 599 Store_i(Deallocate) 600 Store_2(Meta_jmp,0) 601 *(vmcode**)aux = code; 602 Store_2(Trust_me, NEXT_PORT) 603 Store_3(MoveAMAM, Address(5), Address(1)) 604 Store_3(MoveAMAM, Address(3), Address(2)) 605 Store_2(Meta_jmp,0) 606 Store_i(Code_end); 607 608 609/* 610 * cut_to/1, also used for metacalled !/0 611 */ 612 code = cut_to_code_; 613 Make_Default_Prefix(d_.cut_to); 614 if (flags & INIT_SHARED) 615 { 616 Exported_Kernel_Proc(d_.cut_to, EXTERN|ARGFLEXWAM|DEBUG_DB|DEBUG_DF, code); 617 } 618 Store_2(CutAM, Address(1)) 619 Store_i(Retd_nowake); 620 Store_i(Code_end); 621 622 623/* 624 * ?=/2 (head matching expansion) 625 * This is normally only generated in the compiler's normalisation phase 626 * and then immediately inlined in the code generation phase. However, 627 * when we store the normalised source (because of inline/1), this can 628 * show up in the result of goal expansion. In case that expansion is 629 * then metacalled instead of compiled, we need this definition. 
630 */ 631 code = head_match_code_; 632 did1 = in_dict("?=",2); 633 Make_Default_Prefix(did1); 634 if (flags & INIT_SHARED) 635 { 636 Exported_Kernel_Proc(did1, EXTERN|ARGFLEXWAM|DEBUG_DB|DEBUG_DF, code); 637 } 638 Store_Var_Alloc(2, 1, 1); /* 4 words */ 639 Store_3(MoveAML, Address(2), Esize(2)) 640 Store_3(CallfP, DidPtr(in_dict("instance_simple",2))->procedure, 0) 641 Store_3(Get_valueLL, Esize(1), Esize(2)) 642 Store_i(Exit); 643 Store_i(Code_end); 644 645 646/* 647 * Backtrack codes for special control frames 648 */ 649 650 code = &it_fail_code_[0]; 651 Store_2(Exit_emulator, PFAIL) 652 Store_i(Code_end); 653 654 code = &stop_fail_code_[0]; 655 Store_2(Exit_emulator, PFAIL) 656 Store_i(Code_end); 657 658 code = &exception_fail_code_[0]; 659 Store_i(Continue_after_exception) 660 Store_i(Failure) 661 Store_i(Code_end); 662 663 code = &external_fail_code_[0]; 664 Store_i(Refail) 665 Store_i(Code_end); 666 667 code = &gc_fail_code_[0]; 668 Store_i(Refail) 669 Store_i(Code_end); 670 671 code = &soft_cut_code_[0]; 672 Store_i(Refail) 673 Store_i(Code_end); 674 675/* 676 * The fail code of dead parallel choicepoints 677 */ 678 code = &par_fail_code_[0]; 679 Store_i(Refail) 680 Store_i(Code_end); 681 682/* 683 * query_emulc(Goal, Module) :- not not call(Goal, Module). 684 * Discard all stacks, just return succeed or fail. 
685 */ 686 code = &eval_code_[0]; 687 Store_2(Allocate, Esize(1)) 688 Store_i(Savecut) 689 Store_3(MoveAMAM, Address(2), Address(3)) 690 Store_2(SavecutAM, Address(4)) 691 Store_2(Metacall, Esize(1)) 692 Store_2(Cut,Esize(1)) 693 Store_2(Exit_emulator, PSUCCEED) 694 Store_i(Code_end); 695 696/* 697 * slave_emulc() 698 */ 699 code = &slave_code_[0]; 700 Store_i(Failure) /* execute slave_fail_code_ */ 701 Store_i(Code_end); 702 703 code = &slave_fail_code_[0]; 704 Store_2(Fail_clause, Esize(2)) /* invoke the scheduler */ 705 Store_2(Allocate, Esize(1)) 706 Store_i(Savecut) 707 Store_3(Put_atomAM, Address(1), in_dict("slave",0)) 708 Store_4(Put_constantAM, Address(2), ModuleTag(d_.kernel_sepia), 709 d_.kernel_sepia) 710 Store_3(MoveAMAM, Address(2), Address(3)) 711 Store_2(SavecutAM, Address(4)) 712 Store_2(Metacall, Esize(1)) 713 Store_i(Failure) 714 Store_i(Code_end); 715 716/* 717 * sub_emulc(Goal, Module) :- call(Goal, Module), !. 718 * sub_emulc(Goal, Module) :- fail. 719 * Cut, but keep the global and trail. 
720 */ 721 code = &recurs_code_[0]; 722 Store_2(Allocate, Esize(1)) 723 Store_i(Savecut) 724 Store_3(MoveAMAM, Address(2), Address(3)) 725 Store_2(SavecutAM, Address(4)) 726 Store_2(Metacall, Esize(1)) 727 Store_2(Cut,Esize(1)) 728 Store_2(Exit_emulator, PKEEP) 729 Store_i(Code_end); 730 731 732 code = &boot_code_[0]; 733 Store_2(Allocate, Esize(0)) 734 Store_3(MoveAMAM, Address(2), Address(3)) 735 Store_3(Put_integerAM, Address(2), 0) 736 Store_2(Put_variableAM, Address(4)) 737 Store_3(CallP, DidPtr(in_dict("load_eco",4))->procedure, 0) 738 Store_2(Exit_emulator, PSUCCEED) 739 Store_i(Code_end); 740 741/* 742 * Auxiliary code for synchronous event handling 743 */ 744 code = &restore_code_[0]; 745 Store_d(Esize(-1)) 746 Store_i(Continue_after_event) /* entry point for restoring status */ 747 Store_i(Code_end); 748 749 code = &restore_debug_code_[0]; 750 Store_d(Esize(-1)) 751 Store_i(Continue_after_event_debug) /* entry point for restoring status */ 752 Store_i(Code_end); 753 754 code = &trace_exit_code_[0]; 755 Store_d(Esize(0)) 756 Store_i(Debug_exit) 757 Store_i(Code_end); 758 759 code = &return_code_[0]; 760 Store_i(Ret_nowake); /* no Retd: event may leave chp! */ 761 Store_i(Code_end); /* no wake: argument registers valid! 
*/ 762 763/* 764 * This fail_code_ is used by the fail cases of switch instructions and the like 765 */ 766 code = &fail_code_[0]; 767 Store_i(Failure) 768 Store_i(Code_end); 769 770 771/* 772 * &fail_return_env_0_[1] is used as a return address with 773 * environment size 0, and for triggering failure after return 774 */ 775 code = &fail_return_env_0_[0]; 776 Store_d(Esize(0)) 777 Store_i(Failure) 778 Store_i(Code_end); 779 780 781/* 782 * catch_/4 and throw/1 (alias block/4 and exit_block/1) 783 */ 784 785 if (flags & INIT_SHARED) 786 { 787 did1 = in_dict("catch_", 4); 788 Allocate_Default_Procedure(16, did1); 789 Exported_Kernel_Proc(did1, ARGFIXEDWAM | DEBUG_DF | DEBUG_TRMETA, code); 790 Exported_Kernel_Proc(in_dict("block",4), ARGFIXEDWAM | DEBUG_DF | DEBUG_TRMETA, code); 791 Store_2(Catch, 0) 792 Store_2(Allocate, Esize(1)) 793 Store_i(Savecut) 794 Store_3(MoveAMAM, Address(2), Address(3)) 795 Store_2(SavecutAM, Address(4)) 796 Store_2(Metacall, Esize(1)) 797 Store_2(Cut_single, 0) /* if the Goal was deterministic */ 798 Store_i(Exit) 799 Store_i(Code_end); 800 801 did1 = in_dict("block_atomic", 4); 802 Allocate_Default_Procedure(16, did1); 803 Exported_Kernel_Proc(did1, ARGFIXEDWAM | DEBUG_DF | DEBUG_TRMETA, code); 804 Store_2(Catch, 1) 805 Store_2(Allocate, Esize(1)) 806 Store_i(Savecut) 807 Store_3(MoveAMAM, Address(2), Address(3)) 808 Store_2(SavecutAM, Address(4)) 809 Store_2(Metacall, Esize(1)) 810 Store_2(Cut_single, 0) /* if the Goal was deterministic */ 811 Store_i(Exit) 812 Store_i(Code_end); 813 } 814 815 code = &exit_block_code_[0]; 816 Make_Default_Prefix(d_.throw1); 817 if (flags & INIT_SHARED) 818 { 819 Exported_Kernel_Proc(d_.throw1, ARGFIXEDWAM | DEBUG_DF | DEBUG_DB,code); 820 Exported_Kernel_Proc(d_.exit_block, ARGFIXEDWAM | DEBUG_DF | DEBUG_DB,code); 821 } 822 do_exit_block_code_ = code; 823 Store_i(Throw) 824 Store_3(MoveAMAM, Address(2), Address(3)) 825 Store_2(SavecutAM, Address(4)) 826 Store_2(Meta_jmp,0) 827 Store_i(Code_end); 828 829 
830/* 831 * code for syserror(Err, Goal, ContextMod, LookupMod) 832 * also referenced directly from the emulator 833 */ 834 code = &syserror_code_[0]; 835 Make_Default_Prefix(d_.syserror); 836 if (flags & INIT_SHARED) 837 { 838 Local_Kernel_Proc(d_.syserror, ARGFIXEDWAM | DEBUG_DB, code); 839 } 840 prolog_error_code_ = code; 841 Store_2(Allocate, 0) 842 Store_3(Fastcall, CALL_PORT, 0) 843 Store_i(Exit) 844 Store_i(Code_end) /* continues below */ 845/* 846 * Code for calling error handlers inside builtins. 847 * The exception frame has already been pushed! 848 * Disallow tracing (NO_PORT) for the time being, because when the builtin 849 * raised the exception inside a shallow condition, and the handler fails, 850 * the Continue_after_exception instruction is currently not able to trace 851 * the fail port and adjust the tracer stack correctly. 852 */ 853 bip_error_code_ = code; 854 Store_3(Fastcall, NO_PORT, 0) 855 Store_i(Continue_after_exception) 856 Store_i(Retd_nowake); 857 Store_i(Code_end) /* continues below */ 858#if SIMPLIFY 859 Store_d(Esize(0)) 860 exception_cont_code_ = code; 861 Store_i(Continue_after_exception) 862 Store_i(Retd_nowake); 863 Store_i(Code_end); 864#endif 865 866 867/* 868 * code sequence for calling interrupt handlers 869 */ 870 code = &it_code_[0]; 871 Store_2(Allocate, Esize(1)) 872 Store_i(Savecut) 873 Store_2(Handler_call,0) 874 Store_2(Cut,Esize(1)) 875 Store_2(Exit_emulator, PSUCCEED) 876 Store_i(Code_end); 877 sync_it_code_ = code; 878 Store_2(Allocate, Esize(1)) 879 Store_i(Savecut) 880 Store_2(Handler_call,0) 881 Store_2(Cut,Esize(1)) 882 Store_i(Exitd) 883 Store_i(Code_end) 884 885/* 886 * code sequence for calling interrupt handlers inside an 887 * exit_block protected execution. Simulates: 888 * 889 * it(Sig) :- 890 * block(handler(Sig), Tag, postpone_exit(Tag), sepia_kernel). 
891 */ 892 code = &it_block_code_[0]; 893 Store_4(Put_constantAM, Address(4), ModuleTag(d_.kernel_sepia), 894 d_.kernel_sepia) 895 Store_3(Put_structureAM, Address(3), in_dict("postpone_exit",1)) 896 Store_2(Push_variableAM, Address(2)) 897 Store_2(Catch, 0) /* (Sig, Tag, Recov, Mod) */ 898 Store_2(Allocate, Esize(1)) 899 Store_i(Savecut) 900 Store_2(Handler_call,0) 901 Store_2(Cut,Esize(1)) 902 Store_2(Exit_emulator, PSUCCEED) 903 Store_i(Code_end); 904 905/* 906 * true/0 is here because we want its procedure identifier 907 */ 908 909 code = &true_code_[0]; 910 Make_Default_Prefix(d_.true0); 911 if (flags & INIT_SHARED) 912 { 913 Exported_Kernel_Proc(d_.true0, ARGFIXEDWAM | DEBUG_DF | DEBUG_DB, code); 914 } 915 Store_i(Retd) 916 Store_i(Code_end); 917 918/* 919 * Backtrack fail code for catch that allows handling of fail-events. 920 * Note that the events are triggered in a state where the choicepoint 921 * is still present (this state may be required by the event handlers). 922 * After all the handlers succeeded (or one of them failed), the choicepoint 923 * is popped and failure continues normally. Remaining bug: when a handler 924 * fails while other events are still posted, those other events will be 925 * executed later in the wrong context. The only way I can see to fix that 926 * is to somehow distinguish fail-undo events (always succeed) from 927 * retry-events (may fail) and always handle all the former ones first. 
928 */ 929 930 code = &catch_unint_fail_code_[0]; 931 Store_i(Nop) 932 catch_fail_code_ = code; 933 /* Leave the choice point */ 934 Store_2(Retry_me_else, NO_PORT) 935 aux = code++; /* alternative is ReFail */ 936 Store_2(Allocate, Esize(0)) 937 /* Trigger pending fail-events */ 938 Store_3(CallP, DidPtr(d_.true0)->procedure, 0) 939 *(vmcode**)aux = code; 940 Store_i(Refail) 941 Store_i(Code_end); 942 943/* 944 * garbage_collect/0 945 */ 946 code = &gc_code_[0]; 947 did1 = in_dict("garbage_collect", 0); 948 Make_Default_Prefix(did1) 949 if (flags & INIT_SHARED) 950 { 951 Exported_Kernel_Proc(did1, ARGFIXEDWAM|DEBUG_DF, code); 952 } 953 Store_2(Gc, 1); 954 Store_i(Ret) 955 Store_i(Code_end); 956 957 /* the following sequence is executed on global stack soft overflow 958 * i.e. TG > TG_SL. This is in the same code block as garbage_collect/0 959 * so it accounts for garbage_collect/0 in the profiler. 960 */ 961 auto_gc_code_ = code; 962 Store_2(Gc, 0); 963 Store_i(Ret) 964 Store_i(Code_end); 965 966/* 967 * idle/0 968 * Dummy procedure where the engine spends its time while scheduling. 969 */ 970 code = &idle_code_[0]; 971 did1 = in_dict("idle", 0); 972 Make_Default_Prefix(did1) 973 if (flags & INIT_SHARED) 974 { 975 Local_Kernel_Proc(did1, ARGFIXEDWAM|DEBUG_DF, code); 976 } 977 do_idle_code_ = code; 978 Store_2(JmpdA, do_idle_code_); 979 idle_ret_code_ = code; 980 Store_i(Retd_nowake) /* No event handling here: After a job 981 * installation the state is not clean! */ 982 Store_i(Code_end); 983 984 985/* 986 * fork/2 987 * To create parallel choicepoints with arbitrary many alternatives. 
988 */ 989 code = &fork_code_[0]; 990 did1 = in_dict("fork", 2); 991 Make_Default_Prefix(did1) 992 if (flags & INIT_SHARED) 993 { 994 Exported_Kernel_Proc(did1, ARGFIXEDWAM | DEBUG_DF | DEBUG_DB, code); 995 } 996 Store_2(Integer_range_switchAM, Address(1)) 997 aux = code++; 998 Store_d(1); /* table size */ 999 Store_2d(fail_code_, aux+4) 1000 Store_3(Put_structureAM, Address(3), did1) 1001 Store_2(Push_local_valueAM, Address(1)) 1002 Store_2(Push_local_valueAM, Address(2)) 1003 Store_3(Put_integerAM, Address(1), 5) 1004 Store_3(MoveAMAM, Address(3), Address(2)) 1005 Store_3(Put_atomAM, Address(3), d_.kernel_sepia); 1006 Store_3(Put_atomAM, Address(4), d_.kernel_sepia); 1007 Store_2(JmpdA, prolog_error_code_) 1008 aux1 = code; 1009 Store_4(Try_parallel, 1, 2, 0) 1010 Store_2(Retry_seq, 0) 1011 Store_2(Fail_clause, Esize(2)) 1012 Store_2(Try_clause, 0) 1013 fork_unify_code_ = code; 1014 Store_3(Get_valueAMAM,Address(1),Address(2)) 1015 Store_i(Ret) 1016 Store_i(Code_end); 1017 *(vmcode**)aux = code; 1018 *code++ = 1; *code++ = (vmcode) fail_code_; 1019 *code++ = 1; *code++ = (vmcode) aux1; 1020 *code++ = 1; *code++ = (vmcode) fork_unify_code_; 1021 Store_i(Code_end); 1022 1023/* 1024 * worker_boundary/0 1025 * Create a dummy parallel choicepoint that can be 1026 * backtracked over only by the worker that created it. 
1027 */ 1028 code = &wb_code_[0]; 1029 did1 = in_dict("worker_boundary", 0); 1030 Make_Default_Prefix(did1); 1031 if (flags & INIT_SHARED) 1032 { 1033 Exported_Kernel_Proc(did1, ARGFIXEDWAM|DEBUG_DB|DEBUG_DF, code); 1034 } 1035 aux = code; 1036 Store_3(Try_parallel, 1, 0) 1037 code++; 1038 wb_fail_code_ = code; 1039 Store_i(Retry_seq) 1040 code++; 1041 Store_2(Fail_clause, Esize(2)) 1042 Store_i(Try_clause) 1043 code++; 1044 Store_i(Ret) 1045 Store_i(Code_end); 1046 ((vmcode**)aux)[3] = code; 1047 ((vmcode**)aux)[5] = code; 1048 ((vmcode**)aux)[9] = code; 1049 *(vmcode**)code++ = &fail_code_[0]; 1050 *(vmcode**)code++ = &aux[10]; 1051 Store_i(Code_end); 1052 1053 1054/*----------------------------------------------------------------- 1055 * Define predicates in WAM code that cannot be defined in Prolog. 1056 * Their code has no other references and is allocated on the heap. 1057 *-----------------------------------------------------------------*/ 1058 1059 if (flags & INIT_SHARED) 1060 { 1061/* 1062 * par_true/0 1063 * Create a dummy parallel choicepoint that can be used to 1064 * reduce the amount of incremental stack copying. 
1065 */ 1066 did1 = in_dict("par_true", 0); 1067 Allocate_Default_Procedure(15, did1); 1068 Exported_Kernel_Proc(did1, ARGFIXEDWAM|DEBUG_DB|DEBUG_DF, code); 1069 aux = code; 1070 Store_3(Try_parallel, 1, 0) 1071 code++; 1072 Store_i(Retry_seq) 1073 code++; 1074 Store_2(Fail_clause, Esize(2)) 1075 Store_i(Try_clause) 1076 code++; 1077 Store_i(Ret) 1078 Store_i(Code_end); 1079 ((vmcode**)aux)[3] = code; 1080 ((vmcode**)aux)[5] = code; 1081 ((vmcode**)aux)[9] = code; 1082 *(vmcode**)code++ = &fail_code_[0]; 1083 *(vmcode**)code++ = &aux[10]; 1084 Store_i(Code_end); 1085 1086/* 1087 * call_suspension(+Suspension) 1088 */ 1089 Allocate_Default_Procedure(2, d_call_susp_); 1090 Exported_Kernel_Proc(d_call_susp_, ARGFIXEDWAM|DEBUG_DB|DEBUG_DF, code); 1091 Store_i(Suspension_jmp) 1092 Store_i(Code_end) 1093 1094/* 1095 * repeat/0 1096 */ 1097 did1 = in_dict("repeat", 0); 1098 Allocate_Default_Procedure(9, did1); 1099 Exported_Kernel_Proc(did1, ARGFIXEDWAM | DEBUG_DF | DEBUG_DB, code); 1100 aux = code; 1101 Store_4(Try, NO_PORT, 0, aux + 7) 1102 Store_3(Retry_me_else, NEXT_PORT, aux + 4) 1103 Store_i(Retn) 1104 Store_i(Code_end); 1105 1106/* 1107 * clause/5 1108 */ 1109 did1 = in_dict("clause",5); 1110 Allocate_Default_Procedure(4, did1); 1111 Local_Kernel_Proc(did1, ARGFIXEDWAM, code); 1112 Store_i(Clause); 1113 Store_i(Retd); 1114 Store_i(Code_end); 1115 1116/* 1117 * guard(Goal, Result, Module) 1118 */ 1119 did1 = in_dict("guard", 3); 1120 Allocate_Default_Procedure(34, did1); 1121 Exported_Kernel_Proc(did1, ARGFIXEDWAM | DEBUG_DB | DEBUG_DF, code); 1122 Store_3(Try_me_else, NO_PORT, 0) 1123 aux = code++; 1124 Store_Var_Alloc(2, 2, 2); /* 4 words */ 1125 Store_i(Savecut) 1126 Store_3(MoveAMAM,Address(3),Address(2)) 1127 Store_2(SavecutAM, Address(4)) 1128 Store_2(Metacall, Esize(1)) 1129 Store_3(MoveLAM, Esize(2), Address(1)) 1130 Store_2(GuardL, Esize(1)) 1131 aux1 = code++; 1132 Store_3(Get_atomAM, Address(1), d_.true0) 1133 Store_i(Exitc) 1134 *(vmcode**)aux1 = code; 
1135 Store_2(Trust_me, NEXT_PORT) 1136 Store_3(Get_atomAM, Address(1), d_.question) 1137 Store_i(Retd); 1138 *(vmcode**)aux = code; 1139 Store_i(Refail); 1140 Store_i(Code_end); 1141 1142/* 1143 * module_directive/4 1144 * dummy code for checking the module in top.pl until this procedure is 1145 * really defined 1146 */ 1147 Allocate_Default_Procedure(2, d_.module_directive); 1148 Local_Kernel_Proc(d_.module_directive, ARGFIXEDWAM, code); 1149 Store_i(Retd); 1150 Store_i(Code_end); 1151 1152/* 1153 * boot_error/2 1154 */ 1155 did1 = in_dict("boot_error", 2); 1156 Allocate_Default_Procedure(70, did1); 1157 Local_Kernel_Proc(did1, ARGFIXEDWAM , code); 1158 pd = KernelPri(in_dict("write_", 2)); 1159 1160 Store_Var_Alloc(2, 2, 1) /* 4 words */ 1161 aux = code+1; 1162 Store_2(Set_bp, 0); 1163 Store_3(Get_integerAM, Address(1), 170); 1164 1165 Store_i(Restore_bp); 1166 Store_3(Put_variableAML, Address(1), Esize(2)) 1167 Store_3(CallP, KernelPri(in_dict("errno_id", 1)), Esize(2)); 1168 aux1 = code+1; 1169 Store_2(Branch, 0); 1170 1171 *(vmcode**)aux = code; 1172 Store_3(Put_variableAML, Address(2), Esize(2)) 1173 Store_3(CallP, KernelPri(in_dict("error_id", 2)), Esize(2)); 1174 1175 *(vmcode**)aux1 = code; 1176 Store_3(MoveLAM, Esize(2), Address(1)); 1177 Store_3(Put_atomAM, Address(2), d_.kernel_sepia); 1178 Store_3(CallP, pd, Esize(2)); 1179 1180 Store_3(Put_atomAM, Address(1), in_dict(" in ",0)); 1181 Store_3(Put_atomAM, Address(2), d_.kernel_sepia); 1182 Store_3(CallP, pd, Esize(2)); 1183 1184 Store_3(MoveLAM, Esize(1), Address(1)); 1185 Store_3(Put_atomAM, Address(2), d_.kernel_sepia); 1186 Store_3(CallP, KernelPri(in_dict("writeq_", 2)), Esize(2)); 1187 1188 Store_3(Put_atomAM, Address(1), in_dict("\n\n",0)); 1189 Store_3(Put_atomAM, Address(2), d_.kernel_sepia); 1190 Store_3(CallP, pd, Esize(2)); 1191 1192 Store_3(Put_integerAM, Address(1), -1); 1193 Store_2(ChainP, DidPtr(in_dict("exit0", 1))->procedure); 1194 Store_i(Code_end); 1195 1196/* 1197 * yield/4 1198 */ 
1199 did1 = in_dict("yield", 4); 1200 Allocate_Default_Procedure(13, did1); 1201 Local_Kernel_Proc(did1, ARGFIXEDWAM , code); 1202 Store_3(Put_integerAM, Address(0), PYIELD) 1203 Store_2(Bounce, 0); /* exits the emulator and bounce over the trampoline */ 1204 Store_3(Get_valueAMAM,Address(1),Address(3)) 1205 Store_3(Get_valueAMAM,Address(2),Address(4)) 1206 Store_i(Retd); 1207 Store_i(Code_end); 1208 1209 1210/* 1211 * Create the built-ins that are implemented by a single abstract machine instruction 1212 */ 1213 make_test_bip(d_.fail, Failure, 0, 0, -1, EXPORT); 1214 make_test_bip(d_.unify, Get_valueAMAM, U_UNIFY, BoundArg(1, NONVAR) | BoundArg(2, NONVAR), -1, EXPORT); 1215 1216 make_test_bip(in_dict("set_bip_error",1), BI_SetBipError, 0, 0, -1, EXPORT); 1217 make_function_bip(in_dict("get_bip_error",1), BI_GetBipError, U_SIMPLE, BoundArg(1,CONSTANT), 1, 0); 1218 make_function_bip(in_dict("get_cut",1), SavecutAM, U_SIMPLE, BoundArg(1,CONSTANT), 1, 0); 1219 1220 make_test_bip(in_dict("sys_return",1), BI_Exit, 0, 0, -1, LOCAL); 1221 make_test_bip(in_dict("cut_to_stamp",2), BI_CutToStamp, 0, 0, 0, EXPORT); 1222 make_test_bip(in_dict("cont_debug",0), BI_ContDebug, 0, 0, -1, LOCAL); 1223 1224 make_test_bip(d_.free1, BI_Free, 0, 0, -1, EXPORT); 1225 make_test_bip(d_.is_suspension, BI_IsSuspension, 0, 0, -1, EXPORT); 1226 make_test_bip(d_.is_event, BI_IsEvent, 0, 0, -1, EXPORT); 1227 make_test_bip(d_.is_handle, BI_IsHandle, 0, 0, -1, EXPORT); 1228 make_test_bip(d_.var, BI_Var, 0, 0, -1, EXPORT); 1229 make_test_bip(d_.nonvar, BI_NonVar, 0, 0, -1, EXPORT); 1230 make_test_bip(d_.meta, BI_Meta, 0, 0, -1, EXPORT); 1231 make_test_bip(d_.atom, BI_Atom, 0, 0, -1, EXPORT); 1232 make_test_bip(d_.integer, BI_Integer, 0, 0, -1, EXPORT); 1233 make_test_bip(d_.rational1, BI_Rational, 0, 0, -1, EXPORT); 1234 make_test_bip(d_.real, BI_Real, 0, 0, -1, EXPORT); 1235 make_test_bip(d_.float1, BI_Float, 0, 0, -1, EXPORT); 1236 make_test_bip(d_.breal, BI_Breal, 0, 0, -1, EXPORT); 1237 
make_test_bip(d_.string, BI_String, 0, 0, -1, EXPORT); 1238 make_test_bip(d_.number, BI_Number, 0, 0, -1, EXPORT); 1239 make_test_bip(d_.atomic, BI_Atomic, 0, 0, -1, EXPORT); 1240 make_test_bip(d_.compound, BI_Compound, 0, 0, -1, EXPORT); 1241 make_test_bip(d_.is_list, BI_IsList, 0, 0, -1, EXPORT); 1242 make_test_bip(d_.bignum, BI_Bignum, 0, 0, -1, EXPORT); 1243 make_test_bip(in_dict("callable",1), BI_Callable, 0, 0, -1, EXPORT); 1244 1245 make_function_bip(in_dict("-",2), BI_Minus, U_SIMPLE, BoundArg(2,CONSTANT), 4, 1); 1246 make_function_bip(in_dict("+",3), BI_Add, PROC_DEMON|U_SIMPLE, BoundArg(3,CONSTANT), 16, 1); 1247 make_function_bip(in_dict("-",3), BI_Sub, PROC_DEMON|U_SIMPLE, BoundArg(3,CONSTANT), 16, 1); 1248 make_function_bip(in_dict("*",3), BI_Mul, PROC_DEMON|U_SIMPLE, BoundArg(3,CONSTANT), 16, 1); 1249 make_function_bip(in_dict("/",3), BI_Quot, PROC_DEMON|U_SIMPLE, BoundArg(3,CONSTANT), 16, 1); 1250 make_function_bip(in_dict("//",3), BI_Div, PROC_DEMON|U_SIMPLE, BoundArg(3,CONSTANT), 16, 1); 1251 make_function_bip(in_dict("rem",3), BI_Rem, PROC_DEMON|U_SIMPLE, BoundArg(3,CONSTANT), 16, 1); 1252 make_function_bip(in_dict("div",3), BI_FloorDiv, PROC_DEMON|U_SIMPLE, BoundArg(3,CONSTANT), 16, 1); 1253 make_function_bip(in_dict("mod",3), BI_FloorRem, PROC_DEMON|U_SIMPLE, BoundArg(3,CONSTANT), 16, 1); 1254 make_function_bip(in_dict("/\\",3), BI_And, PROC_DEMON|U_SIMPLE, BoundArg(3,CONSTANT), 16, 1); 1255 make_function_bip(in_dict("\\/",3), BI_Or, PROC_DEMON|U_SIMPLE, BoundArg(3,CONSTANT), 16, 1); 1256 make_function_bip(in_dict("xor", 3), BI_Xor, PROC_DEMON|U_SIMPLE, BoundArg(3,CONSTANT), 16, 1); 1257 make_function_bip(in_dict("\\",2), BI_Bitnot, U_SIMPLE, BoundArg(2,CONSTANT), 4, 1); 1258 1259 make_function_bip(in_dict("arity",2), BI_Arity, U_SIMPLE, BoundArg(2,CONSTANT), 4, 1); 1260 make_function_bip(in_dict("arg",3), BI_Arg, PROC_DEMON|U_UNIFY, BoundArg(2, NONVAR) | BoundArg(3, NONVAR), 16, 1); 1261 1262 make_function_bip(in_dict("compare",3), BI_Compare, 
U_UNIFY, BoundArg(1, CONSTANT), 1, 0); 1263 make_function_bip(in_dict("list_end",2), BI_ListEnd, U_UNIFY, 0, 4, 0); 1264 make_function_bip(in_dict("qualify_",3), BI_Qualify, U_UNIFY, 0, 4, 0); 1265 1266 make_test_bip(in_dict("make_suspension",4), BI_MakeSuspension, U_UNIFY|DEBUG_INVISIBLE, BoundArg(3, NONVAR), 0, EXPORT); 1267 1268 make_test_bip(d_.identical, BI_Identical, 0, 0, -1, EXPORT); 1269 make_test_bip(d_.not_identical, BI_NotIdentical, 0, 0, -1, EXPORT); 1270 make_test_bip(d_.diff_reg, BI_Inequality, PROC_DEMON, 0, -1, EXPORT); 1271 make_test_bip(in_dict("\\==",3), BI_NotIdentList, 0, BoundArg(3, NONVAR), -1, EXPORT); 1272 1273 make_test_bip(in_dict("<",3), BI_Lt, PROC_DEMON, 0, 0, EXPORT); 1274 make_test_bip(in_dict(">",3), BI_Gt, PROC_DEMON, 0, 0, EXPORT); 1275 make_test_bip(in_dict("=<",3), BI_Le, PROC_DEMON, 0, 0, EXPORT); 1276 make_test_bip(in_dict(">=",3), BI_Ge, PROC_DEMON, 0, 0, EXPORT); 1277 make_test_bip(in_dict("=:=",3), BI_Eq, PROC_DEMON, 0, 0, EXPORT); 1278 make_test_bip(in_dict("=\\=",3), BI_Ne, PROC_DEMON, 0, 0, EXPORT); 1279 1280 } /* end if (flags & INIT_SHARED) */ 1281 1282 1283/*----------------------------------------------------------------- 1284 * Initialize global (non-shared) pointers to procedure identifiers 1285 *-----------------------------------------------------------------*/ 1286 1287#define KernelProc(d) local_procedure(d, d_.kernel_sepia, tm, 0) 1288 1289 true_proc_ = KernelProc(d_.true0); 1290 cut_to_proc_ = KernelProc(d_.cut_to); 1291 softcut_proc_ = KernelProc(d_.softcut); 1292 cut_to_stamp_proc_ = KernelProc(in_dict("cut_to_stamp", 2)); 1293 fail_proc_ = KernelProc(d_.fail); 1294 identical_proc_ = KernelProc(d_.identical); 1295 not_identical_proc_ = KernelProc(d_.not_identical); 1296 not_ident_list_proc_ = KernelProc(in_dict("\\==",3)); 1297 inequality_proc_ = KernelProc(d_.diff_reg); 1298 minus_proc_ = KernelProc(in_dict("-",2)); 1299 add_proc_ = KernelProc(in_dict("+",3)); 1300 sub_proc_ = KernelProc(in_dict("-",3)); 
	/* Remaining cached procedure descriptors for the arithmetic and
	 * comparison built-ins (continuation of the KernelProc() cache
	 * assignments started above). */
	mul_proc_ = KernelProc(in_dict("*",3));
	quot_proc_ = KernelProc(in_dict("/",3));
	div_proc_ = KernelProc(in_dict("//",3));
	rem_proc_ = KernelProc(in_dict("rem",3));
	fdiv_proc_ = KernelProc(in_dict("div",3));
	mod_proc_ = KernelProc(in_dict("mod",3));
	and_proc_ = KernelProc(in_dict("/\\",3));
	or_proc_ = KernelProc(in_dict("\\/",3));
	xor_proc_ = KernelProc(in_dict("xor",3));
	bitnot_proc_ = KernelProc(in_dict("\\",2));
	lt_proc3_ = KernelProc(in_dict("<",3));
	gt_proc3_ = KernelProc(in_dict(">",3));
	le_proc3_ = KernelProc(in_dict("=<",3));
	ge_proc3_ = KernelProc(in_dict(">=",3));
	eq_proc3_ = KernelProc(in_dict("=:=",3));
	ne_proc3_ = KernelProc(in_dict("=\\=",3));
	arg_proc_ = KernelProc(in_dict("arg",3));
	arity_proc_ = KernelProc(in_dict("arity",2));
	make_suspension_proc_ = KernelProc(in_dict("make_suspension",4));
}


/*
 * b_built_code(pd, function, nondet)
 *
 * Generate the abstract machine (WAM) stub code for a built-in that is
 * implemented by a C function: a single External* instruction that calls
 * `function', optionally wrapped in a Try/Retry_me_else pair when the
 * external is nondeterministic.  pd is assumed to be of a valid type
 * (the consistency check has already been made by the caller).
 *
 * Returns via Succeed_, i.e. it always signals success to the caller.
 */

/*ARGSUSED*/
int
b_built_code(pri *pd, word function, int nondet)
{
	vmcode *code, *aux;
	pri_code_t pricode;
	unsigned arity;
	dident did1 = pd->did;

	arity = DidArity(did1);
	/* 4 code words for the External/Code_end sequence, plus 7 more for
	 * the choicepoint wrapper when the external is nondeterministic.
	 * NOTE(review): Allocate_Default_Procedure evidently allocates the
	 * code block and leaves `code' pointing at its start (it is used
	 * unassigned below) -- the macro itself is defined elsewhere. */
	Allocate_Default_Procedure((word) (4 + (nondet?7:0)), did1);
	pricode.vmc = code;
	pd->flags |= EXTERN;
	pri_define_code(pd, VMCODE, pricode);

	if (nondet)
	{
	    /* Nondeterministic external: create a choicepoint so the C
	     * function can be re-invoked on backtracking.  The Try's last
	     * operand is emitted as a placeholder (0) and backpatched via
	     * *(aux - 1) once the address after the Retry_me_else is known.
	     * NOTE(review): the Retry_me_else argument is aux, i.e. its own
	     * address, presumably so that each backtrack retries the same
	     * external -- confirm against the emulator's Retry_me_else
	     * semantics. */
	    Store_4(Try, NO_PORT, arity, 0)
	    aux = code;
	    Store_3(Retry_me_else, (pd->flags & DEBUG_DB)?NEXT_PORT:NO_PORT, aux);
	    *(aux - 1) = (vmcode) code;		/* backpatch Try's placeholder */
	}
	/* Emit the arity-specialised call instruction; arities above 3 use
	 * the generic External instruction. */
	switch(arity)
	{
	case 0: Store_3(External0, pd, function); break;
	case 1: Store_3(External1, pd, function); break;
	case 2: Store_3(External2, pd, function); break;
	case 3: Store_3(External3, pd, function); break;
	default: Store_3(External, pd, function);
	}
	Store_i(Code_end)

	Succeed_;
}