1/* 2 * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include "config.h"
#include "DFGFixupPhase.h"

#if ENABLE(DFG_JIT)

#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGPhase.h"
#include "DFGPredictionPropagationPhase.h"
#include "DFGVariableAccessDataDump.h"
#include "JSCInlines.h"

namespace JSC { namespace DFG {

// The fixup phase walks every node in the graph (while it is still in ThreadedCPS
// form, before the optimization fixpoint) and decides, per node, how its operands
// will be used: it assigns UseKinds to edges, selects arithmetic modes
// (overflow/negative-zero checking), refines ArrayModes from profiling, inserts
// store barriers, and rewrites some nodes into cheaper forms when the profiling
// data justifies it.
class FixupPhase : public Phase {
public:
    FixupPhase(Graph& graph)
        : Phase(graph, "fixup")
        , m_insertionSet(graph)
    {
    }

    // Runs fixup over the whole graph. First fixes up every block, then iterates
    // argument-unboxing profitability to a fixpoint (fixupGetAndSetLocalsInBlock
    // may flip m_profitabilityChanged), and finally injects the conversion nodes
    // implied by the chosen edge representations. Always reports that the graph
    // changed.
    bool run()
    {
        ASSERT(m_graph.m_fixpointState == BeforeFixpoint);
        ASSERT(m_graph.m_form == ThreadedCPS);

        m_profitabilityChanged = false;
        for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex)
            fixupBlock(m_graph.block(blockIndex));

        while (m_profitabilityChanged) {
            m_profitabilityChanged = false;

            for (unsigned i = m_graph.m_argumentPositions.size(); i--;)
                m_graph.m_argumentPositions[i].mergeArgumentUnboxingAwareness();

            for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex)
                fixupGetAndSetLocalsInBlock(m_graph.block(blockIndex));
        }

        for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex)
            injectTypeConversionsInBlock(m_graph.block(blockIndex));

        return true;
    }

private:
    // Fixes up each node in one basic block, in order. Sets m_block/m_indexInBlock/
    // m_currentNode so fixupNode() and the insertion helpers know where they are,
    // then executes the insertion set to splice in any nodes queued during fixup.
    void fixupBlock(BasicBlock* block)
    {
        if (!block)
            return;
        ASSERT(block->isReachable);
        m_block = block;
        for (m_indexInBlock = 0; m_indexInBlock < block->size(); ++m_indexInBlock) {
            m_currentNode = block->at(m_indexInBlock);
            addPhantomsIfNecessary();
            fixupNode(m_currentNode);
        }
        clearPhantomsAtEnd();
        m_insertionSet.execute(block);
    }

    // The per-node fixup dispatch: one case per NodeType. Each case picks edge
    // UseKinds from speculation predictions, sets result representations
    // (double/Int52), refines array modes, and/or inserts auxiliary nodes via
    // m_insertionSet at m_indexInBlock.
    void fixupNode(Node* node)
    {
        NodeType op = node->op();

        switch (op) {
        case SetLocal: {
            // This gets handled by fixupSetLocalsInBlock().
            return;
        }

        case BitAnd:
        case BitOr:
        case BitXor:
        case BitRShift:
        case BitLShift:
        case BitURShift: {
            // Bit ops always truncate to int32, so the operands only need int conversion.
            fixIntConvertingEdge(node->child1());
            fixIntConvertingEdge(node->child2());
            break;
        }

        case ArithIMul: {
            // Math.imul semantics: always a truncating int32 multiply, so lower it
            // to an unchecked ArithMul on Int32 uses.
            fixIntConvertingEdge(node->child1());
            fixIntConvertingEdge(node->child2());
            node->setOp(ArithMul);
            node->setArithMode(Arith::Unchecked);
            node->child1().setUseKind(Int32Use);
            node->child2().setUseKind(Int32Use);
            break;
        }

        case UInt32ToNumber: {
            fixIntConvertingEdge(node->child1());
            if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
                node->convertToIdentity();
            else if (node->canSpeculateInt32(FixupPass))
                node->setArithMode(Arith::CheckOverflow);
            else {
                node->setArithMode(Arith::DoOverflow);
                node->setResult(NodeResultDouble);
            }
            break;
        }

        case ValueAdd: {
            if (attemptToMakeIntegerAdd(node)) {
                node->setOp(ArithAdd);
                node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
                break;
            }
            if (Node::shouldSpeculateNumberOrBooleanExpectingDefined(node->child1().node(), node->child2().node())) {
                fixDoubleOrBooleanEdge(node->child1());
                fixDoubleOrBooleanEdge(node->child2());
                node->setOp(ArithAdd);
                node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
                node->setResult(NodeResultDouble);
                break;
            }

            // FIXME: Optimize for the case where one of the operands is the
            // empty string. Also consider optimizing for the case where we don't
            // believe either side is the empty string. Both of these things should
            // be easy.

            if (node->child1()->shouldSpeculateString()
                && attemptToMakeFastStringAdd<StringUse>(node, node->child1(), node->child2()))
                break;
            if (node->child2()->shouldSpeculateString()
                && attemptToMakeFastStringAdd<StringUse>(node, node->child2(), node->child1()))
                break;
            if (node->child1()->shouldSpeculateStringObject()
                && attemptToMakeFastStringAdd<StringObjectUse>(node, node->child1(), node->child2()))
                break;
            if (node->child2()->shouldSpeculateStringObject()
                && attemptToMakeFastStringAdd<StringObjectUse>(node, node->child2(), node->child1()))
                break;
            if (node->child1()->shouldSpeculateStringOrStringObject()
                && attemptToMakeFastStringAdd<StringOrStringObjectUse>(node, node->child1(), node->child2()))
                break;
            if (node->child2()->shouldSpeculateStringOrStringObject()
                && attemptToMakeFastStringAdd<StringOrStringObjectUse>(node, node->child2(), node->child1()))
                break;
            break;
        }

        case MakeRope: {
            fixupMakeRope(node);
            break;
        }

        case ArithAdd:
        case ArithSub: {
            if (attemptToMakeIntegerAdd(node))
                break;
            fixDoubleOrBooleanEdge(node->child1());
            fixDoubleOrBooleanEdge(node->child2());
            node->setResult(NodeResultDouble);
            break;
        }

        case ArithNegate: {
            // Prefer int32, then Int52, then double; the bytecode flags decide
            // how much overflow/negative-zero checking the chosen mode needs.
            if (m_graph.negateShouldSpeculateInt32(node, FixupPass)) {
                fixIntOrBooleanEdge(node->child1());
                if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
                    node->setArithMode(Arith::Unchecked);
                else if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
                    node->setArithMode(Arith::CheckOverflow);
                else
                    node->setArithMode(Arith::CheckOverflowAndNegativeZero);
                break;
            }
            if (m_graph.negateShouldSpeculateMachineInt(node, FixupPass)) {
                fixEdge<Int52RepUse>(node->child1());
                if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
                    node->setArithMode(Arith::CheckOverflow);
                else
                    node->setArithMode(Arith::CheckOverflowAndNegativeZero);
                node->setResult(NodeResultInt52);
                break;
            }
            fixDoubleOrBooleanEdge(node->child1());
            node->setResult(NodeResultDouble);
            break;
        }

        case ArithMul: {
            if (m_graph.mulShouldSpeculateInt32(node, FixupPass)) {
                fixIntOrBooleanEdge(node->child1());
                fixIntOrBooleanEdge(node->child2());
                if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
                    node->setArithMode(Arith::Unchecked);
                else if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
                    node->setArithMode(Arith::CheckOverflow);
                else
                    node->setArithMode(Arith::CheckOverflowAndNegativeZero);
                break;
            }
            if (m_graph.mulShouldSpeculateMachineInt(node, FixupPass)) {
                fixEdge<Int52RepUse>(node->child1());
                fixEdge<Int52RepUse>(node->child2());
                if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
                    node->setArithMode(Arith::CheckOverflow);
                else
                    node->setArithMode(Arith::CheckOverflowAndNegativeZero);
                node->setResult(NodeResultInt52);
                break;
            }
            fixDoubleOrBooleanEdge(node->child1());
            fixDoubleOrBooleanEdge(node->child2());
            node->setResult(NodeResultDouble);
            break;
        }

        case ArithDiv:
        case ArithMod: {
            if (Node::shouldSpeculateInt32OrBooleanForArithmetic(node->child1().node(), node->child2().node())
                && node->canSpeculateInt32(FixupPass)) {
                // On targets with a fast integer division instruction, do it as int32.
                if (optimizeForX86() || optimizeForARM64() || optimizeForARMv7s()) {
                    fixIntOrBooleanEdge(node->child1());
                    fixIntOrBooleanEdge(node->child2());
                    if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
                        node->setArithMode(Arith::Unchecked);
                    else if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
                        node->setArithMode(Arith::CheckOverflow);
                    else
                        node->setArithMode(Arith::CheckOverflowAndNegativeZero);
                    break;
                }

                // Otherwise do a double division and convert back: this node becomes
                // DoubleAsInt32 fed by a cloned double Div/Mod.
                // This will cause conversion nodes to be inserted later.
                fixDoubleOrBooleanEdge(node->child1());
                fixDoubleOrBooleanEdge(node->child2());

                // But we have to make sure that everything is phantom'd until after the
                // DoubleAsInt32 node, which occurs after the Div/Mod node that the conversions
                // will be inserted on.
                addRequiredPhantom(node->child1().node());
                addRequiredPhantom(node->child2().node());

                // We don't need to do ref'ing on the children because we're stealing them from
                // the original division.
                Node* newDivision = m_insertionSet.insertNode(
                    m_indexInBlock, SpecBytecodeDouble, *node);
                newDivision->setResult(NodeResultDouble);

                node->setOp(DoubleAsInt32);
                node->children.initialize(Edge(newDivision, DoubleRepUse), Edge(), Edge());
                if (bytecodeCanIgnoreNegativeZero(node->arithNodeFlags()))
                    node->setArithMode(Arith::CheckOverflow);
                else
                    node->setArithMode(Arith::CheckOverflowAndNegativeZero);
                break;
            }
            fixDoubleOrBooleanEdge(node->child1());
            fixDoubleOrBooleanEdge(node->child2());
            node->setResult(NodeResultDouble);
            break;
        }

        case ArithMin:
        case ArithMax: {
            if (Node::shouldSpeculateInt32OrBooleanForArithmetic(node->child1().node(), node->child2().node())
                && node->canSpeculateInt32(FixupPass)) {
                fixIntOrBooleanEdge(node->child1());
                fixIntOrBooleanEdge(node->child2());
                break;
            }
            fixDoubleOrBooleanEdge(node->child1());
            fixDoubleOrBooleanEdge(node->child2());
            node->setResult(NodeResultDouble);
            break;
        }

        case ArithAbs: {
            if (node->child1()->shouldSpeculateInt32OrBooleanForArithmetic()
                && node->canSpeculateInt32(FixupPass)) {
                fixIntOrBooleanEdge(node->child1());
                break;
            }
            fixDoubleOrBooleanEdge(node->child1());
            node->setResult(NodeResultDouble);
            break;
        }

        case ArithSqrt:
        case ArithFRound:
        case ArithSin:
        case ArithCos: {
            // These are inherently double operations.
            fixDoubleOrBooleanEdge(node->child1());
            node->setResult(NodeResultDouble);
            break;
        }

        case LogicalNot: {
            if (node->child1()->shouldSpeculateBoolean())
                fixEdge<BooleanUse>(node->child1());
            else if (node->child1()->shouldSpeculateObjectOrOther())
                fixEdge<ObjectOrOtherUse>(node->child1());
            else if (node->child1()->shouldSpeculateInt32OrBoolean())
                fixIntOrBooleanEdge(node->child1());
            else if (node->child1()->shouldSpeculateNumber())
                fixEdge<DoubleRepUse>(node->child1());
            else if (node->child1()->shouldSpeculateString())
                fixEdge<StringUse>(node->child1());
            break;
        }

        case TypeOf: {
            if (node->child1()->shouldSpeculateString())
                fixEdge<StringUse>(node->child1());
            else if (node->child1()->shouldSpeculateCell())
                fixEdge<CellUse>(node->child1());
            break;
        }

        case CompareEqConstant: {
            break;
        }

        case CompareEq:
        case CompareLess:
        case CompareLessEq:
        case CompareGreater:
        case CompareGreaterEq: {
            // Try progressively wider numeric speculations; each successful case
            // also drops MustGenerate/ClobbersWorld since the speculated compare
            // is pure.
            if (node->op() == CompareEq
                && Node::shouldSpeculateBoolean(node->child1().node(), node->child2().node())) {
                fixEdge<BooleanUse>(node->child1());
                fixEdge<BooleanUse>(node->child2());
                node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
                break;
            }
            if (Node::shouldSpeculateInt32OrBoolean(node->child1().node(), node->child2().node())) {
                fixIntOrBooleanEdge(node->child1());
                fixIntOrBooleanEdge(node->child2());
                node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
                break;
            }
            if (enableInt52()
                && Node::shouldSpeculateMachineInt(node->child1().node(), node->child2().node())) {
                fixEdge<Int52RepUse>(node->child1());
                fixEdge<Int52RepUse>(node->child2());
                node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
                break;
            }
            if (Node::shouldSpeculateNumberOrBoolean(node->child1().node(), node->child2().node())) {
                fixDoubleOrBooleanEdge(node->child1());
                fixDoubleOrBooleanEdge(node->child2());
                node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
                break;
            }
            // The string/object speculations below only apply to equality.
            if (node->op() != CompareEq)
                break;
            if (node->child1()->shouldSpeculateStringIdent() && node->child2()->shouldSpeculateStringIdent()) {
                fixEdge<StringIdentUse>(node->child1());
                fixEdge<StringIdentUse>(node->child2());
                node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
                break;
            }
            if (node->child1()->shouldSpeculateString() && node->child2()->shouldSpeculateString() && GPRInfo::numberOfRegisters >= 7) {
                fixEdge<StringUse>(node->child1());
                fixEdge<StringUse>(node->child2());
                node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
                break;
            }
            if (node->child1()->shouldSpeculateObject() && node->child2()->shouldSpeculateObject()) {
                fixEdge<ObjectUse>(node->child1());
                fixEdge<ObjectUse>(node->child2());
                node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
                break;
            }
            if (node->child1()->shouldSpeculateObject() && node->child2()->shouldSpeculateObjectOrOther()) {
                fixEdge<ObjectUse>(node->child1());
                fixEdge<ObjectOrOtherUse>(node->child2());
                node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
                break;
            }
            if (node->child1()->shouldSpeculateObjectOrOther() && node->child2()->shouldSpeculateObject()) {
                fixEdge<ObjectOrOtherUse>(node->child1());
                fixEdge<ObjectUse>(node->child2());
                node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
                break;
            }
            break;
        }

        case CompareStrictEq: {
            if (Node::shouldSpeculateBoolean(node->child1().node(), node->child2().node())) {
                fixEdge<BooleanUse>(node->child1());
                fixEdge<BooleanUse>(node->child2());
                break;
            }
            if (Node::shouldSpeculateInt32(node->child1().node(), node->child2().node())) {
                fixEdge<Int32Use>(node->child1());
                fixEdge<Int32Use>(node->child2());
                break;
            }
            if (enableInt52()
                && Node::shouldSpeculateMachineInt(node->child1().node(), node->child2().node())) {
                fixEdge<Int52RepUse>(node->child1());
                fixEdge<Int52RepUse>(node->child2());
                break;
            }
            if (Node::shouldSpeculateNumber(node->child1().node(), node->child2().node())) {
                fixEdge<DoubleRepUse>(node->child1());
                fixEdge<DoubleRepUse>(node->child2());
                break;
            }
            if (node->child1()->shouldSpeculateStringIdent() && node->child2()->shouldSpeculateStringIdent()) {
                fixEdge<StringIdentUse>(node->child1());
                fixEdge<StringIdentUse>(node->child2());
                break;
            }
            // The register-count guards gate speculation paths that need many GPRs
            // in the DFG backend; the FTL has no such constraint.
            if (node->child1()->shouldSpeculateString() && node->child2()->shouldSpeculateString() && (GPRInfo::numberOfRegisters >= 7 || isFTL(m_graph.m_plan.mode))) {
                fixEdge<StringUse>(node->child1());
                fixEdge<StringUse>(node->child2());
                break;
            }
            if (node->child1()->shouldSpeculateObject() && node->child2()->shouldSpeculateObject()) {
                fixEdge<ObjectUse>(node->child1());
                fixEdge<ObjectUse>(node->child2());
                break;
            }
            if (node->child1()->shouldSpeculateMisc()) {
                fixEdge<MiscUse>(node->child1());
                break;
            }
            if (node->child2()->shouldSpeculateMisc()) {
                fixEdge<MiscUse>(node->child2());
                break;
            }
            if (node->child1()->shouldSpeculateStringIdent()
                && node->child2()->shouldSpeculateNotStringVar()) {
                fixEdge<StringIdentUse>(node->child1());
                fixEdge<NotStringVarUse>(node->child2());
                break;
            }
            if (node->child2()->shouldSpeculateStringIdent()
                && node->child1()->shouldSpeculateNotStringVar()) {
                fixEdge<StringIdentUse>(node->child2());
                fixEdge<NotStringVarUse>(node->child1());
                break;
            }
            if (node->child1()->shouldSpeculateString() && (GPRInfo::numberOfRegisters >= 8 || isFTL(m_graph.m_plan.mode))) {
                fixEdge<StringUse>(node->child1());
                break;
            }
            if (node->child2()->shouldSpeculateString() && (GPRInfo::numberOfRegisters >= 8 || isFTL(m_graph.m_plan.mode))) {
                fixEdge<StringUse>(node->child2());
                break;
            }
            break;
        }

        case StringFromCharCode:
            fixEdge<Int32Use>(node->child1());
            break;

        case StringCharAt:
        case StringCharCodeAt: {
            // Currently we have no good way of refining these.
            ASSERT(node->arrayMode() == ArrayMode(Array::String));
            blessArrayOperation(node->child1(), node->child2(), node->child3());
            fixEdge<KnownCellUse>(node->child1());
            fixEdge<Int32Use>(node->child2());
            break;
        }

        case GetByVal: {
            // Refine the array mode from the operand predictions, then let
            // blessArrayOperation insert CheckArray/Arrayify/storage nodes.
            node->setArrayMode(
                node->arrayMode().refine(
                    m_graph, node,
                    node->child1()->prediction(),
                    node->child2()->prediction(),
                    SpecNone, node->flags()));

            blessArrayOperation(node->child1(), node->child2(), node->child3());

            ArrayMode arrayMode = node->arrayMode();
            switch (arrayMode.type()) {
            case Array::Double:
                // SaneChain lets an in-bounds hole read produce undefined without
                // a structure check, provided the prototype chain is sane and the
                // result is used purely as a number.
                if (arrayMode.arrayClass() == Array::OriginalArray
                    && arrayMode.speculation() == Array::InBounds
                    && m_graph.globalObjectFor(node->origin.semantic)->arrayPrototypeChainIsSane()
                    && !(node->flags() & NodeBytecodeUsesAsOther))
                    node->setArrayMode(arrayMode.withSpeculation(Array::SaneChain));
                break;

            case Array::String:
                if ((node->prediction() & ~SpecString)
                    || m_graph.hasExitSite(node->origin.semantic, OutOfBounds))
                    node->setArrayMode(arrayMode.withSpeculation(Array::OutOfBounds));
                break;

            default:
                break;
            }

            arrayMode = node->arrayMode();
            switch (arrayMode.type()) {
            case Array::SelectUsingPredictions:
            case Array::Unprofiled:
            case Array::Undecided:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            case Array::Generic:
#if USE(JSVALUE32_64)
                fixEdge<CellUse>(node->child1()); // Speculating cell due to register pressure on 32-bit.
#endif
                break;
            case Array::ForceExit:
                break;
            default:
                fixEdge<KnownCellUse>(node->child1());
                fixEdge<Int32Use>(node->child2());
                break;
            }

            // Pick the result representation implied by the element type.
            switch (arrayMode.type()) {
            case Array::Double:
                if (!arrayMode.isOutOfBounds())
                    node->setResult(NodeResultDouble);
                break;

            case Array::Float32Array:
            case Array::Float64Array:
                node->setResult(NodeResultDouble);
                break;

            case Array::Uint32Array:
                // Uint32 loads may not fit in int32; fall back to Int52 or double.
                if (node->shouldSpeculateInt32())
                    break;
                if (node->shouldSpeculateMachineInt() && enableInt52())
                    node->setResult(NodeResultInt52);
                else
                    node->setResult(NodeResultDouble);
                break;

            default:
                break;
            }

            break;
        }

        case PutByValDirect:
        case PutByVal:
        case PutByValAlias: {
            Edge& child1 = m_graph.varArgChild(node, 0); // base
            Edge& child2 = m_graph.varArgChild(node, 1); // property/index
            Edge& child3 = m_graph.varArgChild(node, 2); // value

            node->setArrayMode(
                node->arrayMode().refine(
                    m_graph, node,
                    child1->prediction(),
                    child2->prediction(),
                    child3->prediction()));

            blessArrayOperation(child1, child2, m_graph.varArgChild(node, 3));

            switch (node->arrayMode().modeForPut().type()) {
            case Array::SelectUsingPredictions:
            case Array::Unprofiled:
            case Array::Undecided:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            case Array::ForceExit:
            case Array::Generic:
#if USE(JSVALUE32_64)
                // Due to register pressure on 32-bit, we speculate cell and
                // ignore the base-is-not-cell case entirely by letting the
                // baseline JIT handle it.
                fixEdge<CellUse>(child1);
#endif
                break;
            case Array::Int32:
                fixEdge<KnownCellUse>(child1);
                fixEdge<Int32Use>(child2);
                fixEdge<Int32Use>(child3);
                break;
            case Array::Double:
                fixEdge<KnownCellUse>(child1);
                fixEdge<Int32Use>(child2);
                fixEdge<DoubleRepRealUse>(child3);
                break;
            case Array::Int8Array:
            case Array::Int16Array:
            case Array::Int32Array:
            case Array::Uint8Array:
            case Array::Uint8ClampedArray:
            case Array::Uint16Array:
            case Array::Uint32Array:
                fixEdge<KnownCellUse>(child1);
                fixEdge<Int32Use>(child2);
                if (child3->shouldSpeculateInt32())
                    fixIntOrBooleanEdge(child3);
                else if (child3->shouldSpeculateMachineInt())
                    fixEdge<Int52RepUse>(child3);
                else
                    fixDoubleOrBooleanEdge(child3);
                break;
            case Array::Float32Array:
            case Array::Float64Array:
                fixEdge<KnownCellUse>(child1);
                fixEdge<Int32Use>(child2);
                fixDoubleOrBooleanEdge(child3);
                break;
            case Array::Contiguous:
            case Array::ArrayStorage:
            case Array::SlowPutArrayStorage:
            case Array::Arguments:
                fixEdge<KnownCellUse>(child1);
                fixEdge<Int32Use>(child2);
                // Storing a JSValue into the heap needs a GC store barrier on the base.
                insertStoreBarrier(m_indexInBlock, child1);
                break;
            default:
                fixEdge<KnownCellUse>(child1);
                fixEdge<Int32Use>(child2);
                break;
            }
            break;
        }

        case ArrayPush: {
            // May need to refine the array mode in case the value prediction contravenes
            // the array prediction. For example, we may have evidence showing that the
            // array is in Int32 mode, but the value we're storing is likely to be a double.
            // Then we should turn this into a conversion to Double array followed by the
            // push. On the other hand, we absolutely don't want to refine based on the
            // base prediction. If it has non-cell garbage in it, then we want that to be
            // ignored. That's because ArrayPush can't handle any array modes that aren't
            // array-related - so if refine() turned this into a "Generic" ArrayPush then
            // that would break things.
            node->setArrayMode(
                node->arrayMode().refine(
                    m_graph, node,
                    node->child1()->prediction() & SpecCell,
                    SpecInt32,
                    node->child2()->prediction()));
            blessArrayOperation(node->child1(), Edge(), node->child3());
            fixEdge<KnownCellUse>(node->child1());

            switch (node->arrayMode().type()) {
            case Array::Int32:
                fixEdge<Int32Use>(node->child2());
                break;
            case Array::Double:
                fixEdge<DoubleRepRealUse>(node->child2());
                break;
            case Array::Contiguous:
            case Array::ArrayStorage:
                insertStoreBarrier(m_indexInBlock, node->child1());
                break;
            default:
                break;
            }
            break;
        }

        case ArrayPop: {
            blessArrayOperation(node->child1(), Edge(), node->child2());
            fixEdge<KnownCellUse>(node->child1());
            break;
        }

        case RegExpExec:
        case RegExpTest: {
            fixEdge<CellUse>(node->child1());
            fixEdge<CellUse>(node->child2());
            break;
        }

        case Branch: {
            if (node->child1()->shouldSpeculateBoolean())
                fixEdge<BooleanUse>(node->child1());
            else if (node->child1()->shouldSpeculateObjectOrOther())
                fixEdge<ObjectOrOtherUse>(node->child1());
            // FIXME: We should just be able to do shouldSpeculateInt32OrBoolean() and
            // shouldSpeculateNumberOrBoolean() here, but we can't because then the Branch
            // could speculate on the result of a non-speculative conversion node.
            // https://bugs.webkit.org/show_bug.cgi?id=126778
            else if (node->child1()->shouldSpeculateInt32())
                fixEdge<Int32Use>(node->child1());
            else if (node->child1()->shouldSpeculateNumber())
                fixEdge<DoubleRepUse>(node->child1());

            // Peephole: Branch(LogicalNot(x)) -> Branch(x) with the targets swapped,
            // but only if no node between the LogicalNot and the Branch can OSR exit.
            Node* logicalNot = node->child1().node();
            if (logicalNot->op() == LogicalNot) {

                // Make sure that OSR exit can't observe the LogicalNot. If it can,
                // then we must compute it and cannot peephole around it.
                bool found = false;
                bool ok = true;
                for (unsigned i = m_indexInBlock; i--;) {
                    Node* candidate = m_block->at(i);
                    if (candidate == logicalNot) {
                        found = true;
                        break;
                    }
                    if (candidate->canExit()) {
                        ok = false;
                        found = true;
                        break;
                    }
                }
                ASSERT_UNUSED(found, found);

                if (ok) {
                    Edge newChildEdge = logicalNot->child1();
                    if (newChildEdge->hasBooleanResult()) {
                        node->children.setChild1(newChildEdge);

                        BranchData* data = node->branchData();
                        std::swap(data->taken, data->notTaken);
                    }
                }
            }
            break;
        }

        case Switch: {
            SwitchData* data = node->switchData();
            switch (data->kind) {
            case SwitchImm:
                if (node->child1()->shouldSpeculateInt32())
                    fixEdge<Int32Use>(node->child1());
                break;
            case SwitchChar:
                if (node->child1()->shouldSpeculateString())
                    fixEdge<StringUse>(node->child1());
                break;
            case SwitchString:
                if (node->child1()->shouldSpeculateStringIdent())
                    fixEdge<StringIdentUse>(node->child1());
                else if (node->child1()->shouldSpeculateString())
                    fixEdge<StringUse>(node->child1());
                break;
            }
            break;
        }

        case ToPrimitive: {
            fixupToPrimitive(node);
            break;
        }

        case ToString: {
            fixupToString(node);
            break;
        }

        case NewStringObject: {
            fixEdge<KnownStringUse>(node->child1());
            break;
        }

        case NewArray: {
            // Widen the indexing type to cover every element's prediction, then
            // fix the element edges to match the chosen indexing type.
            for (unsigned i = m_graph.varArgNumChildren(node); i--;) {
                node->setIndexingType(
                    leastUpperBoundOfIndexingTypeAndType(
                        node->indexingType(), m_graph.varArgChild(node, i)->prediction()));
            }
            switch (node->indexingType()) {
            case ALL_BLANK_INDEXING_TYPES:
                CRASH();
                break;
            case ALL_UNDECIDED_INDEXING_TYPES:
                if (node->numChildren()) {
                    // This will only happen if the children have no type predictions. We
                    // would have already exited by now, but insert a forced exit just to
                    // be safe.
                    m_insertionSet.insertNode(
                        m_indexInBlock, SpecNone, ForceOSRExit, node->origin);
                }
                break;
            case ALL_INT32_INDEXING_TYPES:
                for (unsigned operandIndex = 0; operandIndex < node->numChildren(); ++operandIndex)
                    fixEdge<Int32Use>(m_graph.m_varArgChildren[node->firstChild() + operandIndex]);
                break;
            case ALL_DOUBLE_INDEXING_TYPES:
                for (unsigned operandIndex = 0; operandIndex < node->numChildren(); ++operandIndex)
                    fixEdge<DoubleRepRealUse>(m_graph.m_varArgChildren[node->firstChild() + operandIndex]);
                break;
            case ALL_CONTIGUOUS_INDEXING_TYPES:
            case ALL_ARRAY_STORAGE_INDEXING_TYPES:
                break;
            default:
                CRASH();
                break;
            }
            break;
        }

        case NewTypedArray: {
            if (node->child1()->shouldSpeculateInt32()) {
                fixEdge<Int32Use>(node->child1());
                node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
                break;
            }
            break;
        }

        case NewArrayWithSize: {
            fixEdge<Int32Use>(node->child1());
            break;
        }

        case ToThis: {
            ECMAMode ecmaMode = m_graph.executableFor(node->origin.semantic)->isStrictMode() ? StrictMode : NotStrictMode;

            if (node->child1()->shouldSpeculateOther()) {
                // In strict mode, |this| stays undefined/null, so ToThis is the identity.
                if (ecmaMode == StrictMode) {
                    fixEdge<OtherUse>(node->child1());
                    node->convertToIdentity();
                    break;
                }

                // In sloppy mode, undefined/null |this| becomes the global this object;
                // keep the speculation alive via a Phantom and fold to a weak constant.
                m_insertionSet.insertNode(
                    m_indexInBlock, SpecNone, Phantom, node->origin,
                    Edge(node->child1().node(), OtherUse));
                observeUseKindOnNode<OtherUse>(node->child1().node());
                node->convertToWeakConstant(m_graph.globalThisObjectFor(node->origin.semantic));
                break;
            }

            if (isFinalObjectSpeculation(node->child1()->prediction())) {
                fixEdge<FinalObjectUse>(node->child1());
                node->convertToIdentity();
                break;
            }

            break;
        }

        case GetMyArgumentByVal:
        case GetMyArgumentByValSafe: {
            fixEdge<Int32Use>(node->child1());
            break;
        }

        case PutStructure: {
            fixEdge<KnownCellUse>(node->child1());
            insertStoreBarrier(m_indexInBlock, node->child1());
            break;
        }

        case PutClosureVar: {
            fixEdge<KnownCellUse>(node->child1());
            insertStoreBarrier(m_indexInBlock, node->child1());
            break;
        }

        case GetClosureRegisters:
        case SkipTopScope:
        case SkipScope:
        case GetScope: {
            fixEdge<KnownCellUse>(node->child1());
            break;
        }

        case AllocatePropertyStorage:
        case ReallocatePropertyStorage: {
            fixEdge<KnownCellUse>(node->child1());
            // Barrier goes AFTER the allocation node (m_indexInBlock + 1), since the
            // object only points at the new storage once the node has executed.
            insertStoreBarrier(m_indexInBlock + 1, node->child1());
            break;
        }

        case GetById:
        case GetByIdFlush: {
            if (!node->child1()->shouldSpeculateCell())
                break;
            // Intrinsic properties: try to turn length/byteLength/byteOffset
            // accesses into dedicated array/typed-array nodes.
            StringImpl* impl = m_graph.identifiers()[node->identifierNumber()];
            if (impl == vm().propertyNames->length.impl()) {
                attemptToMakeGetArrayLength(node);
                break;
            }
            if (impl == vm().propertyNames->byteLength.impl()) {
                attemptToMakeGetTypedArrayByteLength(node);
                break;
            }
            if (impl == vm().propertyNames->byteOffset.impl()) {
                attemptToMakeGetTypedArrayByteOffset(node);
                break;
            }
            fixEdge<CellUse>(node->child1());
            break;
        }

        case PutById:
        case PutByIdFlush:
        case PutByIdDirect: {
            fixEdge<CellUse>(node->child1());
            insertStoreBarrier(m_indexInBlock, node->child1());
            break;
        }

        case CheckExecutable:
        case CheckStructure:
        case StructureTransitionWatchpoint:
        case CheckFunction:
        case CheckHasInstance:
        case CreateThis:
        case GetButterfly: {
            fixEdge<CellUse>(node->child1());
            break;
        }

        case Arrayify:
        case ArrayifyToStructure: {
            fixEdge<CellUse>(node->child1());
            if (node->child2())
                fixEdge<Int32Use>(node->child2());
            break;
        }

        case GetByOffset: {
            if (!node->child1()->hasStorageResult())
                fixEdge<KnownCellUse>(node->child1());
            fixEdge<KnownCellUse>(node->child2());
            break;
        }

        case MultiGetByOffset: {
            fixEdge<CellUse>(node->child1());
            break;
        }

        case PutByOffset: {
            if (!node->child1()->hasStorageResult())
                fixEdge<KnownCellUse>(node->child1());
            fixEdge<KnownCellUse>(node->child2());
            insertStoreBarrier(m_indexInBlock, node->child2());
            break;
        }

        case MultiPutByOffset: {
            fixEdge<CellUse>(node->child1());
            insertStoreBarrier(m_indexInBlock, node->child1());
            break;
        }

        case InstanceOf: {
            if (!(node->child1()->prediction() & ~SpecCell))
                fixEdge<CellUse>(node->child1());
            fixEdge<CellUse>(node->child2());
            break;
        }

        case In: {
            // FIXME: We should at some point have array profiling on op_in, in which
            // case we would be able to turn this into a kind of GetByVal.

            fixEdge<CellUse>(node->child2());
            break;
        }

        case Phantom:
        case Check: {
            switch (node->child1().useKind()) {
            case NumberUse:
                if (node->child1()->shouldSpeculateInt32ForArithmetic())
                    node->child1().setUseKind(Int32Use);
                break;
            default:
                break;
            }
            observeUseKindOnEdge(node->child1());
            break;
        }

        case FiatInt52: {
            RELEASE_ASSERT(enableInt52());
            node->convertToIdentity();
            fixEdge<Int52RepUse>(node->child1());
            node->setResult(NodeResultInt52);
            break;
        }

        case GetArrayLength:
        case Phi:
        case Upsilon:
        case GetArgument:
        case PhantomPutStructure:
        case GetIndexedPropertyStorage:
        case GetTypedArrayByteOffset:
        case LastNodeType:
        case CheckTierUpInLoop:
        case CheckTierUpAtReturn:
        case CheckTierUpAndOSREnter:
        case InvalidationPoint:
        case CheckArray:
        case CheckInBounds:
        case ConstantStoragePointer:
        case DoubleAsInt32:
        case ValueToInt32:
        case HardPhantom: // HardPhantom would be trivial to handle but anyway we assert that we won't see it here yet.
        case DoubleRep:
        case ValueRep:
        case Int52Rep:
        case DoubleConstant:
        case Int52Constant:
        case Identity: // This should have been cleaned up.
        case BooleanToNumber:
            // These are just nodes that we don't currently expect to see during fixup.
            // If we ever wanted to insert them prior to fixup, then we just have to create
            // fixup rules for them.
            RELEASE_ASSERT_NOT_REACHED();
            break;

        case PutGlobalVar: {
            // Global variable stores barrier on the global object, which must be
            // materialized as a weak constant first.
            Node* globalObjectNode = m_insertionSet.insertNode(
                m_indexInBlock, SpecNone, WeakJSConstant, node->origin,
                OpInfo(m_graph.globalObjectFor(node->origin.semantic)));
            Node* barrierNode = m_graph.addNode(
                SpecNone, StoreBarrier, m_currentNode->origin,
                Edge(globalObjectNode, KnownCellUse));
            m_insertionSet.insert(m_indexInBlock, barrierNode);
            break;
        }

        case TearOffActivation: {
            // The activation may be null here, hence the null-checking barrier.
            Node* barrierNode = m_graph.addNode(
                SpecNone, StoreBarrierWithNullCheck, m_currentNode->origin,
                Edge(node->child1().node(), UntypedUse));
            m_insertionSet.insert(m_indexInBlock, barrierNode);
            break;
        }

        case IsString:
            // If the operand is known to be a string, fold to the constant true
            // while keeping the speculation via a Phantom.
            if (node->child1()->shouldSpeculateString()) {
                m_insertionSet.insertNode(
                    m_indexInBlock, SpecNone, Phantom, node->origin,
                    Edge(node->child1().node(), StringUse));
                m_graph.convertToConstant(node, jsBoolean(true));
                observeUseKindOnNode<StringUse>(node);
            }
            break;

#if !ASSERT_DISABLED
        // Have these no-op cases here to ensure that nobody forgets to add handlers for new opcodes.
        case SetArgument:
        case JSConstant:
        case WeakJSConstant:
        case GetLocal:
        case GetCallee:
        case Flush:
        case PhantomLocal:
        case GetLocalUnlinked:
        case GetMyScope:
        case GetClosureVar:
        case GetGlobalVar:
        case NotifyWrite:
        case VariableWatchpoint:
        case VarInjectionWatchpoint:
        case AllocationProfileWatchpoint:
        case Call:
        case Construct:
        case NewObject:
        case NewArrayBuffer:
        case NewRegexp:
        case Breakpoint:
        case ProfileWillCall:
        case ProfileDidCall:
        case IsUndefined:
        case IsBoolean:
        case IsNumber:
        case IsObject:
        case IsFunction:
        case CreateActivation:
        case CreateArguments:
        case PhantomArguments:
        case TearOffArguments:
        case GetMyArgumentsLength:
        case GetMyArgumentsLengthSafe:
        case CheckArgumentsNotCreated:
        case NewFunction:
        case NewFunctionNoCheck:
        case NewFunctionExpression:
        case Jump:
        case Return:
        case Throw:
        case ThrowReferenceError:
        case CountExecution:
        case ForceOSRExit:
        case CheckWatchdogTimer:
        case Unreachable:
        case ExtractOSREntryLocal:
        case LoopHint:
        case StoreBarrier:
        case StoreBarrierWithNullCheck:
        case FunctionReentryWatchpoint:
        case TypedArrayWatchpoint:
        case MovHint:
        case ZombieHint:
            break;
#else
        default:
            break;
#endif
        }
    }

    // Replaces the node currently referenced by |edge| with a newly inserted
    // ToString of it (under the given use kind), placed before the current node.
    template<UseKind useKind>
    void createToString(Node* node, Edge& edge)
    {
        edge.setNode(m_insertionSet.insertNode(
            m_indexInBlock, SpecString, ToString, node->origin,
            Edge(edge.node(), useKind)));
    }

    // NOTE: this definition continues past the end of this chunk.
    template<UseKind useKind>
    void attemptToForceStringArrayModeByToStringConversion(ArrayMode& arrayMode, Node* node)
    {
        ASSERT(arrayMode == ArrayMode(Array::Generic));

        if (!canOptimizeStringObjectAccess(node->origin.semantic))
            return;

        createToString<useKind>(node, 
            node->child1());
        arrayMode = ArrayMode(Array::String);
    }

    // Compile-time predicate: does this use kind involve StringObject?
    template<UseKind useKind>
    bool isStringObjectUse()
    {
        switch (useKind) {
        case StringObjectUse:
        case StringOrStringObjectUse:
            return true;
        default:
            return false;
        }
    }

    // Prepares one operand of a string addition: already-string operands get a
    // Phantom check and a KnownString edge; everything else is routed through a
    // ToString conversion.
    template<UseKind useKind>
    void convertStringAddUse(Node* node, Edge& edge)
    {
        if (useKind == StringUse) {
            // This preserves the binaryUseKind() invariant of ValueAdd: ValueAdd's
            // two edges will always have identical use kinds, which makes the
            // decision process much easier.
            observeUseKindOnNode<StringUse>(edge.node());
            m_insertionSet.insertNode(
                m_indexInBlock, SpecNone, Phantom, node->origin,
                Edge(edge.node(), StringUse));
            edge.setUseKind(KnownStringUse);
            return;
        }

        // FIXME: We ought to be able to have a ToPrimitiveToString node.

        observeUseKindOnNode<useKind>(edge.node());
        createToString<useKind>(node, edge);
    }

    void convertToMakeRope(Node* node)
    {
        node->setOpAndDefaultFlags(MakeRope);
        fixupMakeRope(node);
    }

    // Marks each MakeRope child as KnownString and prunes children that are
    // constant empty strings; a MakeRope left with a single child becomes an
    // Identity.
    void fixupMakeRope(Node* node)
    {
        for (unsigned i = 0; i < AdjacencyList::Size; ++i) {
            Edge& edge = node->children.child(i);
            if (!edge)
                break;
            edge.setUseKind(KnownStringUse);
            if (!m_graph.isConstant(edge.node()))
                continue;
            JSString* string = jsCast<JSString*>(m_graph.valueOfJSConstant(edge.node()).asCell());
            if (string->length())
                continue;

            // Don't allow the MakeRope to have zero children.
            if (!i && !node->child2())
                break;

            // Remove this empty-string child and re-examine the slot it
            // vacated (removeEdge shifts the remaining children down).
            node->children.removeEdge(i--);
        }

        if (!node->child2()) {
            ASSERT(!node->child3());
            node->convertToIdentity();
        }
    }

    // Chooses a speculation for ToPrimitive based on child1's prediction; for
    // int32/string inputs ToPrimitive is the identity.
    void fixupToPrimitive(Node* node)
    {
        if (node->child1()->shouldSpeculateInt32()) {
            fixEdge<Int32Use>(node->child1());
            node->convertToIdentity();
            return;
        }

        if (node->child1()->shouldSpeculateString()) {
            fixEdge<StringUse>(node->child1());
            node->convertToIdentity();
            return;
        }

        if (node->child1()->shouldSpeculateStringObject()
            && canOptimizeStringObjectAccess(node->origin.semantic)) {
            fixEdge<StringObjectUse>(node->child1());
            node->convertToToString();
            return;
        }

        if (node->child1()->shouldSpeculateStringOrStringObject()
            && canOptimizeStringObjectAccess(node->origin.semantic)) {
            fixEdge<StringOrStringObjectUse>(node->child1());
            node->convertToToString();
            return;
        }
    }

    // Chooses a speculation for ToString; a string input makes it an identity,
    // otherwise narrow the use kind as far as the predictions allow.
    void fixupToString(Node* node)
    {
        if (node->child1()->shouldSpeculateString()) {
            fixEdge<StringUse>(node->child1());
            node->convertToIdentity();
            return;
        }

        if (node->child1()->shouldSpeculateStringObject()
            && canOptimizeStringObjectAccess(node->origin.semantic)) {
            fixEdge<StringObjectUse>(node->child1());
            return;
        }

        if (node->child1()->shouldSpeculateStringOrStringObject()
            && canOptimizeStringObjectAccess(node->origin.semantic)) {
            fixEdge<StringOrStringObjectUse>(node->child1());
            return;
        }

        if (node->child1()->shouldSpeculateCell()) {
            fixEdge<CellUse>(node->child1());
            return;
        }
    }

    // Tries to turn a ValueAdd whose left operand is (convertible to) a string
    // into a MakeRope. Returns false if the needed speculation is unavailable.
    template<UseKind leftUseKind>
    bool attemptToMakeFastStringAdd(Node* node, Edge& left, Edge& right)
    {
        Node* originalLeft = left.node();
        Node* originalRight = right.node();

        ASSERT(leftUseKind == StringUse || leftUseKind ==
            StringObjectUse || leftUseKind == StringOrStringObjectUse);

        if (isStringObjectUse<leftUseKind>() && !canOptimizeStringObjectAccess(node->origin.semantic))
            return false;

        convertStringAddUse<leftUseKind>(node, left);

        if (right->shouldSpeculateString())
            convertStringAddUse<StringUse>(node, right);
        else if (right->shouldSpeculateStringObject() && canOptimizeStringObjectAccess(node->origin.semantic))
            convertStringAddUse<StringObjectUse>(node, right);
        else if (right->shouldSpeculateStringOrStringObject() && canOptimizeStringObjectAccess(node->origin.semantic))
            convertStringAddUse<StringOrStringObjectUse>(node, right);
        else {
            // At this point we know that the other operand is something weird. The semantically correct
            // way of dealing with this is:
            //
            // MakeRope(@left, ToString(ToPrimitive(@right)))
            //
            // So that's what we emit. NB, we need to do all relevant type checks on @left before we do
            // anything to @right, since ToPrimitive may be effectful.

            Node* toPrimitive = m_insertionSet.insertNode(
                m_indexInBlock, resultOfToPrimitive(right->prediction()), ToPrimitive,
                node->origin, Edge(right.node()));
            Node* toString = m_insertionSet.insertNode(
                m_indexInBlock, SpecString, ToString, node->origin, Edge(toPrimitive));

            fixupToPrimitive(toPrimitive);
            fixupToString(toString);

            right.setNode(toString);
        }

        // We're doing checks up there, so we need to make sure that the
        // *original* inputs to the addition are live up to here.
        m_insertionSet.insertNode(
            m_indexInBlock, SpecNone, Phantom, node->origin,
            Edge(originalLeft), Edge(originalRight));

        convertToMakeRope(node);
        return true;
    }

    // Returns true only if the named property on the String prototype is a
    // JSFunction whose intrinsic is StringPrototypeValueOfIntrinsic, i.e. it
    // behaves like the original String.prototype.valueOf.
    bool isStringPrototypeMethodSane(Structure* stringPrototypeStructure, StringImpl* uid)
    {
        unsigned attributesUnused;
        JSCell* specificValue;
        // getConcurrently is safe to call from the compiler thread.
        PropertyOffset offset = stringPrototypeStructure->getConcurrently(
            vm(), uid, attributesUnused, specificValue);
        if (!isValidOffset(offset))
            return false;

        if (!specificValue)
            return false;

        if (!specificValue->inherits(JSFunction::info()))
            return false;

        JSFunction* function = jsCast<JSFunction*>(specificValue);
        if (function->executable()->intrinsicFor(CodeForCall) != StringPrototypeValueOfIntrinsic)
            return false;

        return true;
    }

    // Decides whether StringObject accesses at this code origin can be lowered
    // to direct string operations: requires no prior NotStringObject exits, a
    // watchable non-dictionary String prototype, and sane valueOf/toString.
    bool canOptimizeStringObjectAccess(const CodeOrigin& codeOrigin)
    {
        if (m_graph.hasExitSite(codeOrigin, NotStringObject))
            return false;

        Structure* stringObjectStructure = m_graph.globalObjectFor(codeOrigin)->stringObjectStructure();
        ASSERT(stringObjectStructure->storedPrototype().isObject());
        ASSERT(stringObjectStructure->storedPrototype().asCell()->classInfo() == StringPrototype::info());

        JSObject* stringPrototypeObject = asObject(stringObjectStructure->storedPrototype());
        Structure* stringPrototypeStructure = stringPrototypeObject->structure();
        if (!m_graph.watchpoints().isStillValid(stringPrototypeStructure->transitionWatchpointSet()))
            return false;

        if (stringPrototypeStructure->isDictionary())
            return false;

        // We're being conservative here. We want DFG's ToString on StringObject to be
        // used in both numeric contexts (that would call valueOf()) and string contexts
        // (that would call toString()). We don't want the DFG to have to distinguish
        // between the two, just because that seems like it would get confusing. So we
        // just require both methods to be sane.
        if (!isStringPrototypeMethodSane(stringPrototypeStructure, vm().propertyNames->valueOf.impl()))
            return false;
        if (!isStringPrototypeMethodSane(stringPrototypeStructure, vm().propertyNames->toString.impl()))
            return false;

        return true;
    }

    // Second fixup pass over a block: align GetLocal results and SetLocal edge
    // use kinds with each variable's agreed flush format.
    void fixupGetAndSetLocalsInBlock(BasicBlock* block)
    {
        if (!block)
            return;
        ASSERT(block->isReachable);
        m_block = block;
        for (m_indexInBlock = 0; m_indexInBlock < block->size(); ++m_indexInBlock) {
            Node* node = m_currentNode = block->at(m_indexInBlock);
            if (node->op() != SetLocal && node->op() != GetLocal)
                continue;

            VariableAccessData* variable = node->variableAccessData();
            switch (node->op()) {
            case GetLocal:
                switch (variable->flushFormat()) {
                case FlushedDouble:
                    node->setResult(NodeResultDouble);
                    break;
                case FlushedInt52:
                    node->setResult(NodeResultInt52);
                    break;
                default:
                    break;
                }
                break;

            case SetLocal:
                switch (variable->flushFormat()) {
                case FlushedJSValue:
                    break;
                case FlushedDouble:
                    fixEdge<DoubleRepUse>(node->child1());
                    break;
                case FlushedInt32:
                    fixEdge<Int32Use>(node->child1());
                    break;
                case FlushedInt52:
                    fixEdge<Int52RepUse>(node->child1());
                    break;
                case FlushedCell:
                    fixEdge<CellUse>(node->child1());
                    break;
                case FlushedBoolean:
                    fixEdge<BooleanUse>(node->child1());
                    break;
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                    break;
                }
                break;

            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
        }
        m_insertionSet.execute(block);
    }

    // Inserts the checks/conversions implied by an array mode and, when the
    // mode needs it, returns a node producing the backing storage (null if the
    // storageCheck predicate says none is needed).
    Node* checkArray(ArrayMode arrayMode, const NodeOrigin& origin, Node* array, Node* index, bool (*storageCheck)(const
        ArrayMode&) = canCSEStorage)
    {
        ASSERT(arrayMode.isSpecific());

        if (arrayMode.type() == Array::String) {
            // Strings just need a type check; keep it alive as a Phantom.
            m_insertionSet.insertNode(
                m_indexInBlock, SpecNone, Phantom, origin, Edge(array, StringUse));
        } else {
            Structure* structure = arrayMode.originalArrayStructure(m_graph, origin.semantic);

            Edge indexEdge = index ? Edge(index, Int32Use) : Edge();

            if (arrayMode.doesConversion()) {
                // The mode may convert the object's indexing type; pick the
                // structure-targeted variant when we know the target structure.
                if (structure) {
                    m_insertionSet.insertNode(
                        m_indexInBlock, SpecNone, ArrayifyToStructure, origin,
                        OpInfo(structure), OpInfo(arrayMode.asWord()), Edge(array, CellUse), indexEdge);
                } else {
                    m_insertionSet.insertNode(
                        m_indexInBlock, SpecNone, Arrayify, origin,
                        OpInfo(arrayMode.asWord()), Edge(array, CellUse), indexEdge);
                }
            } else {
                // No conversion: just check the structure or the array mode.
                if (structure) {
                    m_insertionSet.insertNode(
                        m_indexInBlock, SpecNone, CheckStructure, origin,
                        OpInfo(m_graph.addStructureSet(structure)), Edge(array, CellUse));
                } else {
                    m_insertionSet.insertNode(
                        m_indexInBlock, SpecNone, CheckArray, origin,
                        OpInfo(arrayMode.asWord()), Edge(array, CellUse));
                }
            }
        }

        if (!storageCheck(arrayMode))
            return 0;

        if (arrayMode.usesButterfly()) {
            return m_insertionSet.insertNode(
                m_indexInBlock, SpecNone, GetButterfly, origin, Edge(array, CellUse));
        }

        return m_insertionSet.insertNode(
            m_indexInBlock, SpecNone, GetIndexedPropertyStorage, origin,
            OpInfo(arrayMode.asWord()), Edge(array, KnownCellUse));
    }

    // Runs checkArray for the current node's array mode and wires the storage
    // result (if any) into storageChild.
    void blessArrayOperation(Edge base, Edge index, Edge& storageChild)
    {
        Node* node = m_currentNode;

        switch (node->arrayMode().type()) {
        case Array::ForceExit: {
            // No useful mode was observed: force an OSR exit at this point.
            m_insertionSet.insertNode(
                m_indexInBlock, SpecNone, ForceOSRExit, node->origin);
            return;
        }

        case Array::SelectUsingPredictions:
        case Array::Unprofiled:
            // These should have been refined to a concrete mode before fixup.
            RELEASE_ASSERT_NOT_REACHED();
            return;

        case Array::Generic:
            return;

        default: {
            Node* storage = checkArray(node->arrayMode(), node->origin, base.node(), index.node());
            if (!storage)
                return;

            storageChild = Edge(storage);
            return;
        } }
    }

    bool alwaysUnboxSimplePrimitives()
    {
#if USE(JSVALUE64)
        return false;
#else
        // Any boolean, int, or cell value is profitable to unbox on 32-bit because it
        // reduces traffic.
        return true;
#endif
    }

    // Compile-time-dispatched wrapper; UntypedUse edges carry no information.
    template<UseKind useKind>
    void observeUseKindOnNode(Node* node)
    {
        if (useKind == UntypedUse)
            return;
        observeUseKindOnNode(node, useKind);
    }

    void observeUseKindOnEdge(Edge edge)
    {
        observeUseKindOnNode(edge.node(), edge.useKind());
    }

    // Records, on the variable behind a GetLocal, that unboxing it to match
    // this use kind would be profitable; feeds the fixpoint in run().
    void observeUseKindOnNode(Node* node, UseKind useKind)
    {
        if (node->op() != GetLocal)
            return;

        // FIXME: The way this uses alwaysUnboxSimplePrimitives() is suspicious.
        // https://bugs.webkit.org/show_bug.cgi?id=121518

        VariableAccessData* variable = node->variableAccessData();
        switch (useKind) {
        case Int32Use:
            if (alwaysUnboxSimplePrimitives()
                || isInt32Speculation(variable->prediction()))
                m_profitabilityChanged |= variable->mergeIsProfitableToUnbox(true);
            break;
        case NumberUse:
        case DoubleRepUse:
        case DoubleRepRealUse:
            if (variable->doubleFormatState() == UsingDoubleFormat)
                m_profitabilityChanged |= variable->mergeIsProfitableToUnbox(true);
            break;
        case BooleanUse:
            if (alwaysUnboxSimplePrimitives()
                || isBooleanSpeculation(variable->prediction()))
                m_profitabilityChanged |= variable->mergeIsProfitableToUnbox(true);
            break;
        case Int52RepUse:
            if (isMachineIntSpeculation(variable->prediction()))
                m_profitabilityChanged |= variable->mergeIsProfitableToUnbox(true);
            break;
        case CellUse:
        case KnownCellUse:
        case ObjectUse:
        case StringUse:
        case KnownStringUse:
        case StringObjectUse:
        case StringOrStringObjectUse:
            if (alwaysUnboxSimplePrimitives()
                || isCellSpeculation(variable->prediction()))
                m_profitabilityChanged |= variable->mergeIsProfitableToUnbox(true);
            break;
        default:
            break;
        }
    }

    // Sets an edge's use kind and records the unboxing profitability it implies.
    template<UseKind useKind>
    void fixEdge(Edge& edge)
    {
        observeUseKindOnNode<useKind>(edge.node());
        edge.setUseKind(useKind);
    }

    // Inserts a StoreBarrier on child1 before the node at indexInBlock.
    void insertStoreBarrier(unsigned indexInBlock, Edge child1)
    {
        Node* barrierNode = m_graph.addNode(SpecNone, StoreBarrier, m_currentNode->origin, child1);
        m_insertionSet.insert(indexInBlock, barrierNode);
    }

    // Fixes an edge that feeds an int32-converting operation: already-int32ish
    // inputs are used directly; anything else goes through ValueToInt32.
    void fixIntConvertingEdge(Edge& edge)
    {
        Node* node = edge.node();
        if (node->shouldSpeculateInt32OrBoolean()) {
            fixIntOrBooleanEdge(edge);
            return;
        }

        UseKind useKind;
        if (node->shouldSpeculateMachineInt())
            useKind = Int52RepUse;
        else if (node->shouldSpeculateNumber())
            useKind = DoubleRepUse;
        else
            useKind = NotCellUse;
        Node* newNode = m_insertionSet.insertNode(
            m_indexInBlock, SpecInt32, ValueToInt32, m_currentNode->origin,
            Edge(node, useKind));
        observeUseKindOnNode(node, useKind);

        edge = Edge(newNode, KnownInt32Use);
        // Keep the original value alive past the conversion.
        addRequiredPhantom(node);
    }

    // Like fixEdge<Int32Use>, but tolerates booleans by inserting a
    // BooleanToNumber conversion when the input has been seen to be boolean.
    void fixIntOrBooleanEdge(Edge& edge)
    {
        Node* node = edge.node();
        if (!node->sawBooleans()) {
            fixEdge<Int32Use>(edge);
            return;
        }

        UseKind useKind;
        if (node->shouldSpeculateBoolean())
            useKind = BooleanUse;
        else
            useKind = UntypedUse;
        Node* newNode = m_insertionSet.insertNode(
            m_indexInBlock, SpecInt32, BooleanToNumber, m_currentNode->origin,
            Edge(node, useKind));
        observeUseKindOnNode(node, useKind);

        edge = Edge(newNode, Int32Use);
        addRequiredPhantom(node);
    }

    // Double analogue of fixIntOrBooleanEdge.
    void fixDoubleOrBooleanEdge(Edge& edge)
    {
        Node* node = edge.node();
        if (!node->sawBooleans()) {
            fixEdge<DoubleRepUse>(edge);
            return;
        }

        UseKind useKind;
        if (node->shouldSpeculateBoolean())
            useKind = BooleanUse;
        else
            useKind = UntypedUse;
        Node* newNode = m_insertionSet.insertNode(
            m_indexInBlock, SpecInt32, BooleanToNumber, m_currentNode->origin,
            Edge(node, useKind));
        observeUseKindOnNode(node, useKind);

        edge = Edge(newNode, DoubleRepUse);
        addRequiredPhantom(node);
    }

    // Replaces a non-int32 numeric constant edge with a new JSConstant holding
    // the value truncated via JSC::toInt32, registering the constant lazily.
    void truncateConstantToInt32(Edge& edge)
    {
        Node* oldNode = edge.node();

        ASSERT(oldNode->hasConstant());
        JSValue value = m_graph.valueOfJSConstant(oldNode);
        if (value.isInt32())
            return;

        value = jsNumber(JSC::toInt32(value.asNumber()));
        ASSERT(value.isInt32());
        unsigned constantRegister;
        if (!codeBlock()->findConstant(value, constantRegister)) {
            constantRegister =
                codeBlock()->addConstantLazily();
            // The constant is written back to the code block on the main
            // thread via a lazy write barrier.
            initializeLazyWriteBarrierForConstant(
                m_graph.m_plan.writeBarriers,
                codeBlock()->constants()[constantRegister],
                codeBlock(),
                constantRegister,
                codeBlock()->ownerExecutable(),
                value);
        }
        edge.setNode(m_insertionSet.insertNode(
            m_indexInBlock, SpecInt32, JSConstant, m_currentNode->origin,
            OpInfo(constantRegister)));
    }

    // When the speculation mode asks for it, truncate whichever operand is a
    // constant so the addition can stay in int32.
    void truncateConstantsIfNecessary(Node* node, AddSpeculationMode mode)
    {
        if (mode != SpeculateInt32AndTruncateConstants)
            return;

        ASSERT(node->child1()->hasConstant() || node->child2()->hasConstant());
        if (node->child1()->hasConstant())
            truncateConstantToInt32(node->child1());
        else
            truncateConstantToInt32(node->child2());
    }

    // Tries to lower an addition to int32 (possibly unchecked) or Int52
    // arithmetic; returns false if neither speculation is viable.
    bool attemptToMakeIntegerAdd(Node* node)
    {
        AddSpeculationMode mode = m_graph.addSpeculationMode(node, FixupPass);
        if (mode != DontSpeculateInt32) {
            truncateConstantsIfNecessary(node, mode);
            fixIntOrBooleanEdge(node->child1());
            fixIntOrBooleanEdge(node->child2());
            // Skip the overflow check only when bytecode would truncate anyway.
            if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
                node->setArithMode(Arith::Unchecked);
            else
                node->setArithMode(Arith::CheckOverflow);
            return true;
        }

        if (m_graph.addShouldSpeculateMachineInt(node)) {
            fixEdge<Int52RepUse>(node->child1());
            fixEdge<Int52RepUse>(node->child2());
            node->setArithMode(Arith::CheckOverflow);
            node->setResult(NodeResultInt52);
            return true;
        }

        return false;
    }

    // Tries to convert a length-producing GetById into GetArrayLength using the
    // baseline array profile; returns false if no supporting mode is found.
    bool attemptToMakeGetArrayLength(Node* node)
    {
        if (!isInt32Speculation(node->prediction()))
            return false;
        CodeBlock* profiledBlock = m_graph.baselineCodeBlockFor(node->origin.semantic);
        ArrayProfile* arrayProfile =
            profiledBlock->getArrayProfile(node->origin.semantic.bytecodeIndex);
        ArrayMode arrayMode = ArrayMode(Array::SelectUsingPredictions);
        if (arrayProfile) {
            ConcurrentJITLocker locker(profiledBlock->m_lock);
            arrayProfile->computeUpdatedPrediction(locker, profiledBlock);
            arrayMode = ArrayMode::fromObserved(locker, arrayProfile, Array::Read, false);
            if (arrayMode.type() == Array::Unprofiled) {
                // For normal array operations, it makes sense to treat Unprofiled
                // accesses as ForceExit and get more data rather than using
                // predictions and then possibly ending up with a Generic. But here,
                // we treat anything that is Unprofiled as Generic and keep the
                // GetById. I.e. ForceExit = Generic. So, there is no harm - and only
                // profit - from treating the Unprofiled case as
                // SelectUsingPredictions.
                arrayMode = ArrayMode(Array::SelectUsingPredictions);
            }
        }

        arrayMode = arrayMode.refine(
            m_graph, node, node->child1()->prediction(), node->prediction());

        if (arrayMode.type() == Array::Generic) {
            // Check if the input is something that we can't get array length for, but for which we
            // could insert some conversions in order to transform it into something that we can do it
            // for.
            if (node->child1()->shouldSpeculateStringObject())
                attemptToForceStringArrayModeByToStringConversion<StringObjectUse>(arrayMode, node);
            else if (node->child1()->shouldSpeculateStringOrStringObject())
                attemptToForceStringArrayModeByToStringConversion<StringOrStringObjectUse>(arrayMode, node);
        }

        if (!arrayMode.supportsLength())
            return false;

        convertToGetArrayLength(node, arrayMode);
        return true;
    }

    // Tries to lower a byteLength access on a typed array view to either a
    // plain GetArrayLength (element size 1) or length << logElementSize.
    bool attemptToMakeGetTypedArrayByteLength(Node* node)
    {
        if (!isInt32Speculation(node->prediction()))
            return false;

        TypedArrayType type = typedArrayTypeFromSpeculation(node->child1()->prediction());
        if (!isTypedView(type))
            return false;

        if (elementSize(type) == 1) {
            convertToGetArrayLength(node, ArrayMode(toArrayType(type)));
            return true;
        }

        Node* length = prependGetArrayLength(
            node->origin, node->child1().node(), ArrayMode(toArrayType(type)));

        Node* shiftAmount = m_insertionSet.insertNode(
            m_indexInBlock, SpecInt32, JSConstant, node->origin,
            OpInfo(m_graph.constantRegisterForConstant(jsNumber(logElementSize(type)))));

        // We can use a BitLShift here because typed arrays will never have a byteLength
        // that overflows int32.
        node->setOp(BitLShift);
        node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
        observeUseKindOnNode(length, Int32Use);
        observeUseKindOnNode(shiftAmount, Int32Use);
        node->child1() = Edge(length, Int32Use);
        node->child2() = Edge(shiftAmount, Int32Use);
        return true;
    }

    // Rewrites the node in place as GetArrayLength for the given mode, adding
    // the storage child when the mode requires it.
    void convertToGetArrayLength(Node* node, ArrayMode arrayMode)
    {
        node->setOp(GetArrayLength);
        node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
        fixEdge<KnownCellUse>(node->child1());
        node->setArrayMode(arrayMode);

        Node* storage = checkArray(arrayMode, node->origin, node->child1().node(), 0, lengthNeedsStorage);
        if (!storage)
            return;

        node->child2() = Edge(storage);
    }

    // Inserts a GetArrayLength (with its array checks) before the current node
    // and returns it.
    Node* prependGetArrayLength(NodeOrigin origin, Node* child, ArrayMode arrayMode)
    {
        Node* storage = checkArray(arrayMode, origin, child, 0, lengthNeedsStorage);
        return m_insertionSet.insertNode(
            m_indexInBlock, SpecInt32, GetArrayLength, origin,
            OpInfo(arrayMode.asWord()), Edge(child, KnownCellUse), Edge(storage));
    }

    // Tries to lower a byteOffset access on a typed array view to the dedicated
    // GetTypedArrayByteOffset node.
    bool attemptToMakeGetTypedArrayByteOffset(Node* node)
    {
        if (!isInt32Speculation(node->prediction()))
            return false;

        TypedArrayType type = typedArrayTypeFromSpeculation(node->child1()->prediction());
        if (!isTypedView(type))
            return false;

        checkArray(
            ArrayMode(toArrayType(type)), node->origin, node->child1().node(),
            0, neverNeedsStorage);

        node->setOp(GetTypedArrayByteOffset);
        node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
        fixEdge<KnownCellUse>(node->child1());
        return true;
    }

    // Final pass: for every edge, insert explicit representation-conversion
    // nodes (DoubleRep/Int52Rep/ValueRep) wherever the producer's result kind
    // does not match the consumer's expected representation.
    void injectTypeConversionsInBlock(BasicBlock* block)
    {
        if (!block)
            return;
        ASSERT(block->isReachable);
        m_block = block;
        for (m_indexInBlock = 0; m_indexInBlock < block->size(); ++m_indexInBlock) {
            m_currentNode = block->at(m_indexInBlock);
            addPhantomsIfNecessary();
            tryToRelaxRepresentation(m_currentNode);
            DFG_NODE_DO_TO_CHILDREN(m_graph, m_currentNode, injectTypeConversionsForEdge);
        }
        clearPhantomsAtEnd();
        m_insertionSet.execute(block);
    }

    void tryToRelaxRepresentation(Node* node)
    {
        // Some operations may be able to operate more efficiently over looser representations.
        // Identify those here. This avoids inserting a redundant representation conversion.
        // Also, for some operations, like MovHint, this is a necessary optimization: inserting
        // an otherwise-dead conversion just for a MovHint would break OSR's understanding of
        // the IR.

        switch (node->op()) {
        case MovHint:
        case Phantom:
        case Check:
        case HardPhantom:
            DFG_NODE_DO_TO_CHILDREN(m_graph, m_currentNode, fixEdgeRepresentation);
            break;

        case ValueToInt32:
            // ValueToInt32 can consume a boxed number directly; no DoubleRep
            // conversion is needed when the child isn't already a double.
            if (node->child1().useKind() == DoubleRepUse
                && !node->child1()->hasDoubleResult()) {
                node->child1().setUseKind(NumberUse);
                break;
            }
            break;

        default:
            break;
        }
    }

    // Loosens an edge's use kind to match the representation its producer
    // actually yields, so no conversion node has to be inserted.
    void fixEdgeRepresentation(Node*, Edge& edge)
    {
        switch (edge.useKind()) {
        case DoubleRepUse:
        case DoubleRepRealUse:
            if (edge->hasDoubleResult())
                break;

            if (edge->hasInt52Result())
                edge.setUseKind(Int52RepUse);
            else if (edge.useKind() == DoubleRepUse)
                edge.setUseKind(NumberUse);
            break;

        case Int52RepUse:
            // Nothing we can really do.
            break;

        case UntypedUse:
        case NumberUse:
            if (edge->hasDoubleResult())
                edge.setUseKind(DoubleRepUse);
            else if (edge->hasInt52Result())
                edge.setUseKind(Int52RepUse);
            break;

        default:
            break;
        }
    }

    // Inserts the conversion node an edge needs so that the representation the
    // producer yields matches the representation the use kind demands.
    void injectTypeConversionsForEdge(Node* node, Edge& edge)
    {
        ASSERT(node == m_currentNode);
        Node* result = nullptr;

        switch (edge.useKind()) {
        case DoubleRepUse:
        case DoubleRepRealUse:
        case DoubleRepMachineIntUse: {
            if (edge->hasDoubleResult())
                break;

            addRequiredPhantom(edge.node());

            // Number constants become DoubleConstant directly; other inputs go
            // through a DoubleRep conversion from Int52 or boxed number form.
            if (edge->op() == JSConstant && m_graph.isNumberConstant(edge.node())) {
                result = m_insertionSet.insertNode(
                    m_indexInBlock, SpecBytecodeDouble, DoubleConstant, node->origin,
                    OpInfo(m_graph.constantRegisterForConstant(
                        jsDoubleNumber(m_graph.valueOfNumberConstant(edge.node())))));
            } else if (edge->hasInt52Result()) {
                result = m_insertionSet.insertNode(
                    m_indexInBlock, SpecInt52AsDouble, DoubleRep, node->origin,
                    Edge(edge.node(), Int52RepUse));
            } else {
                result = m_insertionSet.insertNode(
                    m_indexInBlock, SpecBytecodeDouble, DoubleRep, node->origin,
                    Edge(edge.node(), NumberUse));
            }

            edge.setNode(result);
            break;
        }

        case Int52RepUse: {
            if (edge->hasInt52Result())
                break;

            addRequiredPhantom(edge.node());

            // Same idea for Int52: constants fold, other inputs get an Int52Rep
            // conversion from whichever representation they produce.
            if (edge->op() == JSConstant && m_graph.isMachineIntConstant(edge.node())) {
                result = m_insertionSet.insertNode(
                    m_indexInBlock, SpecMachineInt, Int52Constant, node->origin,
                    OpInfo(edge->constantNumber()));
            } else if (edge->hasDoubleResult()) {
                result = m_insertionSet.insertNode(
                    m_indexInBlock, SpecMachineInt, Int52Rep, node->origin,
                    Edge(edge.node(), DoubleRepMachineIntUse));
            } else if (edge->shouldSpeculateInt32ForArithmetic()) {
                result = m_insertionSet.insertNode(
                    m_indexInBlock, SpecInt32, Int52Rep, node->origin,
                    Edge(edge.node(), Int32Use));
            } else {
                result = m_insertionSet.insertNode(
                    m_indexInBlock, SpecMachineInt, Int52Rep, node->origin,
                    Edge(edge.node(), MachineIntUse));
            }

            edge.setNode(result);
            break;
        }

        default: {
            // Any other use kind wants a boxed JSValue: rebox double or Int52
            // results through ValueRep.
            if (!edge->hasDoubleResult() && !edge->hasInt52Result())
                break;

            addRequiredPhantom(edge.node());

            if (edge->hasDoubleResult()) {
                result = m_insertionSet.insertNode(
                    m_indexInBlock, SpecBytecodeDouble, ValueRep, node->origin,
                    Edge(edge.node(), DoubleRepUse));
            } else {
                result = m_insertionSet.insertNode(
                    m_indexInBlock, SpecInt32 | SpecInt52AsDouble, ValueRep, node->origin,
                    Edge(edge.node(), Int52RepUse));
            }

            edge.setNode(result);
            break;
        } }
    }

    // Queues a node whose original value must be kept alive (via a Phantom)
    // just before the next node processed.
    void addRequiredPhantom(Node* node)
    {
        m_requiredPhantoms.append(node);
    }

    // Flushes queued Phantoms in front of the current node.
    void addPhantomsIfNecessary()
    {
        if (m_requiredPhantoms.isEmpty())
            return;

        for (unsigned i = m_requiredPhantoms.size(); i--;) {
            Node* node = m_requiredPhantoms[i];
            m_insertionSet.insertNode(
                m_indexInBlock, SpecNone, Phantom, m_currentNode->origin,
                node->defaultEdge());
        }

        m_requiredPhantoms.resize(0);
    }

    void clearPhantomsAtEnd()
    {
        // Terminal nodes don't need post-phantoms, and inserting them would violate
        // the current requirement that a terminal is the last thing in a block. We
        // should eventually change that requirement. Currently we get around this by
        // ensuring that all terminals accept just one input, and if that input is a
        // conversion node then no further speculations will be performed. See
        // references to the bug, below, for places where we have to have hacks to
        // work around this.
        // FIXME: Get rid of this by allowing Phantoms after terminals.
        // https://bugs.webkit.org/show_bug.cgi?id=126778

        m_requiredPhantoms.resize(0);
    }

    BasicBlock* m_block;                  // Block currently being fixed up.
    unsigned m_indexInBlock;              // Index of m_currentNode within m_block.
    Node* m_currentNode;                  // Node currently being processed.
    InsertionSet m_insertionSet;          // Batched node insertions, executed per block.
    bool m_profitabilityChanged;          // Drives the unboxing fixpoint in run().
    Vector<Node*, 3> m_requiredPhantoms;  // Nodes needing a keep-alive Phantom.
};

// Entry point: runs the fixup phase over the graph.
bool performFixup(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Fixup Phase");
    return runPhase<FixupPhase>(graph);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)