1/* This file contains routines to construct OpenACC and OpenMP constructs, 2 called from parsing in the C and C++ front ends. 3 4 Copyright (C) 2005-2015 Free Software Foundation, Inc. 5 Contributed by Richard Henderson <rth@redhat.com>, 6 Diego Novillo <dnovillo@redhat.com>. 7 8This file is part of GCC. 9 10GCC is free software; you can redistribute it and/or modify it under 11the terms of the GNU General Public License as published by the Free 12Software Foundation; either version 3, or (at your option) any later 13version. 14 15GCC is distributed in the hope that it will be useful, but WITHOUT ANY 16WARRANTY; without even the implied warranty of MERCHANTABILITY or 17FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 18for more details. 19 20You should have received a copy of the GNU General Public License 21along with GCC; see the file COPYING3. If not see 22<http://www.gnu.org/licenses/>. */ 23 24#include "config.h" 25#include "system.h" 26#include "coretypes.h" 27#include "tm.h" 28#include "hash-set.h" 29#include "machmode.h" 30#include "vec.h" 31#include "double-int.h" 32#include "input.h" 33#include "alias.h" 34#include "symtab.h" 35#include "wide-int.h" 36#include "inchash.h" 37#include "tree.h" 38#include "c-common.h" 39#include "c-pragma.h" 40#include "gimple-expr.h" 41#include "langhooks.h" 42#include "omp-low.h" 43#include "gomp-constants.h" 44 45 46/* Complete a #pragma oacc wait construct. LOC is the location of 47 the #pragma. 
*/ 48 49tree 50c_finish_oacc_wait (location_t loc, tree parms, tree clauses) 51{ 52 const int nparms = list_length (parms); 53 tree stmt, t; 54 vec<tree, va_gc> *args; 55 56 vec_alloc (args, nparms + 2); 57 stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT); 58 59 if (find_omp_clause (clauses, OMP_CLAUSE_ASYNC)) 60 t = OMP_CLAUSE_ASYNC_EXPR (clauses); 61 else 62 t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC); 63 64 args->quick_push (t); 65 args->quick_push (build_int_cst (integer_type_node, nparms)); 66 67 for (t = parms; t; t = TREE_CHAIN (t)) 68 { 69 if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST) 70 args->quick_push (build_int_cst (integer_type_node, 71 TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t)))); 72 else 73 args->quick_push (OMP_CLAUSE_WAIT_EXPR (t)); 74 } 75 76 stmt = build_call_expr_loc_vec (loc, stmt, args); 77 add_stmt (stmt); 78 79 vec_free (args); 80 81 return stmt; 82} 83 84/* Complete a #pragma omp master construct. STMT is the structured-block 85 that follows the pragma. LOC is the l*/ 86 87tree 88c_finish_omp_master (location_t loc, tree stmt) 89{ 90 tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt)); 91 SET_EXPR_LOCATION (t, loc); 92 return t; 93} 94 95/* Complete a #pragma omp taskgroup construct. STMT is the structured-block 96 that follows the pragma. LOC is the l*/ 97 98tree 99c_finish_omp_taskgroup (location_t loc, tree stmt) 100{ 101 tree t = add_stmt (build1 (OMP_TASKGROUP, void_type_node, stmt)); 102 SET_EXPR_LOCATION (t, loc); 103 return t; 104} 105 106/* Complete a #pragma omp critical construct. STMT is the structured-block 107 that follows the pragma, NAME is the identifier in the pragma, or null 108 if it was omitted. LOC is the location of the #pragma. 
*/ 109 110tree 111c_finish_omp_critical (location_t loc, tree body, tree name) 112{ 113 tree stmt = make_node (OMP_CRITICAL); 114 TREE_TYPE (stmt) = void_type_node; 115 OMP_CRITICAL_BODY (stmt) = body; 116 OMP_CRITICAL_NAME (stmt) = name; 117 SET_EXPR_LOCATION (stmt, loc); 118 return add_stmt (stmt); 119} 120 121/* Complete a #pragma omp ordered construct. STMT is the structured-block 122 that follows the pragma. LOC is the location of the #pragma. */ 123 124tree 125c_finish_omp_ordered (location_t loc, tree stmt) 126{ 127 tree t = build1 (OMP_ORDERED, void_type_node, stmt); 128 SET_EXPR_LOCATION (t, loc); 129 return add_stmt (t); 130} 131 132 133/* Complete a #pragma omp barrier construct. LOC is the location of 134 the #pragma. */ 135 136void 137c_finish_omp_barrier (location_t loc) 138{ 139 tree x; 140 141 x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER); 142 x = build_call_expr_loc (loc, x, 0); 143 add_stmt (x); 144} 145 146 147/* Complete a #pragma omp taskwait construct. LOC is the location of the 148 pragma. */ 149 150void 151c_finish_omp_taskwait (location_t loc) 152{ 153 tree x; 154 155 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT); 156 x = build_call_expr_loc (loc, x, 0); 157 add_stmt (x); 158} 159 160 161/* Complete a #pragma omp taskyield construct. LOC is the location of the 162 pragma. */ 163 164void 165c_finish_omp_taskyield (location_t loc) 166{ 167 tree x; 168 169 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD); 170 x = build_call_expr_loc (loc, x, 0); 171 add_stmt (x); 172} 173 174 175/* Complete a #pragma omp atomic construct. For CODE OMP_ATOMIC 176 the expression to be implemented atomically is LHS opcode= RHS. 177 For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS 178 opcode= RHS with the new or old content of LHS returned. 179 LOC is the location of the atomic statement. 
 The value returned
   is either error_mark_node (if the construct was erroneous) or an
   OMP_ATOMIC* node which should be added to the current statement
   tree with add_stmt.  */

tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
		     enum tree_code opcode, tree lhs, tree rhs,
		     tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst)
{
  tree x, type, addr, pre = NULL_TREE;

  /* Bail out early if any operand already failed to parse.  */
  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }

  if (opcode == RDIV_EXPR)
    opcode = TRUNC_DIV_EXPR;

  /* ??? Validate that rhs does not overlap lhs.  */

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, 0);
  if (addr == error_mark_node)
    return error_mark_node;
  addr = save_expr (addr);
  if (TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || TREE_CODE (TREE_OPERAND (addr, 0)) != VAR_DECL))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr));
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  lhs = build_indirect_ref (loc, addr, RO_NULL);

  /* Atomic read is simply V = *ADDR; no opcode is involved.  */
  if (code == OMP_ATOMIC_READ)
    {
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_SEQ_CST (x) = seq_cst;
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
				loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (swapped)
    {
      /* SWAPPED means the source was "lhs = rhs op lhs"; fold the binary
	 op up front so the modify-expr below becomes a plain store.  */
      rhs = build_binary_op (loc, opcode, rhs, lhs, 1);
      opcode = NOP_EXPR;
    }
  bool save = in_late_binary_op;
  in_late_binary_op = true;
  x = build_modify_expr (loc, lhs, NULL_TREE, opcode, loc, rhs, NULL_TREE);
  in_late_binary_op = save;
  if (x == error_mark_node)
    return error_mark_node;
  if (TREE_CODE (x) == COMPOUND_EXPR)
    {
      /* build_modify_expr may pre-evaluate the RHS into a SAVE_EXPR;
	 keep that side expression in PRE and strip it off here.  */
      pre = TREE_OPERAND (x, 0);
      gcc_assert (TREE_CODE (pre) == SAVE_EXPR);
      x = TREE_OPERAND (x, 1);
    }
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_SEQ_CST (x) = seq_cst;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1
      && TREE_CODE (rhs1) == VAR_DECL
      && TREE_CODE (lhs) == VAR_DECL
      && rhs1 != lhs)
    {
      if (code == OMP_ATOMIC)
	error_at (loc, "%<#pragma omp atomic update%> uses two different variables for memory");
      else
	error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
      return error_mark_node;
    }

  /* Capture forms additionally store the old/new value into V.  */
  if (code != OMP_ATOMIC)
    {
      /* Generally it is hard to prove lhs1 and lhs are the same memory
	 location, just diagnose different variables.  */
      if (lhs1 && TREE_CODE (lhs1) == VAR_DECL && TREE_CODE (lhs) == VAR_DECL)
	{
	  if (lhs1 != lhs)
	    {
	      error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
	      return error_mark_node;
	    }
	}
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
			     loc, x, NULL_TREE);
      /* Keep RHS1/LHS1 evaluated for their side effects even though the
	 atomic itself only uses ADDR; omit_*_operand* preserves them.  */
      if (rhs1 && rhs1 != lhs)
	{
	  tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
	  if (rhs1addr == error_mark_node)
	    return error_mark_node;
	  x = omit_one_operand_loc (loc, type, x, rhs1addr);
	}
      if (lhs1 && lhs1 != lhs)
	{
	  tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, 0);
	  if (lhs1addr == error_mark_node)
	    return error_mark_node;
	  if (code == OMP_ATOMIC_CAPTURE_OLD)
	    x = omit_one_operand_loc (loc, type, x, lhs1addr);
	  else
	    {
	      x = save_expr (x);
	      x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
	    }
	}
    }
  else if (rhs1 && rhs1 != lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
      if (rhs1addr == error_mark_node)
	return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }

  /* Re-attach any pre-evaluation stripped off above.  */
  if (pre)
    x = omit_one_operand_loc (loc, type, x, pre);
  return x;
}


/* Complete a #pragma omp flush construct.  We don't do anything with
   the variable list that the syntax allows.  LOC is the location of
   the #pragma.
*/ 337 338void 339c_finish_omp_flush (location_t loc) 340{ 341 tree x; 342 343 x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE); 344 x = build_call_expr_loc (loc, x, 0); 345 add_stmt (x); 346} 347 348 349/* Check and canonicalize OMP_FOR increment expression. 350 Helper function for c_finish_omp_for. */ 351 352static tree 353check_omp_for_incr_expr (location_t loc, tree exp, tree decl) 354{ 355 tree t; 356 357 if (!INTEGRAL_TYPE_P (TREE_TYPE (exp)) 358 || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl))) 359 return error_mark_node; 360 361 if (exp == decl) 362 return build_int_cst (TREE_TYPE (exp), 0); 363 364 switch (TREE_CODE (exp)) 365 { 366 CASE_CONVERT: 367 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl); 368 if (t != error_mark_node) 369 return fold_convert_loc (loc, TREE_TYPE (exp), t); 370 break; 371 case MINUS_EXPR: 372 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl); 373 if (t != error_mark_node) 374 return fold_build2_loc (loc, MINUS_EXPR, 375 TREE_TYPE (exp), t, TREE_OPERAND (exp, 1)); 376 break; 377 case PLUS_EXPR: 378 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl); 379 if (t != error_mark_node) 380 return fold_build2_loc (loc, PLUS_EXPR, 381 TREE_TYPE (exp), t, TREE_OPERAND (exp, 1)); 382 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl); 383 if (t != error_mark_node) 384 return fold_build2_loc (loc, PLUS_EXPR, 385 TREE_TYPE (exp), TREE_OPERAND (exp, 0), t); 386 break; 387 case COMPOUND_EXPR: 388 { 389 /* cp_build_modify_expr forces preevaluation of the RHS to make 390 sure that it is evaluated before the lvalue-rvalue conversion 391 is applied to the LHS. Reconstruct the original expression. 
*/ 392 tree op0 = TREE_OPERAND (exp, 0); 393 if (TREE_CODE (op0) == TARGET_EXPR 394 && !VOID_TYPE_P (TREE_TYPE (op0))) 395 { 396 tree op1 = TREE_OPERAND (exp, 1); 397 tree temp = TARGET_EXPR_SLOT (op0); 398 if (TREE_CODE_CLASS (TREE_CODE (op1)) == tcc_binary 399 && TREE_OPERAND (op1, 1) == temp) 400 { 401 op1 = copy_node (op1); 402 TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0); 403 return check_omp_for_incr_expr (loc, op1, decl); 404 } 405 } 406 break; 407 } 408 default: 409 break; 410 } 411 412 return error_mark_node; 413} 414 415/* If the OMP_FOR increment expression in INCR is of pointer type, 416 canonicalize it into an expression handled by gimplify_omp_for() 417 and return it. DECL is the iteration variable. */ 418 419static tree 420c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr) 421{ 422 if (POINTER_TYPE_P (TREE_TYPE (decl)) 423 && TREE_OPERAND (incr, 1)) 424 { 425 tree t = fold_convert_loc (loc, 426 sizetype, TREE_OPERAND (incr, 1)); 427 428 if (TREE_CODE (incr) == POSTDECREMENT_EXPR 429 || TREE_CODE (incr) == PREDECREMENT_EXPR) 430 t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t); 431 t = fold_build_pointer_plus (decl, t); 432 incr = build2 (MODIFY_EXPR, void_type_node, decl, t); 433 } 434 return incr; 435} 436 437/* Validate and generate OMP_FOR. 438 DECLV is a vector of iteration variables, for each collapsed loop. 439 INITV, CONDV and INCRV are vectors containing initialization 440 expressions, controlling predicates and increment expressions. 441 BODY is the body of the loop and PRE_BODY statements that go before 442 the loop. 
*/ 443 444tree 445c_finish_omp_for (location_t locus, enum tree_code code, tree declv, 446 tree initv, tree condv, tree incrv, tree body, tree pre_body) 447{ 448 location_t elocus; 449 bool fail = false; 450 int i; 451 452 if ((code == CILK_SIMD || code == CILK_FOR) 453 && !c_check_cilk_loop (locus, TREE_VEC_ELT (declv, 0))) 454 fail = true; 455 456 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv)); 457 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv)); 458 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv)); 459 for (i = 0; i < TREE_VEC_LENGTH (declv); i++) 460 { 461 tree decl = TREE_VEC_ELT (declv, i); 462 tree init = TREE_VEC_ELT (initv, i); 463 tree cond = TREE_VEC_ELT (condv, i); 464 tree incr = TREE_VEC_ELT (incrv, i); 465 466 elocus = locus; 467 if (EXPR_HAS_LOCATION (init)) 468 elocus = EXPR_LOCATION (init); 469 470 /* Validate the iteration variable. */ 471 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)) 472 && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE) 473 { 474 error_at (elocus, "invalid type for iteration variable %qE", decl); 475 fail = true; 476 } 477 478 /* In the case of "for (int i = 0...)", init will be a decl. It should 479 have a DECL_INITIAL that we can turn into an assignment. */ 480 if (init == decl) 481 { 482 elocus = DECL_SOURCE_LOCATION (decl); 483 484 init = DECL_INITIAL (decl); 485 if (init == NULL) 486 { 487 error_at (elocus, "%qE is not initialized", decl); 488 init = integer_zero_node; 489 fail = true; 490 } 491 492 init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR, 493 /* FIXME diagnostics: This should 494 be the location of the INIT. 
*/ 495 elocus, 496 init, 497 NULL_TREE); 498 } 499 if (init != error_mark_node) 500 { 501 gcc_assert (TREE_CODE (init) == MODIFY_EXPR); 502 gcc_assert (TREE_OPERAND (init, 0) == decl); 503 } 504 505 if (cond == NULL_TREE) 506 { 507 error_at (elocus, "missing controlling predicate"); 508 fail = true; 509 } 510 else 511 { 512 bool cond_ok = false; 513 514 if (EXPR_HAS_LOCATION (cond)) 515 elocus = EXPR_LOCATION (cond); 516 517 if (TREE_CODE (cond) == LT_EXPR 518 || TREE_CODE (cond) == LE_EXPR 519 || TREE_CODE (cond) == GT_EXPR 520 || TREE_CODE (cond) == GE_EXPR 521 || TREE_CODE (cond) == NE_EXPR 522 || TREE_CODE (cond) == EQ_EXPR) 523 { 524 tree op0 = TREE_OPERAND (cond, 0); 525 tree op1 = TREE_OPERAND (cond, 1); 526 527 /* 2.5.1. The comparison in the condition is computed in 528 the type of DECL, otherwise the behavior is undefined. 529 530 For example: 531 long n; int i; 532 i < n; 533 534 according to ISO will be evaluated as: 535 (long)i < n; 536 537 We want to force: 538 i < (int)n; */ 539 if (TREE_CODE (op0) == NOP_EXPR 540 && decl == TREE_OPERAND (op0, 0)) 541 { 542 TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0); 543 TREE_OPERAND (cond, 1) 544 = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl), 545 TREE_OPERAND (cond, 1)); 546 } 547 else if (TREE_CODE (op1) == NOP_EXPR 548 && decl == TREE_OPERAND (op1, 0)) 549 { 550 TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0); 551 TREE_OPERAND (cond, 0) 552 = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl), 553 TREE_OPERAND (cond, 0)); 554 } 555 556 if (decl == TREE_OPERAND (cond, 0)) 557 cond_ok = true; 558 else if (decl == TREE_OPERAND (cond, 1)) 559 { 560 TREE_SET_CODE (cond, 561 swap_tree_comparison (TREE_CODE (cond))); 562 TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0); 563 TREE_OPERAND (cond, 0) = decl; 564 cond_ok = true; 565 } 566 567 if (TREE_CODE (cond) == NE_EXPR 568 || TREE_CODE (cond) == EQ_EXPR) 569 { 570 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))) 571 { 572 if (code != CILK_SIMD && code != CILK_FOR) 573 
cond_ok = false; 574 } 575 else if (operand_equal_p (TREE_OPERAND (cond, 1), 576 TYPE_MIN_VALUE (TREE_TYPE (decl)), 577 0)) 578 TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR 579 ? GT_EXPR : LE_EXPR); 580 else if (operand_equal_p (TREE_OPERAND (cond, 1), 581 TYPE_MAX_VALUE (TREE_TYPE (decl)), 582 0)) 583 TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR 584 ? LT_EXPR : GE_EXPR); 585 else if (code != CILK_SIMD && code != CILK_FOR) 586 cond_ok = false; 587 } 588 } 589 590 if (!cond_ok) 591 { 592 error_at (elocus, "invalid controlling predicate"); 593 fail = true; 594 } 595 } 596 597 if (incr == NULL_TREE) 598 { 599 error_at (elocus, "missing increment expression"); 600 fail = true; 601 } 602 else 603 { 604 bool incr_ok = false; 605 606 if (EXPR_HAS_LOCATION (incr)) 607 elocus = EXPR_LOCATION (incr); 608 609 /* Check all the valid increment expressions: v++, v--, ++v, --v, 610 v = v + incr, v = incr + v and v = v - incr. */ 611 switch (TREE_CODE (incr)) 612 { 613 case POSTINCREMENT_EXPR: 614 case PREINCREMENT_EXPR: 615 case POSTDECREMENT_EXPR: 616 case PREDECREMENT_EXPR: 617 if (TREE_OPERAND (incr, 0) != decl) 618 break; 619 620 incr_ok = true; 621 incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr); 622 break; 623 624 case COMPOUND_EXPR: 625 if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR 626 || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR) 627 break; 628 incr = TREE_OPERAND (incr, 1); 629 /* FALLTHRU */ 630 case MODIFY_EXPR: 631 if (TREE_OPERAND (incr, 0) != decl) 632 break; 633 if (TREE_OPERAND (incr, 1) == decl) 634 break; 635 if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR 636 && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl 637 || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl)) 638 incr_ok = true; 639 else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR 640 || (TREE_CODE (TREE_OPERAND (incr, 1)) 641 == POINTER_PLUS_EXPR)) 642 && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl) 643 incr_ok = true; 644 else 645 { 646 tree t = 
check_omp_for_incr_expr (elocus, 647 TREE_OPERAND (incr, 1), 648 decl); 649 if (t != error_mark_node) 650 { 651 incr_ok = true; 652 t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t); 653 incr = build2 (MODIFY_EXPR, void_type_node, decl, t); 654 } 655 } 656 break; 657 658 default: 659 break; 660 } 661 if (!incr_ok) 662 { 663 error_at (elocus, "invalid increment expression"); 664 fail = true; 665 } 666 } 667 668 TREE_VEC_ELT (initv, i) = init; 669 TREE_VEC_ELT (incrv, i) = incr; 670 } 671 672 if (fail) 673 return NULL; 674 else 675 { 676 tree t = make_node (code); 677 678 TREE_TYPE (t) = void_type_node; 679 OMP_FOR_INIT (t) = initv; 680 OMP_FOR_COND (t) = condv; 681 OMP_FOR_INCR (t) = incrv; 682 OMP_FOR_BODY (t) = body; 683 OMP_FOR_PRE_BODY (t) = pre_body; 684 685 SET_EXPR_LOCATION (t, locus); 686 return add_stmt (t); 687 } 688} 689 690/* Right now we have 14 different combined constructs, this 691 function attempts to split or duplicate clauses for combined 692 constructs. CODE is the innermost construct in the combined construct, 693 and MASK allows to determine which constructs are combined together, 694 as every construct has at least one clause that no other construct 695 has (except for OMP_SECTIONS, but that can be only combined with parallel). 
   Combined constructs are:
       #pragma omp parallel for
       #pragma omp parallel sections
       #pragma omp parallel for simd
       #pragma omp for simd
       #pragma omp distribute simd
       #pragma omp distribute parallel for
       #pragma omp distribute parallel for simd
       #pragma omp teams distribute
       #pragma omp teams distribute parallel for
       #pragma omp teams distribute parallel for simd
       #pragma omp target teams
       #pragma omp target teams distribute
       #pragma omp target teams distribute parallel for
       #pragma omp target teams distribute parallel for simd  */

void
c_omp_split_clauses (location_t loc, enum tree_code code,
		     omp_clause_mask mask, tree clauses, tree *cclauses)
{
  tree next, c;
  enum c_omp_clause_split s;
  int i;

  for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
    cclauses[i] = NULL;
  /* Add implicit nowait clause on
     #pragma omp parallel {for,for simd,sections}.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
    switch (code)
      {
      case OMP_FOR:
      case OMP_SIMD:
	cclauses[C_OMP_CLAUSE_SPLIT_FOR]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      case OMP_SECTIONS:
	cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      default:
	break;
      }

  /* Walk the clause chain, rechaining each clause onto the cclauses
     list selected in S (and duplicating where several constructs need
     a copy).  */
  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
	{
	/* First the clauses that are unique to some constructs.  */
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_MAP:
	  s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	  s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_DIST_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_PROC_BIND:
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_LINEAR:
	case OMP_CLAUSE_ALIGNED:
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* Duplicate this to all of distribute, for and simd.  */
	case OMP_CLAUSE_COLLAPSE:
	  if (code == OMP_SIMD)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_COLLAPSE);
	      OMP_CLAUSE_COLLAPSE_EXPR (c)
		= OMP_CLAUSE_COLLAPSE_EXPR (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
	      cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
		  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
		  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else
	    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	/* Private clause is supported on all constructs but target,
	   it is enough to put it on the innermost one.  For
	   #pragma omp {for,sections} put it on parallel though,
	   as that's what we did for OpenMP 3.1.  */
	case OMP_CLAUSE_PRIVATE:
	  switch (code)
	    {
	    case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
	    case OMP_FOR: case OMP_SECTIONS:
	    case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
	    case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
	    case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
	    default: gcc_unreachable ();
	    }
	  break;
	/* Firstprivate clause is supported on all constructs but
	   target and simd.  Put it on the outermost of those and
	   duplicate on parallel.  */
	case OMP_CLAUSE_FIRSTPRIVATE:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
	      != 0)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_FIRSTPRIVATE);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  if ((mask & (OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
		    s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  else
		    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		/* This must be
		   #pragma omp parallel{, for{, simd}, sections}.  */
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		   != 0)
	    {
	      /* This must be one of
		 #pragma omp {,target }teams distribute
		 #pragma omp target teams
		 #pragma omp {,target }teams distribute simd.  */
	      gcc_assert (code == OMP_DISTRIBUTE
			  || code == OMP_TEAMS
			  || code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
	    {
	      /* This must be #pragma omp distribute simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	    }
	  else
	    {
	      /* This must be #pragma omp for simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  break;
	/* Lastprivate is allowed on for, sections and simd.  In
	   parallel {for{, simd},sections} we actually want to put it on
	   parallel rather than for or sections.  */
	case OMP_CLAUSE_LASTPRIVATE:
	  if (code == OMP_FOR || code == OMP_SECTIONS)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      break;
	    }
	  gcc_assert (code == OMP_SIMD);
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      OMP_CLAUSE_CHAIN (c) = cclauses[s];
	      cclauses[s] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* Shared and default clauses are allowed on parallel and teams.  */
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_DEFAULT:
	  if (code == OMP_TEAMS)
	    {
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
	      != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_CODE (clauses));
	      if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
		OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      else
		OMP_CLAUSE_DEFAULT_KIND (c)
		  = OMP_CLAUSE_DEFAULT_KIND (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
	      cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;

	    }
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	/* Reduction is allowed on simd, for, parallel, sections and teams.
	   Duplicate it on all of them, but omit on for or sections if
	   parallel is present.  */
	case OMP_CLAUSE_REDUCTION:
	  if (code == OMP_SIMD)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_REDUCTION);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_REDUCTION_CODE (c)
		= OMP_CLAUSE_REDUCTION_CODE (clauses);
	      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		= OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
	      cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
		}
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if (code == OMP_SECTIONS)
	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  else
	    s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_IF:
	  /* FIXME: This is currently being discussed.  */
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
	      != 0)
	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  else
	    s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	default:
	  gcc_unreachable ();
	}
      /* Rechain the original clause onto the selected split list.  */
      OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
      cclauses[s] = clauses;
    }
}


/* qsort callback to compare #pragma omp declare simd clauses.
*/ 985 986static int 987c_omp_declare_simd_clause_cmp (const void *p, const void *q) 988{ 989 tree a = *(const tree *) p; 990 tree b = *(const tree *) q; 991 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b)) 992 { 993 if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b)) 994 return -1; 995 return 1; 996 } 997 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN 998 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH 999 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH) 1000 { 1001 int c = tree_to_shwi (OMP_CLAUSE_DECL (a)); 1002 int d = tree_to_shwi (OMP_CLAUSE_DECL (b)); 1003 if (c < d) 1004 return 1; 1005 if (c > d) 1006 return -1; 1007 } 1008 return 0; 1009} 1010 1011/* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd 1012 CLAUSES on FNDECL into argument indexes and sort them. */ 1013 1014tree 1015c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses) 1016{ 1017 tree c; 1018 vec<tree> clvec = vNULL; 1019 1020 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) 1021 { 1022 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN 1023 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH 1024 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH) 1025 { 1026 tree decl = OMP_CLAUSE_DECL (c); 1027 tree arg; 1028 int idx; 1029 for (arg = parms, idx = 0; arg; 1030 arg = TREE_CHAIN (arg), idx++) 1031 if (arg == decl) 1032 break; 1033 if (arg == NULL_TREE) 1034 { 1035 error_at (OMP_CLAUSE_LOCATION (c), 1036 "%qD is not an function argument", decl); 1037 continue; 1038 } 1039 OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx); 1040 } 1041 clvec.safe_push (c); 1042 } 1043 if (!clvec.is_empty ()) 1044 { 1045 unsigned int len = clvec.length (), i; 1046 clvec.qsort (c_omp_declare_simd_clause_cmp); 1047 clauses = clvec[0]; 1048 for (i = 0; i < len; i++) 1049 OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? 
clvec[i + 1] : NULL_TREE; 1050 } 1051 else 1052 clauses = NULL_TREE; 1053 clvec.release (); 1054 return clauses; 1055} 1056 1057/* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs. */ 1058 1059void 1060c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses) 1061{ 1062 tree c; 1063 1064 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) 1065 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN 1066 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH 1067 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH) 1068 { 1069 int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i; 1070 tree arg; 1071 for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg; 1072 arg = TREE_CHAIN (arg), i++) 1073 if (i == idx) 1074 break; 1075 gcc_assert (arg); 1076 OMP_CLAUSE_DECL (c) = arg; 1077 } 1078} 1079 1080/* True if OpenMP sharing attribute of DECL is predetermined. */ 1081 1082enum omp_clause_default_kind 1083c_omp_predetermined_sharing (tree decl) 1084{ 1085 /* Variables with const-qualified type having no mutable member 1086 are predetermined shared. */ 1087 if (TREE_READONLY (decl)) 1088 return OMP_CLAUSE_DEFAULT_SHARED; 1089 1090 return OMP_CLAUSE_DEFAULT_UNSPECIFIED; 1091} 1092