/* PR middle-end/61486 */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

/* Mark dosomething as a target function: the compiler must emit a device
   version of it in addition to the host one, so calls to it are legal
   inside the target regions below.  It is intentionally never defined —
   this testcase is compile-only.  */
#pragma omp declare target
void dosomething (int *a, int n, int m);
#pragma omp end declare target

/* Compile-only stress test for gimplification/omp-lowering of every
   combined and split target/teams/distribute construct with a broad mix
   of clauses (PR middle-end/61486 was an ICE on such combinations).
   Results are never checked; the point is that all of this compiles.  */
void
test (int n, int o, int p, int q, int r, int s, int *pp)
{
  int a[o], i, j;
  #pragma omp target data device (n + 1) if (n != 6) map (tofrom: n, r)
  {
    /* Fully combined forms: all clauses on a single directive.  */
    #pragma omp target device (n + 1) if (n != 6) map (from: n) map (alloc: a[2:o-2])
      dosomething (a, n, 0);
    #pragma omp target teams device (n + 1) num_teams (n + 4) thread_limit (n * 2) \
    	if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r)
    {
      r = r + 1;
      p = q;
      dosomething (a, n, p + q);
    }
    #pragma omp target teams distribute device (n + 1) num_teams (n + 4) collapse (2) \
    	if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	  }
    #pragma omp target teams distribute device (n + 1) num_teams (n + 4) \
    	if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	  }
    #pragma omp target teams distribute parallel for device (n + 1) num_teams (n + 4) \
    	if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
    	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
    	ordered schedule (static, 8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	    #pragma omp ordered
	      p = q;
	    s = i * 10 + j;
	  }
    #pragma omp target teams distribute parallel for device (n + 1) num_teams (n + 4) \
    	if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
    	proc_bind (master) lastprivate (s) ordered schedule (static, 8)
      for (i = 0; i < 10; i++)
	{
	  for (j = 0; j < 10; j++)
	    {
	      r = r + 1;
	      p = q;
	      dosomething (a, n, p + q);
	    }
	  #pragma omp ordered
	    p = q;
	  s = i * 10;
	}
    #pragma omp target teams distribute parallel for simd device (n + 1) \
    	if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
    	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
    	schedule (static, 8) num_teams (n + 4) safelen(8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    a[2+i*10+j] = p + q;
	    s = i * 10 + j;
	  }
    #pragma omp target teams distribute parallel for simd device (n + 1) \
    	if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
    	proc_bind (master) lastprivate (s) schedule (static, 8) \
    	num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
      for (i = 0; i < 10; i++)
	{
	  r = r + 1;
	  p = q;
	  a[2+i] = p + q;
	  s = i * 10;
	}
    #pragma omp target teams distribute simd device (n + 1) \
    	if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
    	lastprivate (s) num_teams (n + 4) safelen(8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    a[2+i*10+j] = p + q;
	    s = i * 10 + j;
	  }
    #pragma omp target teams distribute simd device (n + 1) \
    	if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) lastprivate (s) \
    	num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
      for (i = 0; i < 10; i++)
	{
	  r = r + 1;
	  p = q;
	  a[2+i] = p + q;
	  s = i * 10;
	}
    /* Split forms: a plain target directive with the teams... variants as
       separate, immediately nested directives.  */
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams num_teams (n + 4) thread_limit (n * 2) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r)
    {
      r = r + 1;
      p = q;
      dosomething (a, n, p + q);
    }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute num_teams (n + 4) collapse (2) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	  }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute num_teams (n + 4) default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	  }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute parallel for num_teams (n + 4) if (n != 6) \
	default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
    	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
    	ordered schedule (static, 8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	    #pragma omp ordered
	      p = q;
	    s = i * 10 + j;
	  }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute parallel for num_teams (n + 4) if (n != 6) \
	default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
    	proc_bind (master) lastprivate (s) ordered schedule (static, 8)
      for (i = 0; i < 10; i++)
	{
	  for (j = 0; j < 10; j++)
	    {
	      r = r + 1;
	      p = q;
	      dosomething (a, n, p + q);
	    }
	  #pragma omp ordered
	    p = q;
	  s = i * 10;
	}
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute parallel for simd if (n != 6)default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
    	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
    	schedule (static, 8) num_teams (n + 4) safelen(8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    a[2+i*10+j] = p + q;
	    s = i * 10 + j;
	  }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute parallel for simd if (n != 6)default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
    	proc_bind (master) lastprivate (s) schedule (static, 8) \
    	num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
      for (i = 0; i < 10; i++)
	{
	  r = r + 1;
	  p = q;
	  a[2+i] = p + q;
	  s = i * 10;
	}
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute simd default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
    	lastprivate (s) num_teams (n + 4) safelen(8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    a[2+i*10+j] = p + q;
	    s = i * 10 + j;
	  }
    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
    #pragma omp teams distribute simd default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	thread_limit (n * 2) dist_schedule (static, 4) lastprivate (s) \
    	num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
      for (i = 0; i < 10; i++)
	{
	  r = r + 1;
	  p = q;
	  a[2+i] = p + q;
	  s = i * 10;
	}
    /* Combined target teams with the distribute variants as separate,
       immediately nested directives.  */
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
	num_teams (n + 4) thread_limit (n * 2)default(shared) shared(n) \
	private (p) reduction (+: r)
    #pragma omp distribute collapse (2) dist_schedule (static, 4) firstprivate (q)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	  }
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
	num_teams (n + 4) thread_limit (n * 2) shared(n) private(p) reduction (+ : r) \
	default(shared)
    #pragma omp distribute dist_schedule (static, 4) firstprivate (q)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	  }
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
	num_teams (n + 4) thread_limit (n * 2)
    #pragma omp distribute parallel for if (n != 6) \
	default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
    	collapse (2) dist_schedule (static, 4) \
    	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
    	ordered schedule (static, 8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    dosomething (a, n, p + q);
	    #pragma omp ordered
	      p = q;
	    s = i * 10 + j;
	  }
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
	num_teams (n + 4) thread_limit (n * 2)
    #pragma omp distribute parallel for if (n != 6) \
	default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
    	num_threads (n + 4) dist_schedule (static, 4) \
    	proc_bind (master) lastprivate (s) ordered schedule (static, 8)
      for (i = 0; i < 10; i++)
	{
	  for (j = 0; j < 10; j++)
	    {
	      r = r + 1;
	      p = q;
	      dosomething (a, n, p + q);
	    }
	  #pragma omp ordered
	    p = q;
	  s = i * 10;
	}
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
	num_teams (n + 4) thread_limit (n * 2)
    #pragma omp distribute parallel for simd if (n != 6)default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	collapse (2) dist_schedule (static, 4) \
    	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
    	schedule (static, 8) safelen(8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    a[2+i*10+j] = p + q;
	    s = i * 10 + j;
	  }
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
	num_teams (n + 4) thread_limit (n * 2)
    #pragma omp distribute parallel for simd if (n != 6)default(shared) \
    	private (p) firstprivate (q) shared (n) reduction (+: r) \
    	num_threads (n + 4) dist_schedule (static, 4) \
    	proc_bind (master) lastprivate (s) schedule (static, 8) \
    	safelen(16) linear(i:1) aligned (pp:4)
      for (i = 0; i < 10; i++)
	{
	  r = r + 1;
	  p = q;
	  a[2+i] = p + q;
	  s = i * 10;
	}
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
	num_teams (n + 4) thread_limit (n * 2) default(shared) shared(n) private(p) \
	reduction(+:r)
    #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
    	collapse (2) dist_schedule (static, 4) lastprivate (s) safelen(8)
      for (i = 0; i < 10; i++)
	for (j = 0; j < 10; j++)
	  {
	    r = r + 1;
	    p = q;
	    a[2+i*10+j] = p + q;
	    s = i * 10 + j;
	  }
    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
	num_teams (n + 4) thread_limit (n * 2) default(shared) shared(n) private(p) \
	reduction(+:r)
    #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
    	lastprivate (s) dist_schedule (static, 4) safelen(16) linear(i:1) aligned (pp:4)
      for (i = 0; i < 10; i++)
	{
	  r = r + 1;
	  p = q;
	  a[2+i] = p + q;
	  s = i * 10;
	}
  }
}

/* File-scope variables used by test2 below: unlike in test, the loop
   iterators i, j and the firstprivate source q are globals there.  */
int q, i, j;

363void
364test2 (int n, int o, int p, int r, int s, int *pp)
365{
366  int a[o];
367    #pragma omp distribute collapse (2) dist_schedule (static, 4) firstprivate (q)
368      for (i = 0; i < 10; i++)
369	for (j = 0; j < 10; j++)
370	  {
371	    r = r + 1;
372	    p = q;
373	    dosomething (a, n, p + q);
374	  }
375    #pragma omp distribute dist_schedule (static, 4) firstprivate (q)
376      for (i = 0; i < 10; i++)
377	for (j = 0; j < 10; j++)
378	  {
379	    r = r + 1;
380	    p = q;
381	    dosomething (a, n, p + q);
382	  }
383    #pragma omp distribute parallel for if (n != 6) \
384	default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
385    	collapse (2) dist_schedule (static, 4) \
386    	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
387    	ordered schedule (static, 8)
388      for (i = 0; i < 10; i++)
389	for (j = 0; j < 10; j++)
390	  {
391	    r = r + 1;
392	    p = q;
393	    dosomething (a, n, p + q);
394	    #pragma omp ordered
395	      p = q;
396	    s = i * 10 + j;
397	  }
398    #pragma omp distribute parallel for if (n != 6) \
399	default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
400    	num_threads (n + 4) dist_schedule (static, 4) \
401    	proc_bind (master) lastprivate (s) ordered schedule (static, 8)
402      for (i = 0; i < 10; i++)
403	{
404	  for (j = 0; j < 10; j++)
405	    {
406	      r = r + 1;
407	      p = q;
408	      dosomething (a, n, p + q);
409	    }
410	  #pragma omp ordered
411	    p = q;
412	  s = i * 10;
413	}
414    #pragma omp distribute parallel for simd if (n != 6)default(shared) \
415    	private (p) firstprivate (q) shared (n) reduction (+: r) \
416    	collapse (2) dist_schedule (static, 4) \
417    	num_threads (n + 4) proc_bind (spread) lastprivate (s) \
418    	schedule (static, 8) safelen(8)
419      for (i = 0; i < 10; i++)
420	for (j = 0; j < 10; j++)
421	  {
422	    r = r + 1;
423	    p = q;
424	    a[2+i*10+j] = p + q;
425	    s = i * 10 + j;
426	  }
427    #pragma omp distribute parallel for simd if (n != 6)default(shared) \
428    	private (p) firstprivate (q) shared (n) reduction (+: r) \
429    	num_threads (n + 4) dist_schedule (static, 4) \
430    	proc_bind (master) lastprivate (s) schedule (static, 8) \
431    	safelen(16) linear(i:1) aligned (pp:4)
432      for (i = 0; i < 10; i++)
433	{
434	  r = r + 1;
435	  p = q;
436	  a[2+i] = p + q;
437	  s = i * 10;
438	}
439    #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
440    	collapse (2) dist_schedule (static, 4) lastprivate (s) safelen(8)
441      for (i = 0; i < 10; i++)
442	for (j = 0; j < 10; j++)
443	  {
444	    r = r + 1;
445	    p = q;
446	    a[2+i*10+j] = p + q;
447	    s = i * 10 + j;
448	  }
449    #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
450    	lastprivate (s) dist_schedule (static, 4) safelen(16) linear(i:1) aligned (pp:4)
451      for (i = 0; i < 10; i++)
452	{
453	  r = r + 1;
454	  p = q;
455	  a[2+i] = p + q;
456	  s = i * 10;
457	}
458}
459