Lines matching defs:ub in /freebsd-12-stable/contrib/llvm-project/openmp/runtime/src/

87 // ub (upper bound), and st (stride).  nproc is the number of threads relevant
98 enum sched_type schedule, T lb, T ub,
121 "pr:%%p lb:%%%s ub:%%%s st:%%%s "
126 KD_TRACE(10, (buff, gtid, pr, lb, ub, st, schedule, chunk, nproc, tid));
292 if (ub >= lb) {
293 tc = ub - lb + 1;
294 } else { // ub < lb
298 if (lb >= ub) {
301 tc = (UT)(lb - ub) / (-st) + 1;
302 } else { // lb < ub
306 if (ub >= lb) {
309 tc = (UT)(ub - lb) / st + 1;
310 } else { // ub < lb
322 pr->u.p.ub = ub;
327 pr->u.p.last_upper = ub + st;
360 pr->u.p.ub = init + small_chunk + (id < extras ? 1 : 0);
441 pr->u.p.ub = lb + limit;
443 // calculated upper bound, "ub" is user-defined upper bound
446 // adjust upper bound to "ub" if needed, so that MS lastprivate will match
449 pr->u.p.ub = (ub_tmp + st > ub ? ub : ub_tmp);
451 pr->u.p.ub = (ub_tmp + st < ub ? ub : ub_tmp);
705 inline void __kmp_dispatch_init_hier_runtime(ident_t *loc, T lb, T ub,
710 kmp_int32 ub, kmp_int32 st) {
713 __kmp_hier_scheds.scheds, __kmp_hier_scheds.small_chunks, lb, ub, st);
718 kmp_uint32 ub, kmp_int32 st) {
721 __kmp_hier_scheds.scheds, __kmp_hier_scheds.small_chunks, lb, ub, st);
726 kmp_int64 ub, kmp_int64 st) {
729 __kmp_hier_scheds.scheds, __kmp_hier_scheds.large_chunks, lb, ub, st);
734 kmp_uint64 ub, kmp_int64 st) {
737 __kmp_hier_scheds.scheds, __kmp_hier_scheds.large_chunks, lb, ub, st);
761 T ub, typename traits_t<T>::signed_t st,
791 "chunk:%%%s lb:%%%s ub:%%%s st:%%%s\n",
794 KD_TRACE(10, (buff, gtid, schedule, chunk, lb, ub, st));
840 __kmp_dispatch_init_hier_runtime<T>(loc, lb, ub, st);
870 __kmp_dispatch_init_algorithm(loc, gtid, pr, schedule, lb, ub, st,
939 pr->u.p.ub = pr->u.p.lb = pr->u.p.st = pr->u.p.tc = 0;
951 "lb:%%%s ub:%%%s"
959 pr->u.p.ub, pr->u.p.st, pr->u.p.tc, pr->u.p.count,
1200 if (pr->u.p.count < (UT)pr->u.p.ub) {
1204 status = (init < (UT)pr->u.p.ub);
1238 if (victim->u.p.count + 2 > (UT)victim->u.p.ub) {
1246 limit = victim->u.p.ub; // keep initial ub
1253 // stealing succeeded, reduce victim's ub by 1/4 of undone chunks or
1258 init = (victim->u.p.ub -= (remaining >> 2));
1262 init = (victim->u.p.ub -= 1);
1270 // now update own count and ub with stolen range but init chunk
1273 pr->u.p.ub = limit;
1278 // 4-byte induction variable, use 8-byte CAS for pair (count, ub)
1282 T ub;
1286 // All operations on 'count' or 'ub' must be combined atomically
1304 status = (init < (UT)vnew.p.ub);
1343 KMP_DEBUG_ASSERT((vnew.p.ub - 1) * (UT)chunk <= trip);
1344 if (vnew.p.count >= (UT)vnew.p.ub ||
1345 (remaining = vnew.p.ub - vnew.p.count) < 2) {
1350 vnew.p.ub -= (remaining >> 2); // try to steal 1/4 of remaining
1352 vnew.p.ub -= 1; // steal 1 chunk of 2 or 3 remaining
1354 KMP_DEBUG_ASSERT((vnew.p.ub - 1) * (UT)chunk <= trip);
1362 vold.p.ub - vnew.p.ub);
1365 // now update own count and ub
1366 init = vnew.p.ub;
1423 *p_ub = pr->u.p.ub;
1428 pr->u.p.lb = pr->u.p.ub + pr->u.p.st;
1962 pr->u.p.last_upper = pr->u.p.ub;
1997 *p_ub = pr->u.p.ub;
2116 pr->u.p.last_upper = pr->u.p.ub;
2262 // T lb, T ub, ST st, ST chunk )
2272 @param ub Upper bound
2283 kmp_int32 ub, kmp_int32 st, kmp_int32 chunk) {
2288 __kmp_dispatch_init<kmp_int32>(loc, gtid, schedule, lb, ub, st, chunk, true);
2295 kmp_uint32 ub, kmp_int32 st, kmp_int32 chunk) {
2300 __kmp_dispatch_init<kmp_uint32>(loc, gtid, schedule, lb, ub, st, chunk, true);
2308 kmp_int64 ub, kmp_int64 st, kmp_int64 chunk) {
2313 __kmp_dispatch_init<kmp_int64>(loc, gtid, schedule, lb, ub, st, chunk, true);
2321 kmp_uint64 ub, kmp_int64 st, kmp_int64 chunk) {
2326 __kmp_dispatch_init<kmp_uint64>(loc, gtid, schedule, lb, ub, st, chunk, true);
2340 kmp_int32 lb, kmp_int32 ub, kmp_int32 st,
2346 __kmp_dist_get_bounds<kmp_int32>(loc, gtid, p_last, &lb, &ub, st);
2347 __kmp_dispatch_init<kmp_int32>(loc, gtid, schedule, lb, ub, st, chunk, true);
2352 kmp_uint32 lb, kmp_uint32 ub, kmp_int32 st,
2358 __kmp_dist_get_bounds<kmp_uint32>(loc, gtid, p_last, &lb, &ub, st);
2359 __kmp_dispatch_init<kmp_uint32>(loc, gtid, schedule, lb, ub, st, chunk, true);
2364 kmp_int64 lb, kmp_int64 ub, kmp_int64 st,
2370 __kmp_dist_get_bounds<kmp_int64>(loc, gtid, p_last, &lb, &ub, st);
2371 __kmp_dispatch_init<kmp_int64>(loc, gtid, schedule, lb, ub, st, chunk, true);
2376 kmp_uint64 lb, kmp_uint64 ub, kmp_int64 st,
2382 __kmp_dist_get_bounds<kmp_uint64>(loc, gtid, p_last, &lb, &ub, st);
2383 __kmp_dispatch_init<kmp_uint64>(loc, gtid, schedule, lb, ub, st, chunk, true);
2397 If there is no more work, then the lb,ub and stride need not be modified.
2572 kmp_int32 ub, kmp_int32 st, kmp_int32 chunk,
2574 __kmp_dispatch_init<kmp_int32>(loc, gtid, schedule, lb, ub, st, chunk,
2580 kmp_uint32 ub, kmp_int32 st, kmp_int32 chunk,
2582 __kmp_dispatch_init<kmp_uint32>(loc, gtid, schedule, lb, ub, st, chunk,
2588 kmp_int64 ub, kmp_int64 st, kmp_int64 chunk,
2590 __kmp_dispatch_init<kmp_int64>(loc, gtid, schedule, lb, ub, st, chunk,
2596 kmp_uint64 ub, kmp_int64 st, kmp_int64 chunk,
2598 __kmp_dispatch_init<kmp_uint64>(loc, gtid, schedule, lb, ub, st, chunk,
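
The matches at source lines 292-310 and 441-451 above compute the loop's trip count and the per-thread upper bound from lb, ub, and st. The following is a minimal standalone sketch of that arithmetic, not the runtime's own code; trip_count and the example values are hypothetical names chosen for illustration. The key points are that the subtraction is carried out in an unsigned type so the difference cannot overflow, and that bounds running against the direction of the stride yield zero iterations.

#include <stdio.h>

/* Sketch of the trip-count cases hinted at by lines 292-310 of the listing. */
static unsigned long long trip_count(long long lb, long long ub, long long st) {
    if (st == 1) {                /* common fast path: unit stride */
        return ub >= lb ? (unsigned long long)(ub - lb) + 1 : 0;
    } else if (st < 0) {          /* downward loop: count (lb - ub) / (-st) + 1 */
        return lb >= ub
                   ? (unsigned long long)(lb - ub) / (unsigned long long)(-st) + 1
                   : 0;
    } else {                      /* upward loop, st > 1: count (ub - lb) / st + 1 */
        return ub >= lb
                   ? (unsigned long long)(ub - lb) / (unsigned long long)st + 1
                   : 0;
    }
}

int main(void) {
    printf("%llu\n", trip_count(0, 9, 1));   /* 10 iterations: 0..9            */
    printf("%llu\n", trip_count(9, 0, -2));  /* 5 iterations: 9,7,5,3,1        */
    printf("%llu\n", trip_count(5, 4, 1));   /* empty range -> 0 iterations    */
    return 0;
}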
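
The matches around source lines 1278-1366 belong to the work-stealing path: for a 4-byte induction variable, the chunk counter and the upper bound are packed into one 8-byte word so that a single compare-and-swap updates both fields at once (which is why the listing notes that all operations on count or ub must be combined atomically), and a thief claims roughly a quarter of the victim's remaining chunks by lowering the victim's ub. The sketch below, assuming C11 atomics and hypothetical names (chunk_pair, try_steal), only illustrates that scheme; it does not reproduce the runtime's actual types or field layout.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Pack the chunk counter and the upper bound into one 64-bit word so a
 * single CAS updates the pair atomically. "count" is the next chunk the
 * owner will take; "ub" is one past the last chunk the owner may take. */
typedef union {
    uint64_t b;                              /* whole pair, for the 64-bit CAS */
    struct { uint32_t count, ub; } p;
} chunk_pair;

/* Try to steal about 1/4 of the victim's remaining chunks by lowering its ub.
 * Returns the number of chunks stolen (0 on failure); on success the stolen
 * half-open chunk range [*stolen_lo, *stolen_hi) is reported to the caller. */
static uint32_t try_steal(_Atomic uint64_t *victim,
                          uint32_t *stolen_lo, uint32_t *stolen_hi) {
    chunk_pair vold, vnew;
    vold.b = atomic_load(victim);
    for (;;) {
        uint32_t remaining;
        vnew = vold;
        if (vnew.p.count >= vnew.p.ub ||
            (remaining = vnew.p.ub - vnew.p.count) < 2)
            return 0;                        /* nothing worth stealing         */
        if (remaining > 3)
            vnew.p.ub -= remaining >> 2;     /* take 1/4 of the remaining work */
        else
            vnew.p.ub -= 1;                  /* take 1 chunk of 2 or 3 left    */
        if (atomic_compare_exchange_weak(victim, &vold.b, vnew.b)) {
            *stolen_lo = vnew.p.ub;          /* stolen range starts at the     */
            *stolen_hi = vold.p.ub;          /* victim's new ub, ends at old   */
            return vold.p.ub - vnew.p.ub;
        }
        /* CAS failed: vold.b now holds the current value; retry with it. */
    }
}

int main(void) {
    _Atomic uint64_t victim;
    chunk_pair init = { .p = { .count = 0, .ub = 16 } };
    atomic_store(&victim, init.b);
    uint32_t lo, hi;
    uint32_t n = try_steal(&victim, &lo, &hi);
    printf("stole %u chunks: [%u, %u)\n", n, lo, hi);  /* stole 4 chunks: [12, 16) */
    return 0;
}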