Lines Matching defs:epoch

44  * acquired some snapshot (e) of the global epoch value (e_g) and set an active
66 * causes epoch counter tick) actually deletes the same items that reader
68 * This is possible if the writer thread re-observes the epoch after the
89 * Now, if the epoch counter is ticked to e_g+1, then no new hazardous
91 * this is that at e_g+1, all epoch read-side critical sections started at
92 * e_g-1 must have been completed. If any epoch read-side critical sections at
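The comment block above (lines 44-92) describes the reader-side contract: a reader publishes a snapshot (e) of the global epoch value (e_g) together with an active indicator, and a writer may only reclaim an object once the counter has ticked far enough that no active snapshot can still reference it. The following is a minimal sketch of that contract using C11 atomics; the epoch_global/epoch_record types and function names here are hypothetical illustrations, not the ck_epoch implementation.

#include <stdatomic.h>

struct epoch_global { _Atomic unsigned int epoch; };

struct epoch_record {
	_Atomic unsigned int epoch;	/* snapshot (e) of the global epoch (e_g) */
	_Atomic unsigned int active;	/* non-zero while inside a read-side section */
};

static void
epoch_read_begin(struct epoch_global *g, struct epoch_record *r)
{

	/* Publish the active indicator and the observed snapshot before
	 * touching any shared objects, so a writer that later ticks the
	 * epoch can tell which generation this section belongs to. */
	atomic_store_explicit(&r->active, 1, memory_order_relaxed);
	atomic_store_explicit(&r->epoch,
	    atomic_load_explicit(&g->epoch, memory_order_acquire),
	    memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
}

static void
epoch_read_end(struct epoch_record *r)
{

	/* Release ordering: all reads in the section complete before the
	 * section is reported finished to reclaiming writers. */
	atomic_store_explicit(&r->active, 0, memory_order_release);
}

Under this scheme a reclaiming writer relies on the property stated in the comment above: once the counter has moved past the generation in which an object was logically deleted, no remaining active section can have observed that object.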
121 * Blocking semantics for epoch reclamation impose additional restrictions.
166 * epoch. If so, then make sure to update our shared snapshot
174 ((int)(current->epoch - other->epoch) < 0)) {
176 * The other epoch value is actually the newest,
179 ck_pr_store_uint(&record->epoch, other->epoch);
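Lines 166-179 handle a record whose cached snapshot is older than one observed on another record. The test on line 174 is the usual wraparound-tolerant idiom: the signed difference of two unsigned counters is negative when the first lags the second (by less than 2^31), even across a 32-bit overflow. A stand-alone restatement of that idiom; epoch_older is a hypothetical name, not part of ck_epoch.

#include <stdbool.h>

/* True when epoch counter a is older than b, tolerating wraparound of
 * the unsigned counter, as in the test on line 174. */
static bool
epoch_older(unsigned int a, unsigned int b)
{

	return (int)(a - b) < 0;
}

When the test holds, the record adopts the newer value, which is what the store on line 179 does.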
191 unsigned int epoch, i;
193 epoch = ck_pr_load_uint(&global->epoch);
194 i = epoch & CK_EPOCH_SENSE_MASK;
205 * from the previous epoch generation.
219 * bucket then cache the associated epoch value.
221 ref->epoch = epoch;
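Lines 191-221 are from the section-entry path: the observed epoch selects one of a small number of per-record buckets via CK_EPOCH_SENSE_MASK, and the bucket caches the epoch it was entered under so that the matching bucket can be released even after the global counter has moved on. A sketch of that bookkeeping, assuming two sense buckets; the constant names and the epoch_ref layout below are illustrative, not the ck_epoch definitions.

#define EPOCH_SENSE		2
#define EPOCH_SENSE_MASK	(EPOCH_SENSE - 1)

struct epoch_ref {
	unsigned int epoch;	/* generation this bucket was entered under */
	unsigned int count;	/* nesting count of sections using the bucket */
};

struct epoch_local {
	struct epoch_ref bucket[EPOCH_SENSE];
};

static struct epoch_ref *
epoch_ref_acquire(struct epoch_local *local, unsigned int observed_epoch)
{
	struct epoch_ref *ref = &local->bucket[observed_epoch & EPOCH_SENSE_MASK];

	/* Acquire the bucket, then cache the associated epoch value,
	 * as on line 221. */
	ref->count++;
	ref->epoch = observed_epoch;
	return ref;
}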
233 global->epoch = 1;
282 record->epoch = 0;
304 record->epoch = 0;
323 unsigned int epoch,
350 if (active != 0 && ck_pr_load_uint(&cr->epoch) != epoch)
362 unsigned int epoch = e & (CK_EPOCH_LENGTH - 1);
367 head = ck_stack_batch_pop_upmc(&record->pending[epoch]);
402 unsigned int epoch;
404 for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
405 ck_epoch_dispatch(record, epoch, NULL);
429 unsigned int delta, epoch, goal, i;
435 * The observation of the global epoch must be ordered with respect to
437 * monotonicity of the global epoch counter.
443 delta = epoch = ck_pr_load_uint(&global->epoch);
444 goal = epoch + CK_EPOCH_GRACE;
451 * epoch with respect to the updates on invocation.
463 e_d = ck_pr_load_uint(&global->epoch);
470 * If the epoch has been updated, we may have already
474 if ((goal > epoch) & (delta >= goal))
480 * If the epoch has been updated, then a grace period
482 * same epoch.
495 * Increment current epoch. CAS semantics are used to eliminate
497 * same global epoch value snapshot.
500 * epoch tick at a given time, then it is sufficient to use an
502 * it is possible to overflow the epoch value if we apply
505 r = ck_pr_cas_uint_value(&global->epoch, delta, delta + 1,
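Lines 429-505 are from the blocking grace-period path: snapshot the global epoch, set a goal CK_EPOCH_GRACE generations ahead, wait for every active record to have observed the current value, then tick the counter with a CAS so that concurrent writers holding the same snapshot perform at most one increment between them (which is what keeps the counter from being over-advanced, per lines 495-502). A simplified, self-contained sketch of that loop follows; the record array, the spin-waiting, and the constant value are assumptions, and the real function additionally re-reads the global epoch and can yield or block between scans.

#include <stdatomic.h>

#define EPOCH_GRACE 3U		/* assumed; stands in for CK_EPOCH_GRACE */

struct epoch_record_s {
	_Atomic unsigned int epoch;	/* snapshot held by the reader */
	_Atomic unsigned int active;	/* non-zero inside a read-side section */
};

static void
epoch_synchronize_sketch(_Atomic unsigned int *global,
    struct epoch_record_s *records, unsigned int n)
{
	unsigned int delta, epoch, goal, i;

	/* Observation of the global epoch is ordered before the scans. */
	delta = epoch = atomic_load_explicit(global, memory_order_seq_cst);
	goal = epoch + EPOCH_GRACE;

	for (;;) {
		/* Wait for every active record to have observed the
		 * current epoch; a record still carrying an older
		 * snapshot may hold hazardous references. */
		for (i = 0; i < n; i++) {
			while (atomic_load_explicit(&records[i].active,
			    memory_order_acquire) != 0 &&
			    atomic_load_explicit(&records[i].epoch,
			    memory_order_acquire) != delta)
				;	/* spin; the real path may block or yield */
		}

		/* Tick the epoch; on failure another writer advanced it
		 * and delta is refreshed to the value that was observed. */
		if (atomic_compare_exchange_strong(global, &delta, delta + 1))
			delta++;

		/* Stop once the grace goal is reached (wraparound-tolerant
		 * comparison, as elsewhere in the file). */
		if ((int)(delta - goal) >= 0)
			break;
	}
}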
556 * It may be worthwhile to apply these deferral semantics to an epoch
560 * ck_epoch_call will dispatch to the latest epoch snapshot that was observed.
562 * becomes a problem, we could actually use a heap for epoch buckets but that
569 unsigned int epoch;
574 epoch = ck_pr_load_uint(&global->epoch);
576 /* Serialize epoch snapshots with respect to global epoch. */
580 * At this point, epoch is the current global epoch value.
581 * There may or may not be active threads which observed epoch - 1.
583 * no active threads which observed epoch - 2.
585 * Note that checking epoch - 2 is necessary, as race conditions can
586 * allow another thread to increment the global epoch before this
589 n_dispatch = ck_epoch_dispatch(record, epoch - 2, deferred);
591 cr = ck_epoch_scan(global, cr, epoch, &active);
597 record->epoch = epoch;
598 for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
599 ck_epoch_dispatch(record, epoch, deferred);
605 * If an active thread exists, rely on epoch observation.
607 * All the active threads entered the epoch section during
608 * the current epoch. Therefore, we can now run the handlers
609 * for the immediately preceding epoch and attempt to
610 * advance the epoch if it hasn't been already.
612 (void)ck_pr_cas_uint(&global->epoch, epoch, epoch + 1);
614 ck_epoch_dispatch(record, epoch - 1, deferred);
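Lines 569-614 are from the non-blocking poll path. The structure is: run the callbacks queued two generations ago unconditionally (nothing active can still reference them, per the comment at lines 581-586); then either drain every bucket if no reader is active at all, or, if all active readers entered under the current epoch, run the previous generation's callbacks and nudge the counter forward with a best-effort CAS. A condensed sketch under stated assumptions: EPOCH_LENGTH buckets per record, a stubbed dispatch_bucket() helper, and precomputed any_active/any_stale flags standing in for the scan on line 591; none of these names are the ck_epoch API.

#include <stdatomic.h>
#include <stdbool.h>

#define EPOCH_LENGTH 4U		/* assumed bucket count, in the spirit of CK_EPOCH_LENGTH */

struct epoch_rec {
	unsigned int epoch;		/* last observed global snapshot */
	void *pending[EPOCH_LENGTH];	/* per-generation callback queues */
};

/* Hypothetical helper: run and unlink whatever is queued in one bucket. */
static void
dispatch_bucket(struct epoch_rec *record, unsigned int bucket)
{

	record->pending[bucket & (EPOCH_LENGTH - 1)] = NULL;
}

static bool
epoch_poll_sketch(_Atomic unsigned int *global, struct epoch_rec *record,
    bool any_active, bool any_stale)
{
	unsigned int epoch, snap, e;

	epoch = atomic_load_explicit(global, memory_order_seq_cst);

	/* Nothing queued two generations ago can still be referenced. */
	dispatch_bucket(record, epoch - 2);

	/* Some active reader has not yet observed the current epoch:
	 * only the epoch - 2 bucket was safe this round (the real code
	 * reports whether anything was actually dispatched). */
	if (any_stale)
		return false;

	if (any_active == false) {
		/* Grace period: no readers at all, drain every bucket. */
		record->epoch = epoch;
		for (e = 0; e < EPOCH_LENGTH; e++)
			dispatch_bucket(record, e);
		return true;
	}

	/* Every active reader entered during the current epoch, so the
	 * previous generation may be run and the counter nudged forward;
	 * a failed CAS just means another thread already advanced it. */
	snap = epoch;
	(void)atomic_compare_exchange_strong(global, &snap, epoch + 1);
	dispatch_bucket(record, epoch - 1);
	return true;
}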