Lines Matching refs:record (Concurrency Kit, ck_epoch.c; the leading number on each match is its line in the source file)

151 _ck_epoch_delref(struct ck_epoch_record *record,
157 current = &record->local.bucket[i];
169 * If no other active bucket exists, then the record will go
172 other = &record->local.bucket[(i + 1) & CK_EPOCH_SENSE_MASK];
179 ck_pr_store_uint(&record->epoch, other->epoch);
186 _ck_epoch_addref(struct ck_epoch_record *record,
189 struct ck_epoch *global = record->global;
195 ref = &record->local.bucket[i];
211 previous = &record->local.bucket[(i + 1) &
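
These two helpers implement the two-bucket ("sense") reference counts behind epoch sections; user code reaches them through ck_epoch_begin() and ck_epoch_end(). A minimal read-side sketch, assuming a record already registered with ck_epoch_register() (the reader() name and the elided traversal are illustrative):

    #include <ck_epoch.h>

    void
    reader(ck_epoch_record_t *rec)
    {
        ck_epoch_section_t section;

        ck_epoch_begin(rec, &section); /* _ck_epoch_addref() on local.bucket[i] */
        /* ... safely read epoch-protected structures here ... */
        ck_epoch_end(rec, &section);   /* _ck_epoch_delref() drops the count */
    }
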
242 struct ck_epoch_record *record;
250 record = ck_epoch_record_container(cursor);
252 if (ck_pr_load_uint(&record->state) == CK_EPOCH_STATE_FREE) {
255 state = ck_pr_fas_uint(&record->state,
259 ck_pr_store_ptr(&record->ct, ct);
265 return record;
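
ck_epoch_recycle() scans global->records for a CK_EPOCH_STATE_FREE record and claims it with an atomic fetch-and-store, so records given up by exited threads are reused rather than leaked. A plausible caller-side pattern, sketched under the assumption that records are heap-allocated (acquire_record() is illustrative):

    #include <stdlib.h>
    #include <ck_epoch.h>

    ck_epoch_record_t *
    acquire_record(ck_epoch_t *global)
    {
        ck_epoch_record_t *rec;

        rec = ck_epoch_recycle(global, NULL); /* claim a FREE record if any */
        if (rec != NULL)
            return rec;

        rec = malloc(sizeof *rec);            /* none free: make a new one */
        if (rec == NULL)
            return NULL;

        ck_epoch_register(global, rec, NULL);
        return rec;
    }
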
274 ck_epoch_register(struct ck_epoch *global, struct ck_epoch_record *record,
279 record->global = global;
280 record->state = CK_EPOCH_STATE_USED;
281 record->active = 0;
282 record->epoch = 0;
283 record->n_dispatch = 0;
284 record->n_peak = 0;
285 record->n_pending = 0;
286 record->ct = ct;
287 memset(&record->local, 0, sizeof record->local);
290 ck_stack_init(&record->pending[i]);
293 ck_stack_push_upmc(&global->records, &record->record_next);
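
Registration (lines 274-293) zeroes the per-record counters and local buckets, stores the optional context pointer ct, and pushes the record onto global->records. A minimal setup sketch; the static storage and the setup() name are illustrative:

    #include <ck_epoch.h>

    static ck_epoch_t epoch;
    static ck_epoch_record_t record;

    void
    setup(void)
    {
        ck_epoch_init(&epoch);
        ck_epoch_register(&epoch, &record, NULL); /* NULL: no record context */
    }
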
298 ck_epoch_unregister(struct ck_epoch_record *record)
300 struct ck_epoch *global = record->global;
303 record->active = 0;
304 record->epoch = 0;
305 record->n_dispatch = 0;
306 record->n_peak = 0;
307 record->n_pending = 0;
308 memset(&record->local, 0, sizeof record->local);
311 ck_stack_init(&record->pending[i]);
313 ck_pr_store_ptr(&record->ct, NULL);
315 ck_pr_store_uint(&record->state, CK_EPOCH_STATE_FREE);
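
Unregistration mirrors registration: statistics and deferral stacks are reset, the context pointer is cleared, and the state is published as CK_EPOCH_STATE_FREE so a later ck_epoch_recycle() can claim the record. The record stays linked into global->records and must not be freed. A thread-exit sketch; flushing with ck_epoch_barrier() first is a precaution so resetting the pending stacks does not abandon live deferrals (thread_exit() is illustrative):

    void
    thread_exit(ck_epoch_record_t *rec)
    {
        ck_epoch_barrier(rec);    /* run out this record's deferrals */
        ck_epoch_unregister(rec); /* recyclable now, but not free()-able */
    }
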
360 ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e, ck_stack_t *deferred)
367 head = ck_stack_batch_pop_upmc(&record->pending[epoch]);
381 n_peak = ck_pr_load_uint(&record->n_peak);
382 n_pending = ck_pr_load_uint(&record->n_pending);
386 ck_pr_store_uint(&record->n_peak, n_peak);
389 ck_pr_add_uint(&record->n_dispatch, i);
390 ck_pr_sub_uint(&record->n_pending, i);
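
ck_epoch_dispatch() batch-pops record->pending[epoch] and executes each deferred callback, maintaining the n_peak, n_pending, and n_dispatch counters matched above. Entries land on those pending stacks through ck_epoch_call(); a typical deferral, with an illustrative node type and destructor:

    #include <stdlib.h>
    #include <ck_epoch.h>

    struct node {
        int value;
        ck_epoch_entry_t epoch_entry;
    };

    /* Generates node_container(), mapping an entry back to its node. */
    CK_EPOCH_CONTAINER(struct node, epoch_entry, node_container)

    static void
    node_destroy(ck_epoch_entry_t *entry)
    {
        free(node_container(entry));
    }

    void
    retire(ck_epoch_record_t *rec, struct node *n) /* illustrative */
    {
        /* node_destroy() runs once a grace period has elapsed. */
        ck_epoch_call(rec, &n->epoch_entry, node_destroy);
    }
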
397 * Reclaim all objects associated with a record.
400 ck_epoch_reclaim(struct ck_epoch_record *record)
405 ck_epoch_dispatch(record, epoch, NULL);
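
ck_epoch_reclaim() dispatches every epoch bucket of a record, so it is only safe once a grace period guarantees the callbacks may run; pairing it with ck_epoch_synchronize() gives blocking reclamation, which is exactly what ck_epoch_barrier() (lines 537-541 below) packages up:

    void
    flush(ck_epoch_record_t *rec) /* illustrative name */
    {
        ck_epoch_synchronize(rec); /* wait out a full grace period */
        ck_epoch_reclaim(rec);     /* then run all pending callbacks */
    }
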
529 ck_epoch_synchronize(struct ck_epoch_record *record)
532 ck_epoch_synchronize_wait(record->global, NULL, NULL);
537 ck_epoch_barrier(struct ck_epoch_record *record)
540 ck_epoch_synchronize(record);
541 ck_epoch_reclaim(record);
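
ck_epoch_synchronize() on its own also supports the classic unlink-then-free pattern with no deferral entry at all; a sketch reusing the illustrative node type from above:

    void
    delete_node(ck_epoch_record_t *rec, struct node *n)
    {
        /* ... unlink n from the shared structure first ... */
        ck_epoch_synchronize(rec); /* no reader can still hold n */
        free(n);                   /* immediate, non-deferred reclamation */
    }
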
546 ck_epoch_barrier_wait(struct ck_epoch_record *record, ck_epoch_wait_cb_t *cb,
550 ck_epoch_synchronize_wait(record->global, cb, ct);
551 ck_epoch_reclaim(record);
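
The _wait variants take a callback that fires while the grace period is blocked on a straggling record, useful for progress reporting or backoff. A sketch, assuming the (global, record, context) shape of ck_epoch_wait_cb_t from ck_epoch.h:

    #include <stdio.h>
    #include <ck_epoch.h>

    static void
    report_stall(ck_epoch_t *g, ck_epoch_record_t *blocking, void *ct)
    {
        (void)g;
        (void)ct;
        fprintf(stderr, "grace period blocked on record %p\n",
            (void *)blocking);
    }

    void
    barrier_with_progress(ck_epoch_record_t *rec) /* illustrative */
    {
        ck_epoch_barrier_wait(rec, report_stall, NULL);
    }
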
566 ck_epoch_poll_deferred(struct ck_epoch_record *record, ck_stack_t *deferred)
571 struct ck_epoch *global = record->global;
589 n_dispatch = ck_epoch_dispatch(record, epoch - 2, deferred);
597 record->epoch = epoch;
599 ck_epoch_dispatch(record, epoch, deferred);
614 ck_epoch_dispatch(record, epoch - 1, deferred);
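
ck_epoch_poll_deferred() opportunistically advances the epoch; rather than running expired callbacks inline, it pushes their entries onto the caller-supplied deferred stack (the epoch - 2 dispatch, the loop over all epochs, and the epoch - 1 dispatch above correspond to the three outcomes of the record scan). The caller drains the stack at its leisure; a sketch, assuming the public CK_STACK_CONTAINER and ck_stack_pop_upmc() from ck_stack.h:

    #include <ck_epoch.h>
    #include <ck_stack.h>

    /* Maps a popped stack entry back to its ck_epoch_entry. */
    CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry, entry_container)

    void
    poll_and_run(ck_epoch_record_t *rec) /* illustrative */
    {
        ck_stack_t deferred = CK_STACK_INITIALIZER;
        ck_stack_entry_t *cursor;

        if (ck_epoch_poll_deferred(rec, &deferred) == false)
            return; /* nothing became reclaimable this time */

        while ((cursor = ck_stack_pop_upmc(&deferred)) != NULL) {
            ck_epoch_entry_t *entry = entry_container(cursor);
            entry->function(entry); /* run the deferred destructor */
        }
    }
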
619 ck_epoch_poll(struct ck_epoch_record *record)
622 return ck_epoch_poll_deferred(record, NULL);
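
ck_epoch_poll() is the non-blocking variant: it forwards to ck_epoch_poll_deferred() with a NULL stack, so any expired callbacks run inline and the call never waits. That makes it a good fit for a periodic maintenance tick (illustrative):

    void
    maintenance_tick(ck_epoch_record_t *rec)
    {
        (void)ck_epoch_poll(rec); /* best effort, never blocks */
    }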