Lines Matching refs:call (only in /macosx-10.9.5/xnu-2422.115.4/osfmk/kern/)

100 static __inline__ void		_internal_call_release(thread_call_t call);
101 static __inline__ boolean_t _pending_call_enqueue(thread_call_t call, thread_call_group_t group);
102 static __inline__ boolean_t _delayed_call_enqueue(thread_call_t call, thread_call_group_t group, uint64_t deadline);
103 static __inline__ boolean_t _call_dequeue(thread_call_t call, thread_call_group_t group);
105 static __inline__ void _set_delayed_call_timer(thread_call_t call, thread_call_group_t group);
115 static void thread_call_wait_locked(thread_call_t call);
116 static boolean_t thread_call_enter_delayed_internal(thread_call_t call,
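
The declarations above, and the matches that follow, reference a consistent set of fields on thread_call_t: tc_call (an embedded call entry with func, param0, param1, deadline, and queue), tc_submit_count, tc_finish_count, ttd, tc_soft_deadline, tc_pri, tc_refs, and tc_flags. A minimal sketch of the structure those references imply; field order, widths, and flag values here are assumptions, and the authoritative definitions live in osfmk/kern/thread_call.h and osfmk/kern/call_entry.h:

    #include <stdint.h>

    /* Sketch only: reconstructed from the fields referenced in this
     * listing, not the real xnu definition. */
    typedef struct call_entry {
        struct queue_entry *queue;          /* queue the entry is on, or NULL */
        void              (*func)(void *param0, void *param1);
        void               *param0;
        void               *param1;
        uint64_t            deadline;       /* hard deadline (soft + leeway)  */
    } call_entry_data_t;

    typedef struct thread_call {
        call_entry_data_t   tc_call;            /* embedded call entry        */
        uint64_t            tc_submit_count;    /* total submissions          */
        uint64_t            tc_finish_count;    /* total completed callouts   */
        uint64_t            ttd;                /* time-to-deadline at submit */
        uint64_t            tc_soft_deadline;   /* low bit: rate-limited flag */
        uint32_t            tc_flags;
        int32_t             tc_refs;            /* refcount for ALLOC calls   */
        int                 tc_pri;             /* thread_call_priority_t     */
    } *thread_call_t;

    /* Flag values are assumptions for the sketch. */
    #define THREAD_CALL_ALLOC   0x01    /* zone-allocated, freed via zfree */
    #define THREAD_CALL_WAIT    0x02    /* a waiter sleeps on this call    */
    #define THREAD_CALL_DELAYED 0x04    /* submitted with a deadline       */
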
231 thread_call_t call)
233 thread_call_priority_t pri = call->tc_pri;
270 * thread call groups.
306 thread_call_t call;
339 call = internal_call_storage;
340 call < &internal_call_storage[INTERNAL_CALL_COUNT];
341 call++) {
343 enqueue_tail(&thread_call_internal_queue, qe(call));
360 thread_call_t call,
364 bzero(call, sizeof(*call));
365 call_entry_setup((call_entry_t)call, func, param0);
366 call->tc_pri = THREAD_CALL_PRIORITY_HIGH; /* Default priority */
379 thread_call_t call;
384 call = TC(dequeue_head(&thread_call_internal_queue));
387 thread_call_setup(call, func, param0);
388 call->tc_refs = 0;
389 call->tc_flags = 0; /* THREAD_CALL_ALLOC not set, do not free back to zone */
391 return (call);
399 * safe to call on a non-internal entry, in which
406 thread_call_t call)
408 if ( call >= internal_call_storage &&
409 call < &internal_call_storage[INTERNAL_CALL_COUNT] ) {
410 assert((call->tc_flags & THREAD_CALL_ALLOC) == 0);
411 enqueue_head(&thread_call_internal_queue, qe(call));
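
Lines 339-343, 379-391, and 406-411 together show the internal call pool: setup threads a static array onto a free queue, _internal_call_allocate pops an entry, and _internal_call_release returns an entry only when its address falls inside the static array, which is why it is safe to call on non-internal entries. A user-space sketch of the pattern; INTERNAL_CALL_COUNT's value is an assumption, and the LIFO free list stands in for xnu's enqueue_tail/dequeue_head queue:

    #include <assert.h>
    #include <stddef.h>

    #define INTERNAL_CALL_COUNT 768   /* assumed pool size */

    typedef struct node { struct node *next; } node_t;

    static node_t  internal_call_storage[INTERNAL_CALL_COUNT];
    static node_t *internal_free_head;

    static void pool_init(void) {
        for (node_t *n = internal_call_storage;
             n < &internal_call_storage[INTERNAL_CALL_COUNT]; n++) {
            n->next = internal_free_head;   /* thread onto the free list */
            internal_free_head = n;
        }
    }

    static node_t *pool_allocate(void) {
        node_t *n = internal_free_head;
        assert(n != NULL);                  /* xnu panics when exhausted */
        internal_free_head = n->next;
        return n;
    }

    static void pool_release(node_t *n) {
        /* Safe on non-internal entries: only pool members are returned. */
        if (n >= internal_call_storage &&
            n < &internal_call_storage[INTERNAL_CALL_COUNT]) {
            n->next = internal_free_head;
            internal_free_head = n;
        }
    }
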
429 thread_call_t call,
434 old_queue = call_entry_enqueue_tail(CE(call), &group->pending_queue);
437 call->tc_submit_count++;
461 thread_call_t call,
467 old_queue = call_entry_enqueue_deadline(CE(call), &group->delayed_queue, deadline);
472 call->tc_submit_count++;
488 thread_call_t call,
493 old_queue = call_entry_dequeue(CE(call));
496 call->tc_finish_count++;
514 thread_call_t call,
519 assert((call->tc_soft_deadline != 0) && ((call->tc_soft_deadline <= call->tc_call.deadline)));
521 leeway = call->tc_call.deadline - call->tc_soft_deadline;
523 call->tc_soft_deadline, leeway,
525 ((call->tc_soft_deadline & 0x1) == 0x1));
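
Lines 519-525 expose two conventions: leeway is stored implicitly as the gap between the hard deadline (tc_call.deadline) and tc_soft_deadline, and the low bit of tc_soft_deadline doubles as a per-call rate-limited flag. A sketch of that encoding:

    #include <stdint.h>
    #include <stdbool.h>

    /* Deadlines are coarse enough that bit 0 can carry a flag. */
    static uint64_t encode_soft_deadline(uint64_t soft, bool ratelimited) {
        return ratelimited ? (soft | 0x1ULL) : (soft & ~0x1ULL);
    }

    static bool is_ratelimited(uint64_t soft_deadline) {
        return (soft_deadline & 0x1) == 0x1;    /* matches line 525 */
    }

    static uint64_t leeway_of(uint64_t hard, uint64_t soft) {
        return hard - soft;                     /* matches line 521 */
    }
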
546 thread_call_t call;
549 call = TC(queue_first(&group->pending_queue));
551 while (!queue_end(&group->pending_queue, qe(call))) {
552 if (call->tc_call.func == func &&
553 call->tc_call.param0 == param0) {
554 thread_call_t next = TC(queue_next(qe(call)));
556 _call_dequeue(call, group);
558 _internal_call_release(call);
564 call = next;
567 call = TC(queue_next(qe(call)));
591 thread_call_t call;
594 call = TC(queue_first(&group->delayed_queue));
596 while (!queue_end(&group->delayed_queue, qe(call))) {
597 if (call->tc_call.func == func &&
598 call->tc_call.param0 == param0) {
599 thread_call_t next = TC(queue_next(qe(call)));
601 _call_dequeue(call, group);
603 _internal_call_release(call);
609 call = next;
612 call = TC(queue_next(qe(call)));
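
Both cancel-by-function scans (pending queue at 546-567, delayed queue at 591-612) must capture the next entry before dequeueing a match so the walk survives the removal. A generic user-space sketch of the same scan over a singly linked list:

    #include <stdbool.h>
    #include <stddef.h>

    typedef void (*call_fn_t)(void *param0, void *param1);

    typedef struct entry {
        struct entry *next;
        call_fn_t     func;
        void         *param0;
    } entry_t;

    /* Remove every entry matching (func, param0); returns true if any
     * was removed.  The pointer-to-pointer walk survives unlinking. */
    static bool cancel_matching(entry_t **head, call_fn_t func, void *param0) {
        bool removed = false;
        for (entry_t **link = head; *link != NULL; ) {
            entry_t *e = *link;
            if (e->func == func && e->param0 == param0) {
                *link = e->next;    /* dequeue (xnu also releases it) */
                removed = true;
            } else {
                link = &e->next;    /* advance past a non-match       */
            }
        }
        return removed;
    }
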
689 * Allocate a thread call with a given priority. Importances
700 thread_call_t call;
706 call = thread_call_allocate(func, param0);
707 call->tc_pri = pri;
709 return call;
722 thread_call_t call = zalloc(thread_call_zone);
724 thread_call_setup(call, func, param0);
725 call->tc_refs = 1;
726 call->tc_flags = THREAD_CALL_ALLOC;
728 return (call);
740 thread_call_t call)
748 if (call->tc_call.queue != NULL) {
755 refs = --call->tc_refs;
764 zfree(thread_call_zone, call);
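
Lines 722-728 and 740-764 pair zone allocation with a reference count: tc_refs starts at 1 and THREAD_CALL_ALLOC marks the storage as freeable, while thread_call_free refuses while the call is still enqueued and frees only when the count reaches zero (the service thread holds a transient extra reference during a callout). A user-space model, with malloc/free standing in for zalloc/zfree:

    #include <stdbool.h>
    #include <stdlib.h>

    typedef struct tcall {
        void *queue;    /* non-NULL while enqueued                */
        int   refs;     /* 1 at allocation; callout adds one more */
    } tcall_t;

    static tcall_t *tcall_allocate(void) {
        tcall_t *c = calloc(1, sizeof(*c));
        if (c != NULL)
            c->refs = 1;            /* owner's reference */
        return c;
    }

    static bool tcall_free(tcall_t *c) {
        if (c->queue != NULL)
            return false;           /* still pending: refuse to free */
        if (--c->refs == 0)
            free(c);                /* last reference drops storage  */
        return true;
    }
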
775 * Returns TRUE if the call was
780 thread_call_t call)
786 group = thread_call_get_group(call);
791 if (call->tc_call.queue != &group->pending_queue) {
792 result = _pending_call_enqueue(call, group);
795 call->tc_call.param1 = 0;
805 thread_call_t call,
812 group = thread_call_get_group(call);
817 if (call->tc_call.queue != &group->pending_queue) {
818 result = _pending_call_enqueue(call, group);
821 call->tc_call.param1 = param1;
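
thread_call_enter and thread_call_enter1 (780-821) are idempotent submissions: the call is enqueued only if it is not already on the pending queue, param1 is stored either way, and the TRUE result reports that it was already queued. A simplified sketch (the real _pending_call_enqueue also migrates a call off the delayed queue and does the tc_submit_count bookkeeping):

    #include <stdbool.h>

    typedef struct {
        void         *queue;        /* queue the call is on, or NULL */
        void         *param1;
        unsigned long submit_count;
    } tc_t;

    static bool tc_enter1(tc_t *c, void *pending_queue, void *param1) {
        bool was_pending = (c->queue == pending_queue);
        if (!was_pending) {
            c->queue = pending_queue;   /* _pending_call_enqueue */
            c->submit_count++;
        }
        c->param1 = param1;
        return was_pending;             /* TRUE: already queued  */
    }
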
835 * Returns TRUE if the call was
840 thread_call_t call,
843 assert(call);
844 return thread_call_enter_delayed_internal(call, NULL, 0, 0, deadline, 0, 0);
849 thread_call_t call,
853 assert(call);
854 return thread_call_enter_delayed_internal(call, NULL, 0, param1, deadline, 0, 0);
859 thread_call_t call,
865 assert(call);
866 return thread_call_enter_delayed_internal(call, NULL, 0, param1, deadline, leeway, flags);
874 * Returns TRUE if the call was already on a queue
876 * call - structure encapsulating state of the callout
877 * alt_func/alt_param0 - if call is NULL, allocate temporary storage using these parameters
885 thread_call_t call,
905 if (call == NULL) {
907 call = _internal_call_allocate(alt_func, alt_param0);
910 group = thread_call_get_group(call);
913 call->tc_flags |= THREAD_CALL_DELAYED;
915 call->tc_soft_deadline = sdeadline = deadline;
934 call->tc_soft_deadline |= 0x1ULL;
936 call->tc_soft_deadline &= ~0x1ULL;
939 call->tc_call.param1 = param1;
940 call->ttd = (sdeadline > abstime) ? (sdeadline - abstime) : 0;
942 result = _delayed_call_enqueue(call, group, deadline);
944 if (queue_first(&group->delayed_queue) == qe(call))
945 _set_delayed_call_timer(call, group);
948 DTRACE_TMR5(thread_callout__create, thread_call_func_t, call->tc_call.func, uint64_t, (deadline - sdeadline), uint64_t, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF), call);
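
The delayed path (885-948) stamps both deadlines, records ttd (time-to-deadline at submission, later fed to the DTrace probes), inserts in deadline order, and reprograms the timer only when the new entry becomes the head of the delayed queue (lines 944-945). A sketch of that arm-only-if-head insert; set_hw_timer() is a hypothetical stand-in for _set_delayed_call_timer():

    #include <stdint.h>
    #include <stddef.h>

    typedef struct dcall {
        struct dcall *next;
        uint64_t      deadline;
    } dcall_t;

    static void set_hw_timer(dcall_t *head) {
        (void)head;                         /* program the hardware timer */
    }

    static void delayed_enqueue(dcall_t **head, dcall_t *c, uint64_t deadline) {
        c->deadline = deadline;
        dcall_t **link = head;
        while (*link != NULL && (*link)->deadline <= deadline)
            link = &(*link)->next;          /* keep ascending order   */
        c->next = *link;
        *link = c;
        if (*head == c)                     /* new earliest deadline: */
            set_hw_timer(c);                /* re-arm, else leave set */
    }
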
961 * Returns TRUE if the call was
966 thread_call_t call)
972 group = thread_call_get_group(call);
977 if ((call->tc_call.deadline != 0) &&
978 (queue_first(&group->delayed_queue) == qe(call))) {
979 assert (call->tc_call.queue == &group->delayed_queue);
983 result = _call_dequeue(call, group);
995 DTRACE_TMR4(thread_callout__cancel, thread_call_func_t, call->tc_call.func, 0, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));
1002 * Cancel a thread call. If it cannot be cancelled (i.e.
1004 * to finish. Note that if clients re-submit this thread call,
1007 * to the call to thread_call_cancel_wait will have finished.
1011 thread_call_t call)
1016 if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) {
1017 panic("%s: Can't wait on thread call whose storage I don't own.", __FUNCTION__);
1020 group = thread_call_get_group(call);
1025 result = _call_dequeue(call, group);
1027 thread_call_wait_locked(call);
1040 * Wake a call thread to service
1041 * pending call entries. May wake
1043 * create additional call threads.
1079 * thread call group.
1115 thread_call_finish(thread_call_t call)
1119 call->tc_finish_count++;
1120 call->tc_refs--;
1122 if ((call->tc_flags & THREAD_CALL_WAIT) != 0) {
1124 call->tc_flags &= ~THREAD_CALL_WAIT;
1127 * Dropping lock here because the sched call for the
1132 thread_wakeup((event_t)call);
1136 if (call->tc_refs == 0) {
1138 panic("Someone waiting on a thread call that is scheduled for free: %p\n", call->tc_call.func);
1143 zfree(thread_call_zone, call);
1180 thread_call_t call;
1184 call = TC(dequeue_head(&group->pending_queue));
1187 func = call->tc_call.func;
1188 param0 = call->tc_call.param0;
1189 param1 = call->tc_call.param1;
1191 call->tc_call.queue = NULL;
1193 _internal_call_release(call);
1199 if ((call->tc_flags & THREAD_CALL_ALLOC) != 0) {
1201 call->tc_refs++; /* Delay free until we're done */
1212 DTRACE_TMR6(thread_callout__start, thread_call_func_t, func, int, 0, int, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF), (call->tc_flags & THREAD_CALL_DELAYED), call);
1218 DTRACE_TMR6(thread_callout__end, thread_call_func_t, func, int, 0, int, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF), (call->tc_flags & THREAD_CALL_DELAYED), call);
1233 thread_call_finish(call);
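
The service thread (1180-1233) dequeues a pending entry, captures func/param0/param1 while still holding the group lock, releases internal entries (or takes a reference on THREAD_CALL_ALLOC entries) up front, and only then invokes the callout unlocked. A pthread-based sketch of the capture-then-invoke pattern:

    #include <pthread.h>
    #include <stddef.h>

    typedef void (*fn_t)(void *param0, void *param1);

    typedef struct pend {
        struct pend *next;
        fn_t   func;
        void  *param0, *param1;
    } pend_t;

    static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
    static pend_t *pending_head;

    static void service_one(void) {
        pthread_mutex_lock(&q_lock);
        pend_t *c = pending_head;
        if (c == NULL) {
            pthread_mutex_unlock(&q_lock);
            return;
        }
        pending_head = c->next;
        fn_t  func   = c->func;         /* capture before unlocking    */
        void *param0 = c->param0;
        void *param1 = c->param1;
        pthread_mutex_unlock(&q_lock);

        (*func)(param0, param1);        /* callout runs without a lock */
        /* xnu then calls thread_call_finish() to drop the transient
         * reference and wake any THREAD_CALL_WAIT waiters. */
    }
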
1387 thread_call_t call;
1395 call = TC(queue_first(&group->delayed_queue));
1397 while (!queue_end(&group->delayed_queue, qe(call))) {
1398 if (call->tc_soft_deadline <= timestamp) {
1405 if ((call->tc_soft_deadline & 0x1) &&
1406 (CE(call)->deadline > timestamp) &&
1410 _pending_call_enqueue(call, group);
1415 call = TC(queue_first(&group->delayed_queue));
1418 if (!queue_end(&group->delayed_queue, qe(call)))
1419 _set_delayed_call_timer(call, group);
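
The expiry loop (1395-1419) hands entries whose soft deadline has passed over to the pending queue, except that a rate-limited entry (low bit set) whose hard deadline is still in the future is left coalescing; afterwards the timer is re-armed for the new head. A sketch with stand-ins for _pending_call_enqueue and _set_delayed_call_timer:

    #include <stdint.h>
    #include <stddef.h>

    typedef struct dq {
        struct dq *next;
        uint64_t   soft_deadline;   /* low bit = rate-limited */
        uint64_t   deadline;        /* hard deadline          */
    } dq_t;

    static void make_pending(dq_t *c) { (void)c; }  /* stand-in */
    static void arm_timer(dq_t *c)    { (void)c; }  /* stand-in */

    static void delayed_timer_sweep(dq_t **head, uint64_t now) {
        while (*head != NULL && (*head)->soft_deadline <= now) {
            dq_t *c = *head;
            if ((c->soft_deadline & 0x1) && c->deadline > now)
                break;              /* rate-limited: keep coalescing  */
            *head = c->next;        /* due: hand to the pending queue */
            make_pending(c);
        }
        if (*head != NULL)
            arm_timer(*head);       /* re-arm for the new head        */
    }
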
1427 thread_call_t call;
1438 call = TC(queue_first(&group->delayed_queue));
1440 while (!queue_end(&group->delayed_queue, qe(call))) {
1441 if (call->tc_soft_deadline <= timestamp) {
1442 _pending_call_enqueue(call, group);
1443 call = TC(queue_first(&group->delayed_queue));
1446 uint64_t skew = call->tc_call.deadline - call->tc_soft_deadline;
1447 assert (call->tc_call.deadline >= call->tc_soft_deadline);
1453 _call_dequeue(call, group);
1454 _delayed_call_enqueue(call, group, call->tc_soft_deadline);
1456 call = TC(queue_next(qe(call)));
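
The wake-time rescan (1438-1456) runs whatever is already due and strips the coalescing skew (tc_call.deadline - tc_soft_deadline) from everything else, since timer hardware cannot be trusted across sleep/wake; xnu does this by dequeueing and re-enqueueing each entry at its soft deadline. A sketch that models the same effect in place:

    #include <stdint.h>
    #include <stddef.h>

    typedef struct rq {
        struct rq *next;
        uint64_t   soft_deadline;
        uint64_t   deadline;
    } rq_t;

    static void make_pending(rq_t *c) { (void)c; }  /* stand-in */

    static void rescan_after_wake(rq_t **head, uint64_t now) {
        for (rq_t **link = head; *link != NULL; ) {
            rq_t *c = *link;
            if (c->soft_deadline <= now) {
                *link = c->next;    /* due: run it now               */
                make_pending(c);
            } else {
                /* Collapse the hard deadline onto the soft one; xnu
                 * dequeues and re-enqueues to keep the queue sorted. */
                c->deadline = c->soft_deadline;
                link = &c->next;
            }
        }
    }
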
1526 * Wait for all requested invocations of a thread call prior to now
1532 thread_call_wait_locked(thread_call_t call)
1537 assert(call->tc_flags & THREAD_CALL_ALLOC);
1539 submit_count = call->tc_submit_count;
1541 while (call->tc_finish_count < submit_count) {
1542 call->tc_flags |= THREAD_CALL_WAIT;
1544 res = assert_wait(call, THREAD_UNINT);
1563 * Determine whether a thread call is either on a queue or
1567 thread_call_isactive(thread_call_t call)
1572 active = (call->tc_submit_count > call->tc_finish_count);
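
thread_call_wait_locked (1532-1544) snapshots tc_submit_count and sleeps until tc_finish_count catches up, so the wait covers every invocation requested before now without blocking on later resubmissions; thread_call_isactive (1567-1572) is the same comparison without the wait. A pthread sketch of that counter discipline:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdbool.h>

    typedef struct wc {
        pthread_mutex_t lock;
        pthread_cond_t  cv;
        uint64_t submit_count;  /* bumped on every enqueue    */
        uint64_t finish_count;  /* bumped after every callout */
    } wc_t;

    static void wc_wait(wc_t *c) {
        pthread_mutex_lock(&c->lock);
        uint64_t submit = c->submit_count;      /* snapshot       */
        while (c->finish_count < submit)        /* not caught up? */
            pthread_cond_wait(&c->cv, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    static void wc_finish(wc_t *c) {            /* callout is done */
        pthread_mutex_lock(&c->lock);
        c->finish_count++;
        pthread_cond_broadcast(&c->cv);         /* wake waiters    */
        pthread_mutex_unlock(&c->lock);
    }

    static bool wc_isactive(wc_t *c) {          /* caller locks in xnu */
        return c->submit_count > c->finish_count;
    }
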