/*
 * Copyright (c) 2009 Kungliga Tekniska Högskolan
 * (Royal Institute of Technology, Stockholm, Sweden).
 * All rights reserved.
 *
 * Portions Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
34 */ 35 36#include "hi_locl.h" 37#include <dispatch/dispatch.h> 38 39#include "heap.h" 40 41struct heim_event_data { 42 heap_ptr hptr; 43 dispatch_semaphore_t running; 44 int flags; 45#define RUNNING 1 46#define IN_FREE 2 47 heim_ipc_event_callback_t callback; 48 heim_ipc_event_final_t final; 49 void *ctx; 50 time_t t; 51}; 52 53/** 54 * Event handling framework 55 * 56 * Event lifesyncle 57 * 58 * create ---> set_time ------> do_event --------> delete_event 59 * | | | 60 * \--------\-------> cancel_event -->--/ 61 * 62 */ 63 64static dispatch_queue_t timer_sync_q; 65static dispatch_queue_t timer_job_q; 66static Heap *timer_heap; 67static dispatch_source_t timer_source; 68 69/* 70 * Compare to event for heap sorting 71 */ 72 73static int 74event_cmp_fn(const void *aptr, const void *bptr) 75{ 76 const struct heim_event_data *a = aptr; 77 const struct heim_event_data *b = bptr; 78 return (int)(a->t - b->t); 79} 80 81/* 82 * Calculate next timer event and set the timer 83 */ 84 85static void 86reschedule_timer(void) 87{ 88 const struct heim_event_data *e = heap_head(timer_heap); 89 90 if (e == NULL) { 91 /* 92 * if there are no more events, cancel timer by setting timer 93 * to forever, later calls will pull it down to !forever when 94 * needed again 95 */ 96 dispatch_source_set_timer(timer_source, 97 DISPATCH_TIME_FOREVER, 0, 10ull * NSEC_PER_SEC); 98 } else { 99 struct timespec ts; 100 ts.tv_sec = e->t; 101 ts.tv_nsec = 0; 102 dispatch_source_set_timer(timer_source, 103 dispatch_walltime(&ts, 0), 104 0, 10ull * NSEC_PER_SEC); 105 } 106} 107 108/* 109 * Get jobs that have triggered and run them in the background. 
 */

/*
 * Pop every event whose trigger time has passed, dispatch its callback
 * on the background job queue, then re-arm the timer for the next
 * pending event.
 *
 * Runs on timer_sync_q (the timer source's target queue), so heap and
 * event-flag access here needs no further locking.
 *
 * NOTE(review): the `e->t < now` test means an event scheduled for
 * exactly `now` waits for the next timer tick — confirm intended.
 */

static void
trigger_jobs(void)
{
    time_t now = time(NULL);

    while (1) {
	struct heim_event_data *e = rk_UNCONST(heap_head(timer_heap));

	if (e != NULL && e->t < now) {
	    heap_remove_head(timer_heap);
	    e->hptr = HEAP_INVALID_PTR;

	    /* if it's already running, retry 10s from now */
	    if (e->flags & RUNNING) {
		e->t = now + 10;
		heap_insert(timer_heap, e, &e->hptr);
		continue;
	    }
	    e->flags |= RUNNING;

	    _heim_ipc_suspend_timer();

	    dispatch_async(timer_job_q, ^{
		    e->callback(e, e->ctx);
		    /*
		     * Hop back to the sync queue to clear RUNNING and
		     * wake any heim_ipc_event_free() waiting on
		     * e->running for this callback to finish.
		     */
		    dispatch_async(timer_sync_q, ^{
			    e->flags &= ~RUNNING;
			    if (e->running)
				dispatch_semaphore_signal(e->running);

			    _heim_ipc_restart_timer();
			});
		});
	} else
	    break;
    }
    reschedule_timer();
}

/*
 * One-time setup: create the synchronization queue, the event heap,
 * and the (initially idle) dispatch timer source.
 */

static void
timer_init(void)
{
    static dispatch_once_t once;

    dispatch_once(&once, ^{

	    timer_sync_q = dispatch_queue_create("hiem-timer-q", NULL);
	    timer_job_q = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

	    timer_heap = heap_new(11, event_cmp_fn);

	    timer_source = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER,
						  0, 0, timer_sync_q);
	    dispatch_source_set_event_handler(timer_source, ^{ trigger_jobs(); });
	    dispatch_resume(timer_source);
	});
}

/**
 * Create an event that can be (re)scheduled, setting its callback
 * function and context variable.
 *
 * The callback function may call heim_ipc_event_cancel() and
 * heim_ipc_event_free().
180 * 181 * @param cb callback function when the event is triggered 182 * @param ctx context passed to the callback function 183 * 184 * @return a heim ipc event 185 */ 186 187heim_event_t 188heim_ipc_event_create_f(heim_ipc_event_callback_t cb, void *ctx) 189{ 190 heim_event_t e; 191 192 timer_init(); 193 194 e = malloc(sizeof(*e)); 195 if (e == NULL) 196 return NULL; 197 198 e->hptr = HEAP_INVALID_PTR; 199 e->running = NULL; 200 e->flags = 0; 201 e->callback = cb; 202 e->ctx = ctx; 203 e->t = 0; 204 205 return e; 206} 207 208 209/** 210 * (Re)schedule a new timeout for an event 211 * 212 * @param e event to schedule new timeout 213 * @param t absolute time the event will trigger 214 * 215 * @return 0 on success 216 */ 217 218int 219heim_ipc_event_set_time(heim_event_t e, time_t t) 220{ 221 dispatch_sync(timer_sync_q, ^{ 222 time_t next; 223 if (e->flags & IN_FREE) 224 abort(); 225 if (e->hptr != HEAP_INVALID_PTR) 226 heap_remove(timer_heap, e->hptr); 227 228 next = time(NULL); 229 230 /* don't allow setting events in the past */ 231 if (t > next) 232 next = t; 233 e->t = next; 234 235 heap_insert(timer_heap, e, &e->hptr); 236 reschedule_timer(); 237 }); 238 return 0; 239} 240 241/** 242 * Cancel an event. 243 * 244 * Cancel will block if the callback for the job is running. 245 * 246 * @param e event to schedule new timeout 247 */ 248 249void 250heim_ipc_event_cancel(heim_event_t e) 251{ 252 dispatch_async(timer_sync_q, ^{ 253 if (e->hptr != HEAP_INVALID_PTR) { 254 heap_remove(timer_heap, e->hptr); 255 e->hptr = HEAP_INVALID_PTR; 256 } 257 e->t = 0; 258 reschedule_timer(); 259 }); 260} 261 262/** 263 * Free an event, most be either canceled or triggered. Can't delete 264 * an event that is not canceled. 
 *
 * @param e event to free
 */

void
heim_ipc_event_free(heim_event_t e)
{
    dispatch_async(timer_sync_q, ^{
	    e->flags |= IN_FREE;
	    /* caller contract: the event must no longer be in the heap */
	    if ((e->hptr != HEAP_INVALID_PTR))
		abort();
	    if (e->final || (e->flags & RUNNING)) {
		int wait_running = (e->flags & RUNNING);

		if (wait_running)
		    e->running = dispatch_semaphore_create(0);

		/*
		 * Defer the finalizer and free() to the job queue; if a
		 * callback is still running, first wait for trigger_jobs'
		 * completion block (on timer_sync_q) to signal e->running.
		 */
		dispatch_async(timer_job_q, ^{
			if (wait_running) {
			    dispatch_semaphore_wait(e->running,
						    DISPATCH_TIME_FOREVER);
			    dispatch_release(e->running);
			}
			if (e->final)
			    e->final(e->ctx);
			free(e);
		    });
	    } else {
		free(e);
	    }
	});
}

/**
 * Set the finalizer called when event 'e' is freed.
 *
 * @param e event to set finalizer for
 * @param f finalizer to be called with the event's context pointer
 */

void
heim_ipc_event_set_final_f(heim_event_t e, heim_ipc_event_final_t f)
{
    e->final = f;
}