
Lines matching refs: me (apr_thread_pool.c, in /macosx-10.9.5/apr-30/apr-util/apr-util/misc/)

82 static apr_status_t thread_pool_construct(apr_thread_pool_t * me,
89 me->thd_max = max_threads;
90 me->idle_max = init_threads;
91 me->threshold = init_threads / 2;
92 rv = apr_thread_mutex_create(&me->lock, APR_THREAD_MUTEX_NESTED,
93 me->pool);
97 rv = apr_thread_cond_create(&me->cond, me->pool);
99 apr_thread_mutex_destroy(me->lock);
102 me->tasks = apr_palloc(me->pool, sizeof(*me->tasks));
103 if (!me->tasks) {
106 APR_RING_INIT(me->tasks, apr_thread_pool_task, link);
107 me->scheduled_tasks = apr_palloc(me->pool, sizeof(*me->scheduled_tasks));
108 if (!me->scheduled_tasks) {
111 APR_RING_INIT(me->scheduled_tasks, apr_thread_pool_task, link);
112 me->recycled_tasks = apr_palloc(me->pool, sizeof(*me->recycled_tasks));
113 if (!me->recycled_tasks) {
116 APR_RING_INIT(me->recycled_tasks, apr_thread_pool_task, link);
117 me->busy_thds = apr_palloc(me->pool, sizeof(*me->busy_thds));
118 if (!me->busy_thds) {
121 APR_RING_INIT(me->busy_thds, apr_thread_list_elt, link);
122 me->idle_thds = apr_palloc(me->pool, sizeof(*me->idle_thds));
123 if (!me->idle_thds) {
126 APR_RING_INIT(me->idle_thds, apr_thread_list_elt, link);
127 me->recycled_thds = apr_palloc(me->pool, sizeof(*me->recycled_thds));
128 if (!me->recycled_thds) {
131 APR_RING_INIT(me->recycled_thds, apr_thread_list_elt, link);
132 me->thd_cnt = me->idle_cnt = me->task_cnt = me->scheduled_task_cnt = 0;
133 me->tasks_run = me->tasks_high = me->thd_high = me->thd_timed_out = 0;
134 me->idle_wait = 0;
135 me->terminated = 0;
137 me->task_idx[i] = NULL;
142 apr_thread_mutex_destroy(me->lock);
143 apr_thread_cond_destroy(me->cond);
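
The constructor above (source lines 82-143) builds the pool's six rings: pending, scheduled and recycled tasks, plus busy, idle and recycled threads. Each ring gets one pool-allocated sentinel initialized with APR_RING_INIT; on a later failure the mutex and condition created at 92-97 are destroyed again (142-143). A minimal sketch of that apr_ring.h sentinel pattern, using a hypothetical item element type instead of the pool's private task and thread-list structs:

    #include <apr_pools.h>
    #include <apr_ring.h>

    struct item {
        APR_RING_ENTRY(item) link;   /* embedded prev/next pointers */
        int value;
    };
    APR_RING_HEAD(item_ring, item);  /* the sentinel/head type */

    static void ring_demo(apr_pool_t *pool)
    {
        struct item_ring *ring = apr_palloc(pool, sizeof(*ring));
        struct item *it = apr_pcalloc(pool, sizeof(*it));
        struct item *cur;

        APR_RING_INIT(ring, item, link);            /* empty ring: sentinel only */
        APR_RING_INSERT_TAIL(ring, it, item, link); /* append one element */

        /* Iteration stops at the sentinel, the same check pop_task()
         * and the assert at source line 177 rely on. */
        for (cur = APR_RING_FIRST(ring);
             cur != APR_RING_SENTINEL(ring, item, link);
             cur = APR_RING_NEXT(cur, link)) {
            (void)cur->value;                       /* visit cur */
        }
    }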
151 static apr_thread_pool_task_t *pop_task(apr_thread_pool_t * me)
157 if (me->scheduled_task_cnt > 0) {
158 task = APR_RING_FIRST(me->scheduled_tasks);
161 APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
165 --me->scheduled_task_cnt;
171 if (me->task_cnt == 0) {
175 task = APR_RING_FIRST(me->tasks);
177 assert(task != APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link));
178 --me->task_cnt;
180 if (task == me->task_idx[seg]) {
181 me->task_idx[seg] = APR_RING_NEXT(task, link);
182 if (me->task_idx[seg] == APR_RING_SENTINEL(me->tasks,
184 || TASK_PRIORITY_SEG(me->task_idx[seg]) != seg) {
185 me->task_idx[seg] = NULL;
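
pop_task() (151-185) always dequeues the head of the task ring, which add_task() keeps sorted by priority, then repairs the segment index: if the removed task was the cached entry point for its segment, task_idx[seg] advances to the next task, or is cleared when the next entry is the sentinel or belongs to a different segment (180-185). The index is just a bucketing of the byte-sized priority; a sketch of the idea, with names and constants assumed rather than copied from the source:

    /* Priorities run 0-255; bucketing them into a few segments lets
     * add_task() start its insertion scan at task_idx[SEG(p)] instead
     * of walking the whole ring from the head. */
    #define SEGS 4
    #define SEG(priority) (((priority) & 0xFF) / (256 / SEGS))
    /* SEG(0) == 0, SEG(64) == 1, SEG(128) == 2, SEG(255) == 3 */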
192 static apr_interval_time_t waiting_time(apr_thread_pool_t * me)
196 task = APR_RING_FIRST(me->scheduled_tasks);
199 APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
207 static struct apr_thread_list_elt *elt_new(apr_thread_pool_t * me,
212 if (APR_RING_EMPTY(me->recycled_thds, apr_thread_list_elt, link)) {
213 elt = apr_pcalloc(me->pool, sizeof(*elt));
219 elt = APR_RING_FIRST(me->recycled_thds);
241 apr_thread_pool_t *me = param;
246 apr_thread_mutex_lock(me->lock);
247 elt = elt_new(me, t);
249 apr_thread_mutex_unlock(me->lock);
253 while (!me->terminated && elt->state != TH_STOP) {
256 --me->idle_cnt;
260 APR_RING_INSERT_TAIL(me->busy_thds, elt, apr_thread_list_elt, link);
261 task = pop_task(me);
262 while (NULL != task && !me->terminated) {
263 ++me->tasks_run;
265 apr_thread_mutex_unlock(me->lock);
268 apr_thread_mutex_lock(me->lock);
269 APR_RING_INSERT_TAIL(me->recycled_tasks, task,
275 task = pop_task(me);
282 if ((me->idle_cnt >= me->idle_max
283 && !(me->scheduled_task_cnt && 0 >= me->idle_max)
284 && !me->idle_wait)
285 || me->terminated || elt->state != TH_RUN) {
286 --me->thd_cnt;
287 if ((TH_PROBATION == elt->state) && me->idle_wait)
288 ++me->thd_timed_out;
289 APR_RING_INSERT_TAIL(me->recycled_thds, elt,
291 apr_thread_mutex_unlock(me->lock);
298 ++me->idle_cnt;
299 APR_RING_INSERT_TAIL(me->idle_thds, elt, apr_thread_list_elt, link);
306 if (me->scheduled_task_cnt)
307 wait = waiting_time(me);
308 else if (me->idle_cnt > me->idle_max) {
309 wait = me->idle_wait;
316 rv = apr_thread_cond_timedwait(me->cond, me->lock, wait);
319 rv = apr_thread_cond_wait(me->cond, me->lock);
324 --me->thd_cnt;
325 apr_thread_mutex_unlock(me->lock);
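
thread_pool_func() (241-325) is the worker body. Holding the pool lock, it links itself onto the busy ring, drains tasks (dropping the lock around each user function, 265-268, then recycling the finished task structure), and afterwards either exits, when it is surplus to idle_max or the pool is terminated (282-291), or parks on the idle ring and waits on the condition variable, using a timed wait when a scheduled task is pending (306-307) or an idle timeout is set (308-309). A stripped-down sketch of that lock/condition structure, with a one-slot queue standing in for the task rings (all names here are hypothetical):

    #include <apr_thread_proc.h>
    #include <apr_thread_mutex.h>
    #include <apr_thread_cond.h>

    typedef struct {
        apr_thread_mutex_t *lock;
        apr_thread_cond_t  *cond;
        int                 terminated;
        apr_thread_start_t  pending_func;   /* one-slot "queue" */
        void               *pending_param;
    } mini_pool_t;

    static void * APR_THREAD_FUNC worker(apr_thread_t *t, void *param)
    {
        mini_pool_t *p = param;

        apr_thread_mutex_lock(p->lock);
        while (!p->terminated) {
            if (p->pending_func) {
                apr_thread_start_t fn = p->pending_func;
                void *arg = p->pending_param;
                p->pending_func = NULL;
                apr_thread_mutex_unlock(p->lock);  /* run the task unlocked */
                fn(t, arg);
                apr_thread_mutex_lock(p->lock);
                continue;
            }
            /* Idle: block until a producer signals, as at line 319; the
             * real loop uses apr_thread_cond_timedwait() when a wakeup
             * deadline is known (line 316). */
            apr_thread_cond_wait(p->cond, p->lock);
        }
        apr_thread_mutex_unlock(p->lock);
        return NULL;
    }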
330 static apr_status_t thread_pool_cleanup(void *me)
332 apr_thread_pool_t *_myself = me;
344 APU_DECLARE(apr_status_t) apr_thread_pool_create(apr_thread_pool_t ** me,
353 *me = NULL;
367 * allocate from (*me)->pool. This is dangerous if there are multiple
384 *me = tp;
390 APU_DECLARE(apr_status_t) apr_thread_pool_destroy(apr_thread_pool_t * me)
392 return apr_pool_cleanup_run(me->pool, me, thread_pool_cleanup);
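
apr_thread_pool_create() (344-384) registers thread_pool_cleanup() on the pool it allocates, and apr_thread_pool_destroy() just runs that cleanup (392), so tearing down the enclosing apr_pool_t is equally safe. A minimal end-to-end usage sketch; the task body, thread counts and sleep are illustrative, not from the source:

    #include <stdio.h>
    #include <apr_general.h>
    #include <apr_time.h>
    #include <apr_thread_pool.h>

    static void * APR_THREAD_FUNC hello(apr_thread_t *t, void *param)
    {
        printf("task says: %s\n", (const char *)param);
        return NULL;
    }

    int main(void)
    {
        apr_pool_t *mem;
        apr_thread_pool_t *tp;

        apr_initialize();
        apr_pool_create(&mem, NULL);

        /* start with 2 threads, allow growth to 8 */
        if (apr_thread_pool_create(&tp, 2, 8, mem) == APR_SUCCESS) {
            apr_thread_pool_push(tp, hello, "hi",
                                 APR_THREAD_TASK_PRIORITY_NORMAL, NULL);
            apr_sleep(apr_time_from_sec(1)); /* crude: let the task run */
            apr_thread_pool_destroy(tp);     /* runs thread_pool_cleanup() */
        }
        apr_pool_destroy(mem);
        apr_terminate();
        return 0;
    }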
398 static apr_thread_pool_task_t *task_new(apr_thread_pool_t * me,
405 if (APR_RING_EMPTY(me->recycled_tasks, apr_thread_pool_task, link)) {
406 t = apr_pcalloc(me->pool, sizeof(*t));
412 t = APR_RING_FIRST(me->recycled_tasks);
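
task_new() (398-412), like elt_new() before it (207-219), takes a node from the recycled ring before falling back to allocation: memory from an APR pool is never freed individually, so finished tasks and exited threads are parked on recycled rings for reuse. The pattern in isolation, with a hypothetical obj type:

    #include <apr_pools.h>
    #include <apr_ring.h>

    typedef struct obj { APR_RING_ENTRY(obj) link; } obj;
    APR_RING_HEAD(obj_ring, obj);

    static obj *obj_new(struct obj_ring *recycled, apr_pool_t *pool)
    {
        obj *o;

        if (APR_RING_EMPTY(recycled, obj, link)) {
            o = apr_pcalloc(pool, sizeof(*o)); /* first use: allocate */
        } else {
            o = APR_RING_FIRST(recycled);      /* reuse a retired node */
            APR_RING_REMOVE(o, link);
        }
        return o;
    }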
436 static apr_thread_pool_task_t *add_if_empty(apr_thread_pool_t * me,
444 if (me->task_idx[seg]) {
445 assert(APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) !=
446 me->task_idx[seg]);
447 t_next = me->task_idx[seg];
450 if (APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) ==
459 if (me->task_idx[next]) {
460 APR_RING_INSERT_BEFORE(me->task_idx[next], t, link);
465 APR_RING_INSERT_TAIL(me->tasks, t, apr_thread_pool_task, link);
467 me->task_idx[seg] = t;
475 static apr_status_t schedule_task(apr_thread_pool_t *me,
483 apr_thread_mutex_lock(me->lock);
485 t = task_new(me, func, param, 0, owner, time);
487 apr_thread_mutex_unlock(me->lock);
490 t_loc = APR_RING_FIRST(me->scheduled_tasks);
494 ++me->scheduled_task_cnt;
501 APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
503 ++me->scheduled_task_cnt;
504 APR_RING_INSERT_TAIL(me->scheduled_tasks, t,
511 if (0 == me->thd_cnt) {
512 rv = apr_thread_create(&thd, NULL, thread_pool_func, me, me->pool);
514 ++me->thd_cnt;
515 if (me->thd_cnt > me->thd_high)
516 me->thd_high = me->thd_cnt;
519 apr_thread_cond_signal(me->cond);
520 apr_thread_mutex_unlock(me->lock);
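
schedule_task() (475-520) keeps the scheduled ring sorted by dispatch time: it scans for the first task due later than the new one and inserts before it, appending at the tail when the scan reaches the sentinel (501-504); it then spawns a thread if the pool has none (511-516) and signals the condition so a sleeping worker recomputes waiting_time(). The ordered-insert core, reduced to an assumed two-field task type:

    #include <apr_ring.h>
    #include <apr_time.h>

    typedef struct sched_task {
        APR_RING_ENTRY(sched_task) link;
        apr_time_t due;                 /* absolute dispatch time */
    } sched_task;
    APR_RING_HEAD(sched_ring, sched_task);

    static void sched_insert(struct sched_ring *ring, sched_task *t)
    {
        sched_task *t_loc;

        for (t_loc = APR_RING_FIRST(ring);
             t_loc != APR_RING_SENTINEL(ring, sched_task, link);
             t_loc = APR_RING_NEXT(t_loc, link)) {
            if (t_loc->due > t->due) {  /* first task due later */
                APR_RING_INSERT_BEFORE(t_loc, t, link);
                return;
            }
        }
        APR_RING_INSERT_TAIL(ring, t, sched_task, link);
    }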
524 static apr_status_t add_task(apr_thread_pool_t *me, apr_thread_start_t func,
533 apr_thread_mutex_lock(me->lock);
535 t = task_new(me, func, param, priority, owner, 0);
537 apr_thread_mutex_unlock(me->lock);
541 t_loc = add_if_empty(me, t);
547 while (APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) !=
554 if (t_loc == me->task_idx[TASK_PRIORITY_SEG(t)]) {
555 me->task_idx[TASK_PRIORITY_SEG(t)] = t;
560 me->task_cnt++;
561 if (me->task_cnt > me->tasks_high)
562 me->tasks_high = me->task_cnt;
563 if (0 == me->thd_cnt || (0 == me->idle_cnt && me->thd_cnt < me->thd_max &&
564 me->task_cnt > me->threshold)) {
565 rv = apr_thread_create(&thd, NULL, thread_pool_func, me, me->pool);
567 ++me->thd_cnt;
568 if (me->thd_cnt > me->thd_high)
569 me->thd_high = me->thd_cnt;
573 apr_thread_cond_signal(me->cond);
574 apr_thread_mutex_unlock(me->lock);
579 APU_DECLARE(apr_status_t) apr_thread_pool_push(apr_thread_pool_t *me,
585 return add_task(me, func, param, priority, 1, owner);
588 APU_DECLARE(apr_status_t) apr_thread_pool_schedule(apr_thread_pool_t *me,
594 return schedule_task(me, func, param, owner, time);
597 APU_DECLARE(apr_status_t) apr_thread_pool_top(apr_thread_pool_t *me,
603 return add_task(me, func, param, priority, 0, owner);
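
The three public entry points (579-603) funnel into the two helpers above: apr_thread_pool_push() calls add_task() with push == 1, apr_thread_pool_top() with push == 0, and apr_thread_pool_schedule() passes a relative time in microseconds to schedule_task(). Side by side, continuing with tp from the earlier example (fn, arg and owner are placeholders):

    /* FIFO within its priority class (add_task with push == 1) */
    apr_thread_pool_push(tp, fn, arg, APR_THREAD_TASK_PRIORITY_HIGH, owner);

    /* goes ahead of already-queued tasks of equal priority (push == 0) */
    apr_thread_pool_top(tp, fn, arg, APR_THREAD_TASK_PRIORITY_HIGH, owner);

    /* one-shot, roughly 2 seconds from now */
    apr_thread_pool_schedule(tp, fn, arg, 2 * APR_USEC_PER_SEC, owner);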
606 static apr_status_t remove_scheduled_tasks(apr_thread_pool_t *me,
612 t_loc = APR_RING_FIRST(me->scheduled_tasks);
614 APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
619 --me->scheduled_task_cnt;
627 static apr_status_t remove_tasks(apr_thread_pool_t *me, void *owner)
633 t_loc = APR_RING_FIRST(me->tasks);
634 while (t_loc != APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link)) {
637 --me->task_cnt;
639 if (t_loc == me->task_idx[seg]) {
640 me->task_idx[seg] = APR_RING_NEXT(t_loc, link);
641 if (me->task_idx[seg] == APR_RING_SENTINEL(me->tasks,
644 || TASK_PRIORITY_SEG(me->task_idx[seg]) != seg) {
645 me->task_idx[seg] = NULL;
655 static void wait_on_busy_threads(apr_thread_pool_t *me, void *owner)
661 apr_thread_mutex_lock(me->lock);
662 elt = APR_RING_FIRST(me->busy_thds);
663 while (elt != APR_RING_SENTINEL(me->busy_thds, apr_thread_list_elt, link)) {
679 apr_thread_mutex_unlock(me->lock);
681 apr_thread_mutex_lock(me->lock);
683 elt = APR_RING_FIRST(me->busy_thds);
685 apr_thread_mutex_unlock(me->lock);
689 APU_DECLARE(apr_status_t) apr_thread_pool_tasks_cancel(apr_thread_pool_t *me,
694 apr_thread_mutex_lock(me->lock);
695 if (me->task_cnt > 0) {
696 rv = remove_tasks(me, owner);
698 if (me->scheduled_task_cnt > 0) {
699 rv = remove_scheduled_tasks(me, owner);
701 apr_thread_mutex_unlock(me->lock);
702 wait_on_busy_threads(me, owner);
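
apr_thread_pool_tasks_cancel() (689-702) drops every queued and scheduled task tagged with the given owner, then calls wait_on_busy_threads(), which repeatedly releases and retakes the lock (679-681) until no busy thread is still running one of that owner's tasks. Typical use, with ctx as a placeholder owner pointer:

    /* tag tasks with an owner at submission time... */
    apr_thread_pool_push(tp, fn, arg, APR_THREAD_TASK_PRIORITY_NORMAL, ctx);

    /* ...then cancel them as a group; blocks until none are running */
    apr_thread_pool_tasks_cancel(tp, ctx);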
707 APU_DECLARE(apr_size_t) apr_thread_pool_tasks_count(apr_thread_pool_t *me)
709 return me->task_cnt;
713 apr_thread_pool_scheduled_tasks_count(apr_thread_pool_t *me)
715 return me->scheduled_task_cnt;
718 APU_DECLARE(apr_size_t) apr_thread_pool_threads_count(apr_thread_pool_t *me)
720 return me->thd_cnt;
723 APU_DECLARE(apr_size_t) apr_thread_pool_busy_count(apr_thread_pool_t *me)
725 return me->thd_cnt - me->idle_cnt;
728 APU_DECLARE(apr_size_t) apr_thread_pool_idle_count(apr_thread_pool_t *me)
730 return me->idle_cnt;
734 apr_thread_pool_tasks_run_count(apr_thread_pool_t * me)
736 return me->tasks_run;
740 apr_thread_pool_tasks_high_count(apr_thread_pool_t * me)
742 return me->tasks_high;
746 apr_thread_pool_threads_high_count(apr_thread_pool_t * me)
748 return me->thd_high;
752 apr_thread_pool_threads_idle_timeout_count(apr_thread_pool_t * me)
754 return me->thd_timed_out;
758 APU_DECLARE(apr_size_t) apr_thread_pool_idle_max_get(apr_thread_pool_t *me)
760 return me->idle_max;
764 apr_thread_pool_idle_wait_get(apr_thread_pool_t * me)
766 return me->idle_wait;
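
The accessors (707-766) are unlocked reads of the counters maintained above, so the values are point-in-time snapshots; busy is derived as thd_cnt - idle_cnt (725). A monitoring sketch, again with tp from the earlier example (APR_SIZE_T_FMT comes from apr.h):

    printf("threads: %" APR_SIZE_T_FMT " (%" APR_SIZE_T_FMT " busy, "
           "%" APR_SIZE_T_FMT " idle)\n",
           apr_thread_pool_threads_count(tp),
           apr_thread_pool_busy_count(tp),
           apr_thread_pool_idle_count(tp));
    printf("tasks: %" APR_SIZE_T_FMT " queued, %" APR_SIZE_T_FMT
           " scheduled, %" APR_SIZE_T_FMT " run so far\n",
           apr_thread_pool_tasks_count(tp),
           apr_thread_pool_scheduled_tasks_count(tp),
           apr_thread_pool_tasks_run_count(tp));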
774 static struct apr_thread_list_elt *trim_threads(apr_thread_pool_t *me,
781 apr_thread_mutex_lock(me->lock);
783 thds = me->idle_thds;
784 n = me->idle_cnt;
787 thds = me->busy_thds;
788 n = me->thd_cnt - me->idle_cnt;
791 apr_thread_mutex_unlock(me->lock);
804 me->idle_cnt = *cnt;
817 apr_thread_mutex_unlock(me->lock);
824 static apr_size_t trim_idle_threads(apr_thread_pool_t *me, apr_size_t cnt)
830 elt = trim_threads(me, &cnt, 1);
832 apr_thread_mutex_lock(me->lock);
833 apr_thread_cond_broadcast(me->cond);
834 apr_thread_mutex_unlock(me->lock);
844 apr_thread_mutex_lock(me->lock);
845 APR_RING_SPLICE_TAIL(me->recycled_thds, head, tail,
847 apr_thread_mutex_unlock(me->lock);
857 static apr_size_t trim_busy_threads(apr_thread_pool_t *me, apr_size_t cnt)
859 trim_threads(me, &cnt, 0);
863 APU_DECLARE(apr_size_t) apr_thread_pool_idle_max_set(apr_thread_pool_t *me,
866 me->idle_max = cnt;
867 cnt = trim_idle_threads(me, cnt);
872 apr_thread_pool_idle_wait_set(apr_thread_pool_t * me,
877 oldtime = me->idle_wait;
878 me->idle_wait = timeout;
883 APU_DECLARE(apr_size_t) apr_thread_pool_thread_max_get(apr_thread_pool_t *me)
885 return me->thd_max;
892 APU_DECLARE(apr_size_t) apr_thread_pool_thread_max_set(apr_thread_pool_t *me,
897 me->thd_max = cnt;
898 if (0 == cnt || me->thd_cnt <= cnt) {
902 n = me->thd_cnt - cnt;
903 if (n >= me->idle_cnt) {
904 trim_busy_threads(me, n - me->idle_cnt);
905 trim_idle_threads(me, 0);
908 trim_idle_threads(me, me->idle_cnt - n);
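
apr_thread_pool_thread_max_set() (892-908) stores the new cap and, when the pool is over it, sheds the surplus n = thd_cnt - cnt: idle threads are trimmed first, and busy threads are asked to stop only when the idle set cannot cover the excess (903-908). For example:

    /* cap the pool at 4 threads: surplus idle threads wake and exit;
     * surplus busy threads exit after finishing their current task */
    apr_thread_pool_thread_max_set(tp, 4);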
913 APU_DECLARE(apr_size_t) apr_thread_pool_threshold_get(apr_thread_pool_t *me)
915 return me->threshold;
918 APU_DECLARE(apr_size_t) apr_thread_pool_threshold_set(apr_thread_pool_t *me,
923 ov = me->threshold;
924 me->threshold = val;
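
threshold (913-924) is the queue depth that justifies a new thread: add_task() spawns one when the pool has no threads at all, or when none are idle, thd_cnt is under thd_max, and task_cnt exceeds the threshold (563-564). The setter saves the old value before overwriting it (923-924), so a latency-sensitive caller can force eager spawning and later restore the previous setting:

    /* threshold 0: any queued task with no idle thread (and headroom
     * under thd_max) triggers apr_thread_create() in add_task() */
    apr_size_t old = apr_thread_pool_threshold_set(tp, 0);
    /* ... */
    apr_thread_pool_threshold_set(tp, old);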