Apache HTTPD
apr_thread_pool.c
Go to the documentation of this file.
1/*
2 * Licensed to the Apache Software Foundation (ASF) under one or more
3 * contributor license agreements. See the NOTICE file distributed
4 * with this work for additional information regarding copyright
5 * ownership. The ASF licenses this file to you under the Apache
6 * License, Version 2.0 (the "License"); you may not use this file
7 * except in compliance with the License. You may obtain a copy of
8 * the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
15 * implied. See the License for the specific language governing
16 * permissions and limitations under the License.
17 */
18
19#include <assert.h>
20#include "apr_thread_pool.h"
21#include "apr_ring.h"
22#include "apr_thread_cond.h"
23#include "apr_portable.h"
24
25#if APR_HAS_THREADS
26
/* NOTE(review): this file is a doxygen source dump; the leading digits on each
 * line are the original line numbers and several original lines are missing
 * (gaps in the numbering). Comments below only describe what is visible. */

/* Number of priority buckets, and the mapping from a task's 0..255 priority
 * byte to its bucket index 0..3 (higher priority -> higher bucket). */
27#define TASK_PRIORITY_SEGS 4
28#define TASK_PRIORITY_SEG(x) (((x)->dispatch.priority & 0xFF) / 64)
29
/* A unit of work queued on the pool. The function pointer and the ring-link
 * member were on lines dropped by the dump (32-33, 41). `dispatch` is a
 * union: a normal task carries a priority byte, a scheduled task carries
 * its absolute due time. */
30typedef struct apr_thread_pool_task
31{
34 void *param;
35 void *owner;
36 union
37 {
38 apr_byte_t priority;
39 apr_time_t time;
40 } dispatch;
42
44
/* Per-worker-thread bookkeeping element (struct tag/ring link on dropped
 * lines 45, 47-48, 51). `state` drives the worker loop: TH_RUN = normal,
 * TH_STOP = asked to exit, TH_PROBATION = idle past idle_max, may time out. */
46{
49 void *current_owner;
50 enum { TH_RUN, TH_STOP, TH_PROBATION } state;
52};
53
55
/* The pool itself. Counters are `volatile` and, per the function comments
 * below, are protected by the pool mutex (declared on dropped lines).
 * Several members referenced later (lock, pool, task rings, task_idx,
 * idle_wait, thd_timed_out, work_done, ...) were on dropped lines 58-84. */
56struct apr_thread_pool
57{
59 volatile apr_size_t thd_max;
60 volatile apr_size_t idle_max;
62 volatile apr_size_t thd_cnt;
63 volatile apr_size_t idle_cnt;
64 volatile apr_size_t busy_cnt;
65 volatile apr_size_t task_cnt;
67 volatile apr_size_t threshold;
68 volatile apr_size_t tasks_run;
69 volatile apr_size_t tasks_high;
70 volatile apr_size_t thd_high;
77 apr_thread_cond_t *more_work;
79 apr_thread_cond_t *all_done;
81 volatile int terminated;
85};
86
/* Pool constructor body (signature on dropped lines 87-90; presumably the
 * static thread_pool_construct() helper — TODO confirm against upstream).
 * Allocates the pool struct from the caller's pool, creates a private
 * sub-pool, the mutex (creation call on dropped line 111) and the three
 * condition variables, then allocates and initializes every ring head.
 * On ENOMEM it destroys the condvars and returns; labels CATCH_ENOMEM /
 * FINAL_EXIT themselves are on dropped lines 169 and 174-175. */
91{
92 apr_status_t rv;
94
95 me = *tp = apr_pcalloc(pool, sizeof(apr_thread_pool_t));
96 me->thd_max = max_threads;
97 me->idle_max = init_threads;
98 me->threshold = init_threads / 2;
99
100 /* This pool will be used by different threads. As we cannot ensure that
101 * our caller won't use the pool without acquiring the mutex, we must
102 * create a new sub pool.
103 */
104 rv = apr_pool_create(&me->pool, pool);
105 if (APR_SUCCESS != rv) {
106 return rv;
107 }
108 /* Create the mutex on the parent pool such that it's always alive from
109 * apr_thread_pool_{push,schedule,top}() callers.
110 */
/* NOTE(review): dropped line 111 presumably created me->lock — the rv
 * tested below has no visible producer. */
112 if (APR_SUCCESS != rv) {
113 return rv;
114 }
115 rv = apr_thread_cond_create(&me->more_work, me->pool);
116 if (APR_SUCCESS != rv) {
118 return rv;
119 }
120 rv = apr_thread_cond_create(&me->work_done, me->pool);
121 if (APR_SUCCESS != rv) {
122 apr_thread_cond_destroy(me->more_work);
124 return rv;
125 }
126 rv = apr_thread_cond_create(&me->all_done, me->pool);
127 if (APR_SUCCESS != rv) {
128 apr_thread_cond_destroy(me->work_done);
129 apr_thread_cond_destroy(me->more_work);
131 return rv;
132 }
/* Allocate each ring head, bail out to CATCH_ENOMEM on failure, and
 * initialize it as an empty ring. (APR_RING_INIT for me->tasks was on
 * dropped line 137.) */
133 me->tasks = apr_palloc(me->pool, sizeof(*me->tasks));
134 if (!me->tasks) {
135 goto CATCH_ENOMEM;
136 }
138 me->scheduled_tasks = apr_palloc(me->pool, sizeof(*me->scheduled_tasks));
139 if (!me->scheduled_tasks) {
140 goto CATCH_ENOMEM;
141 }
142 APR_RING_INIT(me->scheduled_tasks, apr_thread_pool_task, link);
143 me->recycled_tasks = apr_palloc(me->pool, sizeof(*me->recycled_tasks));
144 if (!me->recycled_tasks) {
145 goto CATCH_ENOMEM;
146 }
147 APR_RING_INIT(me->recycled_tasks, apr_thread_pool_task, link);
148 me->busy_thds = apr_palloc(me->pool, sizeof(*me->busy_thds));
149 if (!me->busy_thds) {
150 goto CATCH_ENOMEM;
151 }
152 APR_RING_INIT(me->busy_thds, apr_thread_list_elt, link);
153 me->idle_thds = apr_palloc(me->pool, sizeof(*me->idle_thds));
154 if (!me->idle_thds) {
155 goto CATCH_ENOMEM;
156 }
157 APR_RING_INIT(me->idle_thds, apr_thread_list_elt, link);
158 me->dead_thds = apr_palloc(me->pool, sizeof(*me->dead_thds));
159 if (!me->dead_thds) {
160 goto CATCH_ENOMEM;
161 }
162 APR_RING_INIT(me->dead_thds, apr_thread_list_elt, link);
163 me->recycled_thds = apr_palloc(me->pool, sizeof(*me->recycled_thds));
164 if (!me->recycled_thds) {
165 goto CATCH_ENOMEM;
166 }
167 APR_RING_INIT(me->recycled_thds, apr_thread_list_elt, link);
168 goto FINAL_EXIT;
/* CATCH_ENOMEM: (label on dropped line 169) undo condvar creation. */
170 rv = APR_ENOMEM;
171 apr_thread_cond_destroy(me->all_done);
172 apr_thread_cond_destroy(me->work_done);
173 apr_thread_cond_destroy(me->more_work);
176 return rv;
177}
178
179/*
180 * NOTE: This function is not thread safe by itself. Caller should hold the lock
181 */
/* Dequeue the next runnable task (signature on dropped line 182; upstream
 * name pop_task — TODO confirm). A scheduled task whose due time has
 * arrived wins over any normal task; otherwise the highest-priority normal
 * task (head of me->tasks) is taken and the per-segment index task_idx[]
 * is advanced past it. Returns NULL when nothing is runnable. */
183{
185 int seg;
186
187 /* check for scheduled tasks */
188 if (me->scheduled_task_cnt > 0) {
189 task = APR_RING_FIRST(me->scheduled_tasks);
190 assert(task != NULL);
191 assert(task !=
192 APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
193 link));
194 /* if it's time */
195 if (task->dispatch.time <= apr_time_now()) {
196 --me->scheduled_task_cnt;
197 APR_RING_REMOVE(task, link);
198 return task;
199 }
200 }
201 /* check for normal tasks if we're not returning a scheduled task */
202 if (me->task_cnt == 0) {
203 return NULL;
204 }
205
206 task = APR_RING_FIRST(me->tasks);
207 assert(task != NULL);
/* NOTE(review): dropped line 210 presumably computed
 * seg = TASK_PRIORITY_SEG(task); `seg` has no visible assignment. */
209 --me->task_cnt;
211 if (task == me->task_idx[seg]) {
/* The popped task was its segment's index entry: move the index to the
 * next task, or clear it if that task belongs to another segment. */
212 me->task_idx[seg] = APR_RING_NEXT(task, link);
213 if (me->task_idx[seg] == APR_RING_SENTINEL(me->tasks,
215 || TASK_PRIORITY_SEG(me->task_idx[seg]) != seg) {
216 me->task_idx[seg] = NULL;
217 }
218 }
219 APR_RING_REMOVE(task, link);
220 return task;
221}
222
/* Time until the earliest scheduled task is due (signature on dropped line
 * 223; upstream name waiting_time — TODO confirm). Caller must hold the
 * lock and must only call this when scheduled_task_cnt > 0, as the asserts
 * below require a non-sentinel first element. The result may be negative
 * if the task is already overdue. */
224{
226
227 task = APR_RING_FIRST(me->scheduled_tasks);
228 assert(task != NULL);
229 assert(task !=
230 APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
231 link));
232 return task->dispatch.time - apr_time_now();
233}
234
235/*
236 * NOTE: This function is not thread safe by itself. Caller should hold the lock
237 */
/* Obtain a thread-list element for worker thread `t`, reusing one from the
 * recycled list when available (first half of the signature is on dropped
 * line 238). Returns NULL only if allocation fails. */
239 apr_thread_t * t)
240{
241 struct apr_thread_list_elt *elt;
242
243 if (APR_RING_EMPTY(me->recycled_thds, apr_thread_list_elt, link)) {
244 elt = apr_palloc(me->pool, sizeof(*elt));
245 if (NULL == elt) {
246 return NULL;
247 }
248 }
249 else {
250 elt = APR_RING_FIRST(me->recycled_thds);
251 APR_RING_REMOVE(elt, link);
252 }
253
/* Reset the element to a fresh, unlinked, runnable state. */
254 APR_RING_ELEM_INIT(elt, link);
255 elt->thd = t;
256 elt->current_owner = NULL;
257 elt->signal_work_done = 0;
258 elt->state = TH_RUN;
259 return elt;
260}
261
262/*
263 * The worker thread function. Take a task from the queue and perform it if
264 * there is any. Otherwise, put itself into the idle thread list and waiting
265 * for signal to wake up.
266 * The thread terminates directly and exits when it is asked to stop, after
267 * handling its task if busy. The thread will then be in the dead_thds list
268 * and should be joined.
269 */
/* Signature on dropped line 270 (apr_thread_start_t shape: (apr_thread_t *t,
 * void *param) — TODO confirm); locals and the initial mutex acquisition are
 * on dropped lines 272-274, 277. The loop below runs entirely under me->lock
 * except while executing a task (unlock/relock on dropped lines 303, 311). */
271{
275 struct apr_thread_list_elt *elt;
276
278
279 elt = elt_new(me, t);
280 if (!elt) {
/* Allocation failed: dropped lines 281-282 presumably unlocked and
 * exited the thread. */
283 }
284
285 for (;;) {
286 /* Test if not new element, it is awakened from idle */
287 if (APR_RING_NEXT(elt, link) != elt) {
288 --me->idle_cnt;
289 APR_RING_REMOVE(elt, link);
290 }
291
292 if (elt->state != TH_STOP) {
/* Become busy and drain tasks until the queue is empty or we are
 * told to stop. */
293 ++me->busy_cnt;
294 APR_RING_INSERT_TAIL(me->busy_thds, elt,
295 apr_thread_list_elt, link);
296 do {
297 task = pop_task(me);
298 if (!task) {
299 break;
300 }
301 ++me->tasks_run;
302 elt->current_owner = task->owner;
304
305 /* Run the task (or drop it if terminated already) */
306 if (!me->terminated) {
307 apr_thread_data_set(task, "apr_thread_pool_task", NULL, t);
308 task->func(t, task->param);
309 }
310
/* Task done: recycle it and wake any tasks_cancel() waiter
 * that asked to be signalled. */
312 APR_RING_INSERT_TAIL(me->recycled_tasks, task,
314 elt->current_owner = NULL;
315 if (elt->signal_work_done) {
316 elt->signal_work_done = 0;
317 apr_thread_cond_signal(me->work_done);
318 }
319 } while (elt->state != TH_STOP);
320 APR_RING_REMOVE(elt, link);
321 --me->busy_cnt;
322 }
323 assert(NULL == elt->current_owner);
324
325 /* thread should die? */
326 if (me->terminated
327 || elt->state != TH_RUN
328 || (me->idle_cnt >= me->idle_max
329 && (me->idle_max || !me->scheduled_task_cnt)
330 && !me->idle_wait)) {
331 if ((TH_PROBATION == elt->state) && me->idle_wait)
332 ++me->thd_timed_out;
333 break;
334 }
335
336 /* busy thread become idle */
337 ++me->idle_cnt;
338 APR_RING_INSERT_TAIL(me->idle_thds, elt, apr_thread_list_elt, link);
339
340 /*
341 * If there is a scheduled task, always scheduled to perform that task.
342 * Since there is no guarantee that current idle threads are scheduled
343 * for next scheduled task.
344 */
345 if (me->scheduled_task_cnt)
/* NOTE(review): dropped line 346 presumably set
 * wait = waiting_time(me); */
347 else if (me->idle_cnt > me->idle_max) {
348 wait = me->idle_wait;
349 elt->state = TH_PROBATION;
350 }
351 else
352 wait = -1;
353
/* wait >= 0: bounded sleep (next scheduled task or probation
 * timeout); wait < 0: sleep until more work is signalled. */
354 if (wait >= 0) {
355 apr_thread_cond_timedwait(me->more_work, me->lock, wait);
356 }
357 else {
358 apr_thread_cond_wait(me->more_work, me->lock);
359 }
360 }
361
362 /* Dead thread, to be joined */
363 APR_RING_INSERT_TAIL(me->dead_thds, elt, apr_thread_list_elt, link);
364 if (--me->thd_cnt == 0 && me->terminated) {
365 apr_thread_cond_signal(me->all_done);
366 }
368
370 return NULL; /* should not be here, safe net */
371}
372
373/* Must be locked by the caller */
/* Join and recycle every thread parked on the dead_thds list (signature on
 * dropped line 374; takes the pool — TODO confirm upstream name). */
375{
376 while (!APR_RING_EMPTY(me->dead_thds, apr_thread_list_elt, link)) {
377 struct apr_thread_list_elt *elt;
379
380 elt = APR_RING_FIRST(me->dead_thds);
381 APR_RING_REMOVE(elt, link);
383
/* NOTE(review): `status` is declared on a dropped line (378 area);
 * the join result is collected but not otherwise visible here. */
384 apr_thread_join(&status, elt->thd);
385
387 APR_RING_INSERT_TAIL(me->recycled_thds, elt,
388 apr_thread_list_elt, link);
389 }
390}
391
/* Pool shutdown helper (signature on dropped line 392; presumably the
 * thread_pool_cleanup() registered on the pool — TODO confirm). Marks the
 * pool terminated, waits on all_done until the last worker exits, then
 * joins the dead threads (call sites for lock/cancel/join are on dropped
 * lines 397-399, 406-408). */
393{
395
396 _myself->terminated = 1;
400
401 if (_myself->thd_cnt) {
/* Workers signal all_done when thd_cnt reaches 0 and terminated is
 * set (see the worker loop above). */
402 apr_thread_cond_wait(_myself->all_done, _myself->lock);
403 }
404
405 /* All threads should be dead now, join them */
407
409
410 return APR_SUCCESS;
411}
412
/* Public constructor body (declaration on dropped lines 412-416; per the
 * comments it is apr_thread_pool_create). Builds the pool (construct call
 * on dropped line 424), then spawns `init_threads` workers under the lock
 * and publishes the pool in *me only on full success. */
417{
421
422 *me = NULL;
423
425 if (APR_SUCCESS != rv)
426 return rv;
428
429 /* Grab the mutex as apr_thread_create() and thread_pool_func() will
430 * allocate from (*me)->pool. This is dangerous if there are multiple
431 * initial threads to create.
432 */
433 apr_thread_mutex_lock(tp->lock);
434 while (init_threads--) {
435 rv = apr_thread_create(&t, NULL, thread_pool_func, tp, tp->pool);
436 if (APR_SUCCESS != rv) {
437 break;
438 }
439 tp->thd_cnt++;
440 if (tp->thd_cnt > tp->thd_high) {
441 tp->thd_high = tp->thd_cnt;
442 }
443 }
444 apr_thread_mutex_unlock(tp->lock);
445
446 if (rv == APR_SUCCESS) {
447 *me = tp;
448 }
449
450 return rv;
451}
452
/* Public destructor body (declaration on dropped lines 452-453). Stops all
 * workers first (cleanup call on dropped line 462), then tears down the
 * private sub-pool. Always succeeds. */
454{
455 /* Stop the threads before destroying me->pool: with APR <= 1.7 the
456 * threads' pools are children of me->pool and APR_POOL_DEBUG would
457 * deadlock if thread_pool_cleanup() is called while me->pool is
458 * destroyed (because of parent locking).
459 * With APR > 1.7 the threads' pools are unmanaged so there is no
460 * such issue, yet it does not hurt to stop the threads before.
461 */
463 apr_pool_destroy(me->pool);
464 return APR_SUCCESS;
465}
466
467/*
468 * NOTE: This function is not thread safe by itself. Caller should hold the lock
469 */
/* Build a task object, reusing a recycled one when possible (first signature
 * line on dropped lines 470-471). time > 0 makes it a scheduled task due at
 * now+time; otherwise it is a priority task. Returns NULL on allocation
 * failure. */
472 void *param, apr_byte_t priority,
473 void *owner, apr_time_t time)
474{
476
477 if (APR_RING_EMPTY(me->recycled_tasks, apr_thread_pool_task, link)) {
478 t = apr_palloc(me->pool, sizeof(*t));
479 if (NULL == t) {
480 return NULL;
481 }
482 }
483 else {
484 t = APR_RING_FIRST(me->recycled_tasks);
485 APR_RING_REMOVE(t, link);
486 }
487 APR_RING_ELEM_INIT(t, link);
488
489 t->func = func;
490 t->param = param;
491 t->owner = owner;
492 if (time > 0) {
493 t->dispatch.time = apr_time_now() + time;
494 }
495 else {
496 t->dispatch.priority = priority;
497 }
498 return t;
499}
500
501/*
502 * Test it the task is the only one within the priority segment.
503 * If it is not, return the first element with same or lower priority.
504 * Otherwise, add the task into the queue and return NULL.
505 *
506 * NOTE: This function is not thread safe by itself. Caller should hold the lock
507 */
/* Signature on dropped lines 508-509; the `seg = TASK_PRIORITY_SEG(t)`
 * initialization and the `t_next` declaration are on dropped lines 513/515,
 * and line 517 held the start of an assert whose tail is visible below. */
510{
511 int seg;
512 int next;
514
516 if (me->task_idx[seg]) {
518 me->task_idx[seg]);
/* Segment not empty: walk forward to the first task of same or lower
 * priority and hand it back for the caller to insert before/after. */
519 t_next = me->task_idx[seg];
520 while (t_next->dispatch.priority > t->dispatch.priority) {
521 t_next = APR_RING_NEXT(t_next, link);
522 if (APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) ==
523 t_next) {
524 return t_next;
525 }
526 }
527 return t_next;
528 }
529
/* Segment empty: insert before the first populated higher segment, or
 * at the tail of the queue (the tail-insert on dropped line 537). */
530 for (next = seg - 1; next >= 0; next--) {
531 if (me->task_idx[next]) {
532 APR_RING_INSERT_BEFORE(me->task_idx[next], t, link);
533 break;
534 }
535 }
536 if (0 > next) {
538 }
539 me->task_idx[seg] = t;
540 return NULL;
541}
542
543/*
544* schedule a task to run in "time" microseconds. Find the spot in the ring where
545* the time fits. Adjust the short_time so the thread wakes up when the time is reached.
546*/
/* First signature lines on dropped lines 547-548; locals and the mutex
 * lock/unlock pairs are on dropped lines 551-554, 556, 560, 569, 602. */
549 void *owner, apr_interval_time_t time)
550{
555
557
558 if (me->terminated) {
559 /* Let the caller know that we are done */
561 return APR_NOTFOUND;
562 }
563
564 /* Maintain dead threads */
566
567 t = task_new(me, func, param, 0, owner, time);
568 if (NULL == t) {
570 return APR_ENOMEM;
571 }
/* Insert in due-time order: before the first later entry, else at the
 * tail (the INSERT_BEFORE itself is on dropped line 577). */
572 t_loc = APR_RING_FIRST(me->scheduled_tasks);
573 while (NULL != t_loc) {
574 /* if the time is less than the entry insert ahead of it */
575 if (t->dispatch.time < t_loc->dispatch.time) {
576 ++me->scheduled_task_cnt;
578 break;
579 }
580 else {
581 t_loc = APR_RING_NEXT(t_loc, link);
582 if (t_loc ==
583 APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
584 link)) {
585 ++me->scheduled_task_cnt;
586 APR_RING_INSERT_TAIL(me->scheduled_tasks, t,
588 break;
589 }
590 }
591 }
592 /* there should be at least one thread for scheduled tasks */
593 if (0 == me->thd_cnt) {
/* NOTE(review): dropped line 594 presumably called apr_thread_create()
 * producing the rv tested here. */
595 if (APR_SUCCESS == rv) {
596 ++me->thd_cnt;
597 if (me->thd_cnt > me->thd_high)
598 me->thd_high = me->thd_cnt;
599 }
600 }
601 apr_thread_cond_signal(me->more_work);
603
604 return rv;
605}
606
/* Core enqueue for push/top (first signature line on dropped line 607;
 * static add_task per the wrappers below). push != 0 inserts behind equal
 * priorities (FIFO within a priority); push == 0 inserts at the front of
 * its priority segment. Locals, locking and the add_if_empty() call are on
 * dropped lines 611-614, 616, 620, 625, 629, 633, 644, 651, 657, 666. */
608 void *param, apr_byte_t priority, int push,
609 void *owner)
610{
615
617
618 if (me->terminated) {
619 /* Let the caller know that we are done */
621 return APR_NOTFOUND;
622 }
623
624 /* Maintain dead threads */
626
627 t = task_new(me, func, param, priority, owner, 0);
628 if (NULL == t) {
630 return APR_ENOMEM;
631 }
632
/* NOTE(review): dropped line 633 presumably set
 * t_loc = add_if_empty(me, t); NULL means it was inserted already. */
634 if (NULL == t_loc) {
635 goto FINAL_EXIT;
636 }
637
638 if (push) {
639 while (APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) !=
640 t_loc && t_loc->dispatch.priority >= t->dispatch.priority) {
641 t_loc = APR_RING_NEXT(t_loc, link);
642 }
643 }
/* (INSERT_BEFORE on dropped line 644.) A "top" insert becomes its
 * segment's new index entry. */
645 if (!push) {
646 if (t_loc == me->task_idx[TASK_PRIORITY_SEG(t)]) {
647 me->task_idx[TASK_PRIORITY_SEG(t)] = t;
648 }
649 }
650
/* FINAL_EXIT: (label on dropped line 651) update stats and spawn a
 * worker if none exist or all are busy below the limits. */
652 me->task_cnt++;
653 if (me->task_cnt > me->tasks_high)
654 me->tasks_high = me->task_cnt;
655 if (0 == me->thd_cnt || (0 == me->idle_cnt && me->thd_cnt < me->thd_max &&
656 me->task_cnt > me->threshold)) {
658 if (APR_SUCCESS == rv) {
659 ++me->thd_cnt;
660 if (me->thd_cnt > me->thd_high)
661 me->thd_high = me->thd_cnt;
662 }
663 }
664
665 apr_thread_cond_signal(me->more_work);
667
668 return rv;
669}
670
/* Public wrappers (leading declaration lines dropped: 670-672, 680-683,
 * 689-690). push = FIFO within priority; schedule = timed; top = LIFO
 * within priority. */
673 void *param,
674 apr_byte_t priority,
675 void *owner)
676{
677 return add_task(me, func, param, priority, 1, owner);
678}
679
682 void *param,
684 void *owner)
685{
686 return schedule_task(me, func, param, owner, time);
687}
688
691 void *param,
692 apr_byte_t priority,
693 void *owner)
694{
695 return add_task(me, func, param, priority, 0, owner);
696}
697
/* Drop scheduled tasks belonging to `owner` (NULL owner matches all); first
 * signature line on dropped line 698. Caller holds the lock (see the
 * tasks_cancel caller below). */
699 void *owner)
700{
703
704 t_loc = APR_RING_FIRST(me->scheduled_tasks);
705 while (t_loc !=
706 APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
707 link)) {
708 next = APR_RING_NEXT(t_loc, link);
709 /* if this is the owner remove it */
710 if (!owner || t_loc->owner == owner) {
711 --me->scheduled_task_cnt;
712 APR_RING_REMOVE(t_loc, link);
713 }
714 t_loc = next;
715 }
716 return APR_SUCCESS;
717}
718
/* Drop queued (non-scheduled) tasks belonging to `owner` (NULL matches all),
 * keeping the per-segment task_idx[] consistent exactly as pop_task does.
 * Locals t_loc/next are declared on dropped lines 721-722; the
 * seg = TASK_PRIORITY_SEG(t_loc) assignment on dropped line 730. */
static apr_status_t remove_tasks(apr_thread_pool_t *me, void *owner)
{
723 int seg;
724
725 t_loc = APR_RING_FIRST(me->tasks);
726 while (t_loc != APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link)) {
727 next = APR_RING_NEXT(t_loc, link);
728 if (!owner || t_loc->owner == owner) {
729 --me->task_cnt;
731 if (t_loc == me->task_idx[seg]) {
732 me->task_idx[seg] = APR_RING_NEXT(t_loc, link);
733 if (me->task_idx[seg] == APR_RING_SENTINEL(me->tasks,
735 link)
736 || TASK_PRIORITY_SEG(me->task_idx[seg]) != seg) {
737 me->task_idx[seg] = NULL;
738 }
739 }
740 APR_RING_REMOVE(t_loc, link);
741 }
742 t_loc = next;
743 }
744 return APR_SUCCESS;
745}
746
747/* Must be locked by the caller */
/* Block until no busy thread is running a task for `owner` (NULL owner:
 * any anonymous task). Each matching busy thread is asked to signal
 * work_done when its current task finishes; after every wakeup the busy
 * list is rescanned from the start because the wait releases the lock. */
748static void wait_on_busy_threads(apr_thread_pool_t *me, void *owner)
749{
750#ifndef NDEBUG
/* (os_thread declaration on dropped line 751.) */
752#endif
753 struct apr_thread_list_elt *elt;
754
755 elt = APR_RING_FIRST(me->busy_thds);
756 while (elt != APR_RING_SENTINEL(me->busy_thds, apr_thread_list_elt, link)) {
757 if (owner ? owner != elt->current_owner : !elt->current_owner) {
758 elt = APR_RING_NEXT(elt, link);
759 continue;
760 }
761
762#ifndef NDEBUG
763 /* make sure the thread is not the one calling tasks_cancel */
764 apr_os_thread_get(&os_thread, elt->thd);
765#ifdef WIN32
766 /* hack for apr win32 bug */
/* (the two self-deadlock asserts are on dropped lines 767 and 769) */
768#else
770#endif
771#endif
772
773 elt->signal_work_done = 1;
774 apr_thread_cond_wait(me->work_done, me->lock);
775
776 /* Restart */
777 elt = APR_RING_FIRST(me->busy_thds);
778 }
779
780 /* Maintain dead threads */
782}
783
/* Cancel all of `owner`'s pending work (declaration start on dropped lines
 * 783-784; per the helpers this is the tasks_cancel entry point). Removes
 * queued and scheduled tasks, then waits for busy workers running the
 * owner's tasks to finish. rv declaration and lock/unlock are on dropped
 * lines 787, 789, 800. */
785 void *owner)
786{
788

790

791 if (me->task_cnt > 0) {
792 rv = remove_tasks(me, owner);
793 }
794 if (me->scheduled_task_cnt > 0) {
795 rv = remove_scheduled_tasks(me, owner);
796 }
797

798 wait_on_busy_threads(me, owner);
799

801

802 return rv;
803}
804
/* Unsynchronized accessors (each declaration line was dropped by the dump;
 * order matches the struct: task count, scheduled task count, thread count,
 * busy, idle, tasks run, task high-water, thread high-water, threads timed
 * out, idle_max, idle_wait). They read volatile counters without the lock,
 * so values are instantaneous snapshots. */
806{
807 return me->task_cnt;
808}
809

812{
813 return me->scheduled_task_cnt;
814}
815

817{
818 return me->thd_cnt;
819}
820

822{
823 return me->busy_cnt;
824}
825

827{
828 return me->idle_cnt;
829}
830

833{
834 return me->tasks_run;
835}
836

839{
840 return me->tasks_high;
841}
842

845{
846 return me->thd_high;
847}
848

851{
852 return me->thd_timed_out;
853}
854
855

857{
858 return me->idle_max;
859}
860

863{
864 return me->idle_wait;
865}
866
867/*
868 * Stop threads above given *cnt, set the number of threads stopped in *cnt.
869 * NOTE: There could be busy threads become idle during this function
870 */
/* Marks threads beyond the first *cnt entries of the chosen list (idle or
 * busy) as TH_STOP; the workers exit themselves on their next loop turn.
 * Locking, `last = APR_RING_LAST(thds)` and the tail of the function are on
 * dropped lines (877, 888, 894, 907-910). */
871static void stop_threads(apr_thread_pool_t *me, apr_size_t *cnt, int idle)
872{
873 struct apr_thread_list *thds;
874 struct apr_thread_list_elt *elt, *last;
875 apr_size_t n, i;
876

878

879 if (idle) {
880 thds = me->idle_thds;
881 n = me->idle_cnt;
882 }
883 else {
884 thds = me->busy_thds;
885 n = me->busy_cnt;
886 }
887 if (n <= *cnt) {
/* Already at or below the target: nothing stopped. */
889 *cnt = 0;
890 return;
891 }
892

/* Skip the first *cnt survivors, then flag the rest to stop. */
893 elt = APR_RING_FIRST(thds);
895 for (i = 0; i < *cnt; ++i) {
896 elt = APR_RING_NEXT(elt, link);
897 }
898 for (; i < n; ++i) {
899 elt->state = TH_STOP;
900 if (elt == last) {
901 break;
902 }
903 elt = APR_RING_NEXT(elt, link);
904 }
905 assert(i + 1 == n);
906 *cnt -= n;

909
911}
912
/* Idle-thread stopper (declaration dropped, lines 912-913; referenced below
 * as stop_idle_threads). Wakes the idle threads so the TH_STOP flag takes
 * effect; the lock/unlock around the broadcast are on dropped lines 917/919.
 * Returns the count reported back by stop_threads(). */
914{
915 stop_threads(me, &cnt, 1);
916 if (cnt) {
918 apr_thread_cond_broadcast(me->more_work);
920 }
921 return cnt;
922}
923

/* Busy-thread stopper (declaration dropped, line 924): busy threads notice
 * TH_STOP after their current task, so no wakeup is needed. */
925{
926 stop_threads(me, &cnt, 0);
927 return cnt;
928}
929

/* Set the idle-thread cap and immediately stop idle threads above it
 * (declaration dropped, lines 930-931). */
932{
933 me->idle_max = cnt;
934 return stop_idle_threads(me, cnt);
935}
936

/* Swap in a new idle-wait timeout and return the previous one (declaration
 * and `oldtime` declaration dropped, lines 937-939, 941). */
940{
942

943 oldtime = me->idle_wait;
944 me->idle_wait = timeout;
945

946 return oldtime;
947}
948
/* thd_max getter (declaration dropped, lines 948-949). */
950{
951 return me->thd_max;
952}
953

954/*
955 * This function stop extra working threads to the new limit.
956 * NOTE: There could be busy threads become idle during this function
957 */
/* thd_max setter (declaration dropped, lines 958-959): lowers the cap and
 * stops surplus threads, idle ones first; the stop_busy/stop_idle calls are
 * on dropped lines 972 and 975. Returns the number of threads (to be)
 * stopped. */
960{
961 apr_size_t n, i;
962

963 me->thd_max = cnt;
964 n = me->thd_cnt;
965 if (n <= cnt) {
966 return 0;
967 }
968 n -= cnt; /* #threads to stop */
969

970 i = me->idle_cnt;
971 if (n >= i) {
973 n = i; /* stop all idle threads */
974 }
976

977 return n;
978}
979

/* threshold getter (declaration dropped, line 980). */
981{
982 return me->threshold;
983}
984

/* threshold setter: returns the previous value (declaration and `ov`
 * declaration dropped, lines 985-986, 988). */
987{
989

990 ov = me->threshold;
991 me->threshold = val;
992 return ov;
993}
994
/* Look up the owner of the task currently running on thread `thd`
 * (declaration start dropped, lines 994-995). The worker stored the task
 * pointer under the "apr_thread_pool_task" thread-data key before invoking
 * it (see the worker loop above); APR_BADARG means the thread is not
 * currently running a pool task. */
996 void **owner)
997{
998 apr_status_t rv;
1000 void *data;
1001

1002 rv = apr_thread_data_get(&data, "apr_thread_pool_task", thd);
1003 if (rv != APR_SUCCESS) {
1004 return rv;
1005 }
1006

1007 task = data;
1008 if (!task) {
1009 *owner = NULL;
1010 return APR_BADARG;
1011 }
1012

1013 *owner = task->owner;
1014 return APR_SUCCESS;
1015}
1016
1017#endif /* APR_HAS_THREADS */
1018
1019/* vim: set ts=4 sw=4 et cin tw=80: */
int n
Definition ap_regex.h:278
APR Portability Routines.
APR Rings.
APU_DECLARE(void)
Computes SipHash-2-4, producing a 64bit (APR_SIPHASH_DSIZE) hash from a message and a 128bit (APR_SIP...
Definition apr_sha1.c:206
APR Condition Variable Routines.
APR Thread Pool Library.
int apr_os_thread_equal(apr_os_thread_t tid1, apr_os_thread_t tid2)
Definition thread.c:117
static sem_id lock
Definition threadpriv.c:21
#define APR_ENOMEM
Definition apr_errno.h:683
#define APR_BADARG
Definition apr_errno.h:459
#define APR_NOTFOUND
Definition apr_errno.h:463
apr_size_t size
apr_uint32_t val
Definition apr_atomic.h:66
const char int apr_pool_t * pool
Definition apr_cstr.h:84
#define APR_SUCCESS
Definition apr_errno.h:225
int apr_status_t
Definition apr_errno.h:44
void * data
apr_interval_time_t t
apr_interval_time_t apr_pollcb_cb_t func
Definition apr_poll.h:422
#define apr_pool_create(newpool, parent)
Definition apr_pools.h:322
#define apr_pcalloc(p, size)
Definition apr_pools.h:465
#define APR_RING_ENTRY(elem)
Definition apr_ring.h:70
#define APR_RING_INSERT_TAIL(hp, nep, elem, link)
Definition apr_ring.h:328
#define APR_RING_INIT(hp, elem, link)
Definition apr_ring.h:192
#define APR_RING_HEAD(head, elem)
Definition apr_ring.h:91
#define APR_RING_SENTINEL(hp, elem, link)
Definition apr_ring.h:159
#define APR_RING_INSERT_BEFORE(lep, nep, link)
Definition apr_ring.h:261
#define APR_RING_LAST(hp)
Definition apr_ring.h:171
#define APR_RING_EMPTY(hp, elem, link)
Definition apr_ring.h:204
#define APR_RING_REMOVE(ep, link)
Definition apr_ring.h:381
#define APR_RING_FIRST(hp)
Definition apr_ring.h:166
#define APR_RING_NEXT(ep, link)
Definition apr_ring.h:177
#define APR_RING_ELEM_INIT(ep, link)
Definition apr_ring.h:212
const char char ** last
void *(APR_THREAD_FUNC * apr_thread_start_t)(apr_thread_t *, void *)
int int status
apr_int64_t apr_interval_time_t
Definition apr_time.h:55
apr_int64_t apr_time_t
Definition apr_time.h:45
static apr_status_t dispatch(md_store_fs_t *s_fs, md_store_fs_ev_t ev, unsigned int group, const char *fname, apr_filetype_e ftype, apr_pool_t *p)
struct param_s param
return NULL
Definition mod_so.c:359
int i
Definition mod_so.c:347
apr_status_t apr_thread_exit(apr_thread_t *thd, apr_status_t retval)
Definition thread.c:157
apr_os_thread_t apr_os_thread_current()
Definition thread.c:142
apr_status_t apr_thread_join(apr_status_t *retval, apr_thread_t *thd)
Definition thread.c:166
apr_status_t apr_thread_data_set(void *data, const char *key, apr_status_t(*cleanup)(void *), apr_thread_t *thread)
Definition thread.c:198
apr_status_t apr_thread_data_get(void **data, const char *key, apr_thread_t *thread)
Definition thread.c:186
apr_status_t apr_thread_create(apr_thread_t **new, apr_threadattr_t *attr, apr_thread_start_t func, void *data, apr_pool_t *pool)
Definition thread.c:73
IN ULONG IN INT timeout