Apache HTTPD
proc_mutex.c
Go to the documentation of this file.
1/* Licensed to the Apache Software Foundation (ASF) under one or more
2 * contributor license agreements. See the NOTICE file distributed with
3 * this work for additional information regarding copyright ownership.
4 * The ASF licenses this file to You under the Apache License, Version 2.0
5 * (the "License"); you may not use this file except in compliance with
6 * the License. You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "apr.h"
18#include "apr_strings.h"
19#include "apr_arch_proc_mutex.h"
20#include "apr_arch_file_io.h" /* for apr_mkstemp() */
21#include "apr_hash.h"
22#include "apr_atomic.h"
23
28
29#if APR_HAS_POSIXSEM_SERIALIZE || APR_HAS_FCNTL_SERIALIZE || \
30 APR_HAS_SYSVSEM_SERIALIZE
33 const char *fname)
34{
35 return APR_SUCCESS;
36}
37#endif
38
39#if APR_HAS_POSIXSEM_SERIALIZE || APR_HAS_PROC_PTHREAD_SERIALIZE
44{
45 return APR_ENOTIMPL;
46}
47#endif
48
49#if APR_HAS_FCNTL_SERIALIZE \
50 || APR_HAS_FLOCK_SERIALIZE \
51 || (APR_HAS_SYSVSEM_SERIALIZE \
52 && !defined(HAVE_SEMTIMEDOP)) \
53 || (APR_HAS_POSIXSEM_SERIALIZE \
54 && !defined(HAVE_SEM_TIMEDWAIT)) \
55 || (APR_HAS_PROC_PTHREAD_SERIALIZE \
56 && !defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK) \
57 && !defined(HAVE_PTHREAD_CONDATTR_SETPSHARED))
60{
61#define SLEEP_TIME apr_time_from_msec(10)
62 apr_status_t rv;
63 for (;;) {
64 rv = apr_proc_mutex_trylock(mutex);
65 if (!APR_STATUS_IS_EBUSY(rv)) {
66 if (rv == APR_SUCCESS) {
67 mutex->curr_locked = 1;
68 }
69 break;
70 }
71 if (timeout <= 0) {
72 rv = APR_TIMEUP;
73 break;
74 }
75 if (timeout > SLEEP_TIME) {
78 }
79 else {
81 timeout = 0;
82 }
83 }
84 return rv;
85}
86#endif
87
88#if APR_HAS_POSIXSEM_SERIALIZE
89
90#ifndef SEM_FAILED
91#define SEM_FAILED (-1)
92#endif
93
95{
96 apr_proc_mutex_t *mutex = mutex_;
97
98 if (sem_close(mutex->os.psem_interproc) < 0) {
99 return errno;
100 }
101
102 return APR_SUCCESS;
103}
104
static unsigned int rshash(const char *p)
{
    /* Hash function from Robert Sedgewick's 'Algorithms in C' book.
     *
     * Used to derive a short, (hopefully) unique POSIX semaphore name
     * from a mutex filename; a collision only risks a sem_open() name
     * clash, so cryptographic strength is not required.
     *
     * Fixes vs. the original:
     *  - the input is read-only, so the parameter is const-qualified
     *    (callers passing char* are unaffected);
     *  - each byte is converted through unsigned char before widening,
     *    so the result no longer depends on the implementation-defined
     *    signedness of plain char for bytes >= 0x80 (identical results
     *    for 7-bit ASCII input).
     *
     * All arithmetic is on unsigned int and relies on well-defined
     * modular (wraparound) overflow behavior.
     */
    unsigned int b = 378551;
    unsigned int a = 63689;
    unsigned int retval = 0;

    for ( ; *p; p++)
    {
        retval = retval * a + (unsigned char)*p;
        a *= b;
    }

    return retval;
}
119
121 const char *fname)
122{
123 #define APR_POSIXSEM_NAME_MIN 13
124 sem_t *psem;
125 char semname[32];
126
127 /*
128 * This bogusness is to follow what appears to be the
129 * lowest common denominator in Posix semaphore naming:
130 * - start with '/'
131 * - be at most 14 chars
132 * - be unique and not match anything on the filesystem
133 *
134 * Because of this, we use fname to generate a (unique) hash
135 * and use that as the name of the semaphore. If no filename was
136 * given, we create one based on the time. We tuck the name
137 * away, since it might be useful for debugging. We use 2 hashing
138 * functions to try to avoid collisions.
139 *
140 * To make this as robust as possible, we initially try something
141 * larger (and hopefully more unique) and gracefully fail down to the
142 * LCD above.
143 *
144 * NOTE: Darwin (Mac OS X) seems to be the most restrictive
145 * implementation. Versions previous to Darwin 6.2 had the 14
146 * char limit, but later rev's allow up to 31 characters.
147 *
148 */
149 if (fname) {
150 apr_ssize_t flen = strlen(fname);
151 char *p = apr_pstrndup(new_mutex->pool, fname, strlen(fname));
152 unsigned int h1, h2;
153 h1 = (apr_hashfunc_default((const char *)p, &flen) & 0xffffffff);
154 h2 = (rshash(p) & 0xffffffff);
155 apr_snprintf(semname, sizeof(semname), "/ApR.%xH%x", h1, h2);
156 } else {
158 unsigned long sec;
159 unsigned long usec;
160 now = apr_time_now();
163 apr_snprintf(semname, sizeof(semname), "/ApR.%lxZ%lx", sec, usec);
164 }
165 do {
166 psem = sem_open(semname, O_CREAT | O_EXCL, 0644, 1);
167 } while (psem == (sem_t *)SEM_FAILED && errno == EINTR);
168 if (psem == (sem_t *)SEM_FAILED) {
169 if (errno == ENAMETOOLONG) {
170 /* Oh well, good try */
172 } else {
173 return errno;
174 }
175 do {
176 psem = sem_open(semname, O_CREAT | O_EXCL, 0644, 1);
177 } while (psem == (sem_t *)SEM_FAILED && errno == EINTR);
178 }
179
180 if (psem == (sem_t *)SEM_FAILED) {
181 return errno;
182 }
183 /* Ahhh. The joys of Posix sems. Predelete it... */
185 new_mutex->os.psem_interproc = psem;
186 new_mutex->fname = apr_pstrdup(new_mutex->pool, semname);
190 return APR_SUCCESS;
191}
192
194{
195 int rc;
196
197 do {
198 rc = sem_wait(mutex->os.psem_interproc);
199 } while (rc < 0 && errno == EINTR);
200 if (rc < 0) {
201 return errno;
202 }
203 mutex->curr_locked = 1;
204 return APR_SUCCESS;
205}
206
208{
209 int rc;
210
211 do {
212 rc = sem_trywait(mutex->os.psem_interproc);
213 } while (rc < 0 && errno == EINTR);
214 if (rc < 0) {
215 if (errno == EAGAIN) {
216 return APR_EBUSY;
217 }
218 return errno;
219 }
220 mutex->curr_locked = 1;
221 return APR_SUCCESS;
222}
223
224#if defined(HAVE_SEM_TIMEDWAIT)
227{
228 if (timeout <= 0) {
230 return (rv == APR_EBUSY) ? APR_TIMEUP : rv;
231 }
232 else {
233 int rc;
234 struct timespec abstime;
235
237 abstime.tv_sec = apr_time_sec(timeout);
238 abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */
239
240 do {
241 rc = sem_timedwait(mutex->os.psem_interproc, &abstime);
242 } while (rc < 0 && errno == EINTR);
243 if (rc < 0) {
244 if (errno == ETIMEDOUT) {
245 return APR_TIMEUP;
246 }
247 return errno;
248 }
249 }
250 mutex->curr_locked = 1;
251 return APR_SUCCESS;
252}
253#endif
254
256{
257 mutex->curr_locked = 0;
258 if (sem_post(mutex->os.psem_interproc) < 0) {
259 /* any failure is probably fatal, so no big deal to leave
260 * ->curr_locked at 0. */
261 return errno;
262 }
263 return APR_SUCCESS;
264}
265
267{
268#if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(POSIXSEM_IS_GLOBAL)
270#else
271 0,
272#endif
276#if defined(HAVE_SEM_TIMEDWAIT)
278#else
280#endif
286 "posixsem"
287};
288
289#endif /* Posix sem implementation */
290
291#if APR_HAS_SYSVSEM_SERIALIZE
292
293static struct sembuf proc_mutex_op_on;
294static struct sembuf proc_mutex_op_try;
295static struct sembuf proc_mutex_op_off;
296
/* One-time initialization of the three static sembuf operation templates
 * used by the SysV semaphore mutex implementation:
 *   - proc_mutex_op_on:  decrement semaphore 0 (blocking acquire);
 *   - proc_mutex_op_try: decrement semaphore 0 (non-blocking tryacquire);
 *   - proc_mutex_op_off: increment semaphore 0 (release).
 * SEM_UNDO makes the kernel roll the operation back if the holding
 * process exits without releasing, avoiding a stuck lock.
 *
 * NOTE(review): the assignment of proc_mutex_op_try.sem_flg is not
 * visible in this view (source line 304 elided by extraction);
 * presumably it sets SEM_UNDO | IPC_NOWAIT to make the try-op
 * non-blocking -- confirm against the original file.
 */
297static void proc_mutex_sysv_setup(void)
 298{
 299 proc_mutex_op_on.sem_num = 0;
 300 proc_mutex_op_on.sem_op = -1; /* P(): wait/decrement to acquire */
 301 proc_mutex_op_on.sem_flg = SEM_UNDO; /* auto-undo if owner dies */
 302 proc_mutex_op_try.sem_num = 0;
 303 proc_mutex_op_try.sem_op = -1; /* P() for the non-blocking try path */
 305 proc_mutex_op_off.sem_num = 0;
 306 proc_mutex_op_off.sem_op = 1; /* V(): increment to release */
 307 proc_mutex_op_off.sem_flg = SEM_UNDO;
 308}
309
311{
313 union semun ick;
314
315 if (mutex->os.crossproc != -1) {
316 ick.val = 0;
317 semctl(mutex->os.crossproc, 0, IPC_RMID, ick);
318 }
319 return APR_SUCCESS;
320}
321
323 const char *fname)
324{
325 union semun ick;
326 apr_status_t rv;
327
328 new_mutex->os.crossproc = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
329 if (new_mutex->os.crossproc == -1) {
330 rv = errno;
332 return rv;
333 }
334 ick.val = 1;
335 if (semctl(new_mutex->os.crossproc, 0, SETVAL, ick) < 0) {
336 rv = errno;
338 new_mutex->os.crossproc = -1;
339 return rv;
340 }
341 new_mutex->curr_locked = 0;
345 return APR_SUCCESS;
346}
347
349{
350 int rc;
351
352 do {
353 rc = semop(mutex->os.crossproc, &proc_mutex_op_on, 1);
354 } while (rc < 0 && errno == EINTR);
355 if (rc < 0) {
356 return errno;
357 }
358 mutex->curr_locked = 1;
359 return APR_SUCCESS;
360}
361
363{
364 int rc;
365
366 do {
367 rc = semop(mutex->os.crossproc, &proc_mutex_op_try, 1);
368 } while (rc < 0 && errno == EINTR);
369 if (rc < 0) {
370 if (errno == EAGAIN) {
371 return APR_EBUSY;
372 }
373 return errno;
374 }
375 mutex->curr_locked = 1;
376 return APR_SUCCESS;
377}
378
379#if defined(HAVE_SEMTIMEDOP)
382{
383 if (timeout <= 0) {
385 return (rv == APR_EBUSY) ? APR_TIMEUP : rv;
386 }
387 else {
388 int rc;
389 struct timespec reltime;
390
391 reltime.tv_sec = apr_time_sec(timeout);
392 reltime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */
393
394 do {
395 rc = semtimedop(mutex->os.crossproc, &proc_mutex_op_on, 1,
396 &reltime);
397 } while (rc < 0 && errno == EINTR);
398 if (rc < 0) {
399 if (errno == EAGAIN) {
400 return APR_TIMEUP;
401 }
402 return errno;
403 }
404 }
405 mutex->curr_locked = 1;
406 return APR_SUCCESS;
407}
408#endif
409
411{
412 int rc;
413
414 mutex->curr_locked = 0;
415 do {
416 rc = semop(mutex->os.crossproc, &proc_mutex_op_off, 1);
417 } while (rc < 0 && errno == EINTR);
418 if (rc < 0) {
419 return errno;
420 }
421 return APR_SUCCESS;
422}
423
428{
429
430 union semun ick;
431 struct semid_ds buf;
432 buf.sem_perm.uid = uid;
433 buf.sem_perm.gid = gid;
434 buf.sem_perm.mode = apr_unix_perms2mode(perms);
435 ick.buf = &buf;
436 if (semctl(mutex->os.crossproc, 0, IPC_SET, ick) < 0) {
437 return errno;
438 }
439 return APR_SUCCESS;
440}
441
443{
444#if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(SYSVSEM_IS_GLOBAL)
446#else
447 0,
448#endif
452#if defined(HAVE_SEMTIMEDOP)
454#else
456#endif
462 "sysvsem"
463};
464
465#endif /* SysV sem implementation */
466
467#if APR_HAS_PROC_PTHREAD_SERIALIZE
468
469#ifndef APR_USE_PROC_PTHREAD_MUTEX_COND
470#if defined(HAVE_PTHREAD_CONDATTR_SETPSHARED) \
471 && !defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK)
472#define APR_USE_PROC_PTHREAD_MUTEX_COND 1
473#else
474#define APR_USE_PROC_PTHREAD_MUTEX_COND 0
475#endif
476#endif
477
478/* The mmap()ed pthread_interproc is the native pthread_mutex_t followed
479 * by a refcounter to track children using it. We want to avoid calling
480 * pthread_mutex_destroy() on the shared mutex area while it is in use by
481 * another process, because this may mark the shared pthread_mutex_t as
482 * invalid for everyone, including forked children (unlike "sysvsem" for
483 * example), causing unexpected errors or deadlocks (PR 49504). So the
484 * last process (parent or child) referencing the mutex will effectively
485 * destroy it.
486 */
487typedef struct {
488#define proc_pthread_cast(m) \
489 ((proc_pthread_mutex_t *)(m)->os.pthread_interproc)
490 pthread_mutex_t mutex;
491#define proc_pthread_mutex(m) \
492 (proc_pthread_cast(m)->mutex)
493#if APR_USE_PROC_PTHREAD_MUTEX_COND
494 pthread_cond_t cond;
495#define proc_pthread_mutex_cond(m) \
496 (proc_pthread_cast(m)->cond)
498#define proc_pthread_mutex_cond_locked(m) \
499 (proc_pthread_cast(m)->cond_locked)
501#define proc_pthread_mutex_cond_num_waiters(m) \
502 (proc_pthread_cast(m)->cond_num_waiters)
503#define proc_pthread_mutex_is_cond(m) \
504 ((m)->pthread_refcounting && proc_pthread_mutex_cond_locked(m) != -1)
505#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
506 apr_uint32_t refcount;
507#define proc_pthread_mutex_refcount(m) \
508 (proc_pthread_cast(m)->refcount)
510
511
513{
514 if (mutex->pthread_refcounting) {
516 return 1;
517 }
518 return 0;
519}
520
522{
523 if (mutex->pthread_refcounting) {
525 }
526 return 0;
527}
528
530{
532 apr_status_t rv;
533
534#if APR_USE_PROC_PTHREAD_MUTEX_COND
535 if (proc_pthread_mutex_is_cond(mutex)) {
536 mutex->curr_locked = 0;
537 }
538 else
539#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
540 if (mutex->curr_locked == 1) {
541 if ((rv = pthread_mutex_unlock(&proc_pthread_mutex(mutex)))) {
542#ifdef HAVE_ZOS_PTHREADS
543 rv = errno;
544#endif
545 return rv;
546 }
547 }
548 if (!proc_pthread_mutex_dec(mutex)) {
549#if APR_USE_PROC_PTHREAD_MUTEX_COND
550 if (proc_pthread_mutex_is_cond(mutex) &&
552#ifdef HAVE_ZOS_PTHREADS
553 rv = errno;
554#endif
555 return rv;
556 }
557#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
558
559 if ((rv = pthread_mutex_destroy(&proc_pthread_mutex(mutex)))) {
560#ifdef HAVE_ZOS_PTHREADS
561 rv = errno;
562#endif
563 return rv;
564 }
565 }
566 return APR_SUCCESS;
567}
568
570{
572 apr_status_t rv;
573
574 /* curr_locked is set to -1 until the mutex has been created */
575 if (mutex->curr_locked != -1) {
576 if ((rv = proc_pthread_mutex_unref(mutex))) {
577 return rv;
578 }
579 }
580 if (munmap(mutex->os.pthread_interproc, sizeof(proc_pthread_mutex_t))) {
581 return errno;
582 }
583 return APR_SUCCESS;
584}
585
587 const char *fname)
588{
589 apr_status_t rv;
590 int fd;
592
593 fd = open("/dev/zero", O_RDWR);
594 if (fd < 0) {
595 return errno;
596 }
597
598 new_mutex->os.pthread_interproc = mmap(NULL, sizeof(proc_pthread_mutex_t),
600 fd, 0);
601 if (new_mutex->os.pthread_interproc == MAP_FAILED) {
602 new_mutex->os.pthread_interproc = NULL;
603 rv = errno;
604 close(fd);
605 return rv;
606 }
607 close(fd);
608
609 new_mutex->pthread_refcounting = 1;
610 new_mutex->curr_locked = -1; /* until the mutex has been created */
611#if APR_USE_PROC_PTHREAD_MUTEX_COND
613#endif
614
615 if ((rv = pthread_mutexattr_init(&mattr))) {
616#ifdef HAVE_ZOS_PTHREADS
617 rv = errno;
618#endif
620 return rv;
621 }
623#ifdef HAVE_ZOS_PTHREADS
624 rv = errno;
625#endif
628 return rv;
629 }
630
631#if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP)
632#ifdef HAVE_PTHREAD_MUTEX_ROBUST
634#else
636#endif
637 if (rv) {
638#ifdef HAVE_ZOS_PTHREADS
639 rv = errno;
640#endif
643 return rv;
644 }
646#ifdef HAVE_ZOS_PTHREADS
647 rv = errno;
648#endif
651 return rv;
652 }
653#endif /* HAVE_PTHREAD_MUTEX_ROBUST[_NP] */
654
656#ifdef HAVE_ZOS_PTHREADS
657 rv = errno;
658#endif
661 return rv;
662 }
663
664 proc_pthread_mutex_refcount(new_mutex) = 1; /* first/parent reference */
665 new_mutex->curr_locked = 0; /* mutex created now */
666
667 if ((rv = pthread_mutexattr_destroy(&mattr))) {
668#ifdef HAVE_ZOS_PTHREADS
669 rv = errno;
670#endif
672 return rv;
673 }
674
676 (void *)new_mutex,
679 return APR_SUCCESS;
680}
681
684 const char *fname)
685{
686 (*mutex)->curr_locked = 0;
687 if (proc_pthread_mutex_inc(*mutex)) {
690 }
691 return APR_SUCCESS;
692}
693
696{
697 apr_status_t rv;
698
699#if APR_USE_PROC_PTHREAD_MUTEX_COND
700 if (proc_pthread_mutex_is_cond(mutex)) {
701 if ((rv = pthread_mutex_lock(&proc_pthread_mutex(mutex)))) {
702#ifdef HAVE_ZOS_PTHREADS
703 rv = errno;
704#endif
705#if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP)
706 /* Okay, our owner died. Let's try to make it consistent again. */
707 if (rv == EOWNERDEAD) {
709#ifdef HAVE_PTHREAD_MUTEX_ROBUST
711#else
713#endif
714 }
715 else
716#endif
717 return rv;
718 }
719
720 if (!proc_pthread_mutex_cond_locked(mutex)) {
721 rv = APR_SUCCESS;
722 }
723 else if (!timeout) {
724 rv = APR_TIMEUP;
725 }
726 else {
727 struct timespec abstime;
728
729 if (timeout > 0) {
731 abstime.tv_sec = apr_time_sec(timeout);
732 abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */
733 }
734
736 do {
737 if (timeout < 0) {
739 &proc_pthread_mutex(mutex));
740 if (rv) {
741#ifdef HAVE_ZOS_PTHREADS
742 rv = errno;
743#endif
744 break;
745 }
746 }
747 else {
749 &proc_pthread_mutex(mutex),
750 &abstime);
751 if (rv) {
752#ifdef HAVE_ZOS_PTHREADS
753 rv = errno;
754#endif
755 if (rv == ETIMEDOUT) {
756 rv = APR_TIMEUP;
757 }
758 break;
759 }
760 }
761 } while (proc_pthread_mutex_cond_locked(mutex));
763 }
764 if (rv != APR_SUCCESS) {
766 return rv;
767 }
768
770
772 if (rv) {
773#ifdef HAVE_ZOS_PTHREADS
774 rv = errno;
775#endif
776 return rv;
777 }
778 }
779 else
780#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
781 {
782 if (timeout < 0) {
784 if (rv) {
785#ifdef HAVE_ZOS_PTHREADS
786 rv = errno;
787#endif
788 }
789 }
790 else if (!timeout) {
792 if (rv) {
793#ifdef HAVE_ZOS_PTHREADS
794 rv = errno;
795#endif
796 if (rv == EBUSY) {
797 return APR_TIMEUP;
798 }
799 }
800 }
801 else
802#if defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK)
803 {
804 struct timespec abstime;
805
807 abstime.tv_sec = apr_time_sec(timeout);
808 abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */
809
811 if (rv) {
812#ifdef HAVE_ZOS_PTHREADS
813 rv = errno;
814#endif
815 if (rv == ETIMEDOUT) {
816 return APR_TIMEUP;
817 }
818 }
819 }
820 if (rv) {
821#if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP)
822 /* Okay, our owner died. Let's try to make it consistent again. */
823 if (rv == EOWNERDEAD) {
825#ifdef HAVE_PTHREAD_MUTEX_ROBUST
827#else
829#endif
830 }
831 else
832#endif
833 return rv;
834 }
835#else /* !HAVE_PTHREAD_MUTEX_TIMEDLOCK */
837#endif
838 }
839
840 mutex->curr_locked = 1;
841 return APR_SUCCESS;
842}
843
845{
846 return proc_mutex_pthread_acquire_ex(mutex, -1);
847}
848
850{
852 return (rv == APR_TIMEUP) ? APR_EBUSY : rv;
853}
854
857{
858 return proc_mutex_pthread_acquire_ex(mutex, (timeout <= 0) ? 0 : timeout);
859}
860
862{
863 apr_status_t rv;
864
865#if APR_USE_PROC_PTHREAD_MUTEX_COND
866 if (proc_pthread_mutex_is_cond(mutex)) {
867 if ((rv = pthread_mutex_lock(&proc_pthread_mutex(mutex)))) {
868#ifdef HAVE_ZOS_PTHREADS
869 rv = errno;
870#endif
871#if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP)
872 /* Okay, our owner died. Let's try to make it consistent again. */
873 if (rv == EOWNERDEAD) {
875#ifdef HAVE_PTHREAD_MUTEX_ROBUST
877#else
879#endif
880 }
881 else
882#endif
883 return rv;
884 }
885
886 if (!proc_pthread_mutex_cond_locked(mutex)) {
887 rv = APR_EINVAL;
888 }
889 else if (!proc_pthread_mutex_cond_num_waiters(mutex)) {
890 rv = APR_SUCCESS;
891 }
892 else {
894#ifdef HAVE_ZOS_PTHREADS
895 if (rv) {
896 rv = errno;
897 }
898#endif
899 }
900 if (rv != APR_SUCCESS) {
902 return rv;
903 }
904
906 }
907#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
908
909 mutex->curr_locked = 0;
910 if ((rv = pthread_mutex_unlock(&proc_pthread_mutex(mutex)))) {
911#ifdef HAVE_ZOS_PTHREADS
912 rv = errno;
913#endif
914 return rv;
915 }
916
917 return APR_SUCCESS;
918}
919
921{
932 "pthread"
933};
934
935#if APR_USE_PROC_PTHREAD_MUTEX_COND
937 const char *fname)
938{
939 apr_status_t rv;
941
943 if (rv != APR_SUCCESS) {
944 return rv;
945 }
946
947 if ((rv = pthread_condattr_init(&cattr))) {
948#ifdef HAVE_ZOS_PTHREADS
949 rv = errno;
950#endif
953 return rv;
954 }
956#ifdef HAVE_ZOS_PTHREADS
957 rv = errno;
958#endif
962 return rv;
963 }
965 &cattr))) {
966#ifdef HAVE_ZOS_PTHREADS
967 rv = errno;
968#endif
972 return rv;
973 }
975
978
979 return APR_SUCCESS;
980}
981
983{
994 "pthread"
995};
996#endif
997
998#endif
999
1000#if APR_HAS_FCNTL_SERIALIZE
1001
1002static struct flock proc_mutex_lock_it;
1003static struct flock proc_mutex_unlock_it;
1004
1006
1007static void proc_mutex_fcntl_setup(void)
1008{
1009 proc_mutex_lock_it.l_whence = SEEK_SET; /* from current point */
1010 proc_mutex_lock_it.l_start = 0; /* -"- */
1011 proc_mutex_lock_it.l_len = 0; /* until end of file */
1012 proc_mutex_lock_it.l_type = F_WRLCK; /* set exclusive/write lock */
1013 proc_mutex_lock_it.l_pid = 0; /* pid not actually interesting */
1014 proc_mutex_unlock_it.l_whence = SEEK_SET; /* from current point */
1015 proc_mutex_unlock_it.l_start = 0; /* -"- */
1016 proc_mutex_unlock_it.l_len = 0; /* until end of file */
1017 proc_mutex_unlock_it.l_type = F_UNLCK; /* set exclusive/write lock */
1018 proc_mutex_unlock_it.l_pid = 0; /* pid not actually interesting */
1019}
1020
1022{
1024 apr_proc_mutex_t *mutex=mutex_;
1025
1026 if (mutex->curr_locked == 1) {
1028 if (status != APR_SUCCESS)
1029 return status;
1030 }
1031
1032 if (mutex->interproc) {
1033 status = apr_file_close(mutex->interproc);
1034 }
1035 if (!mutex->interproc_closing
1036 && mutex->os.crossproc != -1
1037 && close(mutex->os.crossproc) == -1
1038 && status == APR_SUCCESS) {
1039 status = errno;
1040 }
1041 return status;
1042}
1043
1045 const char *fname)
1046{
1047 int rv;
1048
1049 if (fname) {
1050 new_mutex->fname = apr_pstrdup(new_mutex->pool, fname);
1051 rv = apr_file_open(&new_mutex->interproc, new_mutex->fname,
1054 new_mutex->pool);
1055 }
1056 else {
1057 new_mutex->fname = apr_pstrdup(new_mutex->pool, "/tmp/aprXXXXXX");
1058 rv = apr_file_mktemp(&new_mutex->interproc, new_mutex->fname,
1060 new_mutex->pool);
1061 }
1062
1063 if (rv != APR_SUCCESS) {
1064 return rv;
1065 }
1066
1067 new_mutex->os.crossproc = new_mutex->interproc->filedes;
1068 new_mutex->interproc_closing = 1;
1069 new_mutex->curr_locked = 0;
1070 unlink(new_mutex->fname);
1072 (void*)new_mutex,
1075 return APR_SUCCESS;
1076}
1077
1079{
1080 int rc;
1081
1082 do {
1083 rc = fcntl(mutex->os.crossproc, F_SETLKW, &proc_mutex_lock_it);
1084 } while (rc < 0 && errno == EINTR);
1085 if (rc < 0) {
1086 return errno;
1087 }
1088 mutex->curr_locked=1;
1089 return APR_SUCCESS;
1090}
1091
1093{
1094 int rc;
1095
1096 do {
1097 rc = fcntl(mutex->os.crossproc, F_SETLK, &proc_mutex_lock_it);
1098 } while (rc < 0 && errno == EINTR);
1099 if (rc < 0) {
1100#if FCNTL_TRYACQUIRE_EACCES
1101 if (errno == EACCES) {
1102#else
1103 if (errno == EAGAIN) {
1104#endif
1105 return APR_EBUSY;
1106 }
1107 return errno;
1108 }
1109 mutex->curr_locked = 1;
1110 return APR_SUCCESS;
1111}
1112
1114{
1115 int rc;
1116
1117 mutex->curr_locked=0;
1118 do {
1119 rc = fcntl(mutex->os.crossproc, F_SETLKW, &proc_mutex_unlock_it);
1120 } while (rc < 0 && errno == EINTR);
1121 if (rc < 0) {
1122 return errno;
1123 }
1124 return APR_SUCCESS;
1125}
1126
1129 apr_uid_t uid,
1130 apr_gid_t gid)
1131{
1132
1133 if (mutex->fname) {
1134 if (!(perms & APR_FPROT_GSETID))
1135 gid = -1;
1136 if (fchown(mutex->os.crossproc, uid, gid) < 0) {
1137 return errno;
1138 }
1139 }
1140 return APR_SUCCESS;
1141}
1142
1144{
1145#if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(FCNTL_IS_GLOBAL)
1147#else
1148 0,
1149#endif
1159 "fcntl"
1160};
1161
1162#endif /* fcntl implementation */
1163
1164#if APR_HAS_FLOCK_SERIALIZE
1165
1167
1169{
1171 apr_proc_mutex_t *mutex=mutex_;
1172
1173 if (mutex->curr_locked == 1) {
1175 if (status != APR_SUCCESS)
1176 return status;
1177 }
1178 if (mutex->interproc) { /* if it was opened properly */
1179 status = apr_file_close(mutex->interproc);
1180 }
1181 if (!mutex->interproc_closing
1182 && mutex->os.crossproc != -1
1183 && close(mutex->os.crossproc) == -1
1184 && status == APR_SUCCESS) {
1185 status = errno;
1186 }
1187 if (mutex->fname) {
1188 unlink(mutex->fname);
1189 }
1190 return status;
1191}
1192
1194 const char *fname)
1195{
1196 int rv;
1197
1198 if (fname) {
1199 new_mutex->fname = apr_pstrdup(new_mutex->pool, fname);
1200 rv = apr_file_open(&new_mutex->interproc, new_mutex->fname,
1203 new_mutex->pool);
1204 }
1205 else {
1206 new_mutex->fname = apr_pstrdup(new_mutex->pool, "/tmp/aprXXXXXX");
1207 rv = apr_file_mktemp(&new_mutex->interproc, new_mutex->fname,
1209 new_mutex->pool);
1210 }
1211
1212 if (rv != APR_SUCCESS) {
1214 return rv;
1215 }
1216
1217 new_mutex->os.crossproc = new_mutex->interproc->filedes;
1218 new_mutex->interproc_closing = 1;
1219 new_mutex->curr_locked = 0;
1223 return APR_SUCCESS;
1224}
1225
1227{
1228 int rc;
1229
1230 do {
1231 rc = flock(mutex->os.crossproc, LOCK_EX);
1232 } while (rc < 0 && errno == EINTR);
1233 if (rc < 0) {
1234 return errno;
1235 }
1236 mutex->curr_locked = 1;
1237 return APR_SUCCESS;
1238}
1239
1241{
1242 int rc;
1243
1244 do {
1245 rc = flock(mutex->os.crossproc, LOCK_EX | LOCK_NB);
1246 } while (rc < 0 && errno == EINTR);
1247 if (rc < 0) {
1248 if (errno == EWOULDBLOCK || errno == EAGAIN) {
1249 return APR_EBUSY;
1250 }
1251 return errno;
1252 }
1253 mutex->curr_locked = 1;
1254 return APR_SUCCESS;
1255}
1256
1258{
1259 int rc;
1260
1261 mutex->curr_locked = 0;
1262 do {
1263 rc = flock(mutex->os.crossproc, LOCK_UN);
1264 } while (rc < 0 && errno == EINTR);
1265 if (rc < 0) {
1266 return errno;
1267 }
1268 return APR_SUCCESS;
1269}
1270
1272 apr_pool_t *pool,
1273 const char *fname)
1274{
1276 int rv;
1277
1278 if (!fname) {
1279 fname = (*mutex)->fname;
1280 if (!fname) {
1281 return APR_SUCCESS;
1282 }
1283 }
1284
1286 sizeof(apr_proc_mutex_t));
1287 new_mutex->pool = pool;
1288 new_mutex->fname = apr_pstrdup(pool, fname);
1289 rv = apr_file_open(&new_mutex->interproc, new_mutex->fname,
1290 APR_FOPEN_WRITE, 0, new_mutex->pool);
1291 if (rv != APR_SUCCESS) {
1292 return rv;
1293 }
1294 new_mutex->os.crossproc = new_mutex->interproc->filedes;
1295 new_mutex->interproc_closing = 1;
1296
1297 *mutex = new_mutex;
1298 return APR_SUCCESS;
1299}
1300
1303 apr_uid_t uid,
1304 apr_gid_t gid)
1305{
1306
1307 if (mutex->fname) {
1308 if (!(perms & APR_FPROT_GSETID))
1309 gid = -1;
1310 if (fchown(mutex->os.crossproc, uid, gid) < 0) {
1311 return errno;
1312 }
1313 }
1314 return APR_SUCCESS;
1315}
1316
1318{
1319#if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(FLOCK_IS_GLOBAL)
1321#else
1322 0,
1323#endif
1333 "flock"
1334};
1335
1336#endif /* flock implementation */
1337
1339{
1340 /* setup only needed for sysvsem and fnctl */
1341#if APR_HAS_SYSVSEM_SERIALIZE
1343#endif
1344#if APR_HAS_FCNTL_SERIALIZE
1346#endif
1347}
1348
1352{
1353#if APR_HAS_PROC_PTHREAD_SERIALIZE
1354 new_mutex->os.pthread_interproc = NULL;
1355#endif
1356#if APR_HAS_POSIXSEM_SERIALIZE
1357 new_mutex->os.psem_interproc = NULL;
1358#endif
1359#if APR_HAS_SYSVSEM_SERIALIZE || APR_HAS_FCNTL_SERIALIZE || APR_HAS_FLOCK_SERIALIZE
1360 new_mutex->os.crossproc = -1;
1361
1362#if APR_HAS_FCNTL_SERIALIZE || APR_HAS_FLOCK_SERIALIZE
1363 new_mutex->interproc = NULL;
1364 new_mutex->interproc_closing = 0;
1365#endif
1366#endif
1367
1368 switch (mech) {
1369 case APR_LOCK_FCNTL:
1370#if APR_HAS_FCNTL_SERIALIZE
1372 if (ospmutex) {
1373 if (ospmutex->crossproc == -1) {
1374 return APR_EINVAL;
1375 }
1376 new_mutex->os.crossproc = ospmutex->crossproc;
1377 }
1378#else
1379 return APR_ENOTIMPL;
1380#endif
1381 break;
1382 case APR_LOCK_FLOCK:
1383#if APR_HAS_FLOCK_SERIALIZE
1385 if (ospmutex) {
1386 if (ospmutex->crossproc == -1) {
1387 return APR_EINVAL;
1388 }
1389 new_mutex->os.crossproc = ospmutex->crossproc;
1390 }
1391#else
1392 return APR_ENOTIMPL;
1393#endif
1394 break;
1395 case APR_LOCK_SYSVSEM:
1396#if APR_HAS_SYSVSEM_SERIALIZE
1398 if (ospmutex) {
1399 if (ospmutex->crossproc == -1) {
1400 return APR_EINVAL;
1401 }
1402 new_mutex->os.crossproc = ospmutex->crossproc;
1403 }
1404#else
1405 return APR_ENOTIMPL;
1406#endif
1407 break;
1408 case APR_LOCK_POSIXSEM:
1409#if APR_HAS_POSIXSEM_SERIALIZE
1411 if (ospmutex) {
1412 if (ospmutex->psem_interproc == NULL) {
1413 return APR_EINVAL;
1414 }
1415 new_mutex->os.psem_interproc = ospmutex->psem_interproc;
1416 }
1417#else
1418 return APR_ENOTIMPL;
1419#endif
1420 break;
1422#if APR_HAS_PROC_PTHREAD_SERIALIZE
1424 if (ospmutex) {
1425 if (ospmutex->pthread_interproc == NULL) {
1426 return APR_EINVAL;
1427 }
1428 new_mutex->os.pthread_interproc = ospmutex->pthread_interproc;
1429 }
1430#else
1431 return APR_ENOTIMPL;
1432#endif
1433 break;
1435#if APR_HAS_PROC_PTHREAD_SERIALIZE \
1436 && (APR_USE_PROC_PTHREAD_MUTEX_COND \
1437 || defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK)) \
1438 && defined(HAVE_PTHREAD_MUTEX_ROBUST)
1439#if APR_USE_PROC_PTHREAD_MUTEX_COND
1441#else
1443#endif
1444 if (ospmutex) {
1445 if (ospmutex->pthread_interproc == NULL) {
1446 return APR_EINVAL;
1447 }
1448 new_mutex->os.pthread_interproc = ospmutex->pthread_interproc;
1449 }
1450 break;
1451#elif APR_HAS_SYSVSEM_SERIALIZE && defined(HAVE_SEMTIMEDOP)
1453 if (ospmutex) {
1454 if (ospmutex->crossproc == -1) {
1455 return APR_EINVAL;
1456 }
1457 new_mutex->os.crossproc = ospmutex->crossproc;
1458 }
1459 break;
1460#elif APR_HAS_POSIXSEM_SERIALIZE && defined(HAVE_SEM_TIMEDWAIT)
1462 if (ospmutex) {
1463 if (ospmutex->psem_interproc == NULL) {
1464 return APR_EINVAL;
1465 }
1466 new_mutex->os.psem_interproc = ospmutex->psem_interproc;
1467 }
1468 break;
1469#endif
1470 /* fall trough */
1471 case APR_LOCK_DEFAULT:
1472#if APR_USE_FLOCK_SERIALIZE
1474 if (ospmutex) {
1475 if (ospmutex->crossproc == -1) {
1476 return APR_EINVAL;
1477 }
1478 new_mutex->os.crossproc = ospmutex->crossproc;
1479 }
1480#elif APR_USE_SYSVSEM_SERIALIZE
1482 if (ospmutex) {
1483 if (ospmutex->crossproc == -1) {
1484 return APR_EINVAL;
1485 }
1486 new_mutex->os.crossproc = ospmutex->crossproc;
1487 }
1488#elif APR_USE_FCNTL_SERIALIZE
1490 if (ospmutex) {
1491 if (ospmutex->crossproc == -1) {
1492 return APR_EINVAL;
1493 }
1494 new_mutex->os.crossproc = ospmutex->crossproc;
1495 }
1496#elif APR_USE_PROC_PTHREAD_SERIALIZE
1498 if (ospmutex) {
1499 if (ospmutex->pthread_interproc == NULL) {
1500 return APR_EINVAL;
1501 }
1502 new_mutex->os.pthread_interproc = ospmutex->pthread_interproc;
1503 }
1504#elif APR_USE_POSIXSEM_SERIALIZE
1506 if (ospmutex) {
1507 if (ospmutex->psem_interproc == NULL) {
1508 return APR_EINVAL;
1509 }
1510 new_mutex->os.psem_interproc = ospmutex->psem_interproc;
1511 }
1512#else
1513 return APR_ENOTIMPL;
1514#endif
1515 break;
1516 default:
1517 return APR_ENOTIMPL;
1518 }
1519 return APR_SUCCESS;
1520}
1521
1523{
1524 apr_proc_mutex_t mutex;
1525
1527 NULL) != APR_SUCCESS) {
1528 return "unknown";
1529 }
1530
1531 return apr_proc_mutex_name(&mutex);
1532}
1533
1535{
1536 apr_status_t rv;
1537
1539 NULL)) != APR_SUCCESS) {
1540 return rv;
1541 }
1542
1543 if ((rv = new_mutex->meth->create(new_mutex, fname)) != APR_SUCCESS) {
1544 return rv;
1545 }
1546
1547 return APR_SUCCESS;
1548}
1549
1551 const char *fname,
1554{
1556 apr_status_t rv;
1557
1559 new_mutex->pool = pool;
1560
1562 return rv;
1563
1564 *mutex = new_mutex;
1565 return APR_SUCCESS;
1566}
1567
1569 const char *fname,
1571{
1572 return (*mutex)->meth->child_init(mutex, pool, fname);
1573}
1574
1576{
1577 return mutex->meth->acquire(mutex);
1578}
1579
1581{
1582 return mutex->meth->tryacquire(mutex);
1583}
1584
1587{
1588#if APR_HAS_TIMEDLOCKS
1589 return mutex->meth->timedacquire(mutex, timeout);
1590#else
1591 return APR_ENOTIMPL;
1592#endif
1593}
1594
1596{
1597 return mutex->meth->release(mutex);
1598}
1599
1601{
1602 return ((apr_proc_mutex_t *)mutex)->meth->cleanup(mutex);
1603}
1604
1609
1611{
1612 return mutex->meth->name;
1613}
1614
1616{
1617 /* POSIX sems use the fname field but don't use a file,
1618 * so be careful. */
1619#if APR_HAS_FLOCK_SERIALIZE
1620 if (mutex->meth == &mutex_flock_methods) {
1621 return mutex->fname;
1622 }
1623#endif
1624#if APR_HAS_FCNTL_SERIALIZE
1625 if (mutex->meth == &mutex_fcntl_methods) {
1626 return mutex->fname;
1627 }
1628#endif
1629 return NULL;
1630}
1631
1633{
1635 return mutex->meth->perms_set(mutex, perms, uid, gid);
1636}
1637
1639
1640/* Implement OS-specific accessors defined in apr_portable.h */
1641
1645{
1646 *ospmutex = pmutex->os;
1647 if (mech) {
1648 *mech = pmutex->meth->mech;
1649 }
1650 return APR_SUCCESS;
1651}
1652
1655{
1657}
1658
1662 int register_cleanup,
1664{
1665 apr_status_t rv;
1666 if (pool == NULL) {
1667 return APR_ENOPOOL;
1668 }
1669
1670 if ((*pmutex) == NULL) {
1671 (*pmutex) = (apr_proc_mutex_t *)apr_pcalloc(pool,
1672 sizeof(apr_proc_mutex_t));
1673 (*pmutex)->pool = pool;
1674 }
1676#if APR_HAS_FCNTL_SERIALIZE || APR_HAS_FLOCK_SERIALIZE
1677 if (rv == APR_SUCCESS) {
1678 rv = apr_os_file_put(&(*pmutex)->interproc, &(*pmutex)->os.crossproc,
1679 0, pool);
1680 }
1681#endif
1682
1683 if (rv == APR_SUCCESS && register_cleanup) {
1686 }
1687 return rv;
1688}
1689
1693{
1695 0, pool);
1696}
1697
#define MAP_FAILED
APR Atomic Operations.
APR Hash Tables.
APR Strings library.
apr_uint32_t apr_atomic_inc32(volatile apr_uint32_t *mem)
Definition atomic.c:51
int apr_atomic_dec32(volatile apr_uint32_t *mem)
Definition atomic.c:56
const unsigned char * buf
Definition util_md5.h:50
#define APR_ENOTIMPL
Definition apr_errno.h:476
#define APR_TIMEUP
Definition apr_errno.h:450
#define APR_ENOPOOL
Definition apr_errno.h:290
#define APR_EBUSY
Definition apr_errno.h:480
#define APR_EINVAL
Definition apr_errno.h:711
apr_fileperms_t apr_uid_t uid
const char apr_lockmech_e mech
apr_fileperms_t apr_uid_t apr_gid_t gid
#define APR_STATUS_IS_EBUSY(s)
Definition apr_errno.h:628
apr_bucket apr_bucket_brigade * a
apr_file_t * fd
apr_redis_t * rc
Definition apr_redis.h:173
const void apr_status_t(*) apr_status_t(* APR_DECLARE)(void) apr_pool_pre_cleanup_register(apr_pool_t *p
Definition apr_pools.h:646
apr_size_t size
const char int apr_pool_t * pool
Definition apr_cstr.h:84
#define APR_SUCCESS
Definition apr_errno.h:225
int apr_status_t
Definition apr_errno.h:44
apr_int32_t apr_fileperms_t
const char apr_fileperms_t perms
#define APR_FOPEN_EXCL
Definition apr_file_io.h:63
#define APR_FOPEN_WRITE
Definition apr_file_io.h:55
#define APR_FOPEN_CREATE
Definition apr_file_io.h:56
#define APR_GREAD
#define APR_UWRITE
#define APR_UREAD
#define APR_WREAD
#define APR_FPROT_GSETID
const char * fname
apr_pool_t * cont
Definition apr_getopt.h:103
const apr_hash_t * h1
Definition apr_hash.h:232
const apr_hash_t const apr_hash_t * h2
Definition apr_hash.h:233
#define APR_PERMS_SET_IMPLEMENT(type)
apr_pool_t * b
Definition apr_pools.h:529
#define APR_POOL_IMPLEMENT_ACCESSOR(type)
Definition apr_pools.h:91
#define apr_pcalloc(p, size)
Definition apr_pools.h:465
apr_os_proc_mutex_t * ospmutex
apr_os_file_t int register_cleanup
apr_global_mutex_t * pmutex
apr_lockmech_e
@ APR_LOCK_FLOCK
@ APR_LOCK_SYSVSEM
@ APR_LOCK_POSIXSEM
@ APR_LOCK_DEFAULT_TIMED
@ APR_LOCK_PROC_PTHREAD
@ APR_LOCK_FCNTL
@ APR_LOCK_DEFAULT
int int status
apr_int64_t apr_interval_time_t
Definition apr_time.h:55
apr_int64_t apr_time_t
Definition apr_time.h:45
#define apr_time_sec(time)
Definition apr_time.h:63
#define apr_time_usec(time)
Definition apr_time.h:66
gid_t apr_gid_t
Definition apr_user.h:54
uid_t apr_uid_t
Definition apr_user.h:45
apr_pool_t * p
Definition md_event.c:32
return NULL
Definition mod_so.c:359
apr_os_proc_mutex_t os
const apr_proc_mutex_unix_lock_methods_t * meth
apr_status_t(* perms_set)(apr_proc_mutex_t *, apr_fileperms_t, apr_uid_t, apr_gid_t)
apr_status_t(* release)(apr_proc_mutex_t *)
apr_status_t(* timedacquire)(apr_proc_mutex_t *, apr_interval_time_t)
apr_status_t(* child_init)(apr_proc_mutex_t **, apr_pool_t *, const char *)
apr_status_t(* tryacquire)(apr_proc_mutex_t *)
apr_status_t(* acquire)(apr_proc_mutex_t *)
static void proc_mutex(abts_case *tc, void *data)
static apr_time_t now
Definition testtime.c:33
#define APR_PROCESS_LOCK_MECH_IS_GLOBAL
mode_t apr_unix_perms2mode(apr_fileperms_t perms)
Definition fileacc.c:35
static apr_status_t proc_mutex_choose_method(apr_proc_mutex_t *new_mutex, apr_lockmech_e mech, apr_os_proc_mutex_t *ospmutex)
void apr_proc_mutex_unix_setup_lock(void)
static apr_status_t proc_mutex_create(apr_proc_mutex_t *new_mutex, apr_lockmech_e mech, const char *fname)
IN ULONG IN INT timeout