Apache HTTPD
apr_rmm.c
Go to the documentation of this file.
/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#include "apr_general.h"
18#include "apr_rmm.h"
19#include "apr_errno.h"
20#include "apr_lib.h"
21#include "apr_strings.h"
22
/* The RMM region is made up of two doubly-linked lists of blocks: the
 * list of used blocks, and the list of free blocks (either list may
 * be empty).  The base pointer, rmm->base, points at the beginning of
 * the shmem region in use.  Each block is addressable by an
 * apr_rmm_off_t value, which represents the offset from the base
 * pointer.  The term "address" is used here to mean such a value, an
 * "offset from rmm->base".
 *
 * The RMM region contains exactly one "rmm_hdr_block_t" structure,
 * the "header block", which is always stored at the base pointer.
 * The firstused field in this structure is the address of the first
 * block in the "used blocks" list; the firstfree field is the address
 * of the first block in the "free blocks" list.
 *
 * Each block is prefixed by an "rmm_block_t" structure, followed by
 * the caller-usable region represented by the block.  The next and
 * prev fields of the structure are zero if the block is at the end or
 * beginning of the linked list respectively, or otherwise hold the
 * address of the next and previous blocks in the list.  ("Address 0",
 * i.e. rmm->base, is *not* a valid address for a block, since the
 * header block is always stored at that address.)
 *
 * At creation, the RMM region is initialized to hold a single block
 * on the free list representing the entire available shm segment
 * (minus the header block); subsequent allocation and deallocation of
 * blocks involves splitting blocks and coalescing adjacent blocks,
 * and switching them between the free and used lists as
 * appropriate. */
51
57
58/* Always at our apr_rmm_off(0):
59 */
65
66#define RMM_HDR_BLOCK_SIZE (APR_ALIGN_DEFAULT(sizeof(rmm_hdr_block_t)))
67#define RMM_BLOCK_SIZE (APR_ALIGN_DEFAULT(sizeof(rmm_block_t)))
68
75
77 apr_rmm_off_t find, int includes)
78{
79 apr_rmm_off_t prev = 0;
80
81 while (next) {
82 struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + next);
83
84 if (find == next)
85 return next;
86
87 /* Overshot? */
88 if (find < next)
89 return includes ? prev : 0;
90
91 prev = next;
92 next = blk->next;
93 }
94 return includes ? prev : 0;
95}
96
98{
99 apr_rmm_off_t next = rmm->base->firstfree;
102
103 while (next) {
104 struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + next);
105
106 if (blk->size == size)
107 return next;
108
109 if (blk->size >= size) {
110 /* XXX: sub optimal algorithm
111 * We need the most thorough best-fit logic, since we can
112 * never grow our rmm, we are SOL when we hit the wall.
113 */
114 if (!bestsize || (blk->size < bestsize)) {
115 bestsize = blk->size;
116 best = next;
117 }
118 }
119
120 next = blk->next;
121 }
122
123 if (bestsize > RMM_BLOCK_SIZE + size) {
124 struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + best);
125 struct rmm_block_t *new = (rmm_block_t*)((char*)rmm->base + best + size);
126
127 new->size = blk->size - size;
128 new->next = blk->next;
129 new->prev = best;
130
131 blk->size = size;
132 blk->next = best + size;
133
134 if (new->next) {
135 blk = (rmm_block_t*)((char*)rmm->base + new->next);
136 blk->prev = best + size;
137 }
138 }
139
140 return best;
141}
142
143static void move_block(apr_rmm_t *rmm, apr_rmm_off_t this, int free)
144{
145 struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + this);
146
147 /* close the gap */
148 if (blk->prev) {
149 struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
150 prev->next = blk->next;
151 }
152 else {
153 if (free) {
154 rmm->base->firstused = blk->next;
155 }
156 else {
157 rmm->base->firstfree = blk->next;
158 }
159 }
160 if (blk->next) {
161 struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
162 next->prev = blk->prev;
163 }
164
165 /* now find it in the other list, pushing it to the head if required */
166 if (free) {
167 blk->prev = find_block_by_offset(rmm, rmm->base->firstfree, this, 1);
168 if (!blk->prev) {
169 blk->next = rmm->base->firstfree;
170 rmm->base->firstfree = this;
171 }
172 }
173 else {
174 blk->prev = find_block_by_offset(rmm, rmm->base->firstused, this, 1);
175 if (!blk->prev) {
176 blk->next = rmm->base->firstused;
177 rmm->base->firstused = this;
178 }
179 }
180
181 /* and open it up */
182 if (blk->prev) {
183 struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
184 if (free && (blk->prev + prev->size == this)) {
185 /* Collapse us into our predecessor */
186 prev->size += blk->size;
187 this = blk->prev;
188 blk = prev;
189 }
190 else {
191 blk->next = prev->next;
192 prev->next = this;
193 }
194 }
195
196 if (blk->next) {
197 struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
198 if (free && (this + blk->size == blk->next)) {
199 /* Collapse us into our successor */
200 blk->size += next->size;
201 blk->next = next->next;
202 if (blk->next) {
203 next = (rmm_block_t*)((char*)rmm->base + blk->next);
204 next->prev = this;
205 }
206 }
207 else {
208 next->prev = this;
209 }
210 }
211}
212
214 void *base, apr_size_t size,
215 apr_pool_t *p)
216{
217 apr_status_t rv;
220
221 if (!lock) {
222 nulllock.type = apr_anylock_none;
223 nulllock.lock.pm = NULL;
224 lock = &nulllock;
225 }
226 if ((rv = APR_ANYLOCK_LOCK(lock)) != APR_SUCCESS)
227 return rv;
228
229 (*rmm) = (apr_rmm_t *)apr_pcalloc(p, sizeof(apr_rmm_t));
230 (*rmm)->p = p;
231 (*rmm)->base = base;
232 (*rmm)->size = size;
233 (*rmm)->lock = *lock;
234
235 (*rmm)->base->abssize = size;
236 (*rmm)->base->firstused = 0;
237 (*rmm)->base->firstfree = RMM_HDR_BLOCK_SIZE;
238
239 blk = (rmm_block_t *)((char*)base + (*rmm)->base->firstfree);
240
241 blk->size = size - (*rmm)->base->firstfree;
242 blk->prev = 0;
243 blk->next = 0;
244
245 return APR_ANYLOCK_UNLOCK(lock);
246}
247
249{
250 apr_status_t rv;
252
253 if ((rv = APR_ANYLOCK_LOCK(&rmm->lock)) != APR_SUCCESS) {
254 return rv;
255 }
256 /* Blast it all --- no going back :) */
257 if (rmm->base->firstused) {
258 apr_rmm_off_t this = rmm->base->firstused;
259 do {
260 blk = (rmm_block_t *)((char*)rmm->base + this);
261 this = blk->next;
262 blk->next = blk->prev = 0;
263 } while (this);
264 rmm->base->firstused = 0;
265 }
266 if (rmm->base->firstfree) {
267 apr_rmm_off_t this = rmm->base->firstfree;
268 do {
269 blk = (rmm_block_t *)((char*)rmm->base + this);
270 this = blk->next;
271 blk->next = blk->prev = 0;
272 } while (this);
273 rmm->base->firstfree = 0;
274 }
275 rmm->base->abssize = 0;
276 rmm->size = 0;
277
278 return APR_ANYLOCK_UNLOCK(&rmm->lock);
279}
280
282 void *base, apr_pool_t *p)
283{
285
286 if (!lock) {
287 nulllock.type = apr_anylock_none;
288 nulllock.lock.pm = NULL;
289 lock = &nulllock;
290 }
291
292 /* sanity would be good here */
293 (*rmm) = (apr_rmm_t *)apr_pcalloc(p, sizeof(apr_rmm_t));
294 (*rmm)->p = p;
295 (*rmm)->base = base;
296 (*rmm)->size = (*rmm)->base->abssize;
297 (*rmm)->lock = *lock;
298 return APR_SUCCESS;
299}
300
302{
303 /* A noop until we introduce locked/refcounts */
304 return APR_SUCCESS;
305}
306
308{
310 apr_rmm_off_t this;
311
313 if (size < reqsize) {
314 return 0;
315 }
316
317 APR_ANYLOCK_LOCK(&rmm->lock);
318
319 this = find_block_of_size(rmm, size);
320
321 if (this) {
322 move_block(rmm, this, 0);
323 this += RMM_BLOCK_SIZE;
324 }
325
326 APR_ANYLOCK_UNLOCK(&rmm->lock);
327 return this;
328}
329
331{
333 apr_rmm_off_t this;
334
336 if (size < reqsize) {
337 return 0;
338 }
339
340 APR_ANYLOCK_LOCK(&rmm->lock);
341
342 this = find_block_of_size(rmm, size);
343
344 if (this) {
345 move_block(rmm, this, 0);
346 this += RMM_BLOCK_SIZE;
347 memset((char*)rmm->base + this, 0, size - RMM_BLOCK_SIZE);
348 }
349
350 APR_ANYLOCK_UNLOCK(&rmm->lock);
351 return this;
352}
353
356{
357 apr_rmm_off_t this;
359 struct rmm_block_t *blk;
361
362 if (!entity) {
363 return apr_rmm_malloc(rmm, reqsize);
364 }
365
367 if (size < reqsize) {
368 return 0;
369 }
371
372 if ((this = apr_rmm_malloc(rmm, size)) == 0) {
373 return 0;
374 }
375
376 blk = (rmm_block_t*)((char*)rmm->base + old - RMM_BLOCK_SIZE);
377 oldsize = blk->size;
378
382
383 return this;
384}
385
387{
388 apr_status_t rv;
389 struct rmm_block_t *blk;
390
391 /* A little sanity check is always healthy, especially here.
392 * If we really cared, we could make this compile-time
393 */
394 if (this < RMM_HDR_BLOCK_SIZE + RMM_BLOCK_SIZE) {
395 return APR_EINVAL;
396 }
397
398 this -= RMM_BLOCK_SIZE;
399
400 blk = (rmm_block_t*)((char*)rmm->base + this);
401
402 if ((rv = APR_ANYLOCK_LOCK(&rmm->lock)) != APR_SUCCESS) {
403 return rv;
404 }
405 if (blk->prev) {
406 struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
407 if (prev->next != this) {
408 APR_ANYLOCK_UNLOCK(&rmm->lock);
409 return APR_EINVAL;
410 }
411 }
412 else {
413 if (rmm->base->firstused != this) {
414 APR_ANYLOCK_UNLOCK(&rmm->lock);
415 return APR_EINVAL;
416 }
417 }
418
419 if (blk->next) {
420 struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
421 if (next->prev != this) {
422 APR_ANYLOCK_UNLOCK(&rmm->lock);
423 return APR_EINVAL;
424 }
425 }
426
427 /* Ok, it remained [apparently] sane, so unlink it
428 */
429 move_block(rmm, this, 1);
430
431 return APR_ANYLOCK_UNLOCK(&rmm->lock);
432}
433
435{
436 /* debug-sanity checking here would be good
437 */
438 return (void*)((char*)rmm->base + entity);
439}
440
442{
443 /* debug, or always, sanity checking here would be good
444 * since the primitive is apr_rmm_off_t, I don't mind penalizing
445 * inverse conversions for safety, unless someone can prove that
446 * there is no choice in some cases.
447 */
448 return ((char*)entity - (char*)rmm->base);
449}
450
452{
453 /* overhead per block is at most APR_ALIGN_DEFAULT(1) wasted bytes
454 * for alignment overhead, plus the size of the rmm_block_t
455 * structure. */
457}
int n
Definition ap_regex.h:278
#define APR_ANYLOCK_UNLOCK(lck)
#define APR_ANYLOCK_LOCK(lck)
Definition apr_anylock.h:66
APR Error Codes.
APR Miscellaneous library routines.
APR general purpose library routines.
#define RMM_HDR_BLOCK_SIZE
Definition apr_rmm.c:66
static apr_rmm_off_t find_block_of_size(apr_rmm_t *rmm, apr_size_t size)
Definition apr_rmm.c:97
static void move_block(apr_rmm_t *rmm, apr_rmm_off_t this, int free)
Definition apr_rmm.c:143
#define RMM_BLOCK_SIZE
Definition apr_rmm.c:67
static apr_rmm_off_t find_block_by_offset(apr_rmm_t *rmm, apr_rmm_off_t next, apr_rmm_off_t find, int includes)
Definition apr_rmm.c:76
APR-UTIL Relocatable Memory Management Routines.
APU_DECLARE(void)
Computes SipHash-2-4, producing a 64bit (APR_SIPHASH_DSIZE) hash from a message and a 128bit (APR_SIP...
Definition apr_sha1.c:206
APR Strings library.
static sem_id lock
Definition threadpriv.c:21
ap_conf_vector_t * base
#define APR_EINVAL
Definition apr_errno.h:711
apr_size_t apr_rmm_off_t
Definition apr_rmm.h:43
void * entity
Definition apr_rmm.h:95
apr_size_t size
#define APR_SUCCESS
Definition apr_errno.h:225
int apr_status_t
Definition apr_errno.h:44
#define APR_ALIGN_DEFAULT(size)
#define apr_pcalloc(p, size)
Definition apr_pools.h:465
apr_size_t reqsize
Definition apr_shm.h:71
apr_pool_t * p
Definition md_event.c:32
return NULL
Definition mod_so.c:359
enum apr_anylock_t::tm_lock type
apr_size_t size
Definition apr_rmm.c:72
rmm_hdr_block_t * base
Definition apr_rmm.c:71
apr_pool_t * p
Definition apr_rmm.c:70
apr_anylock_t lock
Definition apr_rmm.c:73
apr_size_t size
Definition apr_rmm.c:53
apr_rmm_off_t next
Definition apr_rmm.c:55
apr_rmm_off_t prev
Definition apr_rmm.c:54
apr_size_t abssize
Definition apr_rmm.c:61
apr_rmm_off_t firstused
Definition apr_rmm.c:62
apr_rmm_off_t firstfree
Definition apr_rmm.c:63