Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * aset.c
4 : * Allocation set definitions.
5 : *
6 : * AllocSet is our standard implementation of the abstract MemoryContext
7 : * type.
8 : *
9 : *
10 : * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
11 : * Portions Copyright (c) 1994, Regents of the University of California
12 : *
13 : * IDENTIFICATION
14 : * src/backend/utils/mmgr/aset.c
15 : *
16 : * NOTE:
17 : * This is a new (Feb. 05, 1999) implementation of the allocation set
18 : * routines. AllocSet...() does not use OrderedSet...() any more.
19 : * Instead it manages allocations in a block pool by itself, combining
20 : * many small allocations in a few bigger blocks. AllocSetFree() normally
21 : * doesn't free() memory really. It just add's the free'd area to some
22 : * list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
23 : * at once on AllocSetReset(), which happens when the memory context gets
24 : * destroyed.
25 : * Jan Wieck
26 : *
27 : * Performance improvement from Tom Lane, 8/99: for extremely large request
28 : * sizes, we do want to be able to give the memory back to free() as soon
29 : * as it is pfree()'d. Otherwise we risk tying up a lot of memory in
30 : * freelist entries that might never be usable. This is specially needed
31 : * when the caller is repeatedly repalloc()'ing a block bigger and bigger;
32 : * the previous instances of the block were guaranteed to be wasted until
33 : * AllocSetReset() under the old way.
34 : *
35 : * Further improvement 12/00: as the code stood, request sizes in the
36 : * midrange between "small" and "large" were handled very inefficiently,
37 : * because any sufficiently large free chunk would be used to satisfy a
38 : * request, even if it was much larger than necessary. This led to more
39 : * and more wasted space in allocated chunks over time. To fix, get rid
40 : * of the midrange behavior: we now handle only "small" power-of-2-size
41 : * chunks as chunks. Anything "large" is passed off to malloc(). Change
42 : * the number of freelists to change the small/large boundary.
43 : *
44 : *-------------------------------------------------------------------------
45 : */
46 :
47 : #include "postgres.h"
48 :
49 : #include "utils/memdebug.h"
50 : #include "utils/memutils.h"
51 :
52 : /* Define this to detail debug alloc information */
53 : /* #define HAVE_ALLOCINFO */
54 :
55 : /*--------------------
56 : * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
57 : * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
58 : *
59 : * Note that all chunks in the freelists have power-of-2 sizes. This
60 : * improves recyclability: we may waste some space, but the wasted space
61 : * should stay pretty constant as requests are made and released.
62 : *
63 : * A request too large for the last freelist is handled by allocating a
64 : * dedicated block from malloc(). The block still has a block header and
65 : * chunk header, but when the chunk is freed we'll return the whole block
66 : * to malloc(), not put it on our freelists.
67 : *
68 : * CAUTION: ALLOC_MINBITS must be large enough so that
69 : * 1<<ALLOC_MINBITS is at least MAXALIGN,
70 : * or we may fail to align the smallest chunks adequately.
71 : * 8-byte alignment is enough on all currently known machines.
72 : *
73 : * With the current parameters, request sizes up to 8K are treated as chunks,
74 : * larger requests go into dedicated blocks. Change ALLOCSET_NUM_FREELISTS
75 : * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
76 : * memutils.h to agree. (Note: in contexts with small maxBlockSize, we may
77 : * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
78 : *--------------------
79 : */
80 :
#define ALLOC_MINBITS		3	/* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS	11
/* With ALLOC_MINBITS = 3 and 11 freelists, ALLOC_CHUNK_LIMIT is 8K bytes */
#define ALLOC_CHUNK_LIMIT	(1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* Size of largest chunk that we use a fixed size for */
#define ALLOC_CHUNK_FRACTION	4
/* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */

/*--------------------
 * The first block allocated for an allocset has size initBlockSize.
 * Each time we have to allocate another block, we double the block size
 * (if possible, and without exceeding maxBlockSize), so as to reduce
 * the bookkeeping load on malloc().
 *
 * Blocks allocated to hold oversize chunks do not follow this rule, however;
 * they are just however big they need to be to hold that single chunk.
 *--------------------
 */

/* Space consumed by a block header / a chunk header, respectively */
#define ALLOC_BLOCKHDRSZ	MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ	sizeof(struct AllocChunkData)

typedef struct AllocBlockData *AllocBlock;	/* forward reference */
typedef struct AllocChunkData *AllocChunk;

/*
 * AllocPointer
 *		Aligned pointer which may be a member of an allocation set.
 */
typedef void *AllocPointer;
110 :
/*
 * AllocSetContext is our standard implementation of MemoryContext.
 *
 * Note: header.isReset means there is nothing for AllocSetReset to do.
 * This is different from the aset being physically empty (empty blocks list)
 * because we may still have a keeper block.  It's also different from the set
 * being logically empty, because we don't attempt to detect pfree'ing the
 * last active chunk.
 */
typedef struct AllocSetContext
{
	/* Standard memory-context fields; must be first so that an AllocSet
	 * pointer can be cast to/from MemoryContext. */
	MemoryContextData header;
	/* Info about storage allocated in this context: */
	AllocBlock	blocks;			/* head of list of blocks in this set */
	AllocChunk	freelist[ALLOCSET_NUM_FREELISTS];	/* free chunk lists */
	/* Allocation parameters for this context: */
	Size		initBlockSize;	/* initial block size */
	Size		maxBlockSize;	/* maximum block size */
	Size		nextBlockSize;	/* next block size to allocate */
	Size		allocChunkLimit;	/* effective chunk size limit */
	AllocBlock	keeper;			/* if not NULL, keep this block over resets */
} AllocSetContext;

typedef AllocSetContext *AllocSet;
135 :
/*
 * AllocBlock
 *		An AllocBlock is the unit of memory that is obtained by aset.c
 *		from malloc().  It contains one or more AllocChunks, which are
 *		the units requested by palloc() and freed by pfree().  AllocChunks
 *		cannot be returned to malloc() individually, instead they are put
 *		on freelists by pfree() and re-used by the next palloc() that has
 *		a matching request size.
 *
 * AllocBlockData is the header data for a block --- the usable space
 * within the block begins at the next alignment boundary.
 */
typedef struct AllocBlockData
{
	AllocSet	aset;			/* aset that owns this block */
	AllocBlock	prev;			/* prev block in aset's blocks list, if any */
	AllocBlock	next;			/* next block in aset's blocks list, if any */
	char	   *freeptr;		/* start of free space in this block */
	char	   *endptr;			/* end of space in this block */
} AllocBlockData;
156 :
/*
 * AllocChunk
 *		The prefix of each piece of memory in an AllocBlock
 */
typedef struct AllocChunkData
{
	/* size is always the size of the usable space in the chunk */
	Size		size;
#ifdef MEMORY_CONTEXT_CHECKING
	/* when debugging memory usage, also store actual requested size */
	/* this is zero in a free chunk */
	Size		requested_size;
#if MAXIMUM_ALIGNOF > 4 && SIZEOF_VOID_P == 4
	/*
	 * Pad so the header stays a multiple of MAXALIGN when pointers are
	 * 4 bytes but MAXALIGN is 8 (see the StaticAssertStmt in
	 * AllocSetContextCreate, which verifies this layout).
	 */
	Size		padding;
#endif

#endif							/* MEMORY_CONTEXT_CHECKING */

	/* aset is the owning aset if allocated, or the freelist link if free */
	void	   *aset;

	/* there must not be any padding to reach a MAXALIGN boundary here! */
} AllocChunkData;
180 :
/*
 * AllocPointerIsValid
 *		True iff pointer is valid allocation pointer.
 */
#define AllocPointerIsValid(pointer) PointerIsValid(pointer)

/*
 * AllocSetIsValid
 *		True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) PointerIsValid(set)

/* Convert between a chunk's user-data address and its header address */
#define AllocPointerGetChunk(ptr)	\
					((AllocChunk)(((char *)(ptr)) - ALLOC_CHUNKHDRSZ))
#define AllocChunkGetPointer(chk)	\
					((AllocPointer)(((char *)(chk)) + ALLOC_CHUNKHDRSZ))

/*
 * These functions implement the MemoryContext API for AllocSet contexts.
 */
static void *AllocSetAlloc(MemoryContext context, Size size);
static void AllocSetFree(MemoryContext context, void *pointer);
static void *AllocSetRealloc(MemoryContext context, void *pointer, Size size);
static void AllocSetInit(MemoryContext context);
static void AllocSetReset(MemoryContext context);
static void AllocSetDelete(MemoryContext context);
static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer);
static bool AllocSetIsEmpty(MemoryContext context);
static void AllocSetStats(MemoryContext context, int level, bool print,
			  MemoryContextCounters *totals);

#ifdef MEMORY_CONTEXT_CHECKING
static void AllocSetCheck(MemoryContext context);
#endif

/*
 * This is the virtual function table for AllocSet contexts.
 * The positional initializers must stay in the field order of
 * MemoryContextMethods.
 */
static MemoryContextMethods AllocSetMethods = {
	AllocSetAlloc,
	AllocSetFree,
	AllocSetRealloc,
	AllocSetInit,
	AllocSetReset,
	AllocSetDelete,
	AllocSetGetChunkSpace,
	AllocSetIsEmpty,
	AllocSetStats
#ifdef MEMORY_CONTEXT_CHECKING
	,AllocSetCheck
#endif
};

/*
 * Table for AllocSetFreeIndex: LogTable256[i] = ceil(log2(i + 1)),
 * built from 16-entry runs via LT16.
 */
#define LT16(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n

static const unsigned char LogTable256[256] =
{
	0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
	LT16(5), LT16(6), LT16(6), LT16(7), LT16(7), LT16(7), LT16(7),
	LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8)
};

/* ----------
 * Debug macros
 * ----------
 */
#ifdef HAVE_ALLOCINFO
#define AllocFreeInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocFree: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#define AllocAllocInfo(_cxt, _chunk) \
			fprintf(stderr, "AllocAlloc: %s: %p, %zu\n", \
				(_cxt)->header.name, (_chunk), (_chunk)->size)
#else
#define AllocFreeInfo(_cxt, _chunk)
#define AllocAllocInfo(_cxt, _chunk)
#endif
261 :
262 : /* ----------
263 : * AllocSetFreeIndex -
264 : *
265 : * Depending on the size of an allocation compute which freechunk
266 : * list of the alloc set it belongs to. Caller must have verified
267 : * that size <= ALLOC_CHUNK_LIMIT.
268 : * ----------
269 : */
270 : static inline int
271 42852975 : AllocSetFreeIndex(Size size)
272 : {
273 : int idx;
274 : unsigned int t,
275 : tsize;
276 :
277 42852975 : if (size > (1 << ALLOC_MINBITS))
278 : {
279 31677079 : tsize = (size - 1) >> ALLOC_MINBITS;
280 :
281 : /*
282 : * At this point we need to obtain log2(tsize)+1, ie, the number of
283 : * not-all-zero bits at the right. We used to do this with a
284 : * shift-and-count loop, but this function is enough of a hotspot to
285 : * justify micro-optimization effort. The best approach seems to be
286 : * to use a lookup table. Note that this code assumes that
287 : * ALLOCSET_NUM_FREELISTS <= 17, since we only cope with two bytes of
288 : * the tsize value.
289 : */
290 31677079 : t = tsize >> 8;
291 31677079 : idx = t ? LogTable256[t] + 8 : LogTable256[tsize];
292 :
293 31677079 : Assert(idx < ALLOCSET_NUM_FREELISTS);
294 : }
295 : else
296 11175896 : idx = 0;
297 :
298 42852975 : return idx;
299 : }
300 :
301 :
302 : /*
303 : * Public routines
304 : */
305 :
306 :
/*
 * AllocSetContextCreate
 *		Create a new AllocSet context.
 *
 * parent: parent context, or NULL if top-level context
 * name: name of context (for debugging only, need not be unique)
 * minContextSize: minimum context size
 * initBlockSize: initial allocation block size
 * maxBlockSize: maximum allocation block size
 *
 * Notes: the name string will be copied into context-lifespan storage.
 * Most callers should abstract the context size parameters using a macro
 * such as ALLOCSET_DEFAULT_SIZES.
 */
MemoryContext
AllocSetContextCreate(MemoryContext parent,
					  const char *name,
					  Size minContextSize,
					  Size initBlockSize,
					  Size maxBlockSize)
{
	AllocSet	set;

	/* Verify AllocChunkData's padding keeps 'aset' flush with MAXALIGN end */
	StaticAssertStmt(offsetof(AllocChunkData, aset) + sizeof(MemoryContext) ==
					 MAXALIGN(sizeof(AllocChunkData)),
					 "padding calculation in AllocChunkData is wrong");

	/*
	 * First, validate allocation parameters.  (If we're going to throw an
	 * error, we should do so before the context is created, not after.)  We
	 * somewhat arbitrarily enforce a minimum 1K block size.
	 */
	if (initBlockSize != MAXALIGN(initBlockSize) ||
		initBlockSize < 1024)
		elog(ERROR, "invalid initBlockSize for memory context: %zu",
			 initBlockSize);
	if (maxBlockSize != MAXALIGN(maxBlockSize) ||
		maxBlockSize < initBlockSize ||
		!AllocHugeSizeIsValid(maxBlockSize))	/* must be safe to double */
		elog(ERROR, "invalid maxBlockSize for memory context: %zu",
			 maxBlockSize);
	if (minContextSize != 0 &&
		(minContextSize != MAXALIGN(minContextSize) ||
		 minContextSize <= ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
		elog(ERROR, "invalid minContextSize for memory context: %zu",
			 minContextSize);

	/* Do the type-independent part of context creation */
	set = (AllocSet) MemoryContextCreate(T_AllocSetContext,
										 sizeof(AllocSetContext),
										 &AllocSetMethods,
										 parent,
										 name);

	/* Save allocation parameters */
	set->initBlockSize = initBlockSize;
	set->maxBlockSize = maxBlockSize;
	set->nextBlockSize = initBlockSize;

	/*
	 * Compute the allocation chunk size limit for this context.  It can't be
	 * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
	 * If maxBlockSize is small then requests exceeding the maxBlockSize, or
	 * even a significant fraction of it, should be treated as large chunks
	 * too.  For the typical case of maxBlockSize a power of 2, the chunk size
	 * limit will be at most 1/8th maxBlockSize, so that given a stream of
	 * requests that are all the maximum chunk size we will waste at most
	 * 1/8th of the allocated space.
	 *
	 * We have to have allocChunkLimit a power of two, because the requested
	 * and actually-allocated sizes of any chunk must be on the same side of
	 * the limit, else we get confused about whether the chunk is "big".
	 *
	 * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
	 */
	StaticAssertStmt(ALLOC_CHUNK_LIMIT == ALLOCSET_SEPARATE_THRESHOLD,
					 "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");

	/* Halve the limit until a limit-sized chunk (plus header) fits the
	 * allowed fraction of a max-size block. */
	set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
	while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
		   (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
		set->allocChunkLimit >>= 1;

	/*
	 * Grab always-allocated space, if requested
	 */
	if (minContextSize > 0)
	{
		Size		blksize = minContextSize;
		AllocBlock	block;

		block = (AllocBlock) malloc(blksize);
		if (block == NULL)
		{
			MemoryContextStats(TopMemoryContext);
			ereport(ERROR,
					(errcode(ERRCODE_OUT_OF_MEMORY),
					 errmsg("out of memory"),
					 errdetail("Failed while creating memory context \"%s\".",
							   name)));
		}
		block->aset = set;
		block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
		block->endptr = ((char *) block) + blksize;
		block->prev = NULL;
		block->next = set->blocks;
		if (block->next)
			block->next->prev = block;
		set->blocks = block;
		/* Mark block as not to be released at reset time */
		set->keeper = block;

		/* Mark unallocated space NOACCESS; leave the block header alone. */
		VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
								   blksize - ALLOC_BLOCKHDRSZ);
	}

	return (MemoryContext) set;
}
426 :
/*
 * AllocSetInit
 *		Context-type-specific initialization routine.
 *
 * This is called by MemoryContextCreate() after setting up the
 * generic MemoryContext fields and before linking the new context
 * into the context tree.  We must do whatever is needed to make the
 * new context minimally valid for deletion.  We must *not* risk
 * failure --- thus, for example, allocating more memory is not cool.
 * (AllocSetContextCreate can allocate memory when it gets control
 * back, however.)
 */
static void
AllocSetInit(MemoryContext context)
{
	/*
	 * Since MemoryContextCreate already zeroed the context node, we don't
	 * have to do anything here: it's already OK.  'context' is intentionally
	 * unused.
	 */
}
447 :
/*
 * AllocSetReset
 *		Frees all memory which is allocated in the given set.
 *
 * Actually, this routine has some discretion about what to do.
 * It should mark all allocated chunks freed, but it need not necessarily
 * give back all the resources the set owns.  Our actual implementation is
 * that we hang onto any "keeper" block specified for the set.  In this way,
 * we don't thrash malloc() when a context is repeatedly reset after small
 * allocations, which is typical behavior for per-tuple contexts.
 */
static void
AllocSetReset(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block;

	AssertArg(AllocSetIsValid(set));

#ifdef MEMORY_CONTEXT_CHECKING
	/* Check for corruption and leaks before freeing */
	AllocSetCheck(context);
#endif

	/* Clear chunk freelists */
	MemSetAligned(set->freelist, 0, sizeof(set->freelist));

	block = set->blocks;

	/* New blocks list is either empty or just the keeper block */
	set->blocks = set->keeper;

	while (block != NULL)
	{
		/* Save the link before we wipe or free the block */
		AllocBlock	next = block->next;

		if (block == set->keeper)
		{
			/* Reset the block, but don't return it to malloc */
			char	   *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;

#ifdef CLOBBER_FREED_MEMORY
			wipe_mem(datastart, block->freeptr - datastart);
#else
			/* wipe_mem() would have done this */
			VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
#endif
			block->freeptr = datastart;
			block->prev = NULL;
			block->next = NULL;
		}
		else
		{
			/* Normal case, release the block */
#ifdef CLOBBER_FREED_MEMORY
			wipe_mem(block, block->freeptr - ((char *) block));
#endif
			free(block);
		}
		block = next;
	}

	/* Reset block size allocation sequence, too */
	set->nextBlockSize = set->initBlockSize;
}
513 :
514 : /*
515 : * AllocSetDelete
516 : * Frees all memory which is allocated in the given set,
517 : * in preparation for deletion of the set.
518 : *
519 : * Unlike AllocSetReset, this *must* free all resources of the set.
520 : * But note we are not responsible for deleting the context node itself.
521 : */
522 : static void
523 388369 : AllocSetDelete(MemoryContext context)
524 : {
525 388369 : AllocSet set = (AllocSet) context;
526 388369 : AllocBlock block = set->blocks;
527 :
528 388369 : AssertArg(AllocSetIsValid(set));
529 :
530 : #ifdef MEMORY_CONTEXT_CHECKING
531 : /* Check for corruption and leaks before freeing */
532 388369 : AllocSetCheck(context);
533 : #endif
534 :
535 : /* Make it look empty, just in case... */
536 388369 : MemSetAligned(set->freelist, 0, sizeof(set->freelist));
537 388369 : set->blocks = NULL;
538 388369 : set->keeper = NULL;
539 :
540 1119040 : while (block != NULL)
541 : {
542 342302 : AllocBlock next = block->next;
543 :
544 : #ifdef CLOBBER_FREED_MEMORY
545 342302 : wipe_mem(block, block->freeptr - ((char *) block));
546 : #endif
547 342302 : free(block);
548 342302 : block = next;
549 : }
550 388369 : }
551 :
/*
 * AllocSetAlloc
 *		Returns pointer to allocated memory of given size or NULL if
 *		request could not be completed; memory is added to the set.
 *
 * No request may exceed:
 *		MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
 * All callers use a much-lower limit.
 *
 * Three paths: (1) oversize requests get a dedicated single-chunk block,
 * (2) a recycled chunk from the matching freelist, (3) a fresh chunk carved
 * from the active block (creating a new block if needed).
 */
static void *
AllocSetAlloc(MemoryContext context, Size size)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block;
	AllocChunk	chunk;
	int			fidx;
	Size		chunk_size;
	Size		blksize;

	AssertArg(AllocSetIsValid(set));

	/*
	 * If requested size exceeds maximum for chunks, allocate an entire block
	 * for this request.
	 */
	if (size > set->allocChunkLimit)
	{
		chunk_size = MAXALIGN(size);
		blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
		block = (AllocBlock) malloc(blksize);
		if (block == NULL)
			return NULL;
		block->aset = set;
		/* freeptr == endptr marks the block as exactly full (one chunk) */
		block->freeptr = block->endptr = ((char *) block) + blksize;

		chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
		chunk->aset = set;
		chunk->size = chunk_size;
#ifdef MEMORY_CONTEXT_CHECKING
		/* Valgrind: Will be made NOACCESS below. */
		chunk->requested_size = size;
		/* set mark to catch clobber of "unused" space */
		if (size < chunk_size)
			set_sentinel(AllocChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* fill the allocated space with junk */
		randomize_mem((char *) AllocChunkGetPointer(chunk), size);
#endif

		/*
		 * Stick the new block underneath the active allocation block, if any,
		 * so that we don't lose the use of the space remaining therein.
		 */
		if (set->blocks != NULL)
		{
			block->prev = set->blocks;
			block->next = set->blocks->next;
			if (block->next)
				block->next->prev = block;
			set->blocks->next = block;
		}
		else
		{
			block->prev = NULL;
			block->next = NULL;
			set->blocks = block;
		}

		AllocAllocInfo(set, chunk);

		/*
		 * Chunk's metadata fields remain DEFINED.  The requested allocation
		 * itself can be NOACCESS or UNDEFINED; our caller will soon make it
		 * UNDEFINED.  Make extra space at the end of the chunk, if any,
		 * NOACCESS.
		 */
		VALGRIND_MAKE_MEM_NOACCESS((char *) chunk + ALLOC_CHUNKHDRSZ,
								   chunk_size - ALLOC_CHUNKHDRSZ);

		return AllocChunkGetPointer(chunk);
	}

	/*
	 * Request is small enough to be treated as a chunk.  Look in the
	 * corresponding free list to see if there is a free chunk we could reuse.
	 * If one is found, remove it from the free list, make it again a member
	 * of the alloc set and return its data address.
	 */
	fidx = AllocSetFreeIndex(size);
	chunk = set->freelist[fidx];
	if (chunk != NULL)
	{
		Assert(chunk->size >= size);

		/* For free chunks, 'aset' holds the next-free link; pop it */
		set->freelist[fidx] = (AllocChunk) chunk->aset;

		chunk->aset = (void *) set;

#ifdef MEMORY_CONTEXT_CHECKING
		/* Valgrind: Free list requested_size should be DEFINED. */
		chunk->requested_size = size;
		VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
								   sizeof(chunk->requested_size));
		/* set mark to catch clobber of "unused" space */
		if (size < chunk->size)
			set_sentinel(AllocChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* fill the allocated space with junk */
		randomize_mem((char *) AllocChunkGetPointer(chunk), size);
#endif

		AllocAllocInfo(set, chunk);
		return AllocChunkGetPointer(chunk);
	}

	/*
	 * Choose the actual chunk size to allocate.
	 */
	chunk_size = (1 << ALLOC_MINBITS) << fidx;
	Assert(chunk_size >= size);

	/*
	 * If there is enough room in the active allocation block, we will put the
	 * chunk into that block.  Else must start a new one.
	 */
	if ((block = set->blocks) != NULL)
	{
		Size		availspace = block->endptr - block->freeptr;

		if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
		{
			/*
			 * The existing active (top) block does not have enough room for
			 * the requested allocation, but it might still have a useful
			 * amount of space in it.  Once we push it down in the block list,
			 * we'll never try to allocate more space from it. So, before we
			 * do that, carve up its free space into chunks that we can put on
			 * the set's freelists.
			 *
			 * Because we can only get here when there's less than
			 * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
			 * more than ALLOCSET_NUM_FREELISTS-1 times.
			 */
			while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
			{
				Size		availchunk = availspace - ALLOC_CHUNKHDRSZ;
				int			a_fidx = AllocSetFreeIndex(availchunk);

				/*
				 * In most cases, we'll get back the index of the next larger
				 * freelist than the one we need to put this chunk on.  The
				 * exception is when availchunk is exactly a power of 2.
				 */
				if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
				{
					a_fidx--;
					Assert(a_fidx >= 0);
					availchunk = ((Size) 1 << (a_fidx + ALLOC_MINBITS));
				}

				chunk = (AllocChunk) (block->freeptr);

				/* Prepare to initialize the chunk header. */
				VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);

				block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
				availspace -= (availchunk + ALLOC_CHUNKHDRSZ);

				chunk->size = availchunk;
#ifdef MEMORY_CONTEXT_CHECKING
				chunk->requested_size = 0;	/* mark it free */
#endif
				chunk->aset = (void *) set->freelist[a_fidx];
				set->freelist[a_fidx] = chunk;
			}

			/* Mark that we need to create a new block */
			block = NULL;
		}
	}

	/*
	 * Time to create a new regular (multi-chunk) block?
	 */
	if (block == NULL)
	{
		Size		required_size;

		/*
		 * The first such block has size initBlockSize, and we double the
		 * space in each succeeding block, but not more than maxBlockSize.
		 */
		blksize = set->nextBlockSize;
		set->nextBlockSize <<= 1;
		if (set->nextBlockSize > set->maxBlockSize)
			set->nextBlockSize = set->maxBlockSize;

		/*
		 * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
		 * space... but try to keep it a power of 2.
		 */
		required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
		while (blksize < required_size)
			blksize <<= 1;

		/* Try to allocate it */
		block = (AllocBlock) malloc(blksize);

		/*
		 * We could be asking for pretty big blocks here, so cope if malloc
		 * fails.  But give up if there's less than a meg or so available...
		 */
		while (block == NULL && blksize > 1024 * 1024)
		{
			blksize >>= 1;
			if (blksize < required_size)
				break;
			block = (AllocBlock) malloc(blksize);
		}

		if (block == NULL)
			return NULL;

		block->aset = set;
		block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
		block->endptr = ((char *) block) + blksize;

		/*
		 * If this is the first block of the set, make it the "keeper" block.
		 * Formerly, a keeper block could only be created during context
		 * creation, but allowing it to happen here lets us have fast reset
		 * cycling even for contexts created with minContextSize = 0; that way
		 * we don't have to force space to be allocated in contexts that might
		 * never need any space.  Don't mark an oversize block as a keeper,
		 * however.
		 */
		if (set->keeper == NULL && blksize == set->initBlockSize)
			set->keeper = block;

		/* Mark unallocated space NOACCESS. */
		VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
								   blksize - ALLOC_BLOCKHDRSZ);

		block->prev = NULL;
		block->next = set->blocks;
		if (block->next)
			block->next->prev = block;
		set->blocks = block;
	}

	/*
	 * OK, do the allocation
	 */
	chunk = (AllocChunk) (block->freeptr);

	/* Prepare to initialize the chunk header. */
	VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);

	block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
	Assert(block->freeptr <= block->endptr);

	chunk->aset = (void *) set;
	chunk->size = chunk_size;
#ifdef MEMORY_CONTEXT_CHECKING
	chunk->requested_size = size;
	VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
							   sizeof(chunk->requested_size));
	/* set mark to catch clobber of "unused" space */
	if (size < chunk->size)
		set_sentinel(AllocChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
	/* fill the allocated space with junk */
	randomize_mem((char *) AllocChunkGetPointer(chunk), size);
#endif

	AllocAllocInfo(set, chunk);
	return AllocChunkGetPointer(chunk);
}
833 :
/*
 * AllocSetFree
 *		Frees allocated memory; memory is removed from the set.
 *
 * Oversize chunks (size > allocChunkLimit) live in dedicated single-chunk
 * blocks and are returned straight to malloc(); normal chunks are pushed
 * onto the matching freelist for reuse by AllocSetAlloc.
 */
static void
AllocSetFree(MemoryContext context, void *pointer)
{
	AllocSet	set = (AllocSet) context;
	AllocChunk	chunk = AllocPointerGetChunk(pointer);

	AllocFreeInfo(set, chunk);

#ifdef MEMORY_CONTEXT_CHECKING
	VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
							  sizeof(chunk->requested_size));
	/* Test for someone scribbling on unused space in chunk */
	if (chunk->requested_size < chunk->size)
		if (!sentinel_ok(pointer, chunk->requested_size))
			elog(WARNING, "detected write past chunk end in %s %p",
				 set->header.name, chunk);
#endif

	if (chunk->size > set->allocChunkLimit)
	{
		/*
		 * Big chunks are certain to have been allocated as single-chunk
		 * blocks.  Just unlink that block and return it to malloc().
		 */
		AllocBlock	block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);

		/*
		 * Try to verify that we have a sane block pointer: it should
		 * reference the correct aset, and freeptr and endptr should point
		 * just past the chunk.
		 */
		if (block->aset != set ||
			block->freeptr != block->endptr ||
			block->freeptr != ((char *) block) +
			(chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
			elog(ERROR, "could not find block containing chunk %p", chunk);

		/* OK, remove block from aset's list and free it */
		if (block->prev)
			block->prev->next = block->next;
		else
			set->blocks = block->next;
		if (block->next)
			block->next->prev = block->prev;
#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(block, block->freeptr - ((char *) block));
#endif
		free(block);
	}
	else
	{
		/* Normal case, put the chunk into appropriate freelist */
		int			fidx = AllocSetFreeIndex(chunk->size);

		/* 'aset' doubles as the freelist link while the chunk is free */
		chunk->aset = (void *) set->freelist[fidx];

#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(pointer, chunk->size);
#endif

#ifdef MEMORY_CONTEXT_CHECKING
		/* Reset requested_size to 0 in chunks that are on freelist */
		chunk->requested_size = 0;
#endif
		set->freelist[fidx] = chunk;
	}
}
905 :
906 : /*
907 : * AllocSetRealloc
908 : * Returns new pointer to allocated memory of given size or NULL if
909 : * request could not be completed; this memory is added to the set.
910 : * Memory associated with given pointer is copied into the new memory,
911 : * and the old memory is freed.
912 : *
    :  * Three cases are handled below: (1) the existing aligned chunk is
    :  * already big enough, so we return it unchanged; (2) the chunk is an
    :  * oversize single-chunk block, which we grow in place via realloc();
    :  * (3) a normal small chunk, which we replace by palloc/memcpy/pfree.
    :  *
913 : * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
914 : * makes our Valgrind client requests less-precise, hazarding false negatives.
915 : * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
916 : * request size.)
917 : */
918 : static void *
919 65870 : AllocSetRealloc(MemoryContext context, void *pointer, Size size)
920 : {
921 65870 : AllocSet set = (AllocSet) context;
922 65870 : AllocChunk chunk = AllocPointerGetChunk(pointer);
923 65870 : Size oldsize = chunk->size;
924 :
925 : #ifdef MEMORY_CONTEXT_CHECKING
926 : VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
927 : sizeof(chunk->requested_size));
928 : /* Test for someone scribbling on unused space in chunk */
929 65870 : if (chunk->requested_size < oldsize)
930 48316 : if (!sentinel_ok(pointer, chunk->requested_size))
931 0 : elog(WARNING, "detected write past chunk end in %s %p",
932 : set->header.name, chunk);
933 : #endif
934 :
935 : /*
936 : * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
937 : * allocated area already is >= the new size. (In particular, we always
938 : * fall out here if the requested size is a decrease.)
939 : */
940 65870 : if (oldsize >= size)
941 : {
942 : #ifdef MEMORY_CONTEXT_CHECKING
943 3407 : Size oldrequest = chunk->requested_size;
944 :
945 : #ifdef RANDOMIZE_ALLOCATED_MEMORY
946 : /* We can only fill the extra space if we know the prior request */
947 : if (size > oldrequest)
948 : randomize_mem((char *) pointer + oldrequest,
949 : size - oldrequest);
950 : #endif
951 :
952 3407 : chunk->requested_size = size;
953 : VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
954 : sizeof(chunk->requested_size));
955 :
956 : /*
957 : * If this is an increase, mark any newly-available part UNDEFINED.
958 : * Otherwise, mark the obsolete part NOACCESS.
959 : */
960 : if (size > oldrequest)
961 : VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
962 : size - oldrequest)
963 : else
964 : VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
965 : oldsize - size);
966 :
967 : /* set mark to catch clobber of "unused" space */
968 3407 : if (size < oldsize)
969 2710 : set_sentinel(pointer, size);
970 : #else /* !MEMORY_CONTEXT_CHECKING */
971 :
972 : /*
973 : * We don't have the information to determine whether we're growing
974 : * the old request or shrinking it, so we conservatively mark the
975 : * entire new allocation DEFINED.
976 : */
977 : VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
978 : VALGRIND_MAKE_MEM_DEFINED(pointer, size);
979 : #endif
980 :
981 3407 : return pointer;
982 : }
983 :
984 62463 : if (oldsize > set->allocChunkLimit)
985 : {
986 : /*
987 : * The chunk must have been allocated as a single-chunk block. Use
988 : * realloc() to make the containing block bigger with minimum space
989 : * wastage.
990 : */
991 1692 : AllocBlock block = (AllocBlock) (((char *) chunk) - ALLOC_BLOCKHDRSZ);
992 : Size chksize;
993 : Size blksize;
994 :
995 : /*
996 : * Try to verify that we have a sane block pointer: it should
997 : * reference the correct aset, and freeptr and endptr should point
998 : * just past the chunk.
999 : */
1000 3384 : if (block->aset != set ||
1001 3384 : block->freeptr != block->endptr ||
1002 3384 : block->freeptr != ((char *) block) +
1003 1692 : (chunk->size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ))
1004 0 : elog(ERROR, "could not find block containing chunk %p", chunk);
1005 :
1006 : /* Do the realloc */
1007 1692 : chksize = MAXALIGN(size);
1008 1692 : blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
    : /*
    :  * On realloc failure the old block is untouched and still linked in
    :  * set->blocks, so returning NULL here leaves the context consistent.
    :  */
1009 1692 : block = (AllocBlock) realloc(block, blksize);
1010 1692 : if (block == NULL)
1011 0 : return NULL;
1012 1692 : block->freeptr = block->endptr = ((char *) block) + blksize;
1013 :
1014 : /* Update pointers since block has likely been moved */
1015 1692 : chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
1016 1692 : pointer = AllocChunkGetPointer(chunk);
1017 1692 : if (block->prev)
1018 1692 : block->prev->next = block;
1019 : else
1020 0 : set->blocks = block;
1021 1692 : if (block->next)
1022 976 : block->next->prev = block;
1023 1692 : chunk->size = chksize;
1024 :
1025 : #ifdef MEMORY_CONTEXT_CHECKING
1026 : #ifdef RANDOMIZE_ALLOCATED_MEMORY
1027 : /* We can only fill the extra space if we know the prior request */
1028 : randomize_mem((char *) pointer + chunk->requested_size,
1029 : size - chunk->requested_size);
1030 : #endif
1031 :
1032 : /*
1033 : * realloc() (or randomize_mem()) will have left the newly-allocated
1034 : * part UNDEFINED, but we may need to adjust trailing bytes from the
1035 : * old allocation.
1036 : */
1037 : VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1038 : oldsize - chunk->requested_size);
1039 :
1040 1692 : chunk->requested_size = size;
1041 : VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
1042 : sizeof(chunk->requested_size));
1043 :
1044 : /* set mark to catch clobber of "unused" space */
1045 1692 : if (size < chunk->size)
1046 0 : set_sentinel(pointer, size);
1047 : #else /* !MEMORY_CONTEXT_CHECKING */
1048 :
1049 : /*
1050 : * We don't know how much of the old chunk size was the actual
1051 : * allocation; it could have been as small as one byte. We have to be
1052 : * conservative and just mark the entire old portion DEFINED.
1053 : */
1054 : VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1055 : #endif
1056 :
1057 : /* Make any trailing alignment padding NOACCESS. */
1058 : VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1059 :
1060 1692 : return pointer;
1061 : }
1062 : else
1063 : {
1064 : /*
1065 : * Small-chunk case. We just do this by brute force, ie, allocate a
1066 : * new chunk and copy the data. Since we know the existing data isn't
1067 : * huge, this won't involve any great memcpy expense, so it's not
1068 : * worth being smarter. (At one time we tried to avoid memcpy when it
1069 : * was possible to enlarge the chunk in-place, but that turns out to
1070 : * misbehave unpleasantly for repeated cycles of
1071 : * palloc/repalloc/pfree: the eventually freed chunks go into the
1072 : * wrong freelist for the next initial palloc request, and so we leak
1073 : * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1074 : */
1075 : AllocPointer newPointer;
1076 :
1077 : /* allocate new chunk */
1078 60771 : newPointer = AllocSetAlloc((MemoryContext) set, size);
1079 :
1080 : /* leave immediately if request was not completed */
1081 60771 : if (newPointer == NULL)
1082 0 : return NULL;
1083 :
1084 : /*
1085 : * AllocSetAlloc() just made the region NOACCESS. Change it to
1086 : * UNDEFINED for the moment; memcpy() will then transfer definedness
1087 : * from the old allocation to the new. If we know the old allocation,
1088 : * copy just that much. Otherwise, make the entire old chunk defined
1089 : * to avoid errors as we copy the currently-NOACCESS trailing bytes.
1090 : */
1091 : VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
1092 : #ifdef MEMORY_CONTEXT_CHECKING
    : /* copy only the bytes the caller actually requested, not the
    :  * power-of-2 rounded chunk size */
1093 60771 : oldsize = chunk->requested_size;
1094 : #else
1095 : VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1096 : #endif
1097 :
1098 : /* transfer existing data (certain to fit) */
1099 60771 : memcpy(newPointer, pointer, oldsize);
1100 :
1101 : /* free old chunk */
1102 60771 : AllocSetFree((MemoryContext) set, pointer);
1103 :
1104 60771 : return newPointer;
1105 : }
1106 : }
1107 :
1108 : /*
1109 : * AllocSetGetChunkSpace
1110 : * Given a currently-allocated chunk, determine the total space
1111 : * it occupies (including all memory-allocation overhead).
    :  *
    :  * Note: only the chunk header is counted as overhead here; block-level
    :  * header space is not attributed to individual chunks.
1112 : */
1113 : static Size
1114 2187503 : AllocSetGetChunkSpace(MemoryContext context, void *pointer)
1115 : {
1116 2187503 : AllocChunk chunk = AllocPointerGetChunk(pointer);
1117 :
1118 2187503 : return chunk->size + ALLOC_CHUNKHDRSZ;
1119 : }
1120 :
1121 : /*
1122 : * AllocSetIsEmpty
1123 : * Is an allocset empty of any allocated space?
    :  *
    :  * This is conservative: a context whose every chunk has been pfree'd
    :  * individually will still report "not empty" until it is reset.
1124 : */
1125 : static bool
1126 49 : AllocSetIsEmpty(MemoryContext context)
1127 : {
1128 : /*
1129 : * For now, we say "empty" only if the context is new or just reset. We
1130 : * could examine the freelists to determine if all space has been freed,
1131 : * but it's not really worth the trouble for present uses of this
1132 : * functionality.
1133 : */
1134 49 : if (context->isReset)
1135 43 : return true;
1136 6 : return false;
1137 : }
1138 :
1139 : /*
1140 : * AllocSetStats
1141 : * Compute stats about memory consumption of an allocset.
1142 : *
1143 : * level: recursion level (0 at top level); used for print indentation.
1144 : * print: true to print stats to stderr.
1145 : * totals: if not NULL, add stats about this allocset into *totals.
    :  *
    :  * "freespace" counts two things: the untouched tail of each block
    :  * (endptr - freeptr) and chunks currently sitting on the freelists.
1146 : */
1147 : static void
1148 0 : AllocSetStats(MemoryContext context, int level, bool print,
1149 : MemoryContextCounters *totals)
1150 : {
1151 0 : AllocSet set = (AllocSet) context;
1152 0 : Size nblocks = 0;
1153 0 : Size freechunks = 0;
1154 0 : Size totalspace = 0;
1155 0 : Size freespace = 0;
1156 : AllocBlock block;
1157 : int fidx;
1158 :
    : /* walk the block list: total size and unallocated tail of each block */
1159 0 : for (block = set->blocks; block != NULL; block = block->next)
1160 : {
1161 0 : nblocks++;
1162 0 : totalspace += block->endptr - ((char *) block);
1163 0 : freespace += block->endptr - block->freeptr;
1164 : }
    : /* walk every freelist; chunk->aset is the freelist link while free */
1165 0 : for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1166 : {
1167 : AllocChunk chunk;
1168 :
1169 0 : for (chunk = set->freelist[fidx]; chunk != NULL;
1170 0 : chunk = (AllocChunk) chunk->aset)
1171 : {
1172 0 : freechunks++;
1173 0 : freespace += chunk->size + ALLOC_CHUNKHDRSZ;
1174 : }
1175 : }
1176 :
1177 0 : if (print)
1178 : {
1179 : int i;
1180 :
1181 0 : for (i = 0; i < level; i++)
1182 0 : fprintf(stderr, " ");
1183 0 : fprintf(stderr,
1184 : "%s: %zu total in %zd blocks; %zu free (%zd chunks); %zu used\n",
1185 : set->header.name, totalspace, nblocks, freespace, freechunks,
1186 : totalspace - freespace);
1187 : }
1188 :
1189 0 : if (totals)
1190 : {
1191 0 : totals->nblocks += nblocks;
1192 0 : totals->freechunks += freechunks;
1193 0 : totals->totalspace += totalspace;
1194 0 : totals->freespace += freespace;
1195 : }
1196 0 : }
1197 :
1198 :
1199 : #ifdef MEMORY_CONTEXT_CHECKING
1200 :
1201 : /*
1202 : * AllocSetCheck
1203 : * Walk through chunks and check consistency of memory.
    :  *
    :  * Verifies, per block: header sanity (aset backlink, prev pointer,
    :  * freeptr within bounds), then walks the chunks laid out back-to-back
    :  * within the block, checking each chunk's size, aset link, and
    :  * end-of-chunk sentinel, and finally that the chunk sizes sum to the
    :  * block's used space.
1204 : *
1205 : * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1206 : * find yourself in an infinite loop when trouble occurs, because this
1207 : * routine will be entered again when elog cleanup tries to release memory!
1208 : */
1209 : static void
1210 6264509 : AllocSetCheck(MemoryContext context)
1211 : {
1212 6264509 : AllocSet set = (AllocSet) context;
1213 6264509 : char *name = set->header.name;
1214 : AllocBlock prevblock;
1215 : AllocBlock block;
1216 :
1217 19753080 : for (prevblock = NULL, block = set->blocks;
1218 : block != NULL;
1219 7224062 : prevblock = block, block = block->next)
1220 : {
1221 7224062 : char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
1222 7224062 : long blk_used = block->freeptr - bpoz;
1223 7224062 : long blk_data = 0;
1224 7224062 : long nchunks = 0;
1225 :
1226 : /*
1227 : * Empty block - empty can be keeper-block only
1228 : */
1229 7224062 : if (!blk_used)
1230 : {
1231 62462 : if (set->keeper != block)
1232 0 : elog(WARNING, "problem in alloc set %s: empty block %p",
1233 : name, block);
1234 : }
1235 :
1236 : /*
1237 : * Check block header fields
1238 : */
1239 14448124 : if (block->aset != set ||
1240 14448124 : block->prev != prevblock ||
1241 14448124 : block->freeptr < bpoz ||
1242 7224062 : block->freeptr > block->endptr)
1243 0 : elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
1244 : name, block);
1245 :
1246 : /*
1247 : * Chunk walker
    :  *
    :  * Chunks are laid out consecutively; each header records its aligned
    :  * size, so we can advance bpoz by ALLOC_CHUNKHDRSZ + size.
1248 : */
1249 203895630 : while (bpoz < block->freeptr)
1250 : {
1251 189447506 : AllocChunk chunk = (AllocChunk) bpoz;
1252 : Size chsize,
1253 : dsize;
1254 :
1255 189447506 : chsize = chunk->size; /* aligned chunk size */
1256 : VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
1257 : sizeof(chunk->requested_size));
1258 189447506 : dsize = chunk->requested_size; /* real data */
    : /* requested_size == 0 marks a chunk sitting on a freelist */
1259 : if (dsize > 0) /* not on a free list */
1260 : VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
1261 : sizeof(chunk->requested_size));
1262 :
1263 : /*
1264 : * Check chunk size
1265 : */
1266 189447506 : if (dsize > chsize)
1267 0 : elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
1268 : name, chunk, block);
1269 189447506 : if (chsize < (1 << ALLOC_MINBITS))
1270 0 : elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
1271 : name, chsize, chunk, block);
1272 :
1273 : /* single-chunk block? */
1274 189612234 : if (chsize > set->allocChunkLimit &&
1275 164728 : chsize + ALLOC_CHUNKHDRSZ != blk_used)
1276 0 : elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
1277 : name, chunk, block);
1278 :
1279 : /*
1280 : * If chunk is allocated, check for correct aset pointer. (If it's
1281 : * free, the aset is the freelist pointer, which we can't check as
1282 : * easily...)
1283 : */
1284 189447506 : if (dsize > 0 && chunk->aset != (void *) set)
1285 0 : elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
1286 : name, block, chunk);
1287 :
1288 : /*
1289 : * Check for overwrite of "unallocated" space in chunk
1290 : */
1291 302173649 : if (dsize > 0 && dsize < chsize &&
1292 112726143 : !sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
1293 0 : elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
1294 : name, block, chunk);
1295 :
1296 189447506 : blk_data += chsize;
1297 189447506 : nchunks++;
1298 :
1299 189447506 : bpoz += ALLOC_CHUNKHDRSZ + chsize;
1300 : }
1301 :
    : /* chunk sizes plus headers must account for all used block space */
1302 7224062 : if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
1303 0 : elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
1304 : name, block);
1305 : }
1306 6264509 : }
1307 :
1308 : #endif /* MEMORY_CONTEXT_CHECKING */
|