Age Owner TLA Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * aset.c
4 : * Allocation set definitions.
5 : *
6 : * AllocSet is our standard implementation of the abstract MemoryContext
7 : * type.
8 : *
9 : *
10 : * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
11 : * Portions Copyright (c) 1994, Regents of the University of California
12 : *
13 : * IDENTIFICATION
14 : * src/backend/utils/mmgr/aset.c
15 : *
16 : * NOTE:
17 : * This is a new (Feb. 05, 1999) implementation of the allocation set
18 : * routines. AllocSet...() does not use OrderedSet...() any more.
19 : * Instead it manages allocations in a block pool by itself, combining
20 : * many small allocations in a few bigger blocks. AllocSetFree() normally
 21 : * doesn't free() memory really. It just adds the free'd area to some
22 : * list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
23 : * at once on AllocSetReset(), which happens when the memory context gets
24 : * destroyed.
25 : * Jan Wieck
26 : *
27 : * Performance improvement from Tom Lane, 8/99: for extremely large request
28 : * sizes, we do want to be able to give the memory back to free() as soon
29 : * as it is pfree()'d. Otherwise we risk tying up a lot of memory in
30 : * freelist entries that might never be usable. This is specially needed
31 : * when the caller is repeatedly repalloc()'ing a block bigger and bigger;
32 : * the previous instances of the block were guaranteed to be wasted until
33 : * AllocSetReset() under the old way.
34 : *
35 : * Further improvement 12/00: as the code stood, request sizes in the
36 : * midrange between "small" and "large" were handled very inefficiently,
37 : * because any sufficiently large free chunk would be used to satisfy a
38 : * request, even if it was much larger than necessary. This led to more
39 : * and more wasted space in allocated chunks over time. To fix, get rid
40 : * of the midrange behavior: we now handle only "small" power-of-2-size
41 : * chunks as chunks. Anything "large" is passed off to malloc(). Change
42 : * the number of freelists to change the small/large boundary.
43 : *
44 : *-------------------------------------------------------------------------
45 : */
46 :
47 : #include "postgres.h"
48 :
49 : #include "port/pg_bitutils.h"
50 : #include "utils/memdebug.h"
51 : #include "utils/memutils.h"
52 : #include "utils/memutils_memorychunk.h"
53 : #include "utils/memutils_internal.h"
54 :
55 : /*--------------------
56 : * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
57 : * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
58 : *
59 : * Note that all chunks in the freelists have power-of-2 sizes. This
60 : * improves recyclability: we may waste some space, but the wasted space
61 : * should stay pretty constant as requests are made and released.
62 : *
63 : * A request too large for the last freelist is handled by allocating a
64 : * dedicated block from malloc(). The block still has a block header and
65 : * chunk header, but when the chunk is freed we'll return the whole block
66 : * to malloc(), not put it on our freelists.
67 : *
68 : * CAUTION: ALLOC_MINBITS must be large enough so that
69 : * 1<<ALLOC_MINBITS is at least MAXALIGN,
70 : * or we may fail to align the smallest chunks adequately.
71 : * 8-byte alignment is enough on all currently known machines. This 8-byte
72 : * minimum also allows us to store a pointer to the next freelist item within
73 : * the chunk of memory itself.
74 : *
75 : * With the current parameters, request sizes up to 8K are treated as chunks,
76 : * larger requests go into dedicated blocks. Change ALLOCSET_NUM_FREELISTS
77 : * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
78 : * memutils.h to agree. (Note: in contexts with small maxBlockSize, we may
79 : * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
80 : *--------------------
81 : */
82 :
#define ALLOC_MINBITS		3	/* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS	11
#define ALLOC_CHUNK_LIMIT	(1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* Size of the largest chunk that we still carve out of shared blocks (8K) */
#define ALLOC_CHUNK_FRACTION	4
/* A chunk may occupy at most 1/ALLOC_CHUNK_FRACTION of maxBlockSize (less overhead) */
89 :
90 : /*--------------------
91 : * The first block allocated for an allocset has size initBlockSize.
92 : * Each time we have to allocate another block, we double the block size
93 : * (if possible, and without exceeding maxBlockSize), so as to reduce
94 : * the bookkeeping load on malloc().
95 : *
96 : * Blocks allocated to hold oversize chunks do not follow this rule, however;
97 : * they are just however big they need to be to hold that single chunk.
98 : *
99 : * Also, if a minContextSize is specified, the first block has that size,
100 : * and then initBlockSize is used for the next one.
101 : *--------------------
102 : */
103 :
104 : #define ALLOC_BLOCKHDRSZ MAXALIGN(sizeof(AllocBlockData))
105 : #define ALLOC_CHUNKHDRSZ sizeof(MemoryChunk)
106 :
107 : typedef struct AllocBlockData *AllocBlock; /* forward reference */
108 :
109 : /*
110 : * AllocPointer
111 : * Aligned pointer which may be a member of an allocation set.
112 : */
113 : typedef void *AllocPointer;
114 :
115 : /*
116 : * AllocFreeListLink
117 : * When pfreeing memory, if we maintain a freelist for the given chunk's
118 : * size then we use a AllocFreeListLink to point to the current item in
119 : * the AllocSetContext's freelist and then set the given freelist element
120 : * to point to the chunk being freed.
121 : */
122 : typedef struct AllocFreeListLink
123 : {
124 : MemoryChunk *next;
125 : } AllocFreeListLink;
126 :
127 : /*
128 : * Obtain a AllocFreeListLink for the given chunk. Allocation sizes are
129 : * always at least sizeof(AllocFreeListLink), so we reuse the pointer's memory
130 : * itself to store the freelist link.
131 : */
132 : #define GetFreeListLink(chkptr) \
133 : (AllocFreeListLink *) ((char *) (chkptr) + ALLOC_CHUNKHDRSZ)
134 :
135 : /* Validate a freelist index retrieved from a chunk header */
136 : #define FreeListIdxIsValid(fidx) \
137 : ((fidx) >= 0 && (fidx) < ALLOCSET_NUM_FREELISTS)
138 :
139 : /* Determine the size of the chunk based on the freelist index */
140 : #define GetChunkSizeFromFreeListIdx(fidx) \
141 : ((((Size) 1) << ALLOC_MINBITS) << (fidx))
142 :
143 : /*
144 : * AllocSetContext is our standard implementation of MemoryContext.
145 : *
146 : * Note: header.isReset means there is nothing for AllocSetReset to do.
147 : * This is different from the aset being physically empty (empty blocks list)
148 : * because we will still have a keeper block. It's also different from the set
149 : * being logically empty, because we don't attempt to detect pfree'ing the
150 : * last active chunk.
151 : */
152 : typedef struct AllocSetContext
153 : {
154 : MemoryContextData header; /* Standard memory-context fields */
155 : /* Info about storage allocated in this context: */
156 : AllocBlock blocks; /* head of list of blocks in this set */
157 : MemoryChunk *freelist[ALLOCSET_NUM_FREELISTS]; /* free chunk lists */
158 : /* Allocation parameters for this context: */
159 : Size initBlockSize; /* initial block size */
160 : Size maxBlockSize; /* maximum block size */
161 : Size nextBlockSize; /* next block size to allocate */
162 : Size allocChunkLimit; /* effective chunk size limit */
163 : AllocBlock keeper; /* keep this block over resets */
164 : /* freelist this context could be put in, or -1 if not a candidate: */
165 : int freeListIndex; /* index in context_freelists[], or -1 */
166 : } AllocSetContext;
167 :
168 : typedef AllocSetContext *AllocSet;
169 :
170 : /*
171 : * AllocBlock
172 : * An AllocBlock is the unit of memory that is obtained by aset.c
173 : * from malloc(). It contains one or more MemoryChunks, which are
174 : * the units requested by palloc() and freed by pfree(). MemoryChunks
175 : * cannot be returned to malloc() individually, instead they are put
176 : * on freelists by pfree() and re-used by the next palloc() that has
177 : * a matching request size.
178 : *
179 : * AllocBlockData is the header data for a block --- the usable space
180 : * within the block begins at the next alignment boundary.
181 : */
182 : typedef struct AllocBlockData
183 : {
184 : AllocSet aset; /* aset that owns this block */
185 : AllocBlock prev; /* prev block in aset's blocks list, if any */
186 : AllocBlock next; /* next block in aset's blocks list, if any */
187 : char *freeptr; /* start of free space in this block */
188 : char *endptr; /* end of space in this block */
189 : } AllocBlockData;
190 :
/*
 * Only the "hdrmask" field should be accessed outside this module.
 * Under valgrind, the rest of an allocated chunk's header stays marked
 * NOACCESS; chunk headers sitting in a freelist are left accessible,
 * for simplicity.
 */
#define ALLOCCHUNK_PRIVATE_LEN	offsetof(MemoryChunk, hdrmask)

/*
 * AllocPointerIsValid
 *		True iff pointer is valid allocation pointer.
 */
#define AllocPointerIsValid(pointer) PointerIsValid(pointer)

/*
 * AllocSetIsValid
 *		True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) \
	(PointerIsValid(set) && IsA(set, AllocSetContext))

/*
 * AllocBlockIsValid
 *		True iff block is valid block of allocation set.
 */
#define AllocBlockIsValid(block) \
	(PointerIsValid(block) && AllocSetIsValid((block)->aset))

/*
 * External chunks always live on a dedicated block, so the block header
 * sits immediately before the (first and only) chunk on that block.
 */
#define ExternalChunkGetBlock(chunk) \
	(AllocBlock) ((char *) chunk - ALLOC_BLOCKHDRSZ)
226 :
227 : /*
228 : * Rather than repeatedly creating and deleting memory contexts, we keep some
229 : * freed contexts in freelists so that we can hand them out again with little
230 : * work. Before putting a context in a freelist, we reset it so that it has
231 : * only its initial malloc chunk and no others. To be a candidate for a
232 : * freelist, a context must have the same minContextSize/initBlockSize as
233 : * other contexts in the list; but its maxBlockSize is irrelevant since that
234 : * doesn't affect the size of the initial chunk.
235 : *
236 : * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
237 : * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
238 : * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
239 : *
240 : * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
241 : * hopes of improving locality of reference. But if there get to be too
242 : * many contexts in the list, we'd prefer to drop the most-recently-created
243 : * contexts in hopes of keeping the process memory map compact.
244 : * We approximate that by simply deleting all existing entries when the list
245 : * overflows, on the assumption that queries that allocate a lot of contexts
246 : * will probably free them in more or less reverse order of allocation.
247 : *
248 : * Contexts in a freelist are chained via their nextchild pointers.
249 : */
250 : #define MAX_FREE_CONTEXTS 100 /* arbitrary limit on freelist length */
251 :
252 : typedef struct AllocSetFreeList
253 : {
254 : int num_free; /* current list length */
255 : AllocSetContext *first_free; /* list header */
256 : } AllocSetFreeList;
257 :
258 : /* context_freelists[0] is for default params, [1] for small params */
259 : static AllocSetFreeList context_freelists[2] =
260 : {
261 : {
262 : 0, NULL
263 : },
264 : {
265 : 0, NULL
266 : }
267 : };
268 :
269 :
270 : /* ----------
271 : * AllocSetFreeIndex -
272 : *
8828 JanWieck 273 ECB : * Depending on the size of an allocation compute which freechunk
274 : * list of the alloc set it belongs to. Caller must have verified
275 : * that size <= ALLOC_CHUNK_LIMIT.
276 : * ----------
277 : */
278 : static inline int
8828 JanWieck 279 GIC 620371798 : AllocSetFreeIndex(Size size)
280 : {
281 : int idx;
282 :
5010 tgl 283 620371798 : if (size > (1 << ALLOC_MINBITS))
284 : {
285 : /*----------
286 : * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
287 : * This is the same as
1198 tgl 288 ECB : * pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
289 : * or equivalently
290 : * pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
291 : *
292 : * However, for platforms without intrinsic support, we duplicate the
293 : * logic here, allowing an additional optimization. It's reasonable
294 : * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
295 : * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
296 : * the last two bytes.
297 : *
298 : * Yes, this function is enough of a hot-spot to make it worth this
299 : * much trouble.
300 : *----------
301 : */
302 : #ifdef HAVE_BITSCAN_REVERSE
60 john.naylor 303 GNC 524971614 : idx = pg_leftmost_one_pos32((uint32) size - 1) - ALLOC_MINBITS + 1;
304 : #else
305 : uint32 t,
306 : tsize;
307 :
308 : /* Statically assert that we only have a 16-bit input value. */
309 : StaticAssertDecl(ALLOC_CHUNK_LIMIT < (1 << 16),
310 : "ALLOC_CHUNK_LIMIT must be less than 64kB");
311 :
312 : tsize = size - 1;
313 : t = tsize >> 8;
314 : idx = t ? pg_leftmost_one_pos[t] + 8 : pg_leftmost_one_pos[tsize];
315 : idx -= ALLOC_MINBITS - 1;
316 : #endif
317 :
8164 tgl 318 GIC 524971614 : Assert(idx < ALLOCSET_NUM_FREELISTS);
8828 JanWieck 319 ECB : }
320 : else
5010 tgl 321 GIC 95400184 : idx = 0;
322 :
8828 JanWieck 323 620371798 : return idx;
324 : }
325 :
326 :
327 : /*
328 : * Public routines
329 : */
330 :
331 :
332 : /*
333 : * AllocSetContextCreateInternal
334 : * Create a new AllocSet context.
335 : *
336 : * parent: parent context, or NULL if top-level context
337 : * name: name of context (must be statically allocated)
338 : * minContextSize: minimum context size
339 : * initBlockSize: initial allocation block size
340 : * maxBlockSize: maximum allocation block size
341 : *
342 : * Most callers should abstract the context size parameters using a macro
343 : * such as ALLOCSET_DEFAULT_SIZES.
344 : *
345 : * Note: don't call this directly; go through the wrapper macro
346 : * AllocSetContextCreate.
347 : */
348 : MemoryContext
1640 tgl 349 CBC 6085595 : AllocSetContextCreateInternal(MemoryContext parent,
350 : const char *name,
1943 tgl 351 ECB : Size minContextSize,
352 : Size initBlockSize,
353 : Size maxBlockSize)
9770 scrappy 354 : {
355 : int freeListIndex;
356 : Size firstBlockSize;
357 : AllocSet set;
1943 tgl 358 : AllocBlock block;
359 :
360 : /* ensure MemoryChunk's size is properly maxaligned */
361 : StaticAssertDecl(ALLOC_CHUNKHDRSZ == MAXALIGN(ALLOC_CHUNKHDRSZ),
362 : "sizeof(MemoryChunk) is not maxaligned");
363 : /* check we have enough space to store the freelist link */
364 : StaticAssertDecl(sizeof(AllocFreeListLink) <= (1 << ALLOC_MINBITS),
365 : "sizeof(AllocFreeListLink) larger than minimum allocation size");
2232 andres 366 :
2414 tgl 367 : /*
368 : * First, validate allocation parameters. Once these were regular runtime
369 : * tests and elog's, but in practice Asserts seem sufficient because
370 : * nobody varies their parameters at runtime. We somewhat arbitrarily
371 : * enforce a minimum 1K block size. We restrict the maximum block size to
372 : * MEMORYCHUNK_MAX_BLOCKOFFSET as MemoryChunks are limited to this in
373 : * regards to addressing the offset between the chunk and the block that
374 : * the chunk is stored on. We would be unable to store the offset between
375 : * the chunk and block for any chunks that were beyond
376 : * MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the block if the block was to be
377 : * larger than this.
378 : */
1943 tgl 379 GIC 6085595 : Assert(initBlockSize == MAXALIGN(initBlockSize) &&
380 : initBlockSize >= 1024);
381 6085595 : Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
1943 tgl 382 ECB : maxBlockSize >= initBlockSize &&
383 : AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
1943 tgl 384 CBC 6085595 : Assert(minContextSize == 0 ||
385 : (minContextSize == MAXALIGN(minContextSize) &&
1943 tgl 386 ECB : minContextSize >= 1024 &&
387 : minContextSize <= maxBlockSize));
223 drowley 388 GNC 6085595 : Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);
389 :
1943 tgl 390 ECB : /*
391 : * Check whether the parameters match either available freelist. We do
392 : * not need to demand a match of maxBlockSize.
393 : */
1839 tgl 394 GIC 6085595 : if (minContextSize == ALLOCSET_DEFAULT_MINSIZE &&
1943 tgl 395 ECB : initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
1943 tgl 396 GIC 4188547 : freeListIndex = 0;
1839 397 1897048 : else if (minContextSize == ALLOCSET_SMALL_MINSIZE &&
1943 tgl 398 ECB : initBlockSize == ALLOCSET_SMALL_INITSIZE)
1943 tgl 399 GIC 1882848 : freeListIndex = 1;
400 : else
401 14200 : freeListIndex = -1;
402 :
403 : /*
1943 tgl 404 ECB : * If a suitable freelist entry exists, just recycle that context.
405 : */
1943 tgl 406 GIC 6085595 : if (freeListIndex >= 0)
1943 tgl 407 ECB : {
1943 tgl 408 GIC 6071395 : AllocSetFreeList *freelist = &context_freelists[freeListIndex];
409 :
410 6071395 : if (freelist->first_free != NULL)
411 : {
1943 tgl 412 ECB : /* Remove entry from freelist */
1943 tgl 413 GIC 4727402 : set = freelist->first_free;
1943 tgl 414 CBC 4727402 : freelist->first_free = (AllocSet) set->header.nextchild;
415 4727402 : freelist->num_free--;
416 :
1943 tgl 417 ECB : /* Update its maxBlockSize; everything else should be OK */
1943 tgl 418 GIC 4727402 : set->maxBlockSize = maxBlockSize;
419 :
420 : /* Reinitialize its header, installing correct name and parent */
421 4727402 : MemoryContextCreate((MemoryContext) set,
422 : T_AllocSetContext,
423 : MCTX_ASET_ID,
1943 tgl 424 ECB : parent,
425 : name);
1943 tgl 426 EUB :
1116 jdavis 427 GBC 4727402 : ((MemoryContext) set)->mem_allocated =
428 4727402 : set->keeper->endptr - ((char *) set);
429 :
1943 tgl 430 GIC 4727402 : return (MemoryContext) set;
431 : }
432 : }
433 :
434 : /* Determine size of initial block */
1839 435 1358193 : firstBlockSize = MAXALIGN(sizeof(AllocSetContext)) +
436 : ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
1943 437 1358193 : if (minContextSize != 0)
438 14200 : firstBlockSize = Max(firstBlockSize, minContextSize);
439 : else
440 1343993 : firstBlockSize = Max(firstBlockSize, initBlockSize);
1943 tgl 441 ECB :
442 : /*
443 : * Allocate the initial block. Unlike other aset.c blocks, it starts with
444 : * the context header and its block header follows that.
445 : */
1943 tgl 446 CBC 1358193 : set = (AllocSet) malloc(firstBlockSize);
1943 tgl 447 GIC 1358193 : if (set == NULL)
448 : {
1943 tgl 449 UIC 0 : if (TopMemoryContext)
450 0 : MemoryContextStats(TopMemoryContext);
451 0 : ereport(ERROR,
1943 tgl 452 ECB : (errcode(ERRCODE_OUT_OF_MEMORY),
453 : errmsg("out of memory"),
454 : errdetail("Failed while creating memory context \"%s\".",
455 : name)));
456 : }
457 :
458 : /*
459 : * Avoid writing code that can fail between here and MemoryContextCreate;
460 : * we'd leak the header/initial block if we ereport in this stretch.
461 : */
462 :
463 : /* Fill in the initial block's block header */
1839 tgl 464 GIC 1358193 : block = (AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext)));
1943 465 1358193 : block->aset = set;
466 1358193 : block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
467 1358193 : block->endptr = ((char *) set) + firstBlockSize;
468 1358193 : block->prev = NULL;
469 1358193 : block->next = NULL;
470 :
471 : /* Mark unallocated space NOACCESS; leave the block header alone. */
472 : VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
473 :
474 : /* Remember block as part of block list */
475 1358193 : set->blocks = block;
476 : /* Mark block as not to be released at reset time */
477 1358193 : set->keeper = block;
478 :
479 : /* Finish filling in aset-specific parts of the context header */
480 16298316 : MemSetAligned(set->freelist, 0, sizeof(set->freelist));
481 :
2969 jdavis 482 1358193 : set->initBlockSize = initBlockSize;
483 1358193 : set->maxBlockSize = maxBlockSize;
484 1358193 : set->nextBlockSize = initBlockSize;
1943 tgl 485 1358193 : set->freeListIndex = freeListIndex;
486 :
487 : /*
5624 bruce 488 ECB : * Compute the allocation chunk size limit for this context. It can't be
489 : * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
4360 tgl 490 : * If maxBlockSize is small then requests exceeding the maxBlockSize, or
491 : * even a significant fraction of it, should be treated as large chunks
492 : * too. For the typical case of maxBlockSize a power of 2, the chunk size
493 : * limit will be at most 1/8th maxBlockSize, so that given a stream of
494 : * requests that are all the maximum chunk size we will waste at most
495 : * 1/8th of the allocated space.
496 : *
497 : * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
5947 498 : */
499 : StaticAssertStmt(ALLOC_CHUNK_LIMIT == ALLOCSET_SEPARATE_THRESHOLD,
500 : "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
501 :
502 : /*
503 : * Determine the maximum size that a chunk can be before we allocate an
504 : * entire AllocBlock dedicated for that chunk. We set the absolute limit
505 : * of that size as ALLOC_CHUNK_LIMIT but we reduce it further so that we
506 : * can fit about ALLOC_CHUNK_FRACTION chunks this size on a maximally
507 : * sized block. (We opt to keep allocChunkLimit a power-of-2 value
508 : * primarily for legacy reasons rather than calculating it so that exactly
509 : * ALLOC_CHUNK_FRACTION chunks fit on a maximally sized block.)
510 : */
2969 jdavis 511 GIC 1358193 : set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
512 1358193 : while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
4360 tgl 513 4492360 : (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
2969 jdavis 514 3134167 : set->allocChunkLimit >>= 1;
515 :
516 : /* Finally, do the type-independent part of context creation */
1943 tgl 517 1358193 : MemoryContextCreate((MemoryContext) set,
518 : T_AllocSetContext,
519 : MCTX_ASET_ID,
520 : parent,
521 : name);
522 :
1116 jdavis 523 CBC 1358193 : ((MemoryContext) set)->mem_allocated = firstBlockSize;
524 :
2969 525 1358193 : return (MemoryContext) set;
526 : }
527 :
528 : /*
8720 bruce 529 ECB : * AllocSetReset
530 : * Frees all memory which is allocated in the given set.
531 : *
532 : * Actually, this routine has some discretion about what to do.
7420 tgl 533 : * It should mark all allocated chunks freed, but it need not necessarily
534 : * give back all the resources the set owns. Our actual implementation is
535 : * that we give back all but the "keeper" block (which we must keep, since
536 : * it shares a malloc chunk with the context header). In this way, we don't
1943 537 : * thrash malloc() when a context is repeatedly reset after small allocations,
538 : * which is typical behavior for per-tuple contexts.
539 : */
540 : void
8320 tgl 541 GIC 25269729 : AllocSetReset(MemoryContext context)
9770 scrappy 542 ECB : {
8320 tgl 543 GIC 25269729 : AllocSet set = (AllocSet) context;
544 : AllocBlock block;
545 : Size keepersize PG_USED_FOR_ASSERTS_ONLY;
9345 bruce 546 ECB :
163 peter 547 GNC 25269729 : Assert(AllocSetIsValid(set));
9345 bruce 548 ECB :
549 : #ifdef MEMORY_CONTEXT_CHECKING
8164 tgl 550 : /* Check for corruption and leaks before freeing */
8164 tgl 551 GIC 25269729 : AllocSetCheck(context);
552 : #endif
8164 tgl 553 ECB :
554 : /* Remember keeper block size for Assert below */
181 tgl 555 GNC 25269729 : keepersize = set->keeper->endptr - ((char *) set);
556 :
557 : /* Clear chunk freelists */
6539 tgl 558 GIC 303236748 : MemSetAligned(set->freelist, 0, sizeof(set->freelist));
6539 tgl 559 ECB :
6429 tgl 560 GIC 25269729 : block = set->blocks;
561 :
562 : /* New blocks list will be just the keeper block */
8122 563 25269729 : set->blocks = set->keeper;
8122 tgl 564 ECB :
6429 tgl 565 CBC 54425203 : while (block != NULL)
8828 JanWieck 566 ECB : {
8320 tgl 567 GIC 29155474 : AllocBlock next = block->next;
568 :
569 29155474 : if (block == set->keeper)
570 : {
8320 tgl 571 ECB : /* Reset the block, but don't return it to malloc */
8053 bruce 572 GIC 25269729 : char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
573 :
8432 tgl 574 ECB : #ifdef CLOBBER_FREED_MEMORY
3574 noah 575 GIC 25269729 : wipe_mem(datastart, block->freeptr - datastart);
3574 noah 576 ECB : #else
577 : /* wipe_mem() would have done this */
578 : VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
579 : #endif
8306 tgl 580 GIC 25269729 : block->freeptr = datastart;
2223 tgl 581 CBC 25269729 : block->prev = NULL;
8306 tgl 582 GIC 25269729 : block->next = NULL;
583 : }
8320 tgl 584 ECB : else
585 : {
586 : /* Normal case, release the block */
1060 tgl 587 GIC 3885745 : context->mem_allocated -= block->endptr - ((char *) block);
588 :
589 : #ifdef CLOBBER_FREED_MEMORY
3574 noah 590 3885745 : wipe_mem(block, block->freeptr - ((char *) block));
591 : #endif
8320 tgl 592 3885745 : free(block);
593 : }
8828 JanWieck 594 29155474 : block = next;
8828 JanWieck 595 ECB : }
596 :
1116 jdavis 597 CBC 25269729 : Assert(context->mem_allocated == keepersize);
1286 tomas.vondra 598 ECB :
599 : /* Reset block size allocation sequence, too */
5996 tgl 600 GIC 25269729 : set->nextBlockSize = set->initBlockSize;
9770 scrappy 601 CBC 25269729 : }
602 :
603 : /*
604 : * AllocSetDelete
8320 tgl 605 ECB : * Frees all memory which is allocated in the given set,
606 : * in preparation for deletion of the set.
607 : *
608 : * Unlike AllocSetReset, this *must* free all resources of the set.
9770 scrappy 609 : */
610 : void
8320 tgl 611 GIC 4943417 : AllocSetDelete(MemoryContext context)
612 : {
613 4943417 : AllocSet set = (AllocSet) context;
614 4943417 : AllocBlock block = set->blocks;
615 : Size keepersize PG_USED_FOR_ASSERTS_ONLY;
8320 tgl 616 ECB :
163 peter 617 GNC 4943417 : Assert(AllocSetIsValid(set));
618 :
619 : #ifdef MEMORY_CONTEXT_CHECKING
620 : /* Check for corruption and leaks before freeing */
8164 tgl 621 GIC 4943417 : AllocSetCheck(context);
8164 tgl 622 ECB : #endif
623 :
624 : /* Remember keeper block size for Assert below */
181 tgl 625 GNC 4943417 : keepersize = set->keeper->endptr - ((char *) set);
626 :
627 : /*
628 : * If the context is a candidate for a freelist, put it into that freelist
629 : * instead of destroying it.
630 : */
1943 tgl 631 GIC 4943417 : if (set->freeListIndex >= 0)
1943 tgl 632 ECB : {
1943 tgl 633 GIC 4943417 : AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];
1943 tgl 634 ECB :
635 : /*
636 : * Reset the context, if it needs it, so that we aren't hanging on to
637 : * more than the initial malloc chunk.
638 : */
1943 tgl 639 CBC 4943417 : if (!context->isReset)
1943 tgl 640 GIC 3379531 : MemoryContextResetOnly(context);
641 :
1943 tgl 642 ECB : /*
643 : * If the freelist is full, just discard what's already in it. See
644 : * comments with context_freelists[].
645 : */
1943 tgl 646 GIC 4943417 : if (freelist->num_free >= MAX_FREE_CONTEXTS)
647 : {
1943 tgl 648 CBC 23432 : while (freelist->first_free != NULL)
1943 tgl 649 ECB : {
1943 tgl 650 CBC 23200 : AllocSetContext *oldset = freelist->first_free;
651 :
652 23200 : freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
1943 tgl 653 GIC 23200 : freelist->num_free--;
654 :
655 : /* All that remains is to free the header/initial block */
1943 tgl 656 GBC 23200 : free(oldset);
657 : }
658 232 : Assert(freelist->num_free == 0);
659 : }
1943 tgl 660 EUB :
661 : /* Now add the just-deleted context to the freelist. */
1943 tgl 662 GIC 4943417 : set->header.nextchild = (MemoryContext) freelist->first_free;
663 4943417 : freelist->first_free = set;
1943 tgl 664 GBC 4943417 : freelist->num_free++;
665 :
1943 tgl 666 GIC 4943417 : return;
1943 tgl 667 EUB : }
8122 668 :
669 : /* Free all blocks, except the keeper which is part of context header */
8320 tgl 670 UBC 0 : while (block != NULL)
671 : {
8320 tgl 672 UIC 0 : AllocBlock next = block->next;
8320 tgl 673 EUB :
1286 tomas.vondra 674 UIC 0 : if (block != set->keeper)
1116 jdavis 675 0 : context->mem_allocated -= block->endptr - ((char *) block);
1286 tomas.vondra 676 EUB :
677 : #ifdef CLOBBER_FREED_MEMORY
3574 noah 678 UIC 0 : wipe_mem(block, block->freeptr - ((char *) block));
679 : #endif
680 :
1943 tgl 681 0 : if (block != set->keeper)
682 0 : free(block);
683 :
8320 684 0 : block = next;
685 : }
686 :
1116 jdavis 687 0 : Assert(context->mem_allocated == keepersize);
688 :
689 : /* Finally, free the context header, including the keeper block */
1943 tgl 690 0 : free(set);
691 : }
692 :
9770 scrappy 693 ECB : /*
694 : * AllocSetAlloc
2992 rhaas 695 : * Returns pointer to allocated memory of given size or NULL if
696 : * request could not be completed; memory is added to the set.
697 : *
698 : * No request may exceed:
699 : * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
700 : * All callers use a much-lower limit.
701 : *
1962 tgl 702 : * Note: when using valgrind, it doesn't matter how the returned allocation
703 : * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
704 : * return space that is marked NOACCESS - AllocSetRealloc has to beware!
705 : */
706 : void *
8320 tgl 707 GIC 625246354 : AllocSetAlloc(MemoryContext context, Size size)
9770 scrappy 708 ECB : {
8320 tgl 709 GIC 625246354 : AllocSet set = (AllocSet) context;
710 : AllocBlock block;
711 : MemoryChunk *chunk;
8164 tgl 712 ECB : int fidx;
713 : Size chunk_size;
714 : Size blksize;
715 :
163 peter 716 GNC 625246354 : Assert(AllocSetIsValid(set));
9345 bruce 717 ECB :
8828 JanWieck 718 : /*
6385 bruce 719 : * If requested size exceeds maximum for chunks, allocate an entire block
6385 bruce 720 EUB : * for this request.
721 : */
5947 tgl 722 CBC 625246354 : if (size > set->allocChunkLimit)
723 : {
724 : #ifdef MEMORY_CONTEXT_CHECKING
725 : /* ensure there's always space for the sentinel byte */
214 drowley 726 GNC 11334379 : chunk_size = MAXALIGN(size + 1);
727 : #else
8164 tgl 728 ECB : chunk_size = MAXALIGN(size);
729 : #endif
730 :
8164 tgl 731 CBC 11334379 : blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
8164 tgl 732 GIC 11334379 : block = (AllocBlock) malloc(blksize);
8164 tgl 733 CBC 11334379 : if (block == NULL)
2992 rhaas 734 UIC 0 : return NULL;
735 :
1116 jdavis 736 CBC 11334379 : context->mem_allocated += blksize;
737 :
8164 tgl 738 GIC 11334379 : block->aset = set;
8164 tgl 739 CBC 11334379 : block->freeptr = block->endptr = ((char *) block) + blksize;
740 :
223 drowley 741 GNC 11334379 : chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
742 :
743 : /* mark the MemoryChunk as externally managed */
744 11334379 : MemoryChunkSetHdrMaskExternal(chunk, MCTX_ASET_ID);
745 :
746 : #ifdef MEMORY_CONTEXT_CHECKING
8164 tgl 747 GIC 11334379 : chunk->requested_size = size;
748 : /* set mark to catch clobber of "unused" space */
214 drowley 749 GNC 11334379 : Assert(size < chunk_size);
750 11334379 : set_sentinel(MemoryChunkGetPointer(chunk), size);
751 : #endif
752 : #ifdef RANDOMIZE_ALLOCATED_MEMORY
753 : /* fill the allocated space with junk */
754 : randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
5476 tgl 755 ECB : #endif
756 :
8164 757 : /*
2223 758 : * Stick the new block underneath the active allocation block, if any,
759 : * so that we don't lose the use of the space remaining therein.
8164 760 : */
8164 tgl 761 CBC 11334379 : if (set->blocks != NULL)
762 : {
2223 tgl 763 GIC 11334379 : block->prev = set->blocks;
8164 764 11334379 : block->next = set->blocks->next;
2223 tgl 765 GBC 11334379 : if (block->next)
766 9260544 : block->next->prev = block;
8164 767 11334379 : set->blocks->next = block;
768 : }
769 : else
770 : {
2223 tgl 771 UIC 0 : block->prev = NULL;
8164 772 0 : block->next = NULL;
773 0 : set->blocks = block;
774 : }
775 :
776 : /* Ensure any padding bytes are marked NOACCESS. */
777 : VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
778 : chunk_size - size);
779 :
780 : /* Disallow external access to private part of chunk header. */
781 : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
782 :
223 drowley 783 GNC 11334379 : return MemoryChunkGetPointer(chunk);
784 : }
785 :
786 : /*
787 : * Request is small enough to be treated as a chunk. Look in the
788 : * corresponding free list to see if there is a free chunk we could reuse.
789 : * If one is found, remove it from the free list, make it again a member
790 : * of the alloc set and return its data address.
791 : *
792 : * Note that we don't attempt to ensure there's space for the sentinel
793 : * byte here. We expect a large proportion of allocations to be for sizes
794 : * which are already a power of 2. If we were to always make space for a
795 : * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
796 : * doubling the memory requirements for such allocations.
797 : */
5823 tgl 798 CBC 613911975 : fidx = AllocSetFreeIndex(size);
799 613911975 : chunk = set->freelist[fidx];
8164 800 613911975 : if (chunk != NULL)
801 : {
223 drowley 802 GNC 223843982 : AllocFreeListLink *link = GetFreeListLink(chunk);
803 :
804 223843982 : Assert(fidx == MemoryChunkGetValue(chunk));
805 :
806 : /* pop this chunk off the freelist */
807 : VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
808 223843982 : set->freelist[fidx] = link->next;
809 : VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
810 :
8307 bruce 811 ECB : #ifdef MEMORY_CONTEXT_CHECKING
8164 tgl 812 GIC 223843982 : chunk->requested_size = size;
813 : /* set mark to catch clobber of "unused" space */
223 drowley 814 GNC 223843982 : if (size < GetChunkSizeFromFreeListIdx(fidx))
815 130931138 : set_sentinel(MemoryChunkGetPointer(chunk), size);
816 : #endif
5476 tgl 817 ECB : #ifdef RANDOMIZE_ALLOCATED_MEMORY
818 : /* fill the allocated space with junk */
819 : randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
820 : #endif
821 :
822 : /* Ensure any padding bytes are marked NOACCESS. */
823 : VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
824 : GetChunkSizeFromFreeListIdx(fidx) - size);
825 :
826 : /* Disallow external access to private part of chunk header. */
827 : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
828 :
223 drowley 829 GNC 223843982 : return MemoryChunkGetPointer(chunk);
830 : }
831 :
8828 JanWieck 832 ECB : /*
833 : * Choose the actual chunk size to allocate.
834 : */
223 drowley 835 GNC 390067993 : chunk_size = GetChunkSizeFromFreeListIdx(fidx);
8723 tgl 836 GIC 390067993 : Assert(chunk_size >= size);
837 :
8723 tgl 838 ECB : /*
6385 bruce 839 : * If there is enough room in the active allocation block, we will put the
840 : * chunk into that block. Else must start a new one.
841 : */
8723 tgl 842 GIC 390067993 : if ((block = set->blocks) != NULL)
843 : {
8164 844 390067993 : Size availspace = block->endptr - block->freeptr;
9345 bruce 845 ECB :
8164 tgl 846 GIC 390067993 : if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
8723 tgl 847 ECB : {
848 : /*
6385 bruce 849 : * The existing active (top) block does not have enough room for
850 : * the requested allocation, but it might still have a useful
851 : * amount of space in it. Once we push it down in the block list,
852 : * we'll never try to allocate more space from it. So, before we
853 : * do that, carve up its free space into chunks that we can put on
854 : * the set's freelists.
855 : *
856 : * Because we can only get here when there's less than
857 : * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
858 : * more than ALLOCSET_NUM_FREELISTS-1 times.
859 : */
8164 tgl 860 GIC 10298307 : while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
861 : {
862 : AllocFreeListLink *link;
8053 bruce 863 6459823 : Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
8053 bruce 864 CBC 6459823 : int a_fidx = AllocSetFreeIndex(availchunk);
865 :
866 : /*
6385 bruce 867 ECB : * In most cases, we'll get back the index of the next larger
3260 868 : * freelist than the one we need to put this chunk on. The
869 : * exception is when availchunk is exactly a power of 2.
870 : */
223 drowley 871 GNC 6459823 : if (availchunk != GetChunkSizeFromFreeListIdx(a_fidx))
872 : {
8053 bruce 873 GIC 5185206 : a_fidx--;
8164 tgl 874 5185206 : Assert(a_fidx >= 0);
223 drowley 875 GNC 5185206 : availchunk = GetChunkSizeFromFreeListIdx(a_fidx);
876 : }
8164 tgl 877 ECB :
223 drowley 878 GNC 6459823 : chunk = (MemoryChunk *) (block->freeptr);
8122 tgl 879 ECB :
880 : /* Prepare to initialize the chunk header. */
881 : VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
8122 tgl 882 GIC 6459823 : block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
883 6459823 : availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
884 :
885 : /* store the freelist index in the value field */
223 drowley 886 GNC 6459823 : MemoryChunkSetHdrMask(chunk, block, a_fidx, MCTX_ASET_ID);
8164 tgl 887 ECB : #ifdef MEMORY_CONTEXT_CHECKING
223 drowley 888 GNC 6459823 : chunk->requested_size = InvalidAllocSize; /* mark it free */
889 : #endif
890 : /* push this chunk onto the free list */
891 6459823 : link = GetFreeListLink(chunk);
892 :
893 : VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
894 6459823 : link->next = set->freelist[a_fidx];
895 : VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
896 :
8164 tgl 897 GIC 6459823 : set->freelist[a_fidx] = chunk;
8164 tgl 898 ECB : }
899 : /* Mark that we need to create a new block */
8164 tgl 900 CBC 3838484 : block = NULL;
901 : }
902 : }
8164 tgl 903 ECB :
904 : /*
905 : * Time to create a new regular (multi-chunk) block?
906 : */
8164 tgl 907 GIC 390067993 : if (block == NULL)
908 : {
8053 bruce 909 ECB : Size required_size;
910 :
911 : /*
912 : * The first such block has size initBlockSize, and we double the
913 : * space in each succeeding block, but not more than maxBlockSize.
914 : */
5996 tgl 915 GIC 3838484 : blksize = set->nextBlockSize;
5996 tgl 916 CBC 3838484 : set->nextBlockSize <<= 1;
5996 tgl 917 GIC 3838484 : if (set->nextBlockSize > set->maxBlockSize)
918 716481 : set->nextBlockSize = set->maxBlockSize;
919 :
920 : /*
921 : * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
922 : * space... but try to keep it a power of 2.
923 : */
8164 tgl 924 CBC 3838484 : required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
7148 925 4244209 : while (blksize < required_size)
926 405725 : blksize <<= 1;
8164 tgl 927 ECB :
928 : /* Try to allocate it */
8164 tgl 929 GIC 3838484 : block = (AllocBlock) malloc(blksize);
930 :
931 : /*
932 : * We could be asking for pretty big blocks here, so cope if malloc
1350 michael 933 ECB : * fails. But give up if there's less than 1 MB or so available...
8164 tgl 934 : */
8164 tgl 935 CBC 3838484 : while (block == NULL && blksize > 1024 * 1024)
936 : {
8164 tgl 937 UIC 0 : blksize >>= 1;
8164 tgl 938 LBC 0 : if (blksize < required_size)
8164 tgl 939 UIC 0 : break;
940 0 : block = (AllocBlock) malloc(blksize);
941 : }
942 :
8828 JanWieck 943 GIC 3838484 : if (block == NULL)
2992 rhaas 944 LBC 0 : return NULL;
945 :
1116 jdavis 946 GBC 3838484 : context->mem_allocated += blksize;
1286 tomas.vondra 947 EUB :
8828 JanWieck 948 GBC 3838484 : block->aset = set;
8720 bruce 949 3838484 : block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
8720 bruce 950 GIC 3838484 : block->endptr = ((char *) block) + blksize;
951 :
3574 noah 952 ECB : /* Mark unallocated space NOACCESS. */
3574 noah 953 EUB : VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
954 : blksize - ALLOC_BLOCKHDRSZ);
3574 noah 955 ECB :
2223 tgl 956 GIC 3838484 : block->prev = NULL;
8723 tgl 957 CBC 3838484 : block->next = set->blocks;
2223 958 3838484 : if (block->next)
959 3838484 : block->next->prev = block;
8828 JanWieck 960 GIC 3838484 : set->blocks = block;
961 : }
962 :
963 : /*
964 : * OK, do the allocation
8723 tgl 965 ECB : */
223 drowley 966 GNC 390067993 : chunk = (MemoryChunk *) (block->freeptr);
8122 tgl 967 ECB :
3574 noah 968 : /* Prepare to initialize the chunk header. */
2232 andres 969 : VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
970 :
8122 tgl 971 GIC 390067993 : block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
972 390067993 : Assert(block->freeptr <= block->endptr);
973 :
974 : /* store the free list index in the value field */
223 drowley 975 GNC 390067993 : MemoryChunkSetHdrMask(chunk, block, fidx, MCTX_ASET_ID);
976 :
977 : #ifdef MEMORY_CONTEXT_CHECKING
8164 tgl 978 GIC 390067993 : chunk->requested_size = size;
979 : /* set mark to catch clobber of "unused" space */
223 drowley 980 GNC 390067993 : if (size < chunk_size)
981 269776560 : set_sentinel(MemoryChunkGetPointer(chunk), size);
8053 bruce 982 ECB : #endif
983 : #ifdef RANDOMIZE_ALLOCATED_MEMORY
984 : /* fill the allocated space with junk */
985 : randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
986 : #endif
987 :
1962 tgl 988 : /* Ensure any padding bytes are marked NOACCESS. */
989 : VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
990 : chunk_size - size);
991 :
992 : /* Disallow external access to private part of chunk header. */
993 : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
994 :
223 drowley 995 GNC 390067993 : return MemoryChunkGetPointer(chunk);
996 : }
997 :
/*
 * AllocSetFree
 *		Frees allocated memory; memory is removed from the set.
 *
 * There are two cases: a chunk flagged "external" was allocated as a
 * dedicated single-chunk block, and the whole block is returned to malloc
 * here; an ordinary chunk is simply pushed onto the freelist matching its
 * size class, for reuse by a later AllocSetAlloc().
 */
void
AllocSetFree(void *pointer)
{
	AllocSet	set;
	MemoryChunk *chunk = PointerGetMemoryChunk(pointer);

	/* Allow access to private part of chunk header. */
	VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);

	if (MemoryChunkIsExternal(chunk))
	{
		/* Release single-chunk block. */
		AllocBlock	block = ExternalChunkGetBlock(chunk);

		/*
		 * Try to verify that we have a sane block pointer: the block header
		 * should reference an aset and the freeptr should match the endptr.
		 */
		if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
			elog(ERROR, "could not find block containing chunk %p", chunk);

		set = block->aset;

#ifdef MEMORY_CONTEXT_CHECKING
		{
			/* Test for someone scribbling on unused space in chunk */
			Assert(chunk->requested_size < (block->endptr - (char *) pointer));
			if (!sentinel_ok(pointer, chunk->requested_size))
				elog(WARNING, "detected write past chunk end in %s %p",
					 set->header.name, chunk);
		}
#endif

		/* OK, remove block from aset's list and free it */
		if (block->prev)
			block->prev->next = block->next;
		else
			set->blocks = block->next;
		if (block->next)
			block->next->prev = block->prev;

		/* keep the context's memory accounting in step with the release */
		set->header.mem_allocated -= block->endptr - ((char *) block);

#ifdef CLOBBER_FREED_MEMORY
		/* scribble on the released region to help catch use-after-free */
		wipe_mem(block, block->freeptr - ((char *) block));
#endif
		free(block);
	}
	else
	{
		AllocBlock	block = MemoryChunkGetBlock(chunk);
		int			fidx;
		AllocFreeListLink *link;

		/*
		 * In this path, for speed reasons we just Assert that the referenced
		 * block is good.  We can also Assert that the value field is sane.
		 * Future field experience may show that these Asserts had better
		 * become regular runtime test-and-elog checks.
		 */
		Assert(AllocBlockIsValid(block));
		set = block->aset;

		/* the chunk's header "value" field stores its freelist index */
		fidx = MemoryChunkGetValue(chunk);
		Assert(FreeListIdxIsValid(fidx));
		link = GetFreeListLink(chunk);

#ifdef MEMORY_CONTEXT_CHECKING
		/* Test for someone scribbling on unused space in chunk */
		if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
			if (!sentinel_ok(pointer, chunk->requested_size))
				elog(WARNING, "detected write past chunk end in %s %p",
					 set->header.name, chunk);
#endif

#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(pointer, GetChunkSizeFromFreeListIdx(fidx));
#endif
		/* push this chunk onto the top of the free list */
		VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
		link->next = set->freelist[fidx];
		VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
		set->freelist[fidx] = chunk;

#ifdef MEMORY_CONTEXT_CHECKING

		/*
		 * Reset requested_size to InvalidAllocSize in chunks that are on free
		 * list.
		 */
		chunk->requested_size = InvalidAllocSize;
#endif
	}
}
1096 :
/*
 * AllocSetRealloc
 *		Returns new pointer to allocated memory of given size or NULL if
 *		request could not be completed; this memory is added to the set.
 *		Memory associated with given pointer is copied into the new memory,
 *		and the old memory is freed.
 *
 * Three cases are handled below: (1) an "external" single-chunk block is
 * resized in place with realloc(); (2) an ordinary chunk whose power-of-2
 * size class already covers the new request is returned as-is; (3) an
 * ordinary chunk that must grow is replaced via AllocSetAlloc + memcpy +
 * AllocSetFree.
 *
 * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size.  This
 * makes our Valgrind client requests less-precise, hazarding false negatives.
 * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
 * request size.)
 */
void *
AllocSetRealloc(void *pointer, Size size)
{
	AllocBlock	block;
	AllocSet	set;
	MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
	Size		oldchksize;
	int			fidx;

	/* Allow access to private part of chunk header. */
	VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);

	if (MemoryChunkIsExternal(chunk))
	{
		/*
		 * The chunk must have been allocated as a single-chunk block.  Use
		 * realloc() to make the containing block bigger, or smaller, with
		 * minimum space wastage.
		 */
		Size		chksize;
		Size		blksize;
		Size		oldblksize;

		block = ExternalChunkGetBlock(chunk);

		/*
		 * Try to verify that we have a sane block pointer: the block header
		 * should reference an aset and the freeptr should match the endptr.
		 */
		if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
			elog(ERROR, "could not find block containing chunk %p", chunk);

		set = block->aset;

		oldchksize = block->endptr - (char *) pointer;

#ifdef MEMORY_CONTEXT_CHECKING
		/* Test for someone scribbling on unused space in chunk */
		Assert(chunk->requested_size < oldchksize);
		if (!sentinel_ok(pointer, chunk->requested_size))
			elog(WARNING, "detected write past chunk end in %s %p",
				 set->header.name, chunk);
#endif

#ifdef MEMORY_CONTEXT_CHECKING
		/* ensure there's always space for the sentinel byte */
		chksize = MAXALIGN(size + 1);
#else
		chksize = MAXALIGN(size);
#endif

		/* Do the realloc */
		blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
		oldblksize = block->endptr - ((char *) block);

		block = (AllocBlock) realloc(block, blksize);
		if (block == NULL)
		{
			/* old block is still valid; report failure to caller */
			/* Disallow external access to private part of chunk header. */
			VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
			return NULL;
		}

		/* updated separately, not to underflow when (oldblksize > blksize) */
		set->header.mem_allocated -= oldblksize;
		set->header.mem_allocated += blksize;

		block->freeptr = block->endptr = ((char *) block) + blksize;

		/* Update pointers since block has likely been moved */
		chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
		pointer = MemoryChunkGetPointer(chunk);
		if (block->prev)
			block->prev->next = block;
		else
			set->blocks = block;
		if (block->next)
			block->next->prev = block;

#ifdef MEMORY_CONTEXT_CHECKING
#ifdef RANDOMIZE_ALLOCATED_MEMORY

		/*
		 * We can only randomize the extra space if we know the prior request.
		 * When using Valgrind, randomize_mem() also marks memory UNDEFINED.
		 */
		if (size > chunk->requested_size)
			randomize_mem((char *) pointer + chunk->requested_size,
						  size - chunk->requested_size);
#else

		/*
		 * If this is an increase, realloc() will have marked any
		 * newly-allocated part (from oldchksize to chksize) UNDEFINED, but we
		 * also need to adjust trailing bytes from the old allocation (from
		 * chunk->requested_size to oldchksize) as they are marked NOACCESS.
		 * Make sure not to mark too many bytes in case chunk->requested_size
		 * < size < oldchksize.
		 */
#ifdef USE_VALGRIND
		if (Min(size, oldchksize) > chunk->requested_size)
			VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
										Min(size, oldchksize) - chunk->requested_size);
#endif
#endif

		chunk->requested_size = size;
		/* set mark to catch clobber of "unused" space */
		Assert(size < chksize);
		set_sentinel(pointer, size);
#else							/* !MEMORY_CONTEXT_CHECKING */

		/*
		 * We may need to adjust marking of bytes from the old allocation as
		 * some of them may be marked NOACCESS.  We don't know how much of the
		 * old chunk size was the requested size; it could have been as small
		 * as one byte.  We have to be conservative and just mark the entire
		 * old portion DEFINED.  Make sure not to mark memory beyond the new
		 * allocation in case it's smaller than the old one.
		 */
		VALGRIND_MAKE_MEM_DEFINED(pointer, Min(size, oldchksize));
#endif

		/* Ensure any padding bytes are marked NOACCESS. */
		VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);

		/* Disallow external access to private part of chunk header. */
		VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);

		return pointer;
	}

	block = MemoryChunkGetBlock(chunk);

	/*
	 * In this path, for speed reasons we just Assert that the referenced
	 * block is good.  We can also Assert that the value field is sane.  Future
	 * field experience may show that these Asserts had better become regular
	 * runtime test-and-elog checks.
	 */
	Assert(AllocBlockIsValid(block));
	set = block->aset;

	/* recover the chunk's size class from its header "value" field */
	fidx = MemoryChunkGetValue(chunk);
	Assert(FreeListIdxIsValid(fidx));
	oldchksize = GetChunkSizeFromFreeListIdx(fidx);

#ifdef MEMORY_CONTEXT_CHECKING
	/* Test for someone scribbling on unused space in chunk */
	if (chunk->requested_size < oldchksize)
		if (!sentinel_ok(pointer, chunk->requested_size))
			elog(WARNING, "detected write past chunk end in %s %p",
				 set->header.name, chunk);
#endif

	/*
	 * Chunk sizes are aligned to power of 2 in AllocSetAlloc().  Maybe the
	 * allocated area already is >= the new size.  (In particular, we will
	 * fall out here if the requested size is a decrease.)
	 */
	if (oldchksize >= size)
	{
#ifdef MEMORY_CONTEXT_CHECKING
		Size		oldrequest = chunk->requested_size;

#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* We can only fill the extra space if we know the prior request */
		if (size > oldrequest)
			randomize_mem((char *) pointer + oldrequest,
						  size - oldrequest);
#endif

		chunk->requested_size = size;

		/*
		 * If this is an increase, mark any newly-available part UNDEFINED.
		 * Otherwise, mark the obsolete part NOACCESS.
		 */
		if (size > oldrequest)
			VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
										size - oldrequest);
		else
			VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
									   oldchksize - size);

		/* set mark to catch clobber of "unused" space */
		if (size < oldchksize)
			set_sentinel(pointer, size);
#else							/* !MEMORY_CONTEXT_CHECKING */

		/*
		 * We don't have the information to determine whether we're growing
		 * the old request or shrinking it, so we conservatively mark the
		 * entire new allocation DEFINED.
		 */
		VALGRIND_MAKE_MEM_NOACCESS(pointer, oldchksize);
		VALGRIND_MAKE_MEM_DEFINED(pointer, size);
#endif

		/* Disallow external access to private part of chunk header. */
		VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);

		return pointer;
	}
	else
	{
		/*
		 * Enlarge-a-small-chunk case.  We just do this by brute force, ie,
		 * allocate a new chunk and copy the data.  Since we know the existing
		 * data isn't huge, this won't involve any great memcpy expense, so
		 * it's not worth being smarter.  (At one time we tried to avoid
		 * memcpy when it was possible to enlarge the chunk in-place, but that
		 * turns out to misbehave unpleasantly for repeated cycles of
		 * palloc/repalloc/pfree: the eventually freed chunks go into the
		 * wrong freelist for the next initial palloc request, and so we leak
		 * memory indefinitely.  See pgsql-hackers archives for 2007-08-11.)
		 */
		AllocPointer newPointer;
		Size		oldsize;

		/* allocate new chunk */
		newPointer = AllocSetAlloc((MemoryContext) set, size);

		/* leave immediately if request was not completed */
		if (newPointer == NULL)
		{
			/* Disallow external access to private part of chunk header. */
			VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
			return NULL;
		}

		/*
		 * AllocSetAlloc() may have returned a region that is still NOACCESS.
		 * Change it to UNDEFINED for the moment; memcpy() will then transfer
		 * definedness from the old allocation to the new.  If we know the old
		 * allocation, copy just that much.  Otherwise, make the entire old
		 * chunk defined to avoid errors as we copy the currently-NOACCESS
		 * trailing bytes.
		 */
		VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
#ifdef MEMORY_CONTEXT_CHECKING
		oldsize = chunk->requested_size;
#else
		oldsize = oldchksize;
		VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif

		/* transfer existing data (certain to fit) */
		memcpy(newPointer, pointer, oldsize);

		/* free old chunk */
		AllocSetFree(pointer);

		return newPointer;
	}
}
1365 :
1366 : /*
1367 : * AllocSetGetChunkContext
1368 : * Return the MemoryContext that 'pointer' belongs to.
1369 : */
1370 : MemoryContext
223 drowley 1371 GNC 4837875 : AllocSetGetChunkContext(void *pointer)
1372 : {
1373 4837875 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1374 : AllocBlock block;
1375 : AllocSet set;
1376 :
1377 4837875 : if (MemoryChunkIsExternal(chunk))
1378 218722 : block = ExternalChunkGetBlock(chunk);
1379 : else
1380 4619153 : block = (AllocBlock) MemoryChunkGetBlock(chunk);
1381 :
163 peter 1382 4837875 : Assert(AllocBlockIsValid(block));
223 drowley 1383 4837875 : set = block->aset;
1384 :
1385 4837875 : return &set->header;
1386 : }
1387 :
7545 tgl 1388 ECB : /*
1389 : * AllocSetGetChunkSpace
1390 : * Given a currently-allocated chunk, determine the total space
1391 : * it occupies (including all memory-allocation overhead).
1392 : */
1393 : Size
223 drowley 1394 GNC 21026289 : AllocSetGetChunkSpace(void *pointer)
1395 : {
1396 21026289 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1397 : int fidx;
1398 :
1399 21026289 : if (MemoryChunkIsExternal(chunk))
1400 : {
1401 515544 : AllocBlock block = ExternalChunkGetBlock(chunk);
1402 :
163 peter 1403 515544 : Assert(AllocBlockIsValid(block));
223 drowley 1404 515544 : return block->endptr - (char *) chunk;
1405 : }
1406 :
181 tgl 1407 20510745 : fidx = MemoryChunkGetValue(chunk);
1408 20510745 : Assert(FreeListIdxIsValid(fidx));
1409 20510745 : return GetChunkSizeFromFreeListIdx(fidx) + ALLOC_CHUNKHDRSZ;
1410 : }
1411 :
1412 : /*
1413 : * AllocSetIsEmpty
6779 tgl 1414 ECB : * Is an allocset empty of any allocated space?
1415 : */
1416 : bool
6779 tgl 1417 CBC 4330 : AllocSetIsEmpty(MemoryContext context)
1418 : {
163 peter 1419 GNC 4330 : Assert(AllocSetIsValid(context));
1420 :
1421 : /*
1422 : * For now, we say "empty" only if the context is new or just reset. We
6385 bruce 1423 EUB : * could examine the freelists to determine if all space has been freed,
1424 : * but it's not really worth the trouble for present uses of this
1425 : * functionality.
1426 : */
4341 heikki.linnakangas 1427 GIC 4330 : if (context->isReset)
6779 tgl 1428 4318 : return true;
1429 12 : return false;
1430 : }
1431 :
1432 : /*
1433 : * AllocSetStats
1434 : * Compute stats about memory consumption of an allocset.
1435 : *
1839 tgl 1436 ECB : * printfunc: if not NULL, pass a human-readable stats string to this.
1437 : * passthru: pass this pointer through to printfunc.
1438 : * totals: if not NULL, add stats about this context into *totals.
1439 : * print_to_stderr: print stats to stderr if true, elog otherwise.
1440 : */
1441 : void
1839 tgl 1442 GIC 1499 : AllocSetStats(MemoryContext context,
1839 tgl 1443 ECB : MemoryStatsPrintFunc printfunc, void *passthru,
1444 : MemoryContextCounters *totals, bool print_to_stderr)
1445 : {
8320 tgl 1446 CBC 1499 : AllocSet set = (AllocSet) context;
3363 tgl 1447 GIC 1499 : Size nblocks = 0;
2784 tgl 1448 CBC 1499 : Size freechunks = 0;
1449 : Size totalspace;
3363 tgl 1450 GIC 1499 : Size freespace = 0;
1451 : AllocBlock block;
1452 : int fidx;
1453 :
163 peter 1454 GNC 1499 : Assert(AllocSetIsValid(set));
1455 :
1456 : /* Include context header in totalspace */
1839 tgl 1457 GIC 1499 : totalspace = MAXALIGN(sizeof(AllocSetContext));
1458 :
8358 tgl 1459 CBC 4216 : for (block = set->blocks; block != NULL; block = block->next)
1460 : {
1461 2717 : nblocks++;
8358 tgl 1462 GIC 2717 : totalspace += block->endptr - ((char *) block);
1463 2717 : freespace += block->endptr - block->freeptr;
1464 : }
8358 tgl 1465 CBC 17988 : for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
8358 tgl 1466 ECB : {
181 tgl 1467 GNC 16489 : Size chksz = GetChunkSizeFromFreeListIdx(fidx);
223 drowley 1468 16489 : MemoryChunk *chunk = set->freelist[fidx];
2784 tgl 1469 ECB :
223 drowley 1470 GNC 21407 : while (chunk != NULL)
8358 tgl 1471 ECB : {
223 drowley 1472 GNC 4918 : AllocFreeListLink *link = GetFreeListLink(chunk);
1473 :
181 tgl 1474 4918 : Assert(MemoryChunkGetValue(chunk) == fidx);
1475 :
2784 tgl 1476 GIC 4918 : freechunks++;
223 drowley 1477 GNC 4918 : freespace += chksz + ALLOC_CHUNKHDRSZ;
1478 :
1479 : VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
1480 4918 : chunk = link->next;
1481 : VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
1482 : }
1483 : }
1484 :
1839 tgl 1485 GIC 1499 : if (printfunc)
1486 : {
1487 : char stats_string[200];
1488 :
1489 767 : snprintf(stats_string, sizeof(stats_string),
473 peter 1490 ECB : "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1491 : totalspace, nblocks, freespace, freechunks,
1839 tgl 1492 : totalspace - freespace);
733 fujii 1493 GIC 767 : printfunc(context, passthru, stats_string, print_to_stderr);
1494 : }
2784 tgl 1495 ECB :
2784 tgl 1496 GIC 1499 : if (totals)
2784 tgl 1497 ECB : {
2784 tgl 1498 GIC 1499 : totals->nblocks += nblocks;
2784 tgl 1499 CBC 1499 : totals->freechunks += freechunks;
1500 1499 : totals->totalspace += totalspace;
2784 tgl 1501 GIC 1499 : totals->freespace += freespace;
1502 : }
8358 tgl 1503 CBC 1499 : }
8307 bruce 1504 ECB :
1505 :
1506 : #ifdef MEMORY_CONTEXT_CHECKING
1507 :
1508 : /*
1509 : * AllocSetCheck
1510 : * Walk through chunks and check consistency of memory.
1511 : *
1512 : * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
8160 tgl 1513 : * find yourself in an infinite loop when trouble occurs, because this
1514 : * routine will be entered again when elog cleanup tries to release memory!
8307 bruce 1515 : */
1516 : void
8307 bruce 1517 GIC 98632521 : AllocSetCheck(MemoryContext context)
1518 : {
8053 1519 98632521 : AllocSet set = (AllocSet) context;
1943 tgl 1520 98632521 : const char *name = set->header.name;
1521 : AllocBlock prevblock;
1522 : AllocBlock block;
1283 tomas.vondra 1523 CBC 98632521 : Size total_allocated = 0;
8307 bruce 1524 ECB :
2223 tgl 1525 CBC 98632521 : for (prevblock = NULL, block = set->blocks;
2223 tgl 1526 GIC 275105514 : block != NULL;
1527 176472993 : prevblock = block, block = block->next)
1528 : {
8053 bruce 1529 176472993 : char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
1530 176472993 : long blk_used = block->freeptr - bpoz;
1531 176472993 : long blk_data = 0;
1532 176472993 : long nchunks = 0;
223 drowley 1533 GNC 176472993 : bool has_external_chunk = false;
1534 :
1286 tomas.vondra 1535 GIC 176472993 : if (set->keeper == block)
1536 98632521 : total_allocated += block->endptr - ((char *) set);
1537 : else
1538 77840472 : total_allocated += block->endptr - ((char *) block);
1286 tomas.vondra 1539 ECB :
1540 : /*
1541 : * Empty block - empty can be keeper-block only
1542 : */
8307 bruce 1543 CBC 176472993 : if (!blk_used)
8307 bruce 1544 ECB : {
8164 tgl 1545 CBC 2679725 : if (set->keeper != block)
7198 tgl 1546 UIC 0 : elog(WARNING, "problem in alloc set %s: empty block %p",
8164 tgl 1547 ECB : name, block);
1548 : }
1549 :
1550 : /*
2223 1551 : * Check block header fields
1552 : */
2223 tgl 1553 GIC 176472993 : if (block->aset != set ||
2223 tgl 1554 CBC 176472993 : block->prev != prevblock ||
2223 tgl 1555 GIC 176472993 : block->freeptr < bpoz ||
2223 tgl 1556 CBC 176472993 : block->freeptr > block->endptr)
2223 tgl 1557 UIC 0 : elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
2223 tgl 1558 ECB : name, block);
1559 :
8307 bruce 1560 : /*
1561 : * Chunk walker
8053 1562 : */
8164 tgl 1563 GIC 3967174220 : while (bpoz < block->freeptr)
8164 tgl 1564 ECB : {
223 drowley 1565 GNC 3790701227 : MemoryChunk *chunk = (MemoryChunk *) bpoz;
1566 : Size chsize,
8164 tgl 1567 ECB : dsize;
1568 :
1962 1569 : /* Allow access to private part of chunk header. */
1570 : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOCCHUNK_PRIVATE_LEN);
1571 :
223 drowley 1572 GNC 3790701227 : if (MemoryChunkIsExternal(chunk))
1573 : {
1574 5193595 : chsize = block->endptr - (char *) MemoryChunkGetPointer(chunk); /* aligned chunk size */
1575 5193595 : has_external_chunk = true;
1576 :
1577 : /* make sure this chunk consumes the entire block */
1578 5193595 : if (chsize + ALLOC_CHUNKHDRSZ != blk_used)
223 drowley 1579 UNC 0 : elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
1580 : name, chunk, block);
1581 : }
1582 : else
1583 : {
181 tgl 1584 GNC 3785507632 : int fidx = MemoryChunkGetValue(chunk);
1585 :
1586 3785507632 : if (!FreeListIdxIsValid(fidx))
181 tgl 1587 UNC 0 : elog(WARNING, "problem in alloc set %s: bad chunk size for chunk %p in block %p",
1588 : name, chunk, block);
1589 :
181 tgl 1590 GNC 3785507632 : chsize = GetChunkSizeFromFreeListIdx(fidx); /* aligned chunk size */
1591 :
1592 : /*
1593 : * Check the stored block offset correctly references this
1594 : * block.
1595 : */
223 drowley 1596 3785507632 : if (block != MemoryChunkGetBlock(chunk))
223 drowley 1597 UNC 0 : elog(WARNING, "problem in alloc set %s: bad block offset for chunk %p in block %p",
1598 : name, chunk, block);
1599 : }
2118 tgl 1600 CBC 3790701227 : dsize = chunk->requested_size; /* real data */
8053 bruce 1601 ECB :
1602 : /* an allocated chunk's requested size must be <= the chsize */
223 drowley 1603 GNC 3790701227 : if (dsize != InvalidAllocSize && dsize > chsize)
7198 tgl 1604 UIC 0 : elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
1605 : name, chunk, block);
1606 :
1607 : /* chsize must not be smaller than the first freelist's size */
8307 bruce 1608 GIC 3790701227 : if (chsize < (1 << ALLOC_MINBITS))
3363 tgl 1609 LBC 0 : elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
1610 : name, chsize, chunk, block);
1611 :
1612 : /*
1613 : * Check for overwrite of padding space in an allocated chunk.
1614 : */
223 drowley 1615 GNC 3790701227 : if (dsize != InvalidAllocSize && dsize < chsize &&
3574 noah 1616 GIC 2548394979 : !sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
7198 tgl 1617 UIC 0 : elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
1618 : name, block, chunk);
1619 :
1620 : /*
1621 : * If chunk is allocated, disallow external access to private part
1622 : * of chunk header.
1623 : */
1624 : if (dsize != InvalidAllocSize)
1962 tgl 1625 ECB : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOCCHUNK_PRIVATE_LEN);
1626 :
8307 bruce 1627 CBC 3790701227 : blk_data += chsize;
1628 3790701227 : nchunks++;
1629 :
8164 tgl 1630 GIC 3790701227 : bpoz += ALLOC_CHUNKHDRSZ + chsize;
8164 tgl 1631 ECB : }
1632 :
8307 bruce 1633 CBC 176472993 : if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
7198 tgl 1634 LBC 0 : elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
8164 tgl 1635 ECB : name, block);
1636 :
223 drowley 1637 GNC 176472993 : if (has_external_chunk && nchunks > 1)
223 drowley 1638 UNC 0 : elog(WARNING, "problem in alloc set %s: external chunk on non-dedicated block %p",
1639 : name, block);
1640 : }
1286 tomas.vondra 1641 ECB :
1116 jdavis 1642 CBC 98632521 : Assert(total_allocated == context->mem_allocated);
8307 bruce 1643 98632521 : }
7833 bruce 1644 ECB :
2118 tgl 1645 : #endif /* MEMORY_CONTEXT_CHECKING */
|