Age | Owner | Branch data | TLA | Line data | Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * aset.c
4 : : * Allocation set definitions.
5 : : *
6 : : * AllocSet is our standard implementation of the abstract MemoryContext
7 : : * type.
8 : : *
9 : : *
10 : : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
11 : : * Portions Copyright (c) 1994, Regents of the University of California
12 : : *
13 : : * IDENTIFICATION
14 : : * src/backend/utils/mmgr/aset.c
15 : : *
16 : : * NOTE:
17 : : * This is a new (Feb. 05, 1999) implementation of the allocation set
18 : : * routines. AllocSet...() does not use OrderedSet...() any more.
19 : : * Instead it manages allocations in a block pool by itself, combining
20 : : * many small allocations in a few bigger blocks. AllocSetFree() normally
21 : : * doesn't really free() memory. It just adds the freed area to some
22 : : * list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
23 : : * at once on AllocSetReset(), which happens when the memory context gets
24 : : * destroyed.
25 : : * Jan Wieck
26 : : *
27 : : * Performance improvement from Tom Lane, 8/99: for extremely large request
28 : : * sizes, we do want to be able to give the memory back to free() as soon
29 : : * as it is pfree()'d. Otherwise we risk tying up a lot of memory in
30 : : * freelist entries that might never be usable. This is especially needed
31 : : * when the caller is repeatedly repalloc()'ing a block bigger and bigger;
32 : : * the previous instances of the block were guaranteed to be wasted until
33 : : * AllocSetReset() under the old way.
34 : : *
35 : : * Further improvement 12/00: as the code stood, request sizes in the
36 : : * midrange between "small" and "large" were handled very inefficiently,
37 : : * because any sufficiently large free chunk would be used to satisfy a
38 : : * request, even if it was much larger than necessary. This led to more
39 : : * and more wasted space in allocated chunks over time. To fix, get rid
40 : : * of the midrange behavior: we now handle only "small" power-of-2-size
41 : : * chunks as chunks. Anything "large" is passed off to malloc(). Change
42 : : * the number of freelists to change the small/large boundary.
43 : : *
44 : : *-------------------------------------------------------------------------
45 : : */
46 : :
47 : : #include "postgres.h"
48 : :
49 : : #include "port/pg_bitutils.h"
50 : : #include "utils/memdebug.h"
51 : : #include "utils/memutils.h"
52 : : #include "utils/memutils_internal.h"
53 : : #include "utils/memutils_memorychunk.h"
54 : :
55 : : /*--------------------
56 : : * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
57 : : * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
58 : : *
59 : : * Note that all chunks in the freelists have power-of-2 sizes. This
60 : : * improves recyclability: we may waste some space, but the wasted space
61 : : * should stay pretty constant as requests are made and released.
62 : : *
63 : : * A request too large for the last freelist is handled by allocating a
64 : : * dedicated block from malloc(). The block still has a block header and
65 : : * chunk header, but when the chunk is freed we'll return the whole block
66 : : * to malloc(), not put it on our freelists.
67 : : *
68 : : * CAUTION: ALLOC_MINBITS must be large enough so that
69 : : * 1<<ALLOC_MINBITS is at least MAXALIGN,
70 : : * or we may fail to align the smallest chunks adequately.
71 : : * 8-byte alignment is enough on all currently known machines. This 8-byte
72 : : * minimum also allows us to store a pointer to the next freelist item within
73 : : * the chunk of memory itself.
74 : : *
75 : : * With the current parameters, request sizes up to 8K are treated as chunks,
76 : : * larger requests go into dedicated blocks. Change ALLOCSET_NUM_FREELISTS
77 : : * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
78 : : * memutils.h to agree. (Note: in contexts with small maxBlockSize, we may
79 : : * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
80 : : *--------------------
81 : : */
82 : :
83 : : #define ALLOC_MINBITS 3 /* smallest chunk size is 8 bytes */
84 : : #define ALLOCSET_NUM_FREELISTS 11
85 : : #define ALLOC_CHUNK_LIMIT (1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
86 : : /* Size of largest chunk that we use a fixed size for */
87 : : #define ALLOC_CHUNK_FRACTION 4
88 : : /* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
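For illustration, a minimal standalone sketch (not part of aset.c) of how these constants map freelist indexes to chunk sizes: with ALLOC_MINBITS = 3 and ALLOCSET_NUM_FREELISTS = 11, the freelists serve 8-, 16-, ..., 8192-byte chunks, and ALLOC_CHUNK_LIMIT works out to 8 kB.

#include <stdio.h>

#define ALLOC_MINBITS 3
#define ALLOCSET_NUM_FREELISTS 11
#define ALLOC_CHUNK_LIMIT (1 << (ALLOCSET_NUM_FREELISTS - 1 + ALLOC_MINBITS))

int
main(void)
{
	/* freelist k holds chunks of size 1 << (k + ALLOC_MINBITS) */
	for (int k = 0; k < ALLOCSET_NUM_FREELISTS; k++)
		printf("freelist %2d: %5d-byte chunks\n", k, 1 << (k + ALLOC_MINBITS));

	/* requests larger than this are given a dedicated malloc'd block */
	printf("ALLOC_CHUNK_LIMIT = %d\n", ALLOC_CHUNK_LIMIT);	/* prints 8192 */
	return 0;
}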
89 : :
90 : : /*--------------------
91 : : * The first block allocated for an allocset has size initBlockSize.
92 : : * Each time we have to allocate another block, we double the block size
93 : : * (if possible, and without exceeding maxBlockSize), so as to reduce
94 : : * the bookkeeping load on malloc().
95 : : *
96 : : * Blocks allocated to hold oversize chunks do not follow this rule, however;
97 : : * they are just however big they need to be to hold that single chunk.
98 : : *
99 : : * Also, if a minContextSize is specified, the first block has that size,
100 : : * and then initBlockSize is used for the next one.
101 : : *--------------------
102 : : */
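A simplified sketch of the block-growth rule just described, assuming the ALLOCSET_DEFAULT_SIZES parameters (8 kB initial, 8 MB maximum block size) and ignoring the keeper block and oversize-chunk blocks:

#include <stdio.h>

int
main(void)
{
	unsigned int initBlockSize = 8 * 1024;			/* assumed default */
	unsigned int maxBlockSize = 8 * 1024 * 1024;	/* assumed default */
	unsigned int nextBlockSize = initBlockSize;

	/* malloc() request sizes: 8 kB, 16 kB, ..., 8 MB, then 8 MB from there on */
	for (int i = 1; i <= 12; i++)
	{
		printf("block %2d: %u bytes\n", i, nextBlockSize);
		nextBlockSize <<= 1;
		if (nextBlockSize > maxBlockSize)
			nextBlockSize = maxBlockSize;
	}
	return 0;
}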
103 : :
104 : : #define ALLOC_BLOCKHDRSZ MAXALIGN(sizeof(AllocBlockData))
105 : : #define ALLOC_CHUNKHDRSZ sizeof(MemoryChunk)
106 : :
107 : : typedef struct AllocBlockData *AllocBlock; /* forward reference */
108 : :
109 : : /*
110 : : * AllocPointer
111 : : * Aligned pointer which may be a member of an allocation set.
112 : : */
113 : : typedef void *AllocPointer;
114 : :
115 : : /*
116 : : * AllocFreeListLink
117 : : * When pfreeing memory, if we maintain a freelist for the given chunk's
118 : : * size then we use an AllocFreeListLink to point to the current item in
119 : : * the AllocSetContext's freelist and then set the given freelist element
120 : : * to point to the chunk being freed.
121 : : */
122 : : typedef struct AllocFreeListLink
123 : : {
124 : : MemoryChunk *next;
125 : : } AllocFreeListLink;
126 : :
127 : : /*
128 : : * Obtain an AllocFreeListLink for the given chunk. Allocation sizes are
129 : : * always at least sizeof(AllocFreeListLink), so we reuse the pointer's memory
130 : : * itself to store the freelist link.
131 : : */
132 : : #define GetFreeListLink(chkptr) \
133 : : (AllocFreeListLink *) ((char *) (chkptr) + ALLOC_CHUNKHDRSZ)
134 : :
135 : : /* Validate a freelist index retrieved from a chunk header */
136 : : #define FreeListIdxIsValid(fidx) \
137 : : ((fidx) >= 0 && (fidx) < ALLOCSET_NUM_FREELISTS)
138 : :
139 : : /* Determine the size of the chunk based on the freelist index */
140 : : #define GetChunkSizeFromFreeListIdx(fidx) \
141 : : ((((Size) 1) << ALLOC_MINBITS) << (fidx))
142 : :
143 : : /*
144 : : * AllocSetContext is our standard implementation of MemoryContext.
145 : : *
146 : : * Note: header.isReset means there is nothing for AllocSetReset to do.
147 : : * This is different from the aset being physically empty (empty blocks list)
148 : : * because we will still have a keeper block. It's also different from the set
149 : : * being logically empty, because we don't attempt to detect pfree'ing the
150 : : * last active chunk.
151 : : */
152 : : typedef struct AllocSetContext
153 : : {
154 : : MemoryContextData header; /* Standard memory-context fields */
155 : : /* Info about storage allocated in this context: */
156 : : AllocBlock blocks; /* head of list of blocks in this set */
157 : : MemoryChunk *freelist[ALLOCSET_NUM_FREELISTS]; /* free chunk lists */
158 : : /* Allocation parameters for this context: */
159 : : uint32 initBlockSize; /* initial block size */
160 : : uint32 maxBlockSize; /* maximum block size */
161 : : uint32 nextBlockSize; /* next block size to allocate */
162 : : uint32 allocChunkLimit; /* effective chunk size limit */
163 : : /* freelist this context could be put in, or -1 if not a candidate: */
164 : : int freeListIndex; /* index in context_freelists[], or -1 */
165 : : } AllocSetContext;
166 : :
167 : : typedef AllocSetContext *AllocSet;
168 : :
169 : : /*
170 : : * AllocBlock
171 : : * An AllocBlock is the unit of memory that is obtained by aset.c
172 : : * from malloc(). It contains one or more MemoryChunks, which are
173 : : * the units requested by palloc() and freed by pfree(). MemoryChunks
174 : : * cannot be returned to malloc() individually; instead they are put
175 : : * on freelists by pfree() and re-used by the next palloc() that has
176 : : * a matching request size.
177 : : *
178 : : * AllocBlockData is the header data for a block --- the usable space
179 : : * within the block begins at the next alignment boundary.
180 : : */
181 : : typedef struct AllocBlockData
182 : : {
183 : : AllocSet aset; /* aset that owns this block */
184 : : AllocBlock prev; /* prev block in aset's blocks list, if any */
185 : : AllocBlock next; /* next block in aset's blocks list, if any */
186 : : char *freeptr; /* start of free space in this block */
187 : : char *endptr; /* end of space in this block */
188 : : } AllocBlockData;
189 : :
190 : : /*
191 : : * AllocPointerIsValid
192 : : * True iff pointer is valid allocation pointer.
193 : : */
194 : : #define AllocPointerIsValid(pointer) PointerIsValid(pointer)
195 : :
196 : : /*
197 : : * AllocSetIsValid
198 : : * True iff set is valid allocation set.
199 : : */
200 : : #define AllocSetIsValid(set) \
201 : : (PointerIsValid(set) && IsA(set, AllocSetContext))
202 : :
203 : : /*
204 : : * AllocBlockIsValid
205 : : * True iff block is valid block of allocation set.
206 : : */
207 : : #define AllocBlockIsValid(block) \
208 : : (PointerIsValid(block) && AllocSetIsValid((block)->aset))
209 : :
210 : : /*
211 : : * We always store external chunks on a dedicated block. This makes fetching
212 : : * the block from an external chunk easy since it's always the first and only
213 : : * chunk on the block.
214 : : */
215 : : #define ExternalChunkGetBlock(chunk) \
216 : : (AllocBlock) ((char *) chunk - ALLOC_BLOCKHDRSZ)
217 : :
218 : : /*
219 : : * Rather than repeatedly creating and deleting memory contexts, we keep some
220 : : * freed contexts in freelists so that we can hand them out again with little
221 : : * work. Before putting a context in a freelist, we reset it so that it has
222 : : * only its initial malloc chunk and no others. To be a candidate for a
223 : : * freelist, a context must have the same minContextSize/initBlockSize as
224 : : * other contexts in the list; but its maxBlockSize is irrelevant since that
225 : : * doesn't affect the size of the initial chunk.
226 : : *
227 : : * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
228 : : * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
229 : : * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
230 : : *
231 : : * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
232 : : * hopes of improving locality of reference. But if there get to be too
233 : : * many contexts in the list, we'd prefer to drop the most-recently-created
234 : : * contexts in hopes of keeping the process memory map compact.
235 : : * We approximate that by simply deleting all existing entries when the list
236 : : * overflows, on the assumption that queries that allocate a lot of contexts
237 : : * will probably free them in more or less reverse order of allocation.
238 : : *
239 : : * Contexts in a freelist are chained via their nextchild pointers.
240 : : */
241 : : #define MAX_FREE_CONTEXTS 100 /* arbitrary limit on freelist length */
242 : :
243 : : /* Obtain the keeper block for an allocation set */
244 : : #define KeeperBlock(set) \
245 : : ((AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext))))
246 : :
247 : : /* Check if the block is the keeper block of the given allocation set */
248 : : #define IsKeeperBlock(set, block) ((block) == (KeeperBlock(set)))
249 : :
250 : : typedef struct AllocSetFreeList
251 : : {
252 : : int num_free; /* current list length */
253 : : AllocSetContext *first_free; /* list header */
254 : : } AllocSetFreeList;
255 : :
256 : : /* context_freelists[0] is for default params, [1] for small params */
257 : : static AllocSetFreeList context_freelists[2] =
258 : : {
259 : : {
260 : : 0, NULL
261 : : },
262 : : {
263 : : 0, NULL
264 : : }
265 : : };
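The recycling policy above amounts to a LIFO freelist that is flushed wholesale when it overflows. A schematic sketch under simplified types (the real code chains AllocSetContext headers via their nextchild pointers and resets each context before parking it):

#include <stdlib.h>

#define MAX_FREE_CONTEXTS 100

typedef struct DemoContext
{
	struct DemoContext *nextchild;	/* freelist chaining, as described above */
} DemoContext;

typedef struct DemoFreeList
{
	int			num_free;
	DemoContext *first_free;
} DemoFreeList;

/* On context delete: park the (reset) context for later reuse. */
static void
freelist_push(DemoFreeList *fl, DemoContext *cxt)
{
	if (fl->num_free >= MAX_FREE_CONTEXTS)
	{
		/* overflow: discard everything already in the list */
		while (fl->first_free != NULL)
		{
			DemoContext *old = fl->first_free;

			fl->first_free = old->nextchild;
			fl->num_free--;
			free(old);
		}
	}
	cxt->nextchild = fl->first_free;
	fl->first_free = cxt;
	fl->num_free++;
}

/* On context create: reuse the most recently parked context, if any. */
static DemoContext *
freelist_pop(DemoFreeList *fl)
{
	DemoContext *cxt = fl->first_free;

	if (cxt != NULL)
	{
		fl->first_free = cxt->nextchild;
		fl->num_free--;
	}
	return cxt;
}

int
main(void)
{
	DemoFreeList fl = {0, NULL};
	DemoContext *c = malloc(sizeof(DemoContext));

	freelist_push(&fl, c);		/* parked, LIFO */
	c = freelist_pop(&fl);		/* handed out again */
	free(c);
	return 0;
}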
266 : :
267 : :
268 : : /* ----------
269 : : * AllocSetFreeIndex -
270 : : *
271 : : * Compute, from the size of an allocation, which free-chunk
272 : : * list of the alloc set it belongs to. Caller must have verified
273 : : * that size <= ALLOC_CHUNK_LIMIT.
274 : : * ----------
275 : : */
276 : : static inline int
9199 JanWieck@Yahoo.com 277 :CBC 455005145 : AllocSetFreeIndex(Size size)
278 : : {
279 : : int idx;
280 : :
5381 tgl@sss.pgh.pa.us 281 [ + + ]: 455005145 : if (size > (1 << ALLOC_MINBITS))
282 : : {
283 : : /*----------
284 : : * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
285 : : * This is the same as
286 : : * pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
287 : : * or equivalently
288 : : * pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
289 : : *
290 : : * However, for platforms without intrinsic support, we duplicate the
291 : : * logic here, allowing an additional optimization. It's reasonable
292 : : * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
293 : : * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
294 : : * the last two bytes.
295 : : *
296 : : * Yes, this function is enough of a hot-spot to make it worth this
297 : : * much trouble.
298 : : *----------
299 : : */
300 : : #ifdef HAVE_BITSCAN_REVERSE
431 john.naylor@postgres 301 : 400884879 : idx = pg_leftmost_one_pos32((uint32) size - 1) - ALLOC_MINBITS + 1;
302 : : #else
303 : : uint32 t,
304 : : tsize;
305 : :
306 : : /* Statically assert that we only have a 16-bit input value. */
307 : : StaticAssertDecl(ALLOC_CHUNK_LIMIT < (1 << 16),
308 : : "ALLOC_CHUNK_LIMIT must be less than 64kB");
309 : :
310 : : tsize = size - 1;
311 : : t = tsize >> 8;
312 : : idx = t ? pg_leftmost_one_pos[t] + 8 : pg_leftmost_one_pos[tsize];
313 : : idx -= ALLOC_MINBITS - 1;
314 : : #endif
315 : :
8535 tgl@sss.pgh.pa.us 316 [ - + ]: 400884879 : Assert(idx < ALLOCSET_NUM_FREELISTS);
317 : : }
318 : : else
5381 319 : 54120266 : idx = 0;
320 : :
9199 JanWieck@Yahoo.com 321 : 455005145 : return idx;
322 : : }
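A worked example of the index computation above, with the portable bit scan written out as a loop standing in for pg_leftmost_one_pos32() (illustration only; the helpers below are not the real ones):

#include <stdio.h>
#include <stddef.h>

#define ALLOC_MINBITS 3

static int
leftmost_one_pos32(unsigned int w)	/* caller guarantees w != 0 */
{
	int			pos = 0;

	while (w >>= 1)
		pos++;
	return pos;
}

static int
free_index(size_t size)
{
	if (size > (1 << ALLOC_MINBITS))
		return leftmost_one_pos32((unsigned int) size - 1) - ALLOC_MINBITS + 1;
	return 0;
}

int
main(void)
{
	printf("%d\n", free_index(8));		/* 0: fits an 8-byte chunk */
	printf("%d\n", free_index(100));	/* 4: rounded up to a 128-byte chunk */
	printf("%d\n", free_index(8192));	/* 10: the largest freelist */
	return 0;
}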
323 : :
324 : :
325 : : /*
326 : : * Public routines
327 : : */
328 : :
329 : :
330 : : /*
331 : : * AllocSetContextCreateInternal
332 : : * Create a new AllocSet context.
333 : : *
334 : : * parent: parent context, or NULL if top-level context
335 : : * name: name of context (must be statically allocated)
336 : : * minContextSize: minimum context size
337 : : * initBlockSize: initial allocation block size
338 : : * maxBlockSize: maximum allocation block size
339 : : *
340 : : * Most callers should abstract the context size parameters using a macro
341 : : * such as ALLOCSET_DEFAULT_SIZES.
342 : : *
343 : : * Note: don't call this directly; go through the wrapper macro
344 : : * AllocSetContextCreate.
345 : : */
346 : : MemoryContext
2011 tgl@sss.pgh.pa.us 347 : 6267581 : AllocSetContextCreateInternal(MemoryContext parent,
348 : : const char *name,
349 : : Size minContextSize,
350 : : Size initBlockSize,
351 : : Size maxBlockSize)
352 : : {
353 : : int freeListIndex;
354 : : Size firstBlockSize;
355 : : AllocSet set;
356 : : AllocBlock block;
357 : :
358 : : /* ensure MemoryChunk's size is properly maxaligned */
359 : : StaticAssertDecl(ALLOC_CHUNKHDRSZ == MAXALIGN(ALLOC_CHUNKHDRSZ),
360 : : "sizeof(MemoryChunk) is not maxaligned");
361 : : /* check we have enough space to store the freelist link */
362 : : StaticAssertDecl(sizeof(AllocFreeListLink) <= (1 << ALLOC_MINBITS),
363 : : "sizeof(AllocFreeListLink) larger than minimum allocation size");
364 : :
365 : : /*
366 : : * First, validate allocation parameters. Once these were regular runtime
367 : : * tests and elog's, but in practice Asserts seem sufficient because
368 : : * nobody varies their parameters at runtime. We somewhat arbitrarily
369 : : * enforce a minimum 1K block size. We restrict the maximum block size to
370 : : * MEMORYCHUNK_MAX_BLOCKOFFSET as MemoryChunks are limited to this in
371 : : * regards to addressing the offset between the chunk and the block that
372 : : * the chunk is stored on. We would be unable to store the offset between
373 : : * the chunk and block for any chunks that were beyond
374 : : * MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the block if the block was to be
375 : : * larger than this.
376 : : */
2314 377 [ + - - + ]: 6267581 : Assert(initBlockSize == MAXALIGN(initBlockSize) &&
378 : : initBlockSize >= 1024);
379 [ + - + - : 6267581 : Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
- + ]
380 : : maxBlockSize >= initBlockSize &&
381 : : AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
382 [ + + + - : 6267581 : Assert(minContextSize == 0 ||
+ - - + ]
383 : : (minContextSize == MAXALIGN(minContextSize) &&
384 : : minContextSize >= 1024 &&
385 : : minContextSize <= maxBlockSize));
594 drowley@postgresql.o 386 [ - + ]: 6267581 : Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);
387 : :
388 : : /*
389 : : * Check whether the parameters match either available freelist. We do
390 : : * not need to demand a match of maxBlockSize.
391 : : */
2210 tgl@sss.pgh.pa.us 392 [ + + + + ]: 6267581 : if (minContextSize == ALLOCSET_DEFAULT_MINSIZE &&
393 : : initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
2314 394 : 4245122 : freeListIndex = 0;
2210 395 [ + + + - ]: 2022459 : else if (minContextSize == ALLOCSET_SMALL_MINSIZE &&
396 : : initBlockSize == ALLOCSET_SMALL_INITSIZE)
2314 397 : 2005019 : freeListIndex = 1;
398 : : else
399 : 17440 : freeListIndex = -1;
400 : :
401 : : /*
402 : : * If a suitable freelist entry exists, just recycle that context.
403 : : */
404 [ + + ]: 6267581 : if (freeListIndex >= 0)
405 : : {
406 : 6250141 : AllocSetFreeList *freelist = &context_freelists[freeListIndex];
407 : :
408 [ + + ]: 6250141 : if (freelist->first_free != NULL)
409 : : {
410 : : /* Remove entry from freelist */
411 : 4537186 : set = freelist->first_free;
412 : 4537186 : freelist->first_free = (AllocSet) set->header.nextchild;
413 : 4537186 : freelist->num_free--;
414 : :
415 : : /* Update its maxBlockSize; everything else should be OK */
416 : 4537186 : set->maxBlockSize = maxBlockSize;
417 : :
418 : : /* Reinitialize its header, installing correct name and parent */
419 : 4537186 : MemoryContextCreate((MemoryContext) set,
420 : : T_AllocSetContext,
421 : : MCTX_ASET_ID,
422 : : parent,
423 : : name);
424 : :
1487 jdavis@postgresql.or 425 : 4537186 : ((MemoryContext) set)->mem_allocated =
272 drowley@postgresql.o 426 :GNC 4537186 : KeeperBlock(set)->endptr - ((char *) set);
427 : :
2314 tgl@sss.pgh.pa.us 428 :CBC 4537186 : return (MemoryContext) set;
429 : : }
430 : : }
431 : :
432 : : /* Determine size of initial block */
2210 433 : 1730395 : firstBlockSize = MAXALIGN(sizeof(AllocSetContext)) +
434 : : ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
2314 435 [ + + ]: 1730395 : if (minContextSize != 0)
436 : 17440 : firstBlockSize = Max(firstBlockSize, minContextSize);
437 : : else
438 : 1712955 : firstBlockSize = Max(firstBlockSize, initBlockSize);
439 : :
440 : : /*
441 : : * Allocate the initial block. Unlike other aset.c blocks, it starts with
442 : : * the context header and its block header follows that.
443 : : */
444 : 1730395 : set = (AllocSet) malloc(firstBlockSize);
445 [ - + ]: 1730395 : if (set == NULL)
446 : : {
2314 tgl@sss.pgh.pa.us 447 [ # # ]:UBC 0 : if (TopMemoryContext)
448 : 0 : MemoryContextStats(TopMemoryContext);
449 [ # # ]: 0 : ereport(ERROR,
450 : : (errcode(ERRCODE_OUT_OF_MEMORY),
451 : : errmsg("out of memory"),
452 : : errdetail("Failed while creating memory context \"%s\".",
453 : : name)));
454 : : }
455 : :
456 : : /*
457 : : * Avoid writing code that can fail between here and MemoryContextCreate;
458 : : * we'd leak the header/initial block if we ereport in this stretch.
459 : : */
460 : :
461 : : /* Fill in the initial block's block header */
272 drowley@postgresql.o 462 :GNC 1730395 : block = KeeperBlock(set);
2314 tgl@sss.pgh.pa.us 463 :CBC 1730395 : block->aset = set;
464 : 1730395 : block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
465 : 1730395 : block->endptr = ((char *) set) + firstBlockSize;
466 : 1730395 : block->prev = NULL;
467 : 1730395 : block->next = NULL;
468 : :
469 : : /* Mark unallocated space NOACCESS; leave the block header alone. */
470 : : VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
471 : :
472 : : /* Remember block as part of block list */
473 : 1730395 : set->blocks = block;
474 : :
475 : : /* Finish filling in aset-specific parts of the context header */
476 [ + - + - : 20764740 : MemSetAligned(set->freelist, 0, sizeof(set->freelist));
+ - + + ]
477 : :
272 drowley@postgresql.o 478 :GNC 1730395 : set->initBlockSize = (uint32) initBlockSize;
479 : 1730395 : set->maxBlockSize = (uint32) maxBlockSize;
480 : 1730395 : set->nextBlockSize = (uint32) initBlockSize;
2314 tgl@sss.pgh.pa.us 481 :CBC 1730395 : set->freeListIndex = freeListIndex;
482 : :
483 : : /*
484 : : * Compute the allocation chunk size limit for this context. It can't be
485 : : * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
486 : : * If maxBlockSize is small then requests exceeding the maxBlockSize, or
487 : : * even a significant fraction of it, should be treated as large chunks
488 : : * too. For the typical case of maxBlockSize a power of 2, the chunk size
489 : : * limit will be at most 1/8th maxBlockSize, so that given a stream of
490 : : * requests that are all the maximum chunk size we will waste at most
491 : : * 1/8th of the allocated space.
492 : : *
493 : : * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
494 : : */
495 : : StaticAssertStmt(ALLOC_CHUNK_LIMIT == ALLOCSET_SEPARATE_THRESHOLD,
496 : : "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
497 : :
498 : : /*
499 : : * Determine the maximum size that a chunk can be before we allocate an
500 : : * entire AllocBlock dedicated for that chunk. We set the absolute limit
501 : : * of that size as ALLOC_CHUNK_LIMIT but we reduce it further so that we
502 : : * can fit about ALLOC_CHUNK_FRACTION chunks this size on a maximally
503 : : * sized block. (We opt to keep allocChunkLimit a power-of-2 value
504 : : * primarily for legacy reasons rather than calculating it so that exactly
505 : : * ALLOC_CHUNK_FRACTION chunks fit on a maximally sized block.)
506 : : */
3340 jdavis@postgresql.or 507 : 1730395 : set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
508 : 1730395 : while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
4731 tgl@sss.pgh.pa.us 509 [ + + ]: 5684271 : (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
3340 jdavis@postgresql.or 510 : 3953876 : set->allocChunkLimit >>= 1;
511 : :
512 : : /* Finally, do the type-independent part of context creation */
2314 tgl@sss.pgh.pa.us 513 : 1730395 : MemoryContextCreate((MemoryContext) set,
514 : : T_AllocSetContext,
515 : : MCTX_ASET_ID,
516 : : parent,
517 : : name);
518 : :
1487 jdavis@postgresql.or 519 : 1730395 : ((MemoryContext) set)->mem_allocated = firstBlockSize;
520 : :
3340 521 : 1730395 : return (MemoryContext) set;
522 : : }
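To make the halving loop above concrete, a standalone sketch of the allocChunkLimit computation; the 40-byte block header and 8-byte chunk header are assumptions for a typical 64-bit non-assert build, so the exact figures are build-dependent:

#include <stdio.h>
#include <stddef.h>

#define ALLOC_CHUNK_LIMIT 8192
#define ALLOC_CHUNK_FRACTION 4

static unsigned int
chunk_limit(size_t maxBlockSize, size_t blockHdrSz, size_t chunkHdrSz)
{
	unsigned int limit = ALLOC_CHUNK_LIMIT;

	/* same halving rule as the while loop in AllocSetContextCreateInternal */
	while ((size_t) (limit + chunkHdrSz) >
		   (maxBlockSize - blockHdrSz) / ALLOC_CHUNK_FRACTION)
		limit >>= 1;
	return limit;
}

int
main(void)
{
	printf("%u\n", chunk_limit(8 * 1024 * 1024, 40, 8));	/* 8192: default contexts */
	printf("%u\n", chunk_limit(8 * 1024, 40, 8));			/* 1024: small contexts */
	return 0;
}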
523 : :
524 : : /*
525 : : * AllocSetReset
526 : : * Frees all memory which is allocated in the given set.
527 : : *
528 : : * Actually, this routine has some discretion about what to do.
529 : : * It should mark all allocated chunks freed, but it need not necessarily
530 : : * give back all the resources the set owns. Our actual implementation is
531 : : * that we give back all but the "keeper" block (which we must keep, since
532 : : * it shares a malloc chunk with the context header). In this way, we don't
533 : : * thrash malloc() when a context is repeatedly reset after small allocations,
534 : : * which is typical behavior for per-tuple contexts.
535 : : */
536 : : void
8691 tgl@sss.pgh.pa.us 537 : 21081925 : AllocSetReset(MemoryContext context)
538 : : {
539 : 21081925 : AllocSet set = (AllocSet) context;
540 : : AllocBlock block;
541 : : Size keepersize PG_USED_FOR_ASSERTS_ONLY;
542 : :
534 peter@eisentraut.org 543 [ + - - + ]: 21081925 : Assert(AllocSetIsValid(set));
544 : :
545 : : #ifdef MEMORY_CONTEXT_CHECKING
546 : : /* Check for corruption and leaks before freeing */
8535 tgl@sss.pgh.pa.us 547 : 21081925 : AllocSetCheck(context);
548 : : #endif
549 : :
550 : : /* Remember keeper block size for Assert below */
272 drowley@postgresql.o 551 :GNC 21081925 : keepersize = KeeperBlock(set)->endptr - ((char *) set);
552 : :
553 : : /* Clear chunk freelists */
6910 tgl@sss.pgh.pa.us 554 [ + - + - :CBC 252983100 : MemSetAligned(set->freelist, 0, sizeof(set->freelist));
+ - + + ]
555 : :
6800 556 : 21081925 : block = set->blocks;
557 : :
558 : : /* New blocks list will be just the keeper block */
272 drowley@postgresql.o 559 :GNC 21081925 : set->blocks = KeeperBlock(set);
560 : :
6800 tgl@sss.pgh.pa.us 561 [ + + ]:CBC 44618336 : while (block != NULL)
562 : : {
8691 563 : 23536411 : AllocBlock next = block->next;
564 : :
272 drowley@postgresql.o 565 [ + + ]:GNC 23536411 : if (IsKeeperBlock(set, block))
566 : : {
567 : : /* Reset the block, but don't return it to malloc */
8424 bruce@momjian.us 568 :CBC 21081925 : char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
569 : :
570 : : #ifdef CLOBBER_FREED_MEMORY
3945 noah@leadboat.com 571 : 21081925 : wipe_mem(datastart, block->freeptr - datastart);
572 : : #else
573 : : /* wipe_mem() would have done this */
574 : : VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
575 : : #endif
8677 tgl@sss.pgh.pa.us 576 : 21081925 : block->freeptr = datastart;
2594 577 : 21081925 : block->prev = NULL;
8677 578 : 21081925 : block->next = NULL;
579 : : }
580 : : else
581 : : {
582 : : /* Normal case, release the block */
1431 583 : 2454486 : context->mem_allocated -= block->endptr - ((char *) block);
584 : :
585 : : #ifdef CLOBBER_FREED_MEMORY
3945 noah@leadboat.com 586 : 2454486 : wipe_mem(block, block->freeptr - ((char *) block));
587 : : #endif
8691 tgl@sss.pgh.pa.us 588 : 2454486 : free(block);
589 : : }
9199 JanWieck@Yahoo.com 590 : 23536411 : block = next;
591 : : }
592 : :
1487 jdavis@postgresql.or 593 [ - + ]: 21081925 : Assert(context->mem_allocated == keepersize);
594 : :
595 : : /* Reset block size allocation sequence, too */
6367 tgl@sss.pgh.pa.us 596 : 21081925 : set->nextBlockSize = set->initBlockSize;
10141 scrappy@hub.org 597 : 21081925 : }
598 : :
599 : : /*
600 : : * AllocSetDelete
601 : : * Frees all memory which is allocated in the given set,
602 : : * in preparation for deletion of the set.
603 : : *
604 : : * Unlike AllocSetReset, this *must* free all resources of the set.
605 : : */
606 : : void
8691 tgl@sss.pgh.pa.us 607 : 4787923 : AllocSetDelete(MemoryContext context)
608 : : {
609 : 4787923 : AllocSet set = (AllocSet) context;
610 : 4787923 : AllocBlock block = set->blocks;
611 : : Size keepersize PG_USED_FOR_ASSERTS_ONLY;
612 : :
534 peter@eisentraut.org 613 [ + - - + ]: 4787923 : Assert(AllocSetIsValid(set));
614 : :
615 : : #ifdef MEMORY_CONTEXT_CHECKING
616 : : /* Check for corruption and leaks before freeing */
8535 tgl@sss.pgh.pa.us 617 : 4787923 : AllocSetCheck(context);
618 : : #endif
619 : :
620 : : /* Remember keeper block size for Assert below */
272 drowley@postgresql.o 621 :GNC 4787923 : keepersize = KeeperBlock(set)->endptr - ((char *) set);
622 : :
623 : : /*
624 : : * If the context is a candidate for a freelist, put it into that freelist
625 : : * instead of destroying it.
626 : : */
2314 tgl@sss.pgh.pa.us 627 [ + - ]:CBC 4787923 : if (set->freeListIndex >= 0)
628 : : {
629 : 4787923 : AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];
630 : :
631 : : /*
632 : : * Reset the context, if it needs it, so that we aren't hanging on to
633 : : * more than the initial malloc chunk.
634 : : */
635 [ + + ]: 4787923 : if (!context->isReset)
636 : 3078163 : MemoryContextResetOnly(context);
637 : :
638 : : /*
639 : : * If the freelist is full, just discard what's already in it. See
640 : : * comments with context_freelists[].
641 : : */
642 [ + + ]: 4787923 : if (freelist->num_free >= MAX_FREE_CONTEXTS)
643 : : {
644 [ + + ]: 29997 : while (freelist->first_free != NULL)
645 : : {
646 : 29700 : AllocSetContext *oldset = freelist->first_free;
647 : :
648 : 29700 : freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
649 : 29700 : freelist->num_free--;
650 : :
651 : : /* All that remains is to free the header/initial block */
652 : 29700 : free(oldset);
653 : : }
654 [ - + ]: 297 : Assert(freelist->num_free == 0);
655 : : }
656 : :
657 : : /* Now add the just-deleted context to the freelist. */
658 : 4787923 : set->header.nextchild = (MemoryContext) freelist->first_free;
659 : 4787923 : freelist->first_free = set;
660 : 4787923 : freelist->num_free++;
661 : :
662 : 4787923 : return;
663 : : }
664 : :
665 : : /* Free all blocks, except the keeper which is part of context header */
8691 tgl@sss.pgh.pa.us 666 [ # # ]:UBC 0 : while (block != NULL)
667 : : {
668 : 0 : AllocBlock next = block->next;
669 : :
272 drowley@postgresql.o 670 [ # # ]:UNC 0 : if (!IsKeeperBlock(set, block))
1487 jdavis@postgresql.or 671 :UBC 0 : context->mem_allocated -= block->endptr - ((char *) block);
672 : :
673 : : #ifdef CLOBBER_FREED_MEMORY
3945 noah@leadboat.com 674 : 0 : wipe_mem(block, block->freeptr - ((char *) block));
675 : : #endif
676 : :
272 drowley@postgresql.o 677 [ # # ]:UNC 0 : if (!IsKeeperBlock(set, block))
2314 tgl@sss.pgh.pa.us 678 :UBC 0 : free(block);
679 : :
8691 680 : 0 : block = next;
681 : : }
682 : :
1487 jdavis@postgresql.or 683 [ # # ]: 0 : Assert(context->mem_allocated == keepersize);
684 : :
685 : : /* Finally, free the context header, including the keeper block */
2314 tgl@sss.pgh.pa.us 686 : 0 : free(set);
687 : : }
688 : :
689 : : /*
690 : : * Helper for AllocSetAlloc() that allocates an entire block for the chunk.
691 : : *
692 : : * AllocSetAlloc()'s comment explains why this is separate.
693 : : */
694 : : pg_noinline
695 : : static void *
46 drowley@postgresql.o 696 :GNC 7666648 : AllocSetAllocLarge(MemoryContext context, Size size, int flags)
697 : : {
8691 tgl@sss.pgh.pa.us 698 : 7666648 : AllocSet set = (AllocSet) context;
699 : : AllocBlock block;
700 : : MemoryChunk *chunk;
701 : : Size chunk_size;
702 : : Size blksize;
703 : :
704 : : /* validate 'size' is within the limits for the given 'flags' */
46 drowley@postgresql.o 705 : 7666648 : MemoryContextCheckSize(context, size, flags);
706 : :
707 : : #ifdef MEMORY_CONTEXT_CHECKING
708 : : /* ensure there's always space for the sentinel byte */
709 : 7666648 : chunk_size = MAXALIGN(size + 1);
710 : : #else
711 : : chunk_size = MAXALIGN(size);
712 : : #endif
713 : :
714 : 7666648 : blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
715 : 7666648 : block = (AllocBlock) malloc(blksize);
716 [ - + ]: 7666648 : if (block == NULL)
46 drowley@postgresql.o 717 :UNC 0 : return MemoryContextAllocationFailure(context, size, flags);
718 : :
46 drowley@postgresql.o 719 :GNC 7666648 : context->mem_allocated += blksize;
720 : :
721 : 7666648 : block->aset = set;
722 : 7666648 : block->freeptr = block->endptr = ((char *) block) + blksize;
723 : :
724 : 7666648 : chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
725 : :
726 : : /* mark the MemoryChunk as externally managed */
727 : 7666648 : MemoryChunkSetHdrMaskExternal(chunk, MCTX_ASET_ID);
728 : :
729 : : #ifdef MEMORY_CONTEXT_CHECKING
730 : 7666648 : chunk->requested_size = size;
731 : : /* set mark to catch clobber of "unused" space */
732 [ - + ]: 7666648 : Assert(size < chunk_size);
733 : 7666648 : set_sentinel(MemoryChunkGetPointer(chunk), size);
734 : : #endif
735 : : #ifdef RANDOMIZE_ALLOCATED_MEMORY
736 : : /* fill the allocated space with junk */
737 : : randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
738 : : #endif
739 : :
740 : : /*
741 : : * Stick the new block underneath the active allocation block, if any, so
742 : : * that we don't lose the use of the space remaining therein.
743 : : */
744 [ + - ]: 7666648 : if (set->blocks != NULL)
745 : : {
746 : 7666648 : block->prev = set->blocks;
747 : 7666648 : block->next = set->blocks->next;
748 [ + + ]: 7666648 : if (block->next)
749 : 5326918 : block->next->prev = block;
750 : 7666648 : set->blocks->next = block;
751 : : }
752 : : else
753 : : {
46 drowley@postgresql.o 754 :UNC 0 : block->prev = NULL;
755 : 0 : block->next = NULL;
756 : 0 : set->blocks = block;
757 : : }
758 : :
759 : : /* Ensure any padding bytes are marked NOACCESS. */
760 : : VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
761 : : chunk_size - size);
762 : :
763 : : /* Disallow access to the chunk header. */
764 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
765 : :
46 drowley@postgresql.o 766 :GNC 7666648 : return MemoryChunkGetPointer(chunk);
767 : : }
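For a sense of scale in the oversize path above, the dedicated block is sized to the single chunk plus the two headers; the header sizes in this sketch are assumptions for a 64-bit non-assert build:

#include <stdio.h>
#include <stddef.h>

#define MAXALIGN(x)		(((x) + 7) & ~((size_t) 7))	/* assumes 8-byte MAXIMUM_ALIGNOF */
#define BLOCKHDRSZ		40							/* assumed MAXALIGN(sizeof(AllocBlockData)) */
#define CHUNKHDRSZ		8							/* assumed sizeof(MemoryChunk) */

int
main(void)
{
	size_t		size = 100000;		/* an oversize request (> allocChunkLimit) */
	size_t		chunk_size = MAXALIGN(size);
	size_t		blksize = chunk_size + BLOCKHDRSZ + CHUNKHDRSZ;

	/* one malloc() serves this chunk; it is free()'d in full when pfree'd */
	printf("malloc(%zu) serves the %zu-byte request\n", blksize, size);
	return 0;
}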
768 : :
769 : : /*
770 : : * Small helper for allocating a new chunk from a block, to avoid duplicating
771 : : * the code between AllocSetAlloc() and AllocSetAllocFromNewBlock().
772 : : */
773 : : static inline void *
774 : 281726958 : AllocSetAllocChunkFromBlock(MemoryContext context, AllocBlock block,
775 : : Size size, Size chunk_size, int fidx)
776 : : {
777 : : MemoryChunk *chunk;
778 : :
779 : 281726958 : chunk = (MemoryChunk *) (block->freeptr);
780 : :
781 : : /* Prepare to initialize the chunk header. */
782 : : VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
783 : :
784 : 281726958 : block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
785 [ - + ]: 281726958 : Assert(block->freeptr <= block->endptr);
786 : :
787 : : /* store the free list index in the value field */
788 : 281726958 : MemoryChunkSetHdrMask(chunk, block, fidx, MCTX_ASET_ID);
789 : :
790 : : #ifdef MEMORY_CONTEXT_CHECKING
791 : 281726957 : chunk->requested_size = size;
792 : : /* set mark to catch clobber of "unused" space */
793 [ + + ]: 281726957 : if (size < chunk_size)
585 794 : 189744874 : set_sentinel(MemoryChunkGetPointer(chunk), size);
795 : : #endif
796 : : #ifdef RANDOMIZE_ALLOCATED_MEMORY
797 : : /* fill the allocated space with junk */
798 : : randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
799 : : #endif
800 : :
801 : : /* Ensure any padding bytes are marked NOACCESS. */
802 : : VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
803 : : chunk_size - size);
804 : :
805 : : /* Disallow access to the chunk header. */
806 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
807 : :
46 808 : 281726957 : return MemoryChunkGetPointer(chunk);
809 : : }
810 : :
811 : : /*
812 : : * Helper for AllocSetAlloc() that allocates a new block and returns a chunk
813 : : * allocated from it.
814 : : *
815 : : * AllocSetAlloc()'s comment explains why this is separate.
816 : : */
817 : : pg_noinline
818 : : static void *
819 : 2969475 : AllocSetAllocFromNewBlock(MemoryContext context, Size size, int flags,
820 : : int fidx)
821 : : {
822 : 2969475 : AllocSet set = (AllocSet) context;
823 : : AllocBlock block;
824 : : Size availspace;
825 : : Size blksize;
826 : : Size required_size;
827 : : Size chunk_size;
828 : :
829 : : /* due to the keeper block set->blocks should always be valid */
830 [ - + ]: 2969475 : Assert(set->blocks != NULL);
831 : 2969475 : block = set->blocks;
832 : 2969475 : availspace = block->endptr - block->freeptr;
833 : :
834 : : /*
835 : : * The existing active (top) block does not have enough room for the
836 : : * requested allocation, but it might still have a useful amount of space
837 : : * in it. Once we push it down in the block list, we'll never try to
838 : : * allocate more space from it. So, before we do that, carve up its free
839 : : * space into chunks that we can put on the set's freelists.
840 : : *
841 : : * Because we can only get here when there's less than ALLOC_CHUNK_LIMIT
842 : : * left in the block, this loop cannot iterate more than
843 : : * ALLOCSET_NUM_FREELISTS-1 times.
844 : : */
845 [ + + ]: 8836603 : while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
846 : : {
847 : : AllocFreeListLink *link;
848 : : MemoryChunk *chunk;
849 : 5867128 : Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
850 : 5867128 : int a_fidx = AllocSetFreeIndex(availchunk);
851 : :
852 : : /*
853 : : * In most cases, we'll get back the index of the next larger freelist
854 : : * than the one we need to put this chunk on. The exception is when
855 : : * availchunk is exactly a power of 2.
856 : : */
857 [ + + ]: 5867128 : if (availchunk != GetChunkSizeFromFreeListIdx(a_fidx))
858 : : {
859 : 4393205 : a_fidx--;
860 [ - + ]: 4393205 : Assert(a_fidx >= 0);
861 : 4393205 : availchunk = GetChunkSizeFromFreeListIdx(a_fidx);
862 : : }
863 : :
864 : 5867128 : chunk = (MemoryChunk *) (block->freeptr);
865 : :
866 : : /* Prepare to initialize the chunk header. */
867 : : VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
868 : 5867128 : block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
869 : 5867128 : availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
870 : :
871 : : /* store the freelist index in the value field */
872 : 5867128 : MemoryChunkSetHdrMask(chunk, block, a_fidx, MCTX_ASET_ID);
873 : : #ifdef MEMORY_CONTEXT_CHECKING
874 : 5867128 : chunk->requested_size = InvalidAllocSize; /* mark it free */
875 : : #endif
876 : : /* push this chunk onto the free list */
877 : 5867128 : link = GetFreeListLink(chunk);
878 : :
879 : : VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
880 : 5867128 : link->next = set->freelist[a_fidx];
881 : : VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
882 : :
883 : 5867128 : set->freelist[a_fidx] = chunk;
884 : : }
885 : :
886 : : /*
887 : : * The first such block has size initBlockSize, and we double the space in
888 : : * each succeeding block, but not more than maxBlockSize.
889 : : */
890 : 2969475 : blksize = set->nextBlockSize;
891 : 2969475 : set->nextBlockSize <<= 1;
892 [ + + ]: 2969475 : if (set->nextBlockSize > set->maxBlockSize)
893 : 271172 : set->nextBlockSize = set->maxBlockSize;
894 : :
895 : : /* Choose the actual chunk size to allocate */
896 : 2969475 : chunk_size = GetChunkSizeFromFreeListIdx(fidx);
897 [ - + ]: 2969475 : Assert(chunk_size >= size);
898 : :
899 : : /*
900 : : * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
901 : : * space... but try to keep it a power of 2.
902 : : */
903 : 2969475 : required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
904 [ + + ]: 3405112 : while (blksize < required_size)
905 : 435637 : blksize <<= 1;
906 : :
907 : : /* Try to allocate it */
908 : 2969475 : block = (AllocBlock) malloc(blksize);
909 : :
910 : : /*
911 : : * We could be asking for pretty big blocks here, so cope if malloc fails.
912 : : * But give up if there's less than 1 MB or so available...
913 : : */
914 [ - + - - ]: 2969475 : while (block == NULL && blksize > 1024 * 1024)
915 : : {
46 drowley@postgresql.o 916 :UNC 0 : blksize >>= 1;
917 [ # # ]: 0 : if (blksize < required_size)
918 : 0 : break;
919 : 0 : block = (AllocBlock) malloc(blksize);
920 : : }
921 : :
46 drowley@postgresql.o 922 [ - + ]:GNC 2969475 : if (block == NULL)
46 drowley@postgresql.o 923 :UNC 0 : return MemoryContextAllocationFailure(context, size, flags);
924 : :
46 drowley@postgresql.o 925 :GNC 2969475 : context->mem_allocated += blksize;
926 : :
927 : 2969475 : block->aset = set;
928 : 2969475 : block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
929 : 2969475 : block->endptr = ((char *) block) + blksize;
930 : :
931 : : /* Mark unallocated space NOACCESS. */
932 : : VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
933 : : blksize - ALLOC_BLOCKHDRSZ);
934 : :
935 : 2969475 : block->prev = NULL;
936 : 2969475 : block->next = set->blocks;
937 [ + - ]: 2969475 : if (block->next)
938 : 2969475 : block->next->prev = block;
939 : 2969475 : set->blocks = block;
940 : :
941 : 2969475 : return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
942 : : }
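A worked example of the carve-up loop above: with an (assumed) 8-byte chunk header, a 700-byte leftover in the retired block is split into 512-, 128-, and 32-byte free chunks, with 4 bytes left unusable. Sketch only; free_index() mirrors AllocSetFreeIndex() rather than calling it:

#include <stdio.h>

#define ALLOC_MINBITS 3
#define CHUNKHDRSZ 8			/* assumed chunk header size */

static int
free_index(unsigned int size)	/* same rounding rule as AllocSetFreeIndex() */
{
	int			idx = 0;

	if (size > (1 << ALLOC_MINBITS))
	{
		unsigned int w = size - 1;

		while (w >>= 1)
			idx++;
		idx = idx - ALLOC_MINBITS + 1;
	}
	return idx;
}

int
main(void)
{
	unsigned int availspace = 700;	/* space left in the block being retired */

	while (availspace >= (1 << ALLOC_MINBITS) + CHUNKHDRSZ)
	{
		unsigned int availchunk = availspace - CHUNKHDRSZ;
		int			fidx = free_index(availchunk);

		/* unless availchunk is an exact power of 2, round down to one that fits */
		if (availchunk != (1u << (fidx + ALLOC_MINBITS)))
			availchunk = 1u << (--fidx + ALLOC_MINBITS);

		printf("carve a %u-byte chunk onto freelist %d\n", availchunk, fidx);
		availspace -= availchunk + CHUNKHDRSZ;
	}
	printf("%u bytes unusable\n", availspace);	/* 4 */
	return 0;
}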
943 : :
944 : : /*
945 : : * AllocSetAlloc
946 : : * Returns a pointer to allocated memory of given size or raises an ERROR
947 : : * on allocation failure, or returns NULL when flags contains
948 : : * MCXT_ALLOC_NO_OOM.
949 : : *
950 : : * No request may exceed:
951 : : * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
952 : : * All callers use a much-lower limit.
953 : : *
954 : : * Note: when using valgrind, it doesn't matter how the returned allocation
955 : : * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
956 : : * return space that is marked NOACCESS - AllocSetRealloc has to beware!
957 : : *
958 : : * This function should only contain the most common code paths. Everything
959 : : * else should be in pg_noinline helper functions, thus avoiding the overhead
960 : : * of creating a stack frame for the common cases. Allocating memory is often
961 : : * a bottleneck in many workloads, so avoiding stack frame setup is
962 : : * worthwhile. Helper functions should always directly return the newly
963 : : * allocated memory so that we can just return that address directly as a tail
964 : : * call.
965 : : */
966 : : void *
967 : 456804665 : AllocSetAlloc(MemoryContext context, Size size, int flags)
968 : : {
46 drowley@postgresql.o 969 :CBC 456804665 : AllocSet set = (AllocSet) context;
970 : : AllocBlock block;
971 : : MemoryChunk *chunk;
972 : : int fidx;
973 : : Size chunk_size;
974 : : Size availspace;
975 : :
976 [ + - - + ]: 456804665 : Assert(AllocSetIsValid(set));
977 : :
978 : : /* due to the keeper block set->blocks should never be NULL */
46 drowley@postgresql.o 979 [ - + ]:GNC 456804665 : Assert(set->blocks != NULL);
980 : :
981 : : /*
982 : : * If requested size exceeds maximum for chunks we hand the the request
983 : : * off to AllocSetAllocLarge().
984 : : */
46 drowley@postgresql.o 985 [ + + ]:CBC 456804665 : if (size > set->allocChunkLimit)
46 drowley@postgresql.o 986 :GNC 7666648 : return AllocSetAllocLarge(context, size, flags);
987 : :
988 : : /*
989 : : * Request is small enough to be treated as a chunk. Look in the
990 : : * corresponding free list to see if there is a free chunk we could reuse.
991 : : * If one is found, remove it from the free list, make it again a member
992 : : * of the alloc set and return its data address.
993 : : *
994 : : * Note that we don't attempt to ensure there's space for the sentinel
995 : : * byte here. We expect a large proportion of allocations to be for sizes
996 : : * which are already a power of 2. If we were to always make space for a
997 : : * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
998 : : * doubling the memory requirements for such allocations.
999 : : */
6194 tgl@sss.pgh.pa.us 1000 :CBC 449138017 : fidx = AllocSetFreeIndex(size);
1001 : 449138017 : chunk = set->freelist[fidx];
8535 1002 [ + + ]: 449138017 : if (chunk != NULL)
1003 : : {
594 drowley@postgresql.o 1004 : 167411059 : AllocFreeListLink *link = GetFreeListLink(chunk);
1005 : :
1006 : : /* Allow access to the chunk header. */
1007 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1008 : :
1009 [ - + ]: 167411059 : Assert(fidx == MemoryChunkGetValue(chunk));
1010 : :
1011 : : /* pop this chunk off the freelist */
1012 : : VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
1013 : 167411059 : set->freelist[fidx] = link->next;
1014 : : VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
1015 : :
1016 : : #ifdef MEMORY_CONTEXT_CHECKING
8535 tgl@sss.pgh.pa.us 1017 : 167411059 : chunk->requested_size = size;
1018 : : /* set mark to catch clobber of "unused" space */
594 drowley@postgresql.o 1019 [ + + ]: 167411059 : if (size < GetChunkSizeFromFreeListIdx(fidx))
1020 : 99396091 : set_sentinel(MemoryChunkGetPointer(chunk), size);
1021 : : #endif
1022 : : #ifdef RANDOMIZE_ALLOCATED_MEMORY
1023 : : /* fill the allocated space with junk */
1024 : : randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
1025 : : #endif
1026 : :
1027 : : /* Ensure any padding bytes are marked NOACCESS. */
1028 : : VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
1029 : : GetChunkSizeFromFreeListIdx(fidx) - size);
1030 : :
1031 : : /* Disallow access to the chunk header. */
1032 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1033 : :
1034 : 167411059 : return MemoryChunkGetPointer(chunk);
1035 : : }
1036 : :
1037 : : /*
1038 : : * Choose the actual chunk size to allocate.
1039 : : */
1040 : 281726958 : chunk_size = GetChunkSizeFromFreeListIdx(fidx);
9094 tgl@sss.pgh.pa.us 1041 [ - + ]: 281726958 : Assert(chunk_size >= size);
1042 : :
46 drowley@postgresql.o 1043 :GNC 281726958 : block = set->blocks;
1044 : 281726958 : availspace = block->endptr - block->freeptr;
1045 : :
1046 : : /*
1047 : : * If there is enough room in the active allocation block, we will put the
1048 : : * chunk into that block. Else must start a new one.
1049 : : */
1050 [ + + ]: 281726958 : if (unlikely(availspace < (chunk_size + ALLOC_CHUNKHDRSZ)))
1051 : 2969475 : return AllocSetAllocFromNewBlock(context, size, flags, fidx);
1052 : :
1053 : : /* There's enough space on the current block, so allocate from that */
1054 : 278757483 : return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
1055 : : }
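For orientation, a hedged sketch of how backend code typically reaches these paths through the public MemoryContext API (backend-only code; the function below is hypothetical):

#include "postgres.h"
#include "utils/memutils.h"

static void
example_usage(void)
{
	MemoryContext cxt;
	MemoryContext oldcxt;
	char	   *buf;

	cxt = AllocSetContextCreate(CurrentMemoryContext,
								"example context",
								ALLOCSET_DEFAULT_SIZES);
	oldcxt = MemoryContextSwitchTo(cxt);

	buf = palloc(100);			/* rounded up to a 128-byte chunk */
	buf = repalloc(buf, 200);	/* re-allocated as a 256-byte chunk */
	pfree(buf);					/* chunk parked on the context's freelist */

	buf = palloc(64 * 1024);	/* above allocChunkLimit: dedicated block */
	pfree(buf);					/* that block goes straight back to free() */

	MemoryContextSwitchTo(oldcxt);
	MemoryContextDelete(cxt);	/* releases everything at once */
}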
1056 : :
1057 : : /*
1058 : : * AllocSetFree
1059 : : * Frees allocated memory; memory is removed from the set.
1060 : : */
1061 : : void
594 drowley@postgresql.o 1062 :CBC 201418256 : AllocSetFree(void *pointer)
1063 : : {
1064 : : AllocSet set;
1065 : 201418256 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1066 : :
1067 : : /* Allow access to the chunk header. */
1068 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1069 : :
1070 [ + + ]: 201418256 : if (MemoryChunkIsExternal(chunk))
1071 : : {
1072 : : /* Release single-chunk block. */
1073 : 7093552 : AllocBlock block = ExternalChunkGetBlock(chunk);
1074 : :
1075 : : /*
1076 : : * Try to verify that we have a sane block pointer: the block header
1077 : : * should reference an aset and the freeptr should match the endptr.
1078 : : */
552 tgl@sss.pgh.pa.us 1079 [ + - + - : 7093552 : if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
+ - - + ]
552 tgl@sss.pgh.pa.us 1080 [ # # ]:UBC 0 : elog(ERROR, "could not find block containing chunk %p", chunk);
1081 : :
594 drowley@postgresql.o 1082 :CBC 7093552 : set = block->aset;
1083 : :
1084 : : #ifdef MEMORY_CONTEXT_CHECKING
1085 : : {
1086 : : /* Test for someone scribbling on unused space in chunk */
465 1087 [ - + ]: 7093552 : Assert(chunk->requested_size < (block->endptr - (char *) pointer));
585 1088 [ - + ]: 7093552 : if (!sentinel_ok(pointer, chunk->requested_size))
585 drowley@postgresql.o 1089 [ # # ]:UBC 0 : elog(WARNING, "detected write past chunk end in %s %p",
1090 : : set->header.name, chunk);
1091 : : }
1092 : : #endif
1093 : :
1094 : : /* OK, remove block from aset's list and free it */
2594 tgl@sss.pgh.pa.us 1095 [ + - ]:CBC 7093552 : if (block->prev)
1096 : 7093552 : block->prev->next = block->next;
1097 : : else
2594 tgl@sss.pgh.pa.us 1098 :UBC 0 : set->blocks = block->next;
2594 tgl@sss.pgh.pa.us 1099 [ + + ]:CBC 7093552 : if (block->next)
1100 : 4928801 : block->next->prev = block->prev;
1101 : :
594 drowley@postgresql.o 1102 : 7093552 : set->header.mem_allocated -= block->endptr - ((char *) block);
1103 : :
1104 : : #ifdef CLOBBER_FREED_MEMORY
3945 noah@leadboat.com 1105 : 7093552 : wipe_mem(block, block->freeptr - ((char *) block));
1106 : : #endif
9000 tgl@sss.pgh.pa.us 1107 : 7093552 : free(block);
1108 : : }
1109 : : else
1110 : : {
594 drowley@postgresql.o 1111 : 194324704 : AllocBlock block = MemoryChunkGetBlock(chunk);
1112 : : int fidx;
1113 : : AllocFreeListLink *link;
1114 : :
1115 : : /*
1116 : : * In this path, for speed reasons we just Assert that the referenced
1117 : : * block is good. We can also Assert that the value field is sane.
1118 : : * Future field experience may show that these Asserts had better
1119 : : * become regular runtime test-and-elog checks.
1120 : : */
534 peter@eisentraut.org 1121 [ + - + - : 194324704 : Assert(AllocBlockIsValid(block));
- + ]
594 drowley@postgresql.o 1122 : 194324704 : set = block->aset;
1123 : :
552 tgl@sss.pgh.pa.us 1124 : 194324704 : fidx = MemoryChunkGetValue(chunk);
1125 [ + - - + ]: 194324704 : Assert(FreeListIdxIsValid(fidx));
1126 : 194324704 : link = GetFreeListLink(chunk);
1127 : :
1128 : : #ifdef MEMORY_CONTEXT_CHECKING
1129 : : /* Test for someone scribbling on unused space in chunk */
594 drowley@postgresql.o 1130 [ + + ]: 194324704 : if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
1131 [ - + ]: 120538321 : if (!sentinel_ok(pointer, chunk->requested_size))
594 drowley@postgresql.o 1132 [ # # ]:UBC 0 : elog(WARNING, "detected write past chunk end in %s %p",
1133 : : set->header.name, chunk);
1134 : : #endif
1135 : :
1136 : : #ifdef CLOBBER_FREED_MEMORY
594 drowley@postgresql.o 1137 :CBC 194324704 : wipe_mem(pointer, GetChunkSizeFromFreeListIdx(fidx));
1138 : : #endif
1139 : : /* push this chunk onto the top of the free list */
1140 : : VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
1141 : 194324704 : link->next = set->freelist[fidx];
1142 : : VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
1143 : 194324704 : set->freelist[fidx] = chunk;
1144 : :
1145 : : #ifdef MEMORY_CONTEXT_CHECKING
1146 : :
1147 : : /*
1148 : : * Reset requested_size to InvalidAllocSize in chunks that are on free
1149 : : * list.
1150 : : */
1151 : 194324704 : chunk->requested_size = InvalidAllocSize;
1152 : : #endif
1153 : : }
10141 scrappy@hub.org 1154 : 201418256 : }
1155 : :
1156 : : /*
1157 : : * AllocSetRealloc
1158 : : * Returns new pointer to allocated memory of given size or NULL if
1159 : : * request could not be completed; this memory is added to the set.
1160 : : * Memory associated with given pointer is copied into the new memory,
1161 : : * and the old memory is freed.
1162 : : *
1163 : : * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
1164 : : * makes our Valgrind client requests less-precise, hazarding false negatives.
1165 : : * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
1166 : : * request size.)
1167 : : */
1168 : : void *
47 drowley@postgresql.o 1169 :GNC 2415445 : AllocSetRealloc(void *pointer, Size size, int flags)
1170 : : {
1171 : : AllocBlock block;
1172 : : AllocSet set;
594 drowley@postgresql.o 1173 :CBC 2415445 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1174 : : Size oldchksize;
1175 : : int fidx;
1176 : :
1177 : : /* Allow access to the chunk header. */
1178 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1179 : :
1180 [ + + ]: 2415445 : if (MemoryChunkIsExternal(chunk))
1181 : : {
1182 : : /*
1183 : : * The chunk must have been allocated as a single-chunk block. Use
1184 : : * realloc() to make the containing block bigger, or smaller, with
1185 : : * minimum space wastage.
1186 : : */
1187 : : Size chksize;
1188 : : Size blksize;
1189 : : Size oldblksize;
1190 : :
1191 : 33057 : block = ExternalChunkGetBlock(chunk);
1192 : :
1193 : : /*
1194 : : * Try to verify that we have a sane block pointer: the block header
1195 : : * should reference an aset and the freeptr should match the endptr.
1196 : : */
552 tgl@sss.pgh.pa.us 1197 [ + - + - : 33057 : if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
+ - - + ]
552 tgl@sss.pgh.pa.us 1198 [ # # ]:UBC 0 : elog(ERROR, "could not find block containing chunk %p", chunk);
1199 : :
594 drowley@postgresql.o 1200 :CBC 33057 : set = block->aset;
1201 : :
1202 : : /* only check size in paths where the limits could be hit */
47 drowley@postgresql.o 1203 :GNC 33057 : MemoryContextCheckSize((MemoryContext) set, size, flags);
1204 : :
418 tgl@sss.pgh.pa.us 1205 :CBC 33057 : oldchksize = block->endptr - (char *) pointer;
1206 : :
1207 : : #ifdef MEMORY_CONTEXT_CHECKING
1208 : : /* Test for someone scribbling on unused space in chunk */
1209 [ - + ]: 33057 : Assert(chunk->requested_size < oldchksize);
585 drowley@postgresql.o 1210 [ - + ]: 33057 : if (!sentinel_ok(pointer, chunk->requested_size))
585 drowley@postgresql.o 1211 [ # # ]:UBC 0 : elog(WARNING, "detected write past chunk end in %s %p",
1212 : : set->header.name, chunk);
1213 : : #endif
1214 : :
1215 : : #ifdef MEMORY_CONTEXT_CHECKING
1216 : : /* ensure there's always space for the sentinel byte */
585 drowley@postgresql.o 1217 :CBC 33057 : chksize = MAXALIGN(size + 1);
1218 : : #else
1219 : : chksize = MAXALIGN(size);
1220 : : #endif
1221 : :
1222 : : /* Do the realloc */
8535 tgl@sss.pgh.pa.us 1223 : 33057 : blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
1431 1224 : 33057 : oldblksize = block->endptr - ((char *) block);
1225 : :
9000 1226 : 33057 : block = (AllocBlock) realloc(block, blksize);
1227 [ - + ]: 33057 : if (block == NULL)
1228 : : {
1229 : : /* Disallow access to the chunk header. */
1230 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
47 drowley@postgresql.o 1231 :UNC 0 : return MemoryContextAllocationFailure(&set->header, size, flags);
1232 : : }
1233 : :
1234 : : /* updated separately, not to underflow when (oldblksize > blksize) */
594 drowley@postgresql.o 1235 :CBC 33057 : set->header.mem_allocated -= oldblksize;
1236 : 33057 : set->header.mem_allocated += blksize;
1237 : :
9000 tgl@sss.pgh.pa.us 1238 : 33057 : block->freeptr = block->endptr = ((char *) block) + blksize;
1239 : :
1240 : : /* Update pointers since block has likely been moved */
594 drowley@postgresql.o 1241 : 33057 : chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
1242 : 33057 : pointer = MemoryChunkGetPointer(chunk);
2594 tgl@sss.pgh.pa.us 1243 [ + - ]: 33057 : if (block->prev)
1244 : 33057 : block->prev->next = block;
1245 : : else
2594 tgl@sss.pgh.pa.us 1246 :UBC 0 : set->blocks = block;
2594 tgl@sss.pgh.pa.us 1247 [ + + ]:CBC 33057 : if (block->next)
1248 : 21636 : block->next->prev = block;
1249 : :
1250 : : #ifdef MEMORY_CONTEXT_CHECKING
1251 : : #ifdef RANDOMIZE_ALLOCATED_MEMORY
1252 : :
1253 : : /*
1254 : : * We can only randomize the extra space if we know the prior request.
1255 : : * When using Valgrind, randomize_mem() also marks memory UNDEFINED.
1256 : : */
1257 : : if (size > chunk->requested_size)
1258 : : randomize_mem((char *) pointer + chunk->requested_size,
1259 : : size - chunk->requested_size);
1260 : : #else
1261 : :
1262 : : /*
1263 : : * If this is an increase, realloc() will have marked any
1264 : : * newly-allocated part (from oldchksize to chksize) UNDEFINED, but we
1265 : : * also need to adjust trailing bytes from the old allocation (from
1266 : : * chunk->requested_size to oldchksize) as they are marked NOACCESS.
1267 : : * Make sure not to mark too many bytes in case chunk->requested_size
1268 : : * < size < oldchksize.
1269 : : */
1270 : : #ifdef USE_VALGRIND
1271 : : if (Min(size, oldchksize) > chunk->requested_size)
1272 : : VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1273 : : Min(size, oldchksize) - chunk->requested_size);
1274 : : #endif
1275 : : #endif
1276 : :
8535 1277 : 33057 : chunk->requested_size = size;
1278 : : /* set mark to catch clobber of "unused" space */
585 drowley@postgresql.o 1279 [ - + ]: 33057 : Assert(size < chksize);
1280 : 33057 : set_sentinel(pointer, size);
1281 : : #else /* !MEMORY_CONTEXT_CHECKING */
1282 : :
1283 : : /*
1284 : : * We may need to adjust marking of bytes from the old allocation as
1285 : : * some of them may be marked NOACCESS. We don't know how much of the
1286 : : * old chunk size was the requested size; it could have been as small
1287 : : * as one byte. We have to be conservative and just mark the entire
1288 : : * old portion DEFINED. Make sure not to mark memory beyond the new
1289 : : * allocation in case it's smaller than the old one.
1290 : : */
1291 : : VALGRIND_MAKE_MEM_DEFINED(pointer, Min(size, oldchksize));
1292 : : #endif
1293 : :
1294 : : /* Ensure any padding bytes are marked NOACCESS. */
1295 : : VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1296 : :
1297 : : /* Disallow access to the chunk header. */
1298 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1299 : :
2594 tgl@sss.pgh.pa.us 1300 : 33057 : return pointer;
1301 : : }
1302 : :
594 drowley@postgresql.o 1303 : 2382388 : block = MemoryChunkGetBlock(chunk);
1304 : :
1305 : : /*
1306 : : * In this path, for speed reasons we just Assert that the referenced
1307 : : * block is good. We can also Assert that the value field is sane. Future
1308 : : * field experience may show that these Asserts had better become regular
1309 : : * runtime test-and-elog checks.
1310 : : */
534 peter@eisentraut.org 1311 [ + - + - - + ]: 2382388 : Assert(AllocBlockIsValid(block));
594 drowley@postgresql.o 1312 : 2382388 : set = block->aset;
1313 : :
552 tgl@sss.pgh.pa.us 1314 : 2382388 : fidx = MemoryChunkGetValue(chunk);
1315 [ + - - + ]: 2382388 : Assert(FreeListIdxIsValid(fidx));
418 1316 : 2382388 : oldchksize = GetChunkSizeFromFreeListIdx(fidx);
1317 : :
1318 : : #ifdef MEMORY_CONTEXT_CHECKING
1319 : : /* Test for someone scribbling on unused space in chunk */
1320 [ + + ]: 2382388 : if (chunk->requested_size < oldchksize)
594 drowley@postgresql.o 1321 [ - + ]: 1220745 : if (!sentinel_ok(pointer, chunk->requested_size))
594 drowley@postgresql.o 1322 [ # # ]:UBC 0 : elog(WARNING, "detected write past chunk end in %s %p",
1323 : : set->header.name, chunk);
1324 : : #endif
1325 : :
1326 : : /*
1327 : : * Chunk sizes are rounded up to a power of 2 in AllocSetAlloc(), so the
1328 : : * allocated area may already be >= the new size. (In particular, we will
1329 : : * fall out here if the requested size is a decrease.)
1330 : : */
418 tgl@sss.pgh.pa.us 1331 [ + + ]:CBC 2382388 : if (oldchksize >= size)
1332 : : {
1333 : : #ifdef MEMORY_CONTEXT_CHECKING
1655 1334 : 1260698 : Size oldrequest = chunk->requested_size;
1335 : :
1336 : : #ifdef RANDOMIZE_ALLOCATED_MEMORY
1337 : : /* We can only fill the extra space if we know the prior request */
1338 : : if (size > oldrequest)
1339 : : randomize_mem((char *) pointer + oldrequest,
1340 : : size - oldrequest);
1341 : : #endif
1342 : :
1343 : 1260698 : chunk->requested_size = size;
1344 : :
1345 : : /*
1346 : : * If this is an increase, mark any newly-available part UNDEFINED.
1347 : : * Otherwise, mark the obsolete part NOACCESS.
1348 : : */
1349 : : if (size > oldrequest)
1350 : : VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1351 : : size - oldrequest);
1352 : : else
1353 : : VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1354 : : oldchksize - size);
1355 : :
1356 : : /* set mark to catch clobber of "unused" space */
418 1357 [ + + ]: 1260698 : if (size < oldchksize)
1655 1358 : 1246849 : set_sentinel(pointer, size);
1359 : : #else /* !MEMORY_CONTEXT_CHECKING */
1360 : :
1361 : : /*
1362 : : * We don't have the information to determine whether we're growing
1363 : : * the old request or shrinking it, so we conservatively mark the
1364 : : * entire new allocation DEFINED.
1365 : : */
1366 : : VALGRIND_MAKE_MEM_NOACCESS(pointer, oldchksize);
1367 : : VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1368 : : #endif
1369 : :
1370 : : /* Disallow access to the chunk header. */
1371 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1372 : :
1373 : 1260698 : return pointer;
1374 : : }
1375 : : else
1376 : : {
1377 : : /*
1378 : : * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1379 : : * allocate a new chunk and copy the data. Since we know the existing
1380 : : * data isn't huge, this won't involve any great memcpy expense, so
1381 : : * it's not worth being smarter. (At one time we tried to avoid
1382 : : * memcpy when it was possible to enlarge the chunk in-place, but that
1383 : : * turns out to misbehave unpleasantly for repeated cycles of
1384 : : * palloc/repalloc/pfree: the eventually freed chunks go into the
1385 : : * wrong freelist for the next initial palloc request, and so we leak
1386 : : * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1387 : : */
1388 : : AllocPointer newPointer;
1389 : : Size oldsize;
1390 : :
1391 : : /* allocate new chunk (this also checks that the size is valid) */
47 drowley@postgresql.o 1392 :GNC 1121690 : newPointer = AllocSetAlloc((MemoryContext) set, size, flags);
1393 : :
1394 : : /* leave immediately if request was not completed */
3363 rhaas@postgresql.org 1395 [ - + ]:CBC 1121690 : if (newPointer == NULL)
1396 : : {
1397 : : /* Disallow access to the chunk header. */
1398 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
47 drowley@postgresql.o 1399 :UNC 0 : return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
1400 : : }
1401 : :
1402 : : /*
1403 : : * AllocSetAlloc() may have returned a region that is still NOACCESS.
1404 : : * Change it to UNDEFINED for the moment; memcpy() will then transfer
1405 : : * definedness from the old allocation to the new. If we know the old
1406 : : * allocation, copy just that much. Otherwise, make the entire old
1407 : : * chunk defined to avoid errors as we copy the currently-NOACCESS
1408 : : * trailing bytes.
1409 : : */
1410 : : VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
1411 : : #ifdef MEMORY_CONTEXT_CHECKING
3945 noah@leadboat.com 1412 :CBC 1121690 : oldsize = chunk->requested_size;
1413 : : #else
1414 : : oldsize = oldchksize;
1415 : : VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1416 : : #endif
1417 : :
1418 : : /* transfer existing data (certain to fit) */
9000 tgl@sss.pgh.pa.us 1419 : 1121690 : memcpy(newPointer, pointer, oldsize);
1420 : :
1421 : : /* free old chunk */
594 drowley@postgresql.o 1422 : 1121690 : AllocSetFree(pointer);
1423 : :
9000 tgl@sss.pgh.pa.us 1424 : 1121690 : return newPointer;
1425 : : }
1426 : : }
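
/*
 * Minimal standalone sketch (not part of aset.c) of the grow-by-copy pattern
 * used by the enlarge-a-small-chunk case above: requests are rounded up to a
 * power-of-2 size class, and once a request outgrows its class the data is
 * moved into a freshly allocated chunk and the old one is released.  The
 * names pow2_round_up() and grow_by_copy() are illustrative only; the real
 * code goes through AllocSetAlloc()/AllocSetFree() and the freelists.
 */
#include <stdlib.h>
#include <string.h>

static size_t
pow2_round_up(size_t size)
{
	size_t		result = 8;		/* smallest class, cf. 1 << ALLOC_MINBITS */

	while (result < size)
		result <<= 1;
	return result;
}

static void *
grow_by_copy(void *old, size_t oldsize, size_t newsize)
{
	void	   *copy;

	/* If the rounded-up class already covers the request, reuse the chunk. */
	if (pow2_round_up(oldsize) >= newsize)
		return old;

	/* Otherwise allocate anew, copy the existing payload, free the old chunk. */
	copy = malloc(pow2_round_up(newsize));
	if (copy == NULL)
		return NULL;
	memcpy(copy, old, oldsize);
	free(old);
	return copy;
}
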
1427 : :
1428 : : /*
1429 : : * AllocSetGetChunkContext
1430 : : * Return the MemoryContext that 'pointer' belongs to.
1431 : : */
1432 : : MemoryContext
594 drowley@postgresql.o 1433 : 3735088 : AllocSetGetChunkContext(void *pointer)
1434 : : {
1435 : 3735088 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1436 : : AllocBlock block;
1437 : : AllocSet set;
1438 : :
1439 : : /* Allow access to the chunk header. */
1440 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1441 : :
1442 [ + + ]: 3735088 : if (MemoryChunkIsExternal(chunk))
1443 : 33057 : block = ExternalChunkGetBlock(chunk);
1444 : : else
1445 : 3702031 : block = (AllocBlock) MemoryChunkGetBlock(chunk);
1446 : :
1447 : : /* Disallow access to the chunk header. */
1448 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1449 : :
534 peter@eisentraut.org 1450 [ + - + - - + ]: 3735088 : Assert(AllocBlockIsValid(block));
594 drowley@postgresql.o 1451 : 3735088 : set = block->aset;
1452 : :
1453 : 3735088 : return &set->header;
1454 : : }
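
/*
 * Hedged usage sketch (backend code, not part of aset.c): because pfree()
 * and repalloc() recover the owning context from the chunk header (the
 * lookup AllocSetGetChunkContext() performs above), callers do not need to
 * switch back into the context that made the allocation before freeing.
 * The function name and the context name "chunk context demo" are
 * illustrative only.
 */
#include "postgres.h"

#include "utils/memutils.h"

static void
chunk_context_demo(void)
{
	MemoryContext mycxt;
	MemoryContext oldcxt;
	char	   *data;

	mycxt = AllocSetContextCreate(CurrentMemoryContext,
								  "chunk context demo",
								  ALLOCSET_DEFAULT_SIZES);

	/* allocate while mycxt is current ... */
	oldcxt = MemoryContextSwitchTo(mycxt);
	data = palloc(128);
	MemoryContextSwitchTo(oldcxt);

	/*
	 * ... but free from anywhere: pfree() finds mycxt through the chunk
	 * header, so no context switch is required here.
	 */
	pfree(data);

	MemoryContextDelete(mycxt);
}
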
1455 : :
1456 : : /*
1457 : : * AllocSetGetChunkSpace
1458 : : * Given a currently-allocated chunk, determine the total space
1459 : : * it occupies (including all memory-allocation overhead).
1460 : : */
1461 : : Size
1462 : 22066621 : AllocSetGetChunkSpace(void *pointer)
1463 : : {
1464 : 22066621 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1465 : : int fidx;
1466 : :
1467 : : /* Allow access to the chunk header. */
1468 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1469 : :
1470 [ + + ]: 22066621 : if (MemoryChunkIsExternal(chunk))
1471 : : {
1472 : 465684 : AllocBlock block = ExternalChunkGetBlock(chunk);
1473 : :
1474 : : /* Disallow access to the chunk header. */
1475 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1476 : :
534 peter@eisentraut.org 1477 [ + - + - - + ]: 465684 : Assert(AllocBlockIsValid(block));
1478 : :
594 drowley@postgresql.o 1479 : 465684 : return block->endptr - (char *) chunk;
1480 : : }
1481 : :
552 tgl@sss.pgh.pa.us 1482 : 21600937 : fidx = MemoryChunkGetValue(chunk);
1483 [ + - - + ]: 21600937 : Assert(FreeListIdxIsValid(fidx));
1484 : :
1485 : : /* Disallow access to the chunk header. */
1486 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1487 : :
1488 : 21600937 : return GetChunkSizeFromFreeListIdx(fidx) + ALLOC_CHUNKHDRSZ;
1489 : : }
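
/*
 * Hedged sketch of typical use via the public wrapper GetMemoryChunkSpace()
 * (declared in utils/memutils.h), which reaches the routine above for
 * allocations made in an allocation set: account for the full footprint of
 * each chunk, header overhead included, rather than just the requested size.
 * tracked_alloc(), tracked_free() and space_used are illustrative names.
 */
#include "postgres.h"

#include "utils/memutils.h"

/* running total of space charged to some data structure */
static Size space_used = 0;

static void *
tracked_alloc(Size size)
{
	void	   *ptr = palloc(size);

	/* charge what the allocator really consumed, not what was asked for */
	space_used += GetMemoryChunkSpace(ptr);
	return ptr;
}

static void
tracked_free(void *ptr)
{
	space_used -= GetMemoryChunkSpace(ptr);
	pfree(ptr);
}
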
1490 : :
1491 : : /*
1492 : : * AllocSetIsEmpty
1493 : : * Is an allocset empty of any allocated space?
1494 : : */
1495 : : bool
7150 1496 : 5373 : AllocSetIsEmpty(MemoryContext context)
1497 : : {
534 peter@eisentraut.org 1498 [ + - - + ]: 5373 : Assert(AllocSetIsValid(context));
1499 : :
1500 : : /*
1501 : : * For now, we say "empty" only if the context is new or just reset. We
1502 : : * could examine the freelists to determine if all space has been freed,
1503 : : * but it's not really worth the trouble for present uses of this
1504 : : * functionality.
1505 : : */
4712 heikki.linnakangas@i 1506 [ + + ]: 5373 : if (context->isReset)
7150 tgl@sss.pgh.pa.us 1507 : 5365 : return true;
1508 : 8 : return false;
1509 : : }
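
/*
 * Hedged sketch of the behaviour documented above (illustrative function
 * name): an allocation set reports empty right after creation or after
 * MemoryContextReset(), but not while it holds any live allocation.
 */
#include "postgres.h"

#include "utils/memutils.h"

static void
is_empty_demo(void)
{
	MemoryContext cxt = AllocSetContextCreate(CurrentMemoryContext,
											  "is-empty demo",
											  ALLOCSET_DEFAULT_SIZES);

	Assert(MemoryContextIsEmpty(cxt));	/* brand new: empty */

	(void) MemoryContextAlloc(cxt, 16);
	Assert(!MemoryContextIsEmpty(cxt)); /* live allocation: not empty */

	MemoryContextReset(cxt);
	Assert(MemoryContextIsEmpty(cxt));	/* just reset: empty again */

	MemoryContextDelete(cxt);
}
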
1510 : :
1511 : : /*
1512 : : * AllocSetStats
1513 : : * Compute stats about memory consumption of an allocset.
1514 : : *
1515 : : * printfunc: if not NULL, pass a human-readable stats string to this.
1516 : : * passthru: pass this pointer through to printfunc.
1517 : : * totals: if not NULL, add stats about this context into *totals.
1518 : : * print_to_stderr: print stats to stderr if true, elog otherwise.
1519 : : */
1520 : : void
2210 1521 : 1527 : AllocSetStats(MemoryContext context,
1522 : : MemoryStatsPrintFunc printfunc, void *passthru,
1523 : : MemoryContextCounters *totals, bool print_to_stderr)
1524 : : {
8691 1525 : 1527 : AllocSet set = (AllocSet) context;
3734 1526 : 1527 : Size nblocks = 0;
3155 1527 : 1527 : Size freechunks = 0;
1528 : : Size totalspace;
3734 1529 : 1527 : Size freespace = 0;
1530 : : AllocBlock block;
1531 : : int fidx;
1532 : :
534 peter@eisentraut.org 1533 [ + - - + ]: 1527 : Assert(AllocSetIsValid(set));
1534 : :
1535 : : /* Include context header in totalspace */
2210 tgl@sss.pgh.pa.us 1536 : 1527 : totalspace = MAXALIGN(sizeof(AllocSetContext));
1537 : :
8729 1538 [ + + ]: 4288 : for (block = set->blocks; block != NULL; block = block->next)
1539 : : {
1540 : 2761 : nblocks++;
1541 : 2761 : totalspace += block->endptr - ((char *) block);
1542 : 2761 : freespace += block->endptr - block->freeptr;
1543 : : }
1544 [ + + ]: 18324 : for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1545 : : {
552 1546 : 16797 : Size chksz = GetChunkSizeFromFreeListIdx(fidx);
594 drowley@postgresql.o 1547 : 16797 : MemoryChunk *chunk = set->freelist[fidx];
1548 : :
1549 [ + + ]: 25323 : while (chunk != NULL)
1550 : : {
1551 : 8526 : AllocFreeListLink *link = GetFreeListLink(chunk);
1552 : :
1553 : : /* Allow access to the chunk header. */
1554 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
552 tgl@sss.pgh.pa.us 1555 [ - + ]: 8526 : Assert(MemoryChunkGetValue(chunk) == fidx);
1556 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1557 : :
3155 1558 : 8526 : freechunks++;
594 drowley@postgresql.o 1559 : 8526 : freespace += chksz + ALLOC_CHUNKHDRSZ;
1560 : :
1561 : : VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
1562 : 8526 : chunk = link->next;
1563 : : VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
1564 : : }
1565 : : }
1566 : :
2210 tgl@sss.pgh.pa.us 1567 [ + + ]: 1527 : if (printfunc)
1568 : : {
1569 : : char stats_string[200];
1570 : :
1571 : 774 : snprintf(stats_string, sizeof(stats_string),
1572 : : "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1573 : : totalspace, nblocks, freespace, freechunks,
1574 : : totalspace - freespace);
1104 fujii@postgresql.org 1575 : 774 : printfunc(context, passthru, stats_string, print_to_stderr);
1576 : : }
1577 : :
3155 tgl@sss.pgh.pa.us 1578 [ + - ]: 1527 : if (totals)
1579 : : {
1580 : 1527 : totals->nblocks += nblocks;
1581 : 1527 : totals->freechunks += freechunks;
1582 : 1527 : totals->totalspace += totalspace;
1583 : 1527 : totals->freespace += freespace;
1584 : : }
8729 1585 : 1527 : }
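
/*
 * Hedged sketch: the usual entry point into AllocSetStats() from backend
 * code is MemoryContextStats(), which walks the context tree and hands each
 * per-context line (the "%zu total in %zu blocks; ..." string built above)
 * to a printer that writes to stderr.  The numbers in the comment below are
 * illustrative only.
 */
#include "postgres.h"

#include "utils/memutils.h"

static void
dump_memory_stats(void)
{
	/*
	 * Emits one line per context, e.g.
	 *   8192 total in 1 blocks; 7936 free (0 chunks); 256 used
	 * followed by a grand total; also convenient from a debugger with
	 *   call MemoryContextStats(TopMemoryContext)
	 */
	MemoryContextStats(TopMemoryContext);
}
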
1586 : :
1587 : :
1588 : : #ifdef MEMORY_CONTEXT_CHECKING
1589 : :
1590 : : /*
1591 : : * AllocSetCheck
1592 : : * Walk through chunks and check consistency of memory.
1593 : : *
1594 : : * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1595 : : * find yourself in an infinite loop when trouble occurs, because this
1596 : : * routine will be entered again when elog cleanup tries to release memory!
1597 : : */
1598 : : void
8678 bruce@momjian.us 1599 : 78504418 : AllocSetCheck(MemoryContext context)
1600 : : {
8424 1601 : 78504418 : AllocSet set = (AllocSet) context;
2314 tgl@sss.pgh.pa.us 1602 : 78504418 : const char *name = set->header.name;
1603 : : AllocBlock prevblock;
1604 : : AllocBlock block;
1654 tomas.vondra@postgre 1605 : 78504418 : Size total_allocated = 0;
1606 : :
2594 tgl@sss.pgh.pa.us 1607 : 78504418 : for (prevblock = NULL, block = set->blocks;
1608 [ + + ]: 212767003 : block != NULL;
1609 : 134262585 : prevblock = block, block = block->next)
1610 : : {
8424 bruce@momjian.us 1611 : 134262585 : char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
1612 : 134262585 : long blk_used = block->freeptr - bpoz;
1613 : 134262585 : long blk_data = 0;
1614 : 134262585 : long nchunks = 0;
594 drowley@postgresql.o 1615 : 134262585 : bool has_external_chunk = false;
1616 : :
272 drowley@postgresql.o 1617 [ + + ]:GNC 134262585 : if (IsKeeperBlock(set, block))
1657 tomas.vondra@postgre 1618 :CBC 78504418 : total_allocated += block->endptr - ((char *) set);
1619 : : else
1620 : 55758167 : total_allocated += block->endptr - ((char *) block);
1621 : :
1622 : : /*
1623 : : * Empty block - only the keeper block is allowed to be empty
1624 : : */
8678 bruce@momjian.us 1625 [ + + ]: 134262585 : if (!blk_used)
1626 : : {
272 drowley@postgresql.o 1627 [ - + ]:GNC 2263105 : if (!IsKeeperBlock(set, block))
7569 tgl@sss.pgh.pa.us 1628 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: empty block %p",
1629 : : name, block);
1630 : : }
1631 : :
1632 : : /*
1633 : : * Check block header fields
1634 : : */
2594 tgl@sss.pgh.pa.us 1635 [ + - ]:CBC 134262585 : if (block->aset != set ||
1636 [ + - ]: 134262585 : block->prev != prevblock ||
1637 [ + - ]: 134262585 : block->freeptr < bpoz ||
1638 [ - + ]: 134262585 : block->freeptr > block->endptr)
2594 tgl@sss.pgh.pa.us 1639 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
1640 : : name, block);
1641 : :
1642 : : /*
1643 : : * Chunk walker
1644 : : */
8535 tgl@sss.pgh.pa.us 1645 [ + + ]:CBC 2348016081 : while (bpoz < block->freeptr)
1646 : : {
594 drowley@postgresql.o 1647 : 2213753496 : MemoryChunk *chunk = (MemoryChunk *) bpoz;
1648 : : Size chsize,
1649 : : dsize;
1650 : :
1651 : : /* Allow access to the chunk header. */
1652 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1653 : :
1654 [ + + ]: 2213753496 : if (MemoryChunkIsExternal(chunk))
1655 : : {
1656 : 3411514 : chsize = block->endptr - (char *) MemoryChunkGetPointer(chunk); /* aligned chunk size */
1657 : 3411514 : has_external_chunk = true;
1658 : :
1659 : : /* make sure this chunk consumes the entire block */
1660 [ - + ]: 3411514 : if (chsize + ALLOC_CHUNKHDRSZ != blk_used)
594 drowley@postgresql.o 1661 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
1662 : : name, chunk, block);
1663 : : }
1664 : : else
1665 : : {
552 tgl@sss.pgh.pa.us 1666 :CBC 2210341982 : int fidx = MemoryChunkGetValue(chunk);
1667 : :
1668 [ + - - + ]: 2210341982 : if (!FreeListIdxIsValid(fidx))
552 tgl@sss.pgh.pa.us 1669 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: bad chunk size for chunk %p in block %p",
1670 : : name, chunk, block);
1671 : :
552 tgl@sss.pgh.pa.us 1672 :CBC 2210341982 : chsize = GetChunkSizeFromFreeListIdx(fidx); /* aligned chunk size */
1673 : :
1674 : : /*
1675 : : * Check the stored block offset correctly references this
1676 : : * block.
1677 : : */
594 drowley@postgresql.o 1678 [ - + ]: 2210341982 : if (block != MemoryChunkGetBlock(chunk))
594 drowley@postgresql.o 1679 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: bad block offset for chunk %p in block %p",
1680 : : name, chunk, block);
1681 : : }
2489 tgl@sss.pgh.pa.us 1682 :CBC 2213753496 : dsize = chunk->requested_size; /* real data */
1683 : :
1684 : : /* an allocated chunk's requested size must be <= chsize */
594 drowley@postgresql.o 1685 [ + + - + ]: 2213753496 : if (dsize != InvalidAllocSize && dsize > chsize)
7569 tgl@sss.pgh.pa.us 1686 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
1687 : : name, chunk, block);
1688 : :
1689 : : /* chsize must not be smaller than the first freelist's size */
8678 bruce@momjian.us 1690 [ - + ]:CBC 2213753496 : if (chsize < (1 << ALLOC_MINBITS))
3734 tgl@sss.pgh.pa.us 1691 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
1692 : : name, chsize, chunk, block);
1693 : :
1694 : : /*
1695 : : * Check for overwrite of padding space in an allocated chunk.
1696 : : */
594 drowley@postgresql.o 1697 [ + + + + ]:CBC 2213753496 : if (dsize != InvalidAllocSize && dsize < chsize &&
3945 noah@leadboat.com 1698 [ - + ]: 1473406671 : !sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
7569 tgl@sss.pgh.pa.us 1699 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
1700 : : name, block, chunk);
1701 : :
1702 : : /* if chunk is allocated, disallow access to the chunk header */
1703 : : if (dsize != InvalidAllocSize)
1704 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1705 : :
8678 bruce@momjian.us 1706 :CBC 2213753496 : blk_data += chsize;
1707 : 2213753496 : nchunks++;
1708 : :
8535 tgl@sss.pgh.pa.us 1709 : 2213753496 : bpoz += ALLOC_CHUNKHDRSZ + chsize;
1710 : : }
1711 : :
8678 bruce@momjian.us 1712 [ - + ]: 134262585 : if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
7569 tgl@sss.pgh.pa.us 1713 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
1714 : : name, block);
1715 : :
594 drowley@postgresql.o 1716 [ + + - + ]:CBC 134262585 : if (has_external_chunk && nchunks > 1)
594 drowley@postgresql.o 1717 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: external chunk on non-dedicated block %p",
1718 : : name, block);
1719 : : }
1720 : :
1487 jdavis@postgresql.or 1721 [ - + ]:CBC 78504418 : Assert(total_allocated == context->mem_allocated);
8678 bruce@momjian.us 1722 : 78504418 : }
1723 : :
1724 : : #endif /* MEMORY_CONTEXT_CHECKING */
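
/*
 * Standalone sketch of the sentinel scheme AllocSetCheck() relies on (this
 * is not the memdebug.h implementation, and SENTINEL_BYTE is an illustrative
 * value): when the requested size is smaller than the rounded-up chunk size,
 * one marker byte is written just past the request, and checking that byte
 * later detects writes past the end of the requested area.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define SENTINEL_BYTE 0x7E

/* write a marker immediately after the caller-visible part of the chunk */
static void
sketch_set_sentinel(void *base, size_t requested_size)
{
	((uint8_t *) base)[requested_size] = SENTINEL_BYTE;
}

/* true if the marker is intact, i.e. nothing wrote past the requested size */
static bool
sketch_sentinel_ok(const void *base, size_t requested_size)
{
	return ((const uint8_t *) base)[requested_size] == SENTINEL_BYTE;
}
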