Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * checkpointer.c
4 : : *
5 : : * The checkpointer is new as of Postgres 9.2. It handles all checkpoints.
6 : : * Checkpoints are automatically dispatched after a certain amount of time has
7 : : * elapsed since the last one, and the checkpointer can be signaled to perform requested
8 : : * checkpoints as well. (The GUC parameter that mandates a checkpoint every
9 : : * so many WAL segments is implemented by having backends signal when they
10 : : * fill WAL segments; the checkpointer itself doesn't watch for the
11 : : * condition.)
12 : : *
13 : : * Normal termination is by SIGUSR2, which instructs the checkpointer to
14 : : * execute a shutdown checkpoint and then exit(0). (All backends must be
15 : : * stopped before SIGUSR2 is issued!) Emergency termination is by SIGQUIT;
16 : : * like any backend, the checkpointer will simply abort and exit on SIGQUIT.
17 : : *
18 : : * If the checkpointer exits unexpectedly, the postmaster treats that the same
19 : : * as a backend crash: shared memory may be corrupted, so remaining backends
20 : : * should be killed by SIGQUIT and then a recovery cycle started. (Even if
21 : : * shared memory isn't corrupted, we have lost information about which
22 : : * files need to be fsync'd for the next checkpoint, and so a system
23 : : * restart needs to be forced.)
24 : : *
25 : : *
26 : : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
27 : : *
28 : : *
29 : : * IDENTIFICATION
30 : : * src/backend/postmaster/checkpointer.c
31 : : *
32 : : *-------------------------------------------------------------------------
33 : : */
34 : : #include "postgres.h"
35 : :
36 : : #include <sys/time.h>
37 : : #include <time.h>
38 : :
39 : : #include "access/xlog.h"
40 : : #include "access/xlog_internal.h"
41 : : #include "access/xlogrecovery.h"
42 : : #include "libpq/pqsignal.h"
43 : : #include "miscadmin.h"
44 : : #include "pgstat.h"
45 : : #include "postmaster/auxprocess.h"
46 : : #include "postmaster/bgwriter.h"
47 : : #include "postmaster/interrupt.h"
48 : : #include "replication/syncrep.h"
49 : : #include "storage/bufmgr.h"
50 : : #include "storage/condition_variable.h"
51 : : #include "storage/fd.h"
52 : : #include "storage/ipc.h"
53 : : #include "storage/lwlock.h"
54 : : #include "storage/proc.h"
55 : : #include "storage/procsignal.h"
56 : : #include "storage/shmem.h"
57 : : #include "storage/smgr.h"
58 : : #include "storage/spin.h"
59 : : #include "utils/guc.h"
60 : : #include "utils/memutils.h"
61 : : #include "utils/resowner.h"
62 : :
63 : :
64 : : /*----------
65 : : * Shared memory area for communication between checkpointer and backends
66 : : *
67 : : * The ckpt counters allow backends to watch for completion of a checkpoint
68 : : * request they send. Here's how it works:
69 : : * * At start of a checkpoint, checkpointer reads (and clears) the request
70 : : * flags and increments ckpt_started, while holding ckpt_lck.
71 : : * * On completion of a checkpoint, checkpointer sets ckpt_done to
72 : : * equal ckpt_started.
73 : : * * On failure of a checkpoint, checkpointer increments ckpt_failed
74 : : * and sets ckpt_done to equal ckpt_started.
75 : : *
76 : : * The algorithm for backends is:
77 : : * 1. Record current values of ckpt_failed and ckpt_started, and
78 : : * set request flags, while holding ckpt_lck.
79 : : * 2. Send signal to request checkpoint.
80 : : * 3. Sleep until ckpt_started changes. Now you know a checkpoint has
81 : : * begun since you started this algorithm (although *not* that it was
82 : : * specifically initiated by your signal), and that it is using your flags.
83 : : * 4. Record new value of ckpt_started.
84 : : * 5. Sleep until ckpt_done >= saved value of ckpt_started. (Use modulo
85 : : * arithmetic here in case counters wrap around.) Now you know a
86 : : * checkpoint has started and completed, but not whether it was
87 : : * successful.
88 : : * 6. If ckpt_failed is different from the originally saved value,
89 : : * assume request failed; otherwise it was definitely successful.
90 : : *
91 : : * ckpt_flags holds the OR of the checkpoint request flags sent by all
92 : : * requesting backends since the last checkpoint start. The flags are
93 : : * chosen so that OR'ing is the correct way to combine multiple requests.
94 : : *
95 : : * The requests array holds fsync requests sent by backends and not yet
96 : : * absorbed by the checkpointer.
97 : : *
98 : : * Unlike the checkpoint fields, the request-related fields are protected by
99 : : * CheckpointerCommLock.
100 : : *----------
101 : : */
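The wraparound-safe comparison in step 5 is the one subtle point: ckpt_started and ckpt_done are plain ints that may wrap, so a waiting backend tests completion with a signed difference rather than a direct >=. A minimal sketch of that test, using the same variable names as RequestCheckpoint() further down in this file (new_started is the value recorded in step 4, new_done is re-read under ckpt_lck):

    /* true once ckpt_done has caught up with the saved ckpt_started,
     * even if the counters wrapped around in between */
    if (new_done - new_started >= 0)
        break;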
102 : : typedef struct
103 : : {
104 : : SyncRequestType type; /* request type */
105 : : FileTag ftag; /* file identifier */
106 : : } CheckpointerRequest;
107 : :
108 : : typedef struct
109 : : {
110 : : pid_t checkpointer_pid; /* PID (0 if not started) */
111 : :
112 : : slock_t ckpt_lck; /* protects all the ckpt_* fields */
113 : :
114 : : int ckpt_started; /* advances when checkpoint starts */
115 : : int ckpt_done; /* advances when checkpoint done */
116 : : int ckpt_failed; /* advances when checkpoint fails */
117 : :
118 : : int ckpt_flags; /* checkpoint flags, as defined in xlog.h */
119 : :
120 : : ConditionVariable start_cv; /* signaled when ckpt_started advances */
121 : : ConditionVariable done_cv; /* signaled when ckpt_done advances */
122 : :
123 : : int num_requests; /* current # of requests */
124 : : int max_requests; /* allocated array size */
125 : : CheckpointerRequest requests[FLEXIBLE_ARRAY_MEMBER];
126 : : } CheckpointerShmemStruct;
127 : :
128 : : static CheckpointerShmemStruct *CheckpointerShmem;
129 : :
130 : : /* interval for calling AbsorbSyncRequests in CheckpointWriteDelay */
131 : : #define WRITES_PER_ABSORB 1000
132 : :
133 : : /*
134 : : * GUC parameters
135 : : */
136 : : int CheckPointTimeout = 300;
137 : : int CheckPointWarning = 30;
138 : : double CheckPointCompletionTarget = 0.9;
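These defaults correspond to the checkpoint_timeout, checkpoint_warning and checkpoint_completion_target settings. For reference, a postgresql.conf excerpt with the same values would look roughly like this:

    checkpoint_timeout = 5min               # 300 seconds between timed checkpoints
    checkpoint_warning = 30s                # warn when xlog-driven checkpoints come faster than this
    checkpoint_completion_target = 0.9      # spread checkpoint writes over 90% of the interval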
139 : :
140 : : /*
141 : : * Private state
142 : : */
143 : : static bool ckpt_active = false;
144 : :
145 : : /* these values are valid when ckpt_active is true: */
146 : : static pg_time_t ckpt_start_time;
147 : : static XLogRecPtr ckpt_start_recptr;
148 : : static double ckpt_cached_elapsed;
149 : :
150 : : static pg_time_t last_checkpoint_time;
151 : : static pg_time_t last_xlog_switch_time;
152 : :
153 : : /* Prototypes for private functions */
154 : :
155 : : static void HandleCheckpointerInterrupts(void);
156 : : static void CheckArchiveTimeout(void);
157 : : static bool IsCheckpointOnSchedule(double progress);
158 : : static bool ImmediateCheckpointRequested(void);
159 : : static bool CompactCheckpointerRequestQueue(void);
160 : : static void UpdateSharedMemoryConfig(void);
161 : :
162 : : /* Signal handlers */
163 : : static void ReqCheckpointHandler(SIGNAL_ARGS);
164 : :
165 : :
166 : : /*
167 : : * Main entry point for checkpointer process
168 : : *
169 : : * This is invoked from AuxiliaryProcessMain, which has already created the
170 : : * basic execution environment, but not enabled signals yet.
171 : : */
172 : : void
27 heikki.linnakangas@i 173 :GNC 736 : CheckpointerMain(char *startup_data, size_t startup_data_len)
174 : : {
175 : : sigjmp_buf local_sigjmp_buf;
176 : : MemoryContext checkpointer_context;
177 : :
178 [ - + ]: 736 : Assert(startup_data_len == 0);
179 : :
180 : 736 : MyBackendType = B_CHECKPOINTER;
181 : 736 : AuxiliaryProcessMainCommon();
182 : :
4358 simon@2ndQuadrant.co 183 :CBC 736 : CheckpointerShmem->checkpointer_pid = MyProcPid;
184 : :
185 : : /*
186 : : * Properly accept or ignore signals the postmaster might send us
187 : : *
188 : : * Note: we deliberately ignore SIGTERM, because during a standard Unix
189 : : * system shutdown cycle, init will SIGTERM all processes at once. We
190 : : * want to wait for the backends to exit, whereupon the postmaster will
191 : : * tell us it's okay to shut down (via SIGUSR2).
192 : : */
1580 rhaas@postgresql.org 193 : 736 : pqsignal(SIGHUP, SignalHandlerForConfigReload);
2489 tgl@sss.pgh.pa.us 194 : 736 : pqsignal(SIGINT, ReqCheckpointHandler); /* request checkpoint */
4326 bruce@momjian.us 195 : 736 : pqsignal(SIGTERM, SIG_IGN); /* ignore SIGTERM */
196 : : /* SIGQUIT handler was already set up by InitPostmasterChild */
4548 simon@2ndQuadrant.co 197 : 736 : pqsignal(SIGALRM, SIG_IGN);
198 : 736 : pqsignal(SIGPIPE, SIG_IGN);
1602 rhaas@postgresql.org 199 : 736 : pqsignal(SIGUSR1, procsignal_sigusr1_handler);
1580 200 : 736 : pqsignal(SIGUSR2, SignalHandlerForShutdownRequest);
201 : :
202 : : /*
203 : : * Reset some signals that are accepted by postmaster but not here
204 : : */
4548 simon@2ndQuadrant.co 205 : 736 : pqsignal(SIGCHLD, SIG_DFL);
206 : :
207 : : /*
208 : : * Initialize so that first time-driven event happens at the correct time.
209 : : */
210 : 736 : last_checkpoint_time = last_xlog_switch_time = (pg_time_t) time(NULL);
211 : :
212 : : /*
213 : : * Write out stats after shutdown. This needs to be called by exactly one
214 : : * process during a normal shutdown, and since checkpointer is shut down
215 : : * very late...
216 : : *
217 : : * Walsenders are shut down after the checkpointer, but currently don't
218 : : * report stats. If that changes, we need a more complicated solution.
219 : : */
739 andres@anarazel.de 220 : 736 : before_shmem_exit(pgstat_before_server_shutdown, 0);
221 : :
222 : : /*
223 : : * Create a memory context that we will do all our work in. We do this so
224 : : * that we can reset the context during error recovery and thereby avoid
225 : : * possible memory leaks. Formerly this code just ran in
226 : : * TopMemoryContext, but resetting that would be a really bad idea.
227 : : */
4548 simon@2ndQuadrant.co 228 : 736 : checkpointer_context = AllocSetContextCreate(TopMemoryContext,
229 : : "Checkpointer",
230 : : ALLOCSET_DEFAULT_SIZES);
231 : 736 : MemoryContextSwitchTo(checkpointer_context);
232 : :
233 : : /*
234 : : * If an exception is encountered, processing resumes here.
235 : : *
236 : : * You might wonder why this isn't coded as an infinite loop around a
237 : : * PG_TRY construct. The reason is that this is the bottom of the
238 : : * exception stack, and so with PG_TRY there would be no exception handler
239 : : * in force at all during the CATCH part. By leaving the outermost setjmp
240 : : * always active, we have at least some chance of recovering from an error
241 : : * during error recovery. (If we get into an infinite loop thereby, it
242 : : * will soon be stopped by overflow of elog.c's internal state stack.)
243 : : *
244 : : * Note that we use sigsetjmp(..., 1), so that the prevailing signal mask
245 : : * (to wit, BlockSig) will be restored when longjmp'ing to here. Thus,
246 : : * signals other than SIGQUIT will be blocked until we complete error
247 : : * recovery. It might seem that this policy makes the HOLD_INTERRUPTS()
248 : : * call redundant, but it is not since InterruptPending might be set
249 : : * already.
250 : : */
251 [ - + ]: 736 : if (sigsetjmp(local_sigjmp_buf, 1) != 0)
252 : : {
253 : : /* Since not using PG_TRY, must reset error stack by hand */
4548 simon@2ndQuadrant.co 254 :UBC 0 : error_context_stack = NULL;
255 : :
256 : : /* Prevent interrupts while cleaning up */
257 : 0 : HOLD_INTERRUPTS();
258 : :
259 : : /* Report the error to the server log */
260 : 0 : EmitErrorReport();
261 : :
262 : : /*
263 : : * These operations are really just a minimal subset of
264 : : * AbortTransaction(). We don't have very many resources to worry
265 : : * about in checkpointer, but we do have LWLocks, buffers, and temp
266 : : * files.
267 : : */
268 : 0 : LWLockReleaseAll();
2700 rhaas@postgresql.org 269 : 0 : ConditionVariableCancelSleep();
2957 270 : 0 : pgstat_report_wait_end();
4548 simon@2ndQuadrant.co 271 : 0 : UnlockBuffers();
2097 tgl@sss.pgh.pa.us 272 : 0 : ReleaseAuxProcessResources(false);
4548 simon@2ndQuadrant.co 273 : 0 : AtEOXact_Buffers(false);
4197 tgl@sss.pgh.pa.us 274 : 0 : AtEOXact_SMgr();
2178 275 : 0 : AtEOXact_Files(false);
4548 simon@2ndQuadrant.co 276 : 0 : AtEOXact_HashTables(false);
277 : :
278 : : /* Warn any waiting backends that the checkpoint failed. */
279 [ # # ]: 0 : if (ckpt_active)
280 : : {
3113 rhaas@postgresql.org 281 [ # # ]: 0 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
282 : 0 : CheckpointerShmem->ckpt_failed++;
283 : 0 : CheckpointerShmem->ckpt_done = CheckpointerShmem->ckpt_started;
284 : 0 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
285 : :
1835 tmunro@postgresql.or 286 : 0 : ConditionVariableBroadcast(&CheckpointerShmem->done_cv);
287 : :
4548 simon@2ndQuadrant.co 288 : 0 : ckpt_active = false;
289 : : }
290 : :
291 : : /*
292 : : * Now return to normal top-level context and clear ErrorContext for
293 : : * next time.
294 : : */
295 : 0 : MemoryContextSwitchTo(checkpointer_context);
296 : 0 : FlushErrorState();
297 : :
298 : : /* Flush any leaked data in the top-level context */
151 nathan@postgresql.or 299 :UNC 0 : MemoryContextReset(checkpointer_context);
300 : :
301 : : /* Now we can allow interrupts again */
4548 simon@2ndQuadrant.co 302 [ # # ]:UBC 0 : RESUME_INTERRUPTS();
303 : :
304 : : /*
305 : : * Sleep at least 1 second after any error. A write error is likely
306 : : * to be repeated, and we don't want to be filling the error logs as
307 : : * fast as we can.
308 : : */
309 : 0 : pg_usleep(1000000L);
310 : : }
311 : :
312 : : /* We can now handle ereport(ERROR) */
4548 simon@2ndQuadrant.co 313 :CBC 736 : PG_exception_stack = &local_sigjmp_buf;
314 : :
315 : : /*
316 : : * Unblock signals (they were blocked when the postmaster forked us)
317 : : */
436 tmunro@postgresql.or 318 : 736 : sigprocmask(SIG_SETMASK, &UnBlockSig, NULL);
319 : :
320 : : /*
321 : : * Ensure all shared memory values are set correctly for the config. Doing
322 : : * this here ensures no race conditions from other concurrent updaters.
323 : : */
4463 simon@2ndQuadrant.co 324 : 736 : UpdateSharedMemoryConfig();
325 : :
326 : : /*
327 : : * Advertise our latch that backends can use to wake us up while we're
328 : : * sleeping.
329 : : */
4359 tgl@sss.pgh.pa.us 330 : 736 : ProcGlobal->checkpointerLatch = &MyProc->procLatch;
331 : :
332 : : /*
333 : : * Loop forever
334 : : */
335 : : for (;;)
4548 simon@2ndQuadrant.co 336 : 7931 : {
337 : 8667 : bool do_checkpoint = false;
338 : 8667 : int flags = 0;
339 : : pg_time_t now;
340 : : int elapsed_secs;
341 : : int cur_timeout;
111 akorotkov@postgresql 342 :GNC 8667 : bool chkpt_or_rstpt_requested = false;
343 : 8667 : bool chkpt_or_rstpt_timed = false;
344 : :
345 : : /* Clear any already-pending wakeups */
3378 andres@anarazel.de 346 :CBC 8667 : ResetLatch(MyLatch);
347 : :
348 : : /*
349 : : * Process any requests or signals received recently.
350 : : */
1837 tmunro@postgresql.or 351 : 8667 : AbsorbSyncRequests();
1580 rhaas@postgresql.org 352 : 8667 : HandleCheckpointerInterrupts();
353 : :
354 : : /*
355 : : * Detect a pending checkpoint request by checking whether the flags
356 : : * word in shared memory is nonzero. We shouldn't need to acquire the
357 : : * ckpt_lck for this.
358 : : */
1853 tgl@sss.pgh.pa.us 359 [ + + ]: 8250 : if (((volatile CheckpointerShmemStruct *) CheckpointerShmem)->ckpt_flags)
360 : : {
361 : 531 : do_checkpoint = true;
111 akorotkov@postgresql 362 :GNC 531 : chkpt_or_rstpt_requested = true;
363 : : }
364 : :
365 : : /*
366 : : * Force a checkpoint if too much time has elapsed since the last one.
367 : : * Note that we count a timed checkpoint in stats only when this
368 : : * occurs without an external request, but we set the CAUSE_TIME flag
369 : : * bit even if there is also an external request.
370 : : */
4548 simon@2ndQuadrant.co 371 :CBC 8250 : now = (pg_time_t) time(NULL);
372 : 8250 : elapsed_secs = now - last_checkpoint_time;
373 [ + + ]: 8250 : if (elapsed_secs >= CheckPointTimeout)
374 : : {
375 [ + - ]: 1 : if (!do_checkpoint)
111 akorotkov@postgresql 376 :GNC 1 : chkpt_or_rstpt_timed = true;
4548 simon@2ndQuadrant.co 377 :CBC 1 : do_checkpoint = true;
378 : 1 : flags |= CHECKPOINT_CAUSE_TIME;
379 : : }
380 : :
381 : : /*
382 : : * Do a checkpoint if requested.
383 : : */
384 [ + + ]: 8250 : if (do_checkpoint)
385 : : {
386 : 532 : bool ckpt_performed = false;
387 : : bool do_restartpoint;
388 : :
389 : : /* Check if we should perform a checkpoint or a restartpoint. */
390 : 532 : do_restartpoint = RecoveryInProgress();
391 : :
392 : : /*
393 : : * Atomically fetch the request flags to figure out what kind of a
394 : : * checkpoint we should perform, and increase the started-counter
395 : : * to acknowledge that we've started a new checkpoint.
396 : : */
3113 rhaas@postgresql.org 397 [ - + ]: 532 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
398 : 532 : flags |= CheckpointerShmem->ckpt_flags;
399 : 532 : CheckpointerShmem->ckpt_flags = 0;
400 : 532 : CheckpointerShmem->ckpt_started++;
401 : 532 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
402 : :
1858 tmunro@postgresql.or 403 : 532 : ConditionVariableBroadcast(&CheckpointerShmem->start_cv);
404 : :
405 : : /*
406 : : * The end-of-recovery checkpoint is a real checkpoint that's
407 : : * performed while we're still in recovery.
408 : : */
4548 simon@2ndQuadrant.co 409 [ + + ]: 532 : if (flags & CHECKPOINT_END_OF_RECOVERY)
410 : 100 : do_restartpoint = false;
411 : :
111 akorotkov@postgresql 412 [ + + ]:GNC 532 : if (chkpt_or_rstpt_timed)
413 : : {
414 : 1 : chkpt_or_rstpt_timed = false;
415 [ + - ]: 1 : if (do_restartpoint)
416 : 1 : PendingCheckpointerStats.restartpoints_timed++;
417 : : else
111 akorotkov@postgresql 418 :UNC 0 : PendingCheckpointerStats.num_timed++;
419 : : }
420 : :
111 akorotkov@postgresql 421 [ + + ]:GNC 532 : if (chkpt_or_rstpt_requested)
422 : : {
423 : 531 : chkpt_or_rstpt_requested = false;
424 [ + + ]: 531 : if (do_restartpoint)
425 : 49 : PendingCheckpointerStats.restartpoints_requested++;
426 : : else
427 : 482 : PendingCheckpointerStats.num_requested++;
428 : : }
429 : :
430 : : /*
431 : : * We will warn if (a) too soon since last checkpoint (whatever
432 : : * caused it) and (b) somebody set the CHECKPOINT_CAUSE_XLOG flag
433 : : * since the last checkpoint start. Note in particular that this
434 : : * implementation will not generate warnings caused by
435 : : * CheckPointTimeout < CheckPointWarning.
436 : : */
4548 simon@2ndQuadrant.co 437 [ + + ]:CBC 532 : if (!do_restartpoint &&
438 [ + + ]: 482 : (flags & CHECKPOINT_CAUSE_XLOG) &&
439 [ + + ]: 28 : elapsed_secs < CheckPointWarning)
440 [ + - ]: 25 : ereport(LOG,
441 : : (errmsg_plural("checkpoints are occurring too frequently (%d second apart)",
442 : : "checkpoints are occurring too frequently (%d seconds apart)",
443 : : elapsed_secs,
444 : : elapsed_secs),
445 : : errhint("Consider increasing the configuration parameter max_wal_size.")));
446 : :
447 : : /*
448 : : * Initialize checkpointer-private variables used during
449 : : * checkpoint.
450 : : */
451 : 532 : ckpt_active = true;
3212 heikki.linnakangas@i 452 [ + + ]: 532 : if (do_restartpoint)
453 : 50 : ckpt_start_recptr = GetXLogReplayRecPtr(NULL);
454 : : else
4548 simon@2ndQuadrant.co 455 : 482 : ckpt_start_recptr = GetInsertRecPtr();
456 : 532 : ckpt_start_time = now;
457 : 532 : ckpt_cached_elapsed = 0;
458 : :
459 : : /*
460 : : * Do the checkpoint.
461 : : */
462 [ + + ]: 532 : if (!do_restartpoint)
463 : : {
464 : 482 : CreateCheckPoint(flags);
465 : 477 : ckpt_performed = true;
466 : : }
467 : : else
468 : 50 : ckpt_performed = CreateRestartPoint(flags);
469 : :
470 : : /*
471 : : * After any checkpoint, free all smgr objects. Otherwise we
472 : : * would never do so for dropped relations, as the checkpointer
473 : : * does not process shared invalidation messages or call
474 : : * AtEOXact_SMgr().
475 : : */
74 heikki.linnakangas@i 476 :GNC 527 : smgrdestroyall();
477 : :
478 : : /*
479 : : * Indicate checkpoint completion to any waiting backends.
480 : : */
3113 rhaas@postgresql.org 481 [ - + ]:CBC 527 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
482 : 527 : CheckpointerShmem->ckpt_done = CheckpointerShmem->ckpt_started;
483 : 527 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
484 : :
1858 tmunro@postgresql.or 485 : 527 : ConditionVariableBroadcast(&CheckpointerShmem->done_cv);
486 : :
4548 simon@2ndQuadrant.co 487 [ + + ]: 527 : if (ckpt_performed)
488 : : {
489 : : /*
490 : : * Note we record the checkpoint start time not end time as
491 : : * last_checkpoint_time. This is so that time-driven
492 : : * checkpoints happen at a predictable spacing.
493 : : */
494 : 505 : last_checkpoint_time = now;
495 : :
111 akorotkov@postgresql 496 [ + + ]:GNC 505 : if (do_restartpoint)
497 : 28 : PendingCheckpointerStats.restartpoints_performed++;
498 : : }
499 : : else
500 : : {
501 : : /*
502 : : * We were not able to perform the restartpoint (checkpoints
503 : : * throw an ERROR in case of error). Most likely because we
504 : : * have not received any new checkpoint WAL records since the
505 : : * last restartpoint. Try again in 15 s.
506 : : */
4548 simon@2ndQuadrant.co 507 :CBC 22 : last_checkpoint_time = now - CheckPointTimeout + 15;
508 : : }
509 : :
510 : 527 : ckpt_active = false;
511 : :
512 : : /* We may have received an interrupt during the checkpoint. */
760 tmunro@postgresql.or 513 : 527 : HandleCheckpointerInterrupts();
514 : : }
515 : :
516 : : /* Check for archive_timeout and switch xlog files if necessary. */
4359 tgl@sss.pgh.pa.us 517 : 8236 : CheckArchiveTimeout();
518 : :
519 : : /* Report pending statistics to the cumulative stats system */
739 andres@anarazel.de 520 : 8236 : pgstat_report_checkpointer();
521 : 8236 : pgstat_report_wal(true);
522 : :
523 : : /*
524 : : * If any checkpoint flags have been set, redo the loop to handle the
525 : : * checkpoint without sleeping.
526 : : */
1446 alvherre@alvh.no-ip. 527 [ + + ]: 8236 : if (((volatile CheckpointerShmemStruct *) CheckpointerShmem)->ckpt_flags)
528 : 8 : continue;
529 : :
530 : : /*
531 : : * Sleep until we are signaled or it's time for another checkpoint or
532 : : * xlog file switch.
533 : : */
4359 tgl@sss.pgh.pa.us 534 : 8228 : now = (pg_time_t) time(NULL);
535 : 8228 : elapsed_secs = now - last_checkpoint_time;
536 [ - + ]: 8228 : if (elapsed_secs >= CheckPointTimeout)
4359 tgl@sss.pgh.pa.us 537 :UBC 0 : continue; /* no sleep for us ... */
4359 tgl@sss.pgh.pa.us 538 :CBC 8228 : cur_timeout = CheckPointTimeout - elapsed_secs;
539 [ - + - - ]: 8228 : if (XLogArchiveTimeout > 0 && !RecoveryInProgress())
540 : : {
4359 tgl@sss.pgh.pa.us 541 :UBC 0 : elapsed_secs = now - last_xlog_switch_time;
542 [ # # ]: 0 : if (elapsed_secs >= XLogArchiveTimeout)
543 : 0 : continue; /* no sleep for us ... */
544 : 0 : cur_timeout = Min(cur_timeout, XLogArchiveTimeout - elapsed_secs);
545 : : }
546 : :
1969 tmunro@postgresql.or 547 :CBC 8228 : (void) WaitLatch(MyLatch,
548 : : WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
549 : : cur_timeout * 1000L /* convert to ms */ ,
550 : : WAIT_EVENT_CHECKPOINTER_MAIN);
551 : : }
552 : : }
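To make the sleep computation at the bottom of the loop concrete, here is the arithmetic for one illustrative iteration (checkpoint_timeout = 300 s with 120 s since the last checkpoint, archive_timeout = 60 s with 30 s since the last segment switch):

    cur_timeout = 300 - 120           = 180 s
    cur_timeout = Min(180, 60 - 30)   = 30 s
    WaitLatch(..., 30 * 1000L /* ms */, ...)   sleeps at most 30 seconds

Whichever deadline comes first bounds the latch wait, and a SetLatch() from a backend ends it early.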
553 : :
554 : : /*
555 : : * Process any new interrupts.
556 : : */
557 : : static void
1580 rhaas@postgresql.org 558 : 9194 : HandleCheckpointerInterrupts(void)
559 : : {
1578 560 [ + + ]: 9194 : if (ProcSignalBarrierPending)
561 : 110 : ProcessProcSignalBarrier();
562 : :
1580 563 [ + + ]: 9194 : if (ConfigReloadPending)
564 : : {
565 : 127 : ConfigReloadPending = false;
566 : 127 : ProcessConfigFile(PGC_SIGHUP);
567 : :
568 : : /*
569 : : * Checkpointer is the last process to shut down, so we ask it to hold
570 : : * the keys for a range of other required tasks, most of which have
571 : : * nothing to do with checkpointing at all.
572 : : *
573 : : * For various reasons, some config values can change dynamically so
574 : : * the primary copy of them is held in shared memory to make sure all
575 : : * backends see the same value. We make Checkpointer responsible for
576 : : * updating the shared memory copy if the parameter setting changes
577 : : * because of SIGHUP.
578 : : */
579 : 127 : UpdateSharedMemoryConfig();
580 : : }
581 [ + + ]: 9194 : if (ShutdownRequestPending)
582 : : {
583 : : /*
584 : : * From here on, elog(ERROR) should end with exit(1), not send control
585 : : * back to the sigsetjmp block above
586 : : */
587 : 426 : ExitOnAnyError = true;
588 : :
589 : : /*
590 : : * Close down the database.
591 : : *
592 : : * Since ShutdownXLOG() creates restartpoint or checkpoint, and
593 : : * updates the statistics, increment the checkpoint request and flush
594 : : * out pending statistic.
595 : : */
167 michael@paquier.xyz 596 :GNC 426 : PendingCheckpointerStats.num_requested++;
1580 rhaas@postgresql.org 597 :CBC 426 : ShutdownXLOG(0, 0);
739 andres@anarazel.de 598 : 426 : pgstat_report_checkpointer();
599 : 426 : pgstat_report_wal(true);
600 : :
601 : : /* Normal exit from the checkpointer is here */
1431 tgl@sss.pgh.pa.us 602 : 426 : proc_exit(0); /* done */
603 : : }
604 : :
605 : : /* Perform logging of memory contexts of this process */
824 fujii@postgresql.org 606 [ + + ]: 8768 : if (LogMemoryContextPending)
607 : 2 : ProcessLogMemoryContextInterrupt();
1580 rhaas@postgresql.org 608 : 8768 : }
609 : :
610 : : /*
611 : : * CheckArchiveTimeout -- check for archive_timeout and switch xlog files
612 : : *
613 : : * This will switch to a new WAL file and force an archive file write if
614 : : * meaningful activity is recorded in the current WAL file. This includes most
615 : : * writes, including just a single checkpoint record, but excludes WAL records
616 : : * that were inserted with the XLOG_MARK_UNIMPORTANT flag set (like
617 : : * snapshots of running transactions). Such records, depending on
618 : : * configuration, occur at regular intervals and don't contain important
619 : : * information. This avoids generating archives with a few unimportant
620 : : * records.
621 : : */
622 : : static void
4548 simon@2ndQuadrant.co 623 : 10352 : CheckArchiveTimeout(void)
624 : : {
625 : : pg_time_t now;
626 : : pg_time_t last_time;
627 : : XLogRecPtr last_switch_lsn;
628 : :
629 [ - + - - ]: 10352 : if (XLogArchiveTimeout <= 0 || RecoveryInProgress())
630 : 10352 : return;
631 : :
4548 simon@2ndQuadrant.co 632 :UBC 0 : now = (pg_time_t) time(NULL);
633 : :
634 : : /* First we do a quick check using possibly-stale local state. */
635 [ # # ]: 0 : if ((int) (now - last_xlog_switch_time) < XLogArchiveTimeout)
636 : 0 : return;
637 : :
638 : : /*
639 : : * Update local state ... note that last_xlog_switch_time is the last time
640 : : * a switch was performed *or requested*.
641 : : */
2670 andres@anarazel.de 642 : 0 : last_time = GetLastSegSwitchData(&last_switch_lsn);
643 : :
4548 simon@2ndQuadrant.co 644 : 0 : last_xlog_switch_time = Max(last_xlog_switch_time, last_time);
645 : :
646 : : /* Now we can do the real checks */
647 [ # # ]: 0 : if ((int) (now - last_xlog_switch_time) >= XLogArchiveTimeout)
648 : : {
649 : : /*
650 : : * Switch segment only when "important" WAL has been logged since the
651 : : * last segment switch (last_switch_lsn points to end of segment
652 : : * switch occurred in).
653 : : */
2670 andres@anarazel.de 654 [ # # ]: 0 : if (GetLastImportantRecPtr() > last_switch_lsn)
655 : : {
656 : : XLogRecPtr switchpoint;
657 : :
658 : : /* mark switch as unimportant, avoids triggering checkpoints */
659 : 0 : switchpoint = RequestXLogSwitch(true);
660 : :
661 : : /*
662 : : * If the returned pointer points exactly to a segment boundary,
663 : : * assume nothing happened.
664 : : */
2399 665 [ # # ]: 0 : if (XLogSegmentOffset(switchpoint, wal_segment_size) != 0)
2529 peter_e@gmx.net 666 [ # # ]: 0 : elog(DEBUG1, "write-ahead log switch forced (archive_timeout=%d)",
667 : : XLogArchiveTimeout);
668 : : }
669 : :
670 : : /*
671 : : * Update state in any case, so we don't retry constantly when the
672 : : * system is idle.
673 : : */
4548 simon@2ndQuadrant.co 674 : 0 : last_xlog_switch_time = now;
675 : : }
676 : : }
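This function is a no-op unless a WAL archiving timeout is configured; an illustrative postgresql.conf excerpt that would activate this path (the value itself is arbitrary):

    archive_timeout = 10min     # force a segment switch if important WAL was written
                                # but no switch has happened in the last 10 minutes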
677 : :
678 : : /*
679 : : * Returns true if an immediate checkpoint request is pending. (Note that
680 : : * this does not check the *current* checkpoint's IMMEDIATE flag, but whether
681 : : * there is one pending behind it.)
682 : : */
683 : : static bool
4548 simon@2ndQuadrant.co 684 :CBC 20870 : ImmediateCheckpointRequested(void)
685 : : {
1853 tgl@sss.pgh.pa.us 686 : 20870 : volatile CheckpointerShmemStruct *cps = CheckpointerShmem;
687 : :
688 : : /*
689 : : * We don't need to acquire the ckpt_lck in this case because we're only
690 : : * looking at a single flag bit.
691 : : */
692 [ + + ]: 20870 : if (cps->ckpt_flags & CHECKPOINT_IMMEDIATE)
693 : 637 : return true;
4548 simon@2ndQuadrant.co 694 : 20233 : return false;
695 : : }
696 : :
697 : : /*
698 : : * CheckpointWriteDelay -- control rate of checkpoint
699 : : *
700 : : * This function is called after each page write performed by BufferSync().
701 : : * It is responsible for throttling BufferSync()'s write rate to hit
702 : : * checkpoint_completion_target.
703 : : *
704 : : * The checkpoint request flags should be passed in; currently the only one
705 : : * examined is CHECKPOINT_IMMEDIATE, which disables delays between writes.
706 : : *
707 : : * 'progress' is an estimate of how much of the work has been done, as a
708 : : * fraction between 0.0 meaning none, and 1.0 meaning all done.
709 : : */
710 : : void
711 : 241807 : CheckpointWriteDelay(int flags, double progress)
712 : : {
713 : : static int absorb_counter = WRITES_PER_ABSORB;
714 : :
715 : : /* Do nothing if checkpoint is being executed by non-checkpointer process */
4288 tgl@sss.pgh.pa.us 716 [ + + ]: 241807 : if (!AmCheckpointerProcess())
4548 simon@2ndQuadrant.co 717 : 38869 : return;
718 : :
719 : : /*
720 : : * Perform the usual duties and take a nap, unless we're behind schedule,
721 : : * in which case we just try to catch up as quickly as possible.
722 : : */
723 [ + + ]: 202938 : if (!(flags & CHECKPOINT_IMMEDIATE) &&
1580 rhaas@postgresql.org 724 [ + + ]: 23155 : !ShutdownRequestPending &&
4548 simon@2ndQuadrant.co 725 [ + + + + ]: 41103 : !ImmediateCheckpointRequested() &&
726 : 20233 : IsCheckpointOnSchedule(progress))
727 : : {
1580 rhaas@postgresql.org 728 [ - + ]: 2116 : if (ConfigReloadPending)
729 : : {
1580 rhaas@postgresql.org 730 :UBC 0 : ConfigReloadPending = false;
4548 simon@2ndQuadrant.co 731 : 0 : ProcessConfigFile(PGC_SIGHUP);
732 : : /* update shmem copies of config variables */
4458 733 : 0 : UpdateSharedMemoryConfig();
734 : : }
735 : :
1837 tmunro@postgresql.or 736 :CBC 2116 : AbsorbSyncRequests();
4548 simon@2ndQuadrant.co 737 : 2116 : absorb_counter = WRITES_PER_ABSORB;
738 : :
739 : 2116 : CheckArchiveTimeout();
740 : :
741 : : /* Report interim statistics to the cumulative stats system */
739 andres@anarazel.de 742 : 2116 : pgstat_report_checkpointer();
743 : :
744 : : /*
745 : : * This sleep used to be connected to bgwriter_delay, typically 200ms.
746 : : * That resulted in more frequent wakeups when there was not much work to do.
747 : : * Checkpointer and bgwriter are no longer related, so take the Big
748 : : * Sleep.
749 : : */
760 tmunro@postgresql.or 750 : 2116 : WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH | WL_TIMEOUT,
751 : : 100,
752 : : WAIT_EVENT_CHECKPOINT_WRITE_DELAY);
753 : 2111 : ResetLatch(MyLatch);
754 : : }
4548 simon@2ndQuadrant.co 755 [ + + ]: 200822 : else if (--absorb_counter <= 0)
756 : : {
757 : : /*
758 : : * Absorb pending fsync requests after each WRITES_PER_ABSORB write
759 : : * operations even when we don't sleep, to prevent overflow of the
760 : : * fsync request queue.
761 : : */
1837 tmunro@postgresql.or 762 : 94 : AbsorbSyncRequests();
4548 simon@2ndQuadrant.co 763 : 94 : absorb_counter = WRITES_PER_ABSORB;
764 : : }
765 : :
766 : : /* Check for barrier events. */
1578 rhaas@postgresql.org 767 [ - + ]: 202933 : if (ProcSignalBarrierPending)
1578 rhaas@postgresql.org 768 :UBC 0 : ProcessProcSignalBarrier();
769 : : }
770 : :
771 : : /*
772 : : * IsCheckpointOnSchedule -- are we on schedule to finish this checkpoint
773 : : * (or restartpoint) in time?
774 : : *
775 : : * Compares the current progress against the time/segments elapsed since last
776 : : * checkpoint, and returns true if the progress we've made this far is greater
777 : : * than the elapsed time/segments.
778 : : */
779 : : static bool
4548 simon@2ndQuadrant.co 780 :CBC 20233 : IsCheckpointOnSchedule(double progress)
781 : : {
782 : : XLogRecPtr recptr;
783 : : struct timeval now;
784 : : double elapsed_xlogs,
785 : : elapsed_time;
786 : :
787 [ - + ]: 20233 : Assert(ckpt_active);
788 : :
789 : : /* Scale progress according to checkpoint_completion_target. */
790 : 20233 : progress *= CheckPointCompletionTarget;
791 : :
792 : : /*
793 : : * Check against the cached value first. Only do the more expensive
794 : : * calculations once we reach the target previously calculated. Since
795 : : * neither time nor the WAL insert pointer moves backwards, a freshly
796 : : * calculated value can only be greater than or equal to the cached value.
797 : : */
798 [ + + ]: 20233 : if (progress < ckpt_cached_elapsed)
799 : 17299 : return false;
800 : :
801 : : /*
802 : : * Check progress against WAL segments written and CheckPointSegments.
803 : : *
804 : : * We compare the current WAL insert location against the location
805 : : * computed before calling CreateCheckPoint. The code in XLogInsert that
806 : : * actually triggers a checkpoint when CheckPointSegments is exceeded
807 : : * compares against RedoRecPtr, so this is not completely accurate.
808 : : * However, it's good enough for our purposes, we're only calculating an
809 : : * estimate anyway.
810 : : *
811 : : * During recovery, we compare last replayed WAL record's location with
812 : : * the location computed before calling CreateRestartPoint. That maintains
813 : : * the same pacing as we have during checkpoints in normal operation, but
814 : : * we might exceed max_wal_size by a fair amount. That's because there can
815 : : * be a large gap between a checkpoint's redo-pointer and the checkpoint
816 : : * record itself, and we only start the restartpoint after we've seen the
817 : : * checkpoint record. (The gap is typically up to CheckPointSegments *
818 : : * checkpoint_completion_target where checkpoint_completion_target is the
819 : : * value that was in effect when the WAL was generated).
820 : : */
3212 heikki.linnakangas@i 821 [ + + ]: 2934 : if (RecoveryInProgress())
822 : 622 : recptr = GetXLogReplayRecPtr(NULL);
823 : : else
4548 simon@2ndQuadrant.co 824 : 2312 : recptr = GetInsertRecPtr();
2399 andres@anarazel.de 825 : 2934 : elapsed_xlogs = (((double) (recptr - ckpt_start_recptr)) /
826 : 2934 : wal_segment_size) / CheckPointSegments;
827 : :
3212 heikki.linnakangas@i 828 [ + + ]: 2934 : if (progress < elapsed_xlogs)
829 : : {
830 : 816 : ckpt_cached_elapsed = elapsed_xlogs;
831 : 816 : return false;
832 : : }
833 : :
834 : : /*
835 : : * Check progress against time elapsed and checkpoint_timeout.
836 : : */
4548 simon@2ndQuadrant.co 837 : 2118 : gettimeofday(&now, NULL);
838 : 2118 : elapsed_time = ((double) ((pg_time_t) now.tv_sec - ckpt_start_time) +
839 : 2118 : now.tv_usec / 1000000.0) / CheckPointTimeout;
840 : :
841 [ + + ]: 2118 : if (progress < elapsed_time)
842 : : {
843 : 2 : ckpt_cached_elapsed = elapsed_time;
844 : 2 : return false;
845 : : }
846 : :
847 : : /* It looks like we're on schedule. */
848 : 2116 : return true;
849 : : }
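A worked example of the pacing arithmetic above, using illustrative values (wal_segment_size = 16MB, CheckPointSegments = 10, CheckPointTimeout = 300, checkpoint_completion_target = 0.9):

    elapsed_xlogs = (48MB of WAL since ckpt_start_recptr / 16MB) / 10   = 0.3
    elapsed_time  = 45 s since ckpt_start_time / 300 s                  = 0.15
    progress      = 0.5 of the buffers written * 0.9                    = 0.45

Since 0.45 is not less than either elapsed fraction, the function returns true and CheckpointWriteDelay() takes its 100 ms nap; once the scaled progress falls behind either measure, the naps stop and the writes proceed at full speed.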
850 : :
851 : :
852 : : /* --------------------------------
853 : : * signal handler routines
854 : : * --------------------------------
855 : : */
856 : :
857 : : /* SIGINT: set flag to run a normal checkpoint right away */
858 : : static void
859 : 541 : ReqCheckpointHandler(SIGNAL_ARGS)
860 : : {
861 : : /*
862 : : * The signaling process should have set ckpt_flags nonzero, so all we
863 : : * need do is ensure that our main loop gets kicked out of any wait.
864 : : */
3378 andres@anarazel.de 865 : 541 : SetLatch(MyLatch);
4359 tgl@sss.pgh.pa.us 866 : 541 : }
867 : :
868 : :
869 : : /* --------------------------------
870 : : * communication with backends
871 : : * --------------------------------
872 : : */
873 : :
874 : : /*
875 : : * CheckpointerShmemSize
876 : : * Compute space needed for checkpointer-related shared memory
877 : : */
878 : : Size
4358 simon@2ndQuadrant.co 879 : 2577 : CheckpointerShmemSize(void)
880 : : {
881 : : Size size;
882 : :
883 : : /*
884 : : * Currently, the size of the requests[] array is arbitrarily set equal to
885 : : * NBuffers. This may prove too large or small ...
886 : : */
887 : 2577 : size = offsetof(CheckpointerShmemStruct, requests);
888 : 2577 : size = add_size(size, mul_size(NBuffers, sizeof(CheckpointerRequest)));
889 : :
4548 890 : 2577 : return size;
891 : : }
892 : :
893 : : /*
894 : : * CheckpointerShmemInit
895 : : * Allocate and initialize checkpointer-related shared memory
896 : : */
897 : : void
4358 898 : 898 : CheckpointerShmemInit(void)
899 : : {
4289 tgl@sss.pgh.pa.us 900 : 898 : Size size = CheckpointerShmemSize();
901 : : bool found;
902 : :
4358 simon@2ndQuadrant.co 903 : 898 : CheckpointerShmem = (CheckpointerShmemStruct *)
4357 tgl@sss.pgh.pa.us 904 : 898 : ShmemInitStruct("Checkpointer Data",
905 : : size,
906 : : &found);
907 : :
4548 simon@2ndQuadrant.co 908 [ + - ]: 898 : if (!found)
909 : : {
910 : : /*
911 : : * First time through, so initialize. Note that we zero the whole
912 : : * requests array; this is so that CompactCheckpointerRequestQueue can
913 : : * assume that any pad bytes in the request structs are zeroes.
914 : : */
4289 tgl@sss.pgh.pa.us 915 [ + - + - + - + + + + ]: 1040 : MemSet(CheckpointerShmem, 0, size);
4358 simon@2ndQuadrant.co 916 : 898 : SpinLockInit(&CheckpointerShmem->ckpt_lck);
917 : 898 : CheckpointerShmem->max_requests = NBuffers;
1858 tmunro@postgresql.or 918 : 898 : ConditionVariableInit(&CheckpointerShmem->start_cv);
919 : 898 : ConditionVariableInit(&CheckpointerShmem->done_cv);
920 : : }
4548 simon@2ndQuadrant.co 921 : 898 : }
922 : :
923 : : /*
924 : : * RequestCheckpoint
925 : : * Called in backend processes to request a checkpoint
926 : : *
927 : : * flags is a bitwise OR of the following:
928 : : * CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown.
929 : : * CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery.
930 : : * CHECKPOINT_IMMEDIATE: finish the checkpoint ASAP,
931 : : * ignoring checkpoint_completion_target parameter.
932 : : * CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
933 : : * since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
934 : : * CHECKPOINT_END_OF_RECOVERY).
935 : : * CHECKPOINT_WAIT: wait for completion before returning (otherwise,
936 : : * just signal checkpointer to do it, and return).
937 : : * CHECKPOINT_CAUSE_XLOG: checkpoint is requested due to xlog filling.
938 : : * (This affects logging, and in particular enables CheckPointWarning.)
939 : : */
940 : : void
941 : 680 : RequestCheckpoint(int flags)
942 : : {
943 : : int ntries;
944 : : int old_failed,
945 : : old_started;
946 : :
947 : : /*
948 : : * If in a standalone backend, just do it ourselves.
949 : : */
950 [ + + ]: 680 : if (!IsPostmasterEnvironment)
951 : : {
952 : : /*
953 : : * There's no point in doing slow checkpoints in a standalone backend,
954 : : * because there's no other backends the checkpoint could disrupt.
955 : : */
956 : 157 : CreateCheckPoint(flags | CHECKPOINT_IMMEDIATE);
957 : :
958 : : /* Free all smgr objects, as CheckpointerMain() normally would. */
74 heikki.linnakangas@i 959 :GNC 157 : smgrdestroyall();
960 : :
4548 simon@2ndQuadrant.co 961 :CBC 157 : return;
962 : : }
963 : :
964 : : /*
965 : : * Atomically set the request flags, and take a snapshot of the counters.
966 : : * When we see ckpt_started > old_started, we know the flags we set here
967 : : * have been seen by checkpointer.
968 : : *
969 : : * Note that we OR the flags with any existing flags, to avoid overriding
970 : : * a "stronger" request by another backend. The flag senses must be
971 : : * chosen to make this work!
972 : : */
3113 rhaas@postgresql.org 973 [ - + ]: 523 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
974 : :
975 : 523 : old_failed = CheckpointerShmem->ckpt_failed;
976 : 523 : old_started = CheckpointerShmem->ckpt_started;
1853 tgl@sss.pgh.pa.us 977 : 523 : CheckpointerShmem->ckpt_flags |= (flags | CHECKPOINT_REQUESTED);
978 : :
3113 rhaas@postgresql.org 979 : 523 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
980 : :
981 : : /*
982 : : * Send signal to request checkpoint. It's possible that the checkpointer
983 : : * hasn't started yet, or is in process of restarting, so we will retry a
984 : : * few times if needed. (Actually, more than a few times, since on slow
985 : : * or overloaded buildfarm machines, it's been observed that the
986 : : * checkpointer can take several seconds to start.) However, if not told
987 : : * to wait for the checkpoint to occur, we consider failure to send the
988 : : * signal to be nonfatal and merely LOG it. The checkpointer should see
989 : : * the request when it does start, with or without getting a signal.
990 : : */
991 : : #define MAX_SIGNAL_TRIES 600 /* max wait 60.0 sec */
4548 simon@2ndQuadrant.co 992 :UBC 0 : for (ntries = 0;; ntries++)
993 : : {
4358 simon@2ndQuadrant.co 994 [ - + ]:CBC 523 : if (CheckpointerShmem->checkpointer_pid == 0)
995 : : {
1853 tgl@sss.pgh.pa.us 996 [ # # # # ]:UBC 0 : if (ntries >= MAX_SIGNAL_TRIES || !(flags & CHECKPOINT_WAIT))
997 : : {
4548 simon@2ndQuadrant.co 998 [ # # # # ]: 0 : elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
999 : : "could not signal for checkpoint: checkpointer is not running");
1000 : 0 : break;
1001 : : }
1002 : : }
4358 simon@2ndQuadrant.co 1003 [ - + ]:CBC 523 : else if (kill(CheckpointerShmem->checkpointer_pid, SIGINT) != 0)
1004 : : {
1853 tgl@sss.pgh.pa.us 1005 [ # # # # ]:UBC 0 : if (ntries >= MAX_SIGNAL_TRIES || !(flags & CHECKPOINT_WAIT))
1006 : : {
4548 simon@2ndQuadrant.co 1007 [ # # # # ]: 0 : elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
1008 : : "could not signal for checkpoint: %m");
1009 : 0 : break;
1010 : : }
1011 : : }
1012 : : else
4548 simon@2ndQuadrant.co 1013 :CBC 523 : break; /* signal sent successfully */
1014 : :
4548 simon@2ndQuadrant.co 1015 [ # # ]:UBC 0 : CHECK_FOR_INTERRUPTS();
1016 : 0 : pg_usleep(100000L); /* wait 0.1 sec, then retry */
1017 : : }
1018 : :
1019 : : /*
1020 : : * If requested, wait for completion. We detect completion according to
1021 : : * the algorithm given above.
1022 : : */
4548 simon@2ndQuadrant.co 1023 [ + + ]:CBC 523 : if (flags & CHECKPOINT_WAIT)
1024 : : {
1025 : : int new_started,
1026 : : new_failed;
1027 : :
1028 : : /* Wait for a new checkpoint to start. */
1858 tmunro@postgresql.or 1029 : 418 : ConditionVariablePrepareToSleep(&CheckpointerShmem->start_cv);
1030 : : for (;;)
1031 : : {
3113 rhaas@postgresql.org 1032 [ - + ]: 820 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
1033 : 820 : new_started = CheckpointerShmem->ckpt_started;
1034 : 820 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
1035 : :
4548 simon@2ndQuadrant.co 1036 [ + + ]: 820 : if (new_started != old_started)
1037 : 418 : break;
1038 : :
1858 tmunro@postgresql.or 1039 : 402 : ConditionVariableSleep(&CheckpointerShmem->start_cv,
1040 : : WAIT_EVENT_CHECKPOINT_START);
1041 : : }
1042 : 418 : ConditionVariableCancelSleep();
1043 : :
1044 : : /*
1045 : : * We are waiting for ckpt_done >= new_started, in a modulo sense.
1046 : : */
1047 : 418 : ConditionVariablePrepareToSleep(&CheckpointerShmem->done_cv);
1048 : : for (;;)
4548 simon@2ndQuadrant.co 1049 : 407 : {
1050 : : int new_done;
1051 : :
3113 rhaas@postgresql.org 1052 [ - + ]: 825 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
1053 : 825 : new_done = CheckpointerShmem->ckpt_done;
1054 : 825 : new_failed = CheckpointerShmem->ckpt_failed;
1055 : 825 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
1056 : :
4548 simon@2ndQuadrant.co 1057 [ + + ]: 825 : if (new_done - new_started >= 0)
1058 : 418 : break;
1059 : :
1858 tmunro@postgresql.or 1060 : 407 : ConditionVariableSleep(&CheckpointerShmem->done_cv,
1061 : : WAIT_EVENT_CHECKPOINT_DONE);
1062 : : }
1063 : 418 : ConditionVariableCancelSleep();
1064 : :
4548 simon@2ndQuadrant.co 1065 [ - + ]: 418 : if (new_failed != old_failed)
4548 simon@2ndQuadrant.co 1066 [ # # ]:UBC 0 : ereport(ERROR,
1067 : : (errmsg("checkpoint request failed"),
1068 : : errhint("Consult recent messages in the server log for details.")));
1069 : : }
1070 : : }
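As a usage sketch of the flags documented above: a backend that needs a checkpoint completed before it can continue (roughly what the CHECKPOINT command does) would call

    /* start an immediate checkpoint, force it even if no WAL has been
     * written since the last one, and block until it is done */
    RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT);

whereas a caller that merely wants to trigger a checkpoint without waiting for it omits CHECKPOINT_WAIT.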
1071 : :
1072 : : /*
1073 : : * ForwardSyncRequest
1074 : : * Forward a file-fsync request from a backend to the checkpointer
1075 : : *
1076 : : * Whenever a backend is compelled to write directly to a relation
1077 : : * (which should be seldom, if the background writer is getting its job done),
1078 : : * the backend calls this routine to pass over knowledge that the relation
1079 : : * is dirty and must be fsync'd before next checkpoint. We also use this
1080 : : * opportunity to count such writes for statistical purposes.
1081 : : *
1082 : : * To avoid holding the lock for longer than necessary, we normally write
1083 : : * to the requests[] queue without checking for duplicates. The checkpointer
1084 : : * will have to eliminate dups internally anyway. However, if we discover
1085 : : * that the queue is full, we make a pass over the entire queue to compact
1086 : : * it. This is somewhat expensive, but the alternative is for the backend
1087 : : * to perform its own fsync, which is far more expensive in practice. It
1088 : : * is theoretically possible a backend fsync might still be necessary, if
1089 : : * the queue is full and contains no duplicate entries. In that case, we
1090 : : * let the backend know by returning false.
1091 : : */
1092 : : bool
1837 tmunro@postgresql.or 1093 :CBC 1197718 : ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
1094 : : {
1095 : : CheckpointerRequest *request;
1096 : : bool too_full;
1097 : :
4548 simon@2ndQuadrant.co 1098 [ - + ]: 1197718 : if (!IsUnderPostmaster)
4548 simon@2ndQuadrant.co 1099 :UBC 0 : return false; /* probably shouldn't even get here */
1100 : :
4288 tgl@sss.pgh.pa.us 1101 [ - + ]:CBC 1197718 : if (AmCheckpointerProcess())
1837 tmunro@postgresql.or 1102 [ # # ]:UBC 0 : elog(ERROR, "ForwardSyncRequest must not be called in checkpointer");
1103 : :
4358 simon@2ndQuadrant.co 1104 :CBC 1197718 : LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
1105 : :
1106 : : /*
1107 : : * If the checkpointer isn't running or the request queue is full, the
1108 : : * backend will have to perform its own fsync request. But before forcing
1109 : : * that to happen, we can try to compact the request queue.
1110 : : */
1111 [ + + ]: 1197718 : if (CheckpointerShmem->checkpointer_pid == 0 ||
1112 [ + + ]: 1197717 : (CheckpointerShmem->num_requests >= CheckpointerShmem->max_requests &&
4359 tgl@sss.pgh.pa.us 1113 [ - + ]: 29 : !CompactCheckpointerRequestQueue()))
1114 : : {
4358 simon@2ndQuadrant.co 1115 : 1 : LWLockRelease(CheckpointerCommLock);
4548 1116 : 1 : return false;
1117 : : }
1118 : :
1119 : : /* OK, insert request */
4358 1120 : 1197717 : request = &CheckpointerShmem->requests[CheckpointerShmem->num_requests++];
1837 tmunro@postgresql.or 1121 : 1197717 : request->ftag = *ftag;
1122 : 1197717 : request->type = type;
1123 : :
1124 : : /* If queue is more than half full, nudge the checkpointer to empty it */
4358 simon@2ndQuadrant.co 1125 : 1197717 : too_full = (CheckpointerShmem->num_requests >=
1126 : 1197717 : CheckpointerShmem->max_requests / 2);
1127 : :
1128 : 1197717 : LWLockRelease(CheckpointerCommLock);
1129 : :
1130 : : /* ... but not till after we release the lock */
4359 tgl@sss.pgh.pa.us 1131 [ + + + - ]: 1197717 : if (too_full && ProcGlobal->checkpointerLatch)
1132 : 17488 : SetLatch(ProcGlobal->checkpointerLatch);
1133 : :
4548 simon@2ndQuadrant.co 1134 : 1197717 : return true;
1135 : : }
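Per the contract above, a false return means the caller has to sync the file itself. A minimal sketch of such a caller (tag is a FileTag the caller has filled in, SYNC_REQUEST is the ordinary "please fsync this file" request type, and the fallback helper name is hypothetical):

    if (!ForwardSyncRequest(&tag, SYNC_REQUEST))
    {
        /* queue full and uncompactable, or no checkpointer running:
         * perform the fsync locally instead of forwarding it */
        my_local_file_sync(&tag);       /* hypothetical fallback */
    }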
1136 : :
1137 : : /*
1138 : : * CompactCheckpointerRequestQueue
1139 : : * Remove duplicates from the request queue to avoid backend fsyncs.
1140 : : * Returns "true" if any entries were removed.
1141 : : *
1142 : : * Although a full fsync request queue is not common, it can lead to severe
1143 : : * performance problems when it does happen. So far, this situation has
1144 : : * only been observed to occur when the system is under heavy write load,
1145 : : * and especially during the "sync" phase of a checkpoint. Without this
1146 : : * logic, each backend begins doing an fsync for every block written, which
1147 : : * gets very expensive and can slow down the whole system.
1148 : : *
1149 : : * Trying to do this every time the queue is full could be wasted effort if there
1150 : : * aren't any removable entries. But that should be vanishingly rare in
1151 : : * practice: there's one queue entry per shared buffer.
1152 : : */
1153 : : static bool
4359 tgl@sss.pgh.pa.us 1154 : 29 : CompactCheckpointerRequestQueue(void)
1155 : : {
1156 : : struct CheckpointerSlotMapping
1157 : : {
1158 : : CheckpointerRequest request;
1159 : : int slot;
1160 : : };
1161 : :
1162 : : int n,
1163 : : preserve_count;
4548 simon@2ndQuadrant.co 1164 : 29 : int num_skipped = 0;
1165 : : HASHCTL ctl;
1166 : : HTAB *htab;
1167 : : bool *skip_slot;
1168 : :
1169 : : /* must hold CheckpointerCommLock in exclusive mode */
4358 1170 [ - + ]: 29 : Assert(LWLockHeldByMe(CheckpointerCommLock));
1171 : :
1172 : : /* Initialize skip_slot array */
4289 tgl@sss.pgh.pa.us 1173 : 29 : skip_slot = palloc0(sizeof(bool) * CheckpointerShmem->num_requests);
1174 : :
1175 : : /* Initialize temporary hash table */
4358 simon@2ndQuadrant.co 1176 : 29 : ctl.keysize = sizeof(CheckpointerRequest);
4357 tgl@sss.pgh.pa.us 1177 : 29 : ctl.entrysize = sizeof(struct CheckpointerSlotMapping);
4289 1178 : 29 : ctl.hcxt = CurrentMemoryContext;
1179 : :
4458 simon@2ndQuadrant.co 1180 : 29 : htab = hash_create("CompactCheckpointerRequestQueue",
4358 1181 : 29 : CheckpointerShmem->num_requests,
1182 : : &ctl,
1183 : : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
1184 : :
1185 : : /*
1186 : : * The basic idea here is that a request can be skipped if it's followed
1187 : : * by a later, identical request. It might seem more sensible to work
1188 : : * backwards from the end of the queue and check whether a request is
1189 : : * *preceded* by an earlier, identical request, in the hopes of doing less
1190 : : * copying. But that might change the semantics, if there's an
1191 : : * intervening SYNC_FORGET_REQUEST or SYNC_FILTER_REQUEST, so we do it
1192 : : * this way. It would be possible to be even smarter if we made the code
1193 : : * below understand the specific semantics of such requests (it could blow
1194 : : * away preceding entries that would end up being canceled anyhow), but
1195 : : * it's not clear that the extra complexity would buy us anything.
1196 : : */
4289 tgl@sss.pgh.pa.us 1197 [ + + ]: 2509 : for (n = 0; n < CheckpointerShmem->num_requests; n++)
1198 : : {
1199 : : CheckpointerRequest *request;
1200 : : struct CheckpointerSlotMapping *slotmap;
1201 : : bool found;
1202 : :
1203 : : /*
1204 : : * We use the request struct directly as a hashtable key. This
1205 : : * assumes that any padding bytes in the structs are consistently the
1206 : : * same, which should be okay because we zeroed them in
1207 : : * CheckpointerShmemInit. Note also that RelFileLocator had better
1208 : : * contain no pad bytes.
1209 : : */
4358 simon@2ndQuadrant.co 1210 : 2480 : request = &CheckpointerShmem->requests[n];
4548 1211 : 2480 : slotmap = hash_search(htab, request, HASH_ENTER, &found);
1212 [ + + ]: 2480 : if (found)
1213 : : {
1214 : : /* Duplicate, so mark the previous occurrence as skippable */
1215 : 1943 : skip_slot[slotmap->slot] = true;
4289 tgl@sss.pgh.pa.us 1216 : 1943 : num_skipped++;
1217 : : }
1218 : : /* Remember slot containing latest occurrence of this request value */
4548 simon@2ndQuadrant.co 1219 : 2480 : slotmap->slot = n;
1220 : : }
1221 : :
1222 : : /* Done with the hash table. */
1223 : 29 : hash_destroy(htab);
1224 : :
1225 : : /* If no duplicates, we're out of luck. */
1226 [ - + ]: 29 : if (!num_skipped)
1227 : : {
4548 simon@2ndQuadrant.co 1228 :LBC (501) : pfree(skip_slot);
1229 : (501) : return false;
1230 : : }
1231 : :
1232 : : /* We found some duplicates; remove them. */
4289 tgl@sss.pgh.pa.us 1233 :CBC 29 : preserve_count = 0;
1234 [ + + ]: 2509 : for (n = 0; n < CheckpointerShmem->num_requests; n++)
1235 : : {
4548 simon@2ndQuadrant.co 1236 [ + + ]: 2480 : if (skip_slot[n])
1237 : 1943 : continue;
4358 1238 : 537 : CheckpointerShmem->requests[preserve_count++] = CheckpointerShmem->requests[n];
1239 : : }
4548 1240 [ + + ]: 29 : ereport(DEBUG1,
1241 : : (errmsg_internal("compacted fsync request queue from %d entries to %d entries",
1242 : : CheckpointerShmem->num_requests, preserve_count)));
4358 1243 : 29 : CheckpointerShmem->num_requests = preserve_count;
1244 : :
1245 : : /* Cleanup. */
4548 1246 : 29 : pfree(skip_slot);
1247 : 29 : return true;
1248 : : }
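A small worked example of the compaction: suppose the queue holds requests [A, B, A, C, B]. Scanning forward, the second A marks the first A skippable and the second B marks the first B skippable, so preserve_count ends up 3 and the queue is rewritten as [A, C, B]. For each distinct request it is the latest occurrence that survives, in its original relative order, which is what keeps intervening SYNC_FORGET_REQUEST / SYNC_FILTER_REQUEST entries semantically safe.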
1249 : :
1250 : : /*
1251 : : * AbsorbSyncRequests
1252 : : * Retrieve queued sync requests and pass them to sync mechanism.
1253 : : *
1254 : : * This is exported because it must be called during CreateCheckPoint;
1255 : : * we have to be sure we have accepted all pending requests just before
1256 : : * we start fsync'ing. Since CreateCheckPoint sometimes runs in
1257 : : * non-checkpointer processes, do nothing if not checkpointer.
1258 : : */
1259 : : void
1837 tmunro@postgresql.or 1260 : 16145 : AbsorbSyncRequests(void)
1261 : : {
4358 simon@2ndQuadrant.co 1262 : 16145 : CheckpointerRequest *requests = NULL;
1263 : : CheckpointerRequest *request;
1264 : : int n;
1265 : :
4288 tgl@sss.pgh.pa.us 1266 [ + + ]: 16145 : if (!AmCheckpointerProcess())
4548 simon@2ndQuadrant.co 1267 : 488 : return;
1268 : :
4358 1269 : 15657 : LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
1270 : :
1271 : : /*
1272 : : * We try to avoid holding the lock for a long time by copying the request
1273 : : * array, and processing the requests after releasing the lock.
1274 : : *
1275 : : * Once we have cleared the requests from shared memory, we have to PANIC
1276 : : * if we then fail to absorb them (eg, because our hashtable runs out of
1277 : : * memory). This is because the system cannot run safely if we are unable
1278 : : * to fsync what we have been told to fsync. Fortunately, the hashtable
1279 : : * is so small that the problem is quite unlikely to arise in practice.
1280 : : */
1281 : 15657 : n = CheckpointerShmem->num_requests;
4548 1282 [ + + ]: 15657 : if (n > 0)
1283 : : {
4358 1284 : 9775 : requests = (CheckpointerRequest *) palloc(n * sizeof(CheckpointerRequest));
1285 : 9775 : memcpy(requests, CheckpointerShmem->requests, n * sizeof(CheckpointerRequest));
1286 : : }
1287 : :
3576 heikki.linnakangas@i 1288 : 15657 : START_CRIT_SECTION();
1289 : :
4358 simon@2ndQuadrant.co 1290 : 15657 : CheckpointerShmem->num_requests = 0;
1291 : :
1292 : 15657 : LWLockRelease(CheckpointerCommLock);
1293 : :
4548 1294 [ + + ]: 1189817 : for (request = requests; n > 0; request++, n--)
1837 tmunro@postgresql.or 1295 : 1174160 : RememberSyncRequest(&request->ftag, request->type);
1296 : :
3576 heikki.linnakangas@i 1297 [ - + ]: 15657 : END_CRIT_SECTION();
1298 : :
4548 simon@2ndQuadrant.co 1299 [ + + ]: 15657 : if (requests)
1300 : 9775 : pfree(requests);
1301 : : }
1302 : :
1303 : : /*
1304 : : * Update any shared memory configurations based on config parameters
1305 : : */
1306 : : static void
4463 1307 : 863 : UpdateSharedMemoryConfig(void)
1308 : : {
1309 : : /* update global shmem state for sync rep */
1310 : 863 : SyncRepUpdateSyncStandbysDefined();
1311 : :
1312 : : /*
1313 : : * If full_page_writes has been changed by SIGHUP, we update it in shared
1314 : : * memory and write an XLOG_FPW_CHANGE record.
1315 : : */
1316 : 863 : UpdateFullPageWrites();
1317 : :
1318 [ + + ]: 863 : elog(DEBUG2, "checkpointer updated shared memory configuration values");
1319 : 863 : }
1320 : :
1321 : : /*
1322 : : * FirstCallSinceLastCheckpoint allows a process to take an action once
1323 : : * per checkpoint cycle by asynchronously checking for checkpoint completion.
1324 : : */
1325 : : bool
4335 1326 : 15299 : FirstCallSinceLastCheckpoint(void)
1327 : : {
1328 : : static int ckpt_done = 0;
1329 : : int new_done;
4326 bruce@momjian.us 1330 : 15299 : bool FirstCall = false;
1331 : :
3113 rhaas@postgresql.org 1332 [ - + ]: 15299 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
1333 : 15299 : new_done = CheckpointerShmem->ckpt_done;
1334 : 15299 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
1335 : :
4335 simon@2ndQuadrant.co 1336 [ + + ]: 15299 : if (new_done != ckpt_done)
1337 : 396 : FirstCall = true;
1338 : :
1339 : 15299 : ckpt_done = new_done;
1340 : :
1341 : 15299 : return FirstCall;
1342 : : }