/*-------------------------------------------------------------------------
 *
 * s_lock.c
 *	   Hardware-dependent implementation of spinlocks.
 *
 * When waiting for a contended spinlock we loop tightly for a while, then
 * delay using pg_usleep() and try again.  Preferably, "a while" should be a
 * small multiple of the maximum time we expect a spinlock to be held.  100
 * iterations seems about right as an initial guess.  However, on a
 * uniprocessor the loop is a waste of cycles, while in a multi-CPU scenario
 * it's usually better to spin a bit longer than to call the kernel, so we
 * try to adapt the spin loop count depending on whether we seem to be
 * running on a uniprocessor or a multiprocessor.
 *
 * Note: you might think MIN_SPINS_PER_DELAY should be just 1, but you'd
 * be wrong; there are platforms where that can result in a "stuck
 * spinlock" failure.  This has been seen particularly on Alphas; it seems
 * that the first TAS after returning from kernel space will always fail
 * on that hardware.
 *
 * Once we do decide to block, we use randomly increasing pg_usleep()
 * delays.  The first delay is 1 msec, then the delay randomly increases to
 * about one second, after which we reset to 1 msec and start again.  The
 * idea here is that in the presence of heavy contention we need to
 * increase the delay, else the spinlock holder may never get to run and
 * release the lock.  (Consider a situation where the spinlock holder has
 * been nice'd down in priority by the scheduler --- it will not get
 * scheduled until all would-be acquirers are sleeping, so if we always
 * used a 1-msec sleep, there would be a real possibility of starvation.)
 * But we can't just clamp the delay to an upper bound, else it would take
 * a long time to make a reasonable number of tries.
 *
 * We time out and declare an error after NUM_DELAYS delays (thus, exactly
 * that many tries).  With the given settings, this will usually take two
 * minutes or so.  It seems better to fix the total number of tries (and
 * thus the probability of unintended failure) than to fix the total time
 * spent.
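 *
 * (Rough, illustrative arithmetic: the delay grows by a random factor
 * between 1x and 2x per step, so one backoff cycle from 1 msec up to
 * 1 sec takes roughly 17-18 delays and sleeps for about 2-3 seconds in
 * total; 1000 delays therefore add up to a bit over two minutes of
 * sleeping, which is where the estimate above comes from.)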
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/storage/lmgr/s_lock.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <time.h>
#include <unistd.h>

#include "common/pg_prng.h"
#include "port/atomics.h"
#include "storage/s_lock.h"
#include "utils/wait_event.h"

#define MIN_SPINS_PER_DELAY 10
#define MAX_SPINS_PER_DELAY 1000
#define NUM_DELAYS			1000
#define MIN_DELAY_USEC		1000L
#define MAX_DELAY_USEC		1000000L


slock_t		dummy_spinlock;

static int	spins_per_delay = DEFAULT_SPINS_PER_DELAY;


/*
 * s_lock_stuck() - complain about a stuck spinlock
 */
static void
s_lock_stuck(const char *file, int line, const char *func)
{
	if (!func)
		func = "(unknown)";
#if defined(S_LOCK_TEST)
	fprintf(stderr,
			"\nStuck spinlock detected at %s, %s:%d.\n",
			func, file, line);
	exit(1);
#else
	elog(PANIC, "stuck spinlock detected at %s, %s:%d",
		 func, file, line);
#endif
}

/*
 * s_lock(lock) - platform-independent portion of waiting for a spinlock.
 */
int
s_lock(volatile slock_t *lock, const char *file, int line, const char *func)
{
	SpinDelayStatus delayStatus;

	init_spin_delay(&delayStatus, file, line, func);

	while (TAS_SPIN(lock))
	{
		perform_spin_delay(&delayStatus);
	}

	finish_spin_delay(&delayStatus);

	return delayStatus.delays;
}
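
/*
 * Illustrative sketch of how a contended acquisition reaches s_lock():
 * the macro layer in s_lock.h routes it here, with the default
 * definition looking roughly like
 *
 *		#define S_LOCK(lock) \
 *			(TAS(lock) ? s_lock((lock), __FILE__, __LINE__, __func__) : 0)
 *
 * so the uncontended fast path is a single TAS, and only contended
 * acquisitions pay for the delay logic in this file.
 */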

#ifdef USE_DEFAULT_S_UNLOCK
void
s_unlock(volatile slock_t *lock)
{
#ifdef TAS_ACTIVE_WORD
	/* HP's PA-RISC */
	*TAS_ACTIVE_WORD(lock) = -1;
#else
	*lock = 0;
#endif
}
#endif

/*
 * Wait while spinning on a contended spinlock.
 */
void
perform_spin_delay(SpinDelayStatus *status)
{
	/* CPU-specific delay each time through the loop */
	SPIN_DELAY();

	/* Block the process every spins_per_delay tries */
	if (++(status->spins) >= spins_per_delay)
	{
		if (++(status->delays) > NUM_DELAYS)
			s_lock_stuck(status->file, status->line, status->func);

		if (status->cur_delay == 0) /* first time to delay? */
			status->cur_delay = MIN_DELAY_USEC;

		/*
		 * Once we start sleeping, the overhead of reporting a wait event is
		 * justified.  Actively spinning easily stands out in profilers, but
		 * sleeping with an exponential backoff is harder to spot...
		 *
		 * We might want to report something more granular at some point, but
		 * this is better than nothing.
		 */
		pgstat_report_wait_start(WAIT_EVENT_SPIN_DELAY);
		pg_usleep(status->cur_delay);
		pgstat_report_wait_end();

#if defined(S_LOCK_TEST)
		fprintf(stdout, "*");
		fflush(stdout);
#endif

		/* increase delay by a random fraction between 1X and 2X */
		status->cur_delay += (int) (status->cur_delay *
									pg_prng_double(&pg_global_prng_state) + 0.5);
		/* wrap back to minimum delay when max is exceeded */
		if (status->cur_delay > MAX_DELAY_USEC)
			status->cur_delay = MIN_DELAY_USEC;

		status->spins = 0;
	}
}
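
/*
 * A minimal sketch of the caller-side pattern for this API, mirroring
 * s_lock() above (condition_is_satisfied() is a hypothetical
 * placeholder).  Code that busy-waits on something other than a
 * spinlock can reuse the same three steps:
 *
 *		SpinDelayStatus delayStatus;
 *
 *		init_local_spin_delay(&delayStatus);
 *		while (!condition_is_satisfied())
 *			perform_spin_delay(&delayStatus);
 *		finish_spin_delay(&delayStatus);
 *
 * where init_local_spin_delay() is the convenience wrapper from s_lock.h
 * that fills in the caller's file, line, and function.
 */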

/*
 * After acquiring a spinlock, update estimates about how long to loop.
 *
 * If we were able to acquire the lock without delaying, it's a good
 * indication we are on a multiprocessor.  If we had to delay, it's a sign
 * (but not a sure thing) that we are on a uniprocessor.  Hence, we
 * decrement spins_per_delay slowly when we had to delay, and increase it
 * rapidly when we didn't.  It's expected that spins_per_delay will
 * converge to the minimum value on a uniprocessor and to the maximum
 * value on a multiprocessor.
 *
 * Note: spins_per_delay is local within our current process.  We want to
 * average these observations across multiple backends, since it's
 * relatively rare for this function to even get entered, and so a single
 * backend might not live long enough to converge on a good value.  That
 * is handled by the two routines below.
 */
void
finish_spin_delay(SpinDelayStatus *status)
{
	if (status->cur_delay == 0)
	{
		/* we never had to delay */
		if (spins_per_delay < MAX_SPINS_PER_DELAY)
			spins_per_delay = Min(spins_per_delay + 100, MAX_SPINS_PER_DELAY);
	}
	else
	{
		if (spins_per_delay > MIN_SPINS_PER_DELAY)
			spins_per_delay = Max(spins_per_delay - 1, MIN_SPINS_PER_DELAY);
	}
}
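
/*
 * Illustrative numbers (assuming DEFAULT_SPINS_PER_DELAY is 100, per
 * s_lock.h): starting from the default, nine contended acquisitions that
 * never had to sleep are enough to reach MAX_SPINS_PER_DELAY, whereas it
 * takes 990 acquisitions that did sleep to drift down to
 * MIN_SPINS_PER_DELAY.
 */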

/*
 * Set local copy of spins_per_delay during backend startup.
 *
 * NB: this has to be pretty fast as it is called while holding a spinlock
 */
void
set_spins_per_delay(int shared_spins_per_delay)
{
	spins_per_delay = shared_spins_per_delay;
}

/*
 * Update shared estimate of spins_per_delay during backend exit.
 *
 * NB: this has to be pretty fast as it is called while holding a spinlock
 */
int
update_spins_per_delay(int shared_spins_per_delay)
{
	/*
	 * We use an exponential moving average with a relatively slow adaptation
	 * rate, so that noise in any one backend's result won't affect the
	 * shared value too much.  As long as both inputs are within the allowed
	 * range, the result must be too, so we need not worry about clamping it.
	 *
	 * We deliberately truncate rather than round; this is so that single
	 * adjustments inside a backend can affect the shared estimate (see the
	 * asymmetric adjustment rules above).
	 */
	return (shared_spins_per_delay * 15 + spins_per_delay) / 16;
}
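
/*
 * Worked example with illustrative numbers: if the shared estimate is 500
 * and an exiting backend has converged on 1000, the updated shared value
 * is (500 * 15 + 1000) / 16 = 531 (truncated), so a single backend nudges
 * the shared estimate by at most 1/16 of the gap.
 */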

/*****************************************************************************/
#if defined(S_LOCK_TEST)

/*
 * Test program for verifying a port's spinlock support.
 */

struct test_lock_struct
{
	char		pad1;
	slock_t		lock;
	char		pad2;
};

volatile struct test_lock_struct test_lock;

int
main()
{
	pg_prng_seed(&pg_global_prng_state, (uint64) time(NULL));

	test_lock.pad1 = test_lock.pad2 = 0x44;

	S_INIT_LOCK(&test_lock.lock);

	if (test_lock.pad1 != 0x44 || test_lock.pad2 != 0x44)
	{
		printf("S_LOCK_TEST: failed, declared datatype is wrong size\n");
		return 1;
	}

	if (!S_LOCK_FREE(&test_lock.lock))
	{
		printf("S_LOCK_TEST: failed, lock not initialized\n");
		return 1;
	}

	S_LOCK(&test_lock.lock);

	if (test_lock.pad1 != 0x44 || test_lock.pad2 != 0x44)
	{
		printf("S_LOCK_TEST: failed, declared datatype is wrong size\n");
		return 1;
	}

	if (S_LOCK_FREE(&test_lock.lock))
	{
		printf("S_LOCK_TEST: failed, lock not locked\n");
		return 1;
	}

	S_UNLOCK(&test_lock.lock);

	if (test_lock.pad1 != 0x44 || test_lock.pad2 != 0x44)
	{
		printf("S_LOCK_TEST: failed, declared datatype is wrong size\n");
		return 1;
	}

	if (!S_LOCK_FREE(&test_lock.lock))
	{
		printf("S_LOCK_TEST: failed, lock not unlocked\n");
		return 1;
	}

	S_LOCK(&test_lock.lock);

	if (test_lock.pad1 != 0x44 || test_lock.pad2 != 0x44)
	{
		printf("S_LOCK_TEST: failed, declared datatype is wrong size\n");
		return 1;
	}

	if (S_LOCK_FREE(&test_lock.lock))
	{
		printf("S_LOCK_TEST: failed, lock not re-locked\n");
		return 1;
	}

	printf("S_LOCK_TEST: this will print %d stars and then\n", NUM_DELAYS);
	printf("             exit with a 'stuck spinlock' message\n");
	printf("             if S_LOCK() and TAS() are working.\n");
	fflush(stdout);

	s_lock(&test_lock.lock, __FILE__, __LINE__, __func__);

	printf("S_LOCK_TEST: failed, lock not locked\n");
	return 1;
}

#endif							/* S_LOCK_TEST */