PostgreSQL Source Code git master
lwlock.c File Reference
#include "postgres.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "pgstat.h"
#include "port/pg_bitutils.h"
#include "storage/proc.h"
#include "storage/proclist.h"
#include "storage/procnumber.h"
#include "storage/spin.h"
#include "utils/memutils.h"
#include "storage/lwlocklist.h"

Go to the source code of this file.

Data Structures

struct  LWLockHandle
 
struct  NamedLWLockTrancheRequest
 

Macros

#define LW_FLAG_HAS_WAITERS   ((uint32) 1 << 31)
 
#define LW_FLAG_RELEASE_OK   ((uint32) 1 << 30)
 
#define LW_FLAG_LOCKED   ((uint32) 1 << 29)
 
#define LW_FLAG_BITS   3
 
#define LW_FLAG_MASK   (((1<<LW_FLAG_BITS)-1)<<(32-LW_FLAG_BITS))
 
#define LW_VAL_EXCLUSIVE   (MAX_BACKENDS + 1)
 
#define LW_VAL_SHARED   1
 
#define LW_SHARED_MASK   MAX_BACKENDS
 
#define LW_LOCK_MASK   (MAX_BACKENDS | LW_VAL_EXCLUSIVE)
 
#define PG_LWLOCK(id, lockname)   [id] = CppAsString(lockname),
 
#define PG_LWLOCKTRANCHE(id, lockname)   [LWTRANCHE_##id] = CppAsString(lockname),
 
#define MAX_SIMUL_LWLOCKS   200
 
#define MAX_NAMED_TRANCHES   256
 
#define T_NAME(lock)    GetLWTrancheName((lock)->tranche)
 
#define PRINT_LWDEBUG(a, b, c)   ((void)0)
 
#define LOG_LWDEBUG(a, b, c)   ((void)0)
 

Typedefs

typedef struct LWLockHandle LWLockHandle
 
typedef struct NamedLWLockTrancheRequest NamedLWLockTrancheRequest
 

Functions

 StaticAssertDecl (((MAX_BACKENDS + 1) & MAX_BACKENDS) == 0, "MAX_BACKENDS + 1 needs to be a power of 2")
 
 StaticAssertDecl ((MAX_BACKENDS & LW_FLAG_MASK) == 0, "MAX_BACKENDS and LW_FLAG_MASK overlap")
 
 StaticAssertDecl ((LW_VAL_EXCLUSIVE & LW_FLAG_MASK) == 0, "LW_VAL_EXCLUSIVE and LW_FLAG_MASK overlap")
 
 StaticAssertDecl (lengthof(BuiltinTrancheNames) == LWTRANCHE_FIRST_USER_DEFINED, "missing entries in BuiltinTrancheNames[]")
 
static void InitializeLWLocks (void)
 
static void LWLockReportWaitStart (LWLock *lock)
 
static void LWLockReportWaitEnd (void)
 
static const char * GetLWTrancheName (uint16 trancheId)
 
static int NumLWLocksForNamedTranches (void)
 
Size LWLockShmemSize (void)
 
void CreateLWLocks (void)
 
void InitLWLockAccess (void)
 
LWLockPadded * GetNamedLWLockTranche (const char *tranche_name)
 
int LWLockNewTrancheId (const char *name)
 
void RequestNamedLWLockTranche (const char *tranche_name, int num_lwlocks)
 
void LWLockInitialize (LWLock *lock, int tranche_id)
 
const char * GetLWLockIdentifier (uint32 classId, uint16 eventId)
 
static bool LWLockAttemptLock (LWLock *lock, LWLockMode mode)
 
static void LWLockWaitListLock (LWLock *lock)
 
static void LWLockWaitListUnlock (LWLock *lock)
 
static void LWLockWakeup (LWLock *lock)
 
static void LWLockQueueSelf (LWLock *lock, LWLockMode mode)
 
static void LWLockDequeueSelf (LWLock *lock)
 
bool LWLockAcquire (LWLock *lock, LWLockMode mode)
 
bool LWLockConditionalAcquire (LWLock *lock, LWLockMode mode)
 
bool LWLockAcquireOrWait (LWLock *lock, LWLockMode mode)
 
static bool LWLockConflictsWithVar (LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval, bool *result)
 
bool LWLockWaitForVar (LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval)
 
void LWLockUpdateVar (LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
 
static LWLockMode LWLockDisownInternal (LWLock *lock)
 
static void LWLockReleaseInternal (LWLock *lock, LWLockMode mode)
 
void LWLockDisown (LWLock *lock)
 
void LWLockRelease (LWLock *lock)
 
void LWLockReleaseDisowned (LWLock *lock, LWLockMode mode)
 
void LWLockReleaseClearVar (LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
 
void LWLockReleaseAll (void)
 
void ForEachLWLockHeldByMe (void(*callback)(LWLock *, LWLockMode, void *), void *context)
 
bool LWLockHeldByMe (LWLock *lock)
 
bool LWLockAnyHeldByMe (LWLock *lock, int nlocks, size_t stride)
 
bool LWLockHeldByMeInMode (LWLock *lock, LWLockMode mode)
 

Variables

static const char *const BuiltinTrancheNames []
 
char ** LWLockTrancheNames = NULL
 
LWLockPadded * MainLWLockArray = NULL
 
static int num_held_lwlocks = 0
 
static LWLockHandle held_lwlocks [MAX_SIMUL_LWLOCKS]
 
int NamedLWLockTrancheRequests = 0
 
NamedLWLockTrancheRequest * NamedLWLockTrancheRequestArray = NULL
 
static NamedLWLockTrancheRequest * LocalNamedLWLockTrancheRequestArray = NULL
 
int * LWLockCounter = NULL
 
static int LocalLWLockCounter
 

Macro Definition Documentation

◆ LOG_LWDEBUG

#define LOG_LWDEBUG (   a,
  b,
  c 
)    ((void)0)

Definition at line 276 of file lwlock.c.

◆ LW_FLAG_BITS

#define LW_FLAG_BITS   3

Definition at line 97 of file lwlock.c.

◆ LW_FLAG_HAS_WAITERS

#define LW_FLAG_HAS_WAITERS   ((uint32) 1 << 31)

Definition at line 94 of file lwlock.c.

◆ LW_FLAG_LOCKED

#define LW_FLAG_LOCKED   ((uint32) 1 << 29)

Definition at line 96 of file lwlock.c.

◆ LW_FLAG_MASK

#define LW_FLAG_MASK   (((1<<LW_FLAG_BITS)-1)<<(32-LW_FLAG_BITS))

Definition at line 98 of file lwlock.c.

◆ LW_FLAG_RELEASE_OK

#define LW_FLAG_RELEASE_OK   ((uint32) 1 << 30)

Definition at line 95 of file lwlock.c.

◆ LW_LOCK_MASK

#define LW_LOCK_MASK   (MAX_BACKENDS | LW_VAL_EXCLUSIVE)

Definition at line 106 of file lwlock.c.

◆ LW_SHARED_MASK

#define LW_SHARED_MASK   MAX_BACKENDS

Definition at line 105 of file lwlock.c.

◆ LW_VAL_EXCLUSIVE

#define LW_VAL_EXCLUSIVE   (MAX_BACKENDS + 1)

Definition at line 101 of file lwlock.c.

◆ LW_VAL_SHARED

#define LW_VAL_SHARED   1

Definition at line 102 of file lwlock.c.
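
Together with LW_VAL_EXCLUSIVE, LW_SHARED_MASK, and the LW_FLAG_* bits, this value defines how the 32-bit atomic state word of an LWLock is packed: the low bits count shared holders, LW_VAL_EXCLUSIVE marks an exclusive holder, and the top three bits carry the flags. As a minimal illustrative sketch (not part of lwlock.c), a state word could be decoded like this:

static void
decode_lwlock_state(uint32 state)
{
	/* top three bits: bookkeeping flags */
	bool		has_waiters = (state & LW_FLAG_HAS_WAITERS) != 0;
	bool		release_ok = (state & LW_FLAG_RELEASE_OK) != 0;

	/* lock word proper: one exclusive bit, plus a shared-holder count */
	bool		exclusive = (state & LW_VAL_EXCLUSIVE) != 0;
	uint32		shared_count = state & LW_SHARED_MASK;

	elog(DEBUG1, "waiters=%d release_ok=%d exclusive=%d shared=%u",
		 has_waiters, release_ok, exclusive, shared_count);
}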

◆ MAX_NAMED_TRANCHES

#define MAX_NAMED_TRANCHES   256

Definition at line 204 of file lwlock.c.

◆ MAX_SIMUL_LWLOCKS

#define MAX_SIMUL_LWLOCKS   200

Definition at line 168 of file lwlock.c.

◆ PG_LWLOCK

#define PG_LWLOCK (   id,
  lockname 
)    [id] = CppAsString(lockname),

◆ PG_LWLOCKTRANCHE

#define PG_LWLOCKTRANCHE (   id,
  lockname 
)    [LWTRANCHE_##id] = CppAsString(lockname),

◆ PRINT_LWDEBUG

#define PRINT_LWDEBUG (   a,
  b,
  c 
)    ((void)0)

Definition at line 275 of file lwlock.c.

◆ T_NAME

#define T_NAME (   lock)     GetLWTrancheName((lock)->tranche)

Definition at line 211 of file lwlock.c.

Typedef Documentation

◆ LWLockHandle

typedef struct LWLockHandle LWLockHandle

◆ NamedLWLockTrancheRequest

typedef struct NamedLWLockTrancheRequest NamedLWLockTrancheRequest

Function Documentation

◆ CreateLWLocks()

void CreateLWLocks ( void  )

Definition at line 441 of file lwlock.c.

void
CreateLWLocks(void)
{
	if (!IsUnderPostmaster)
	{
		Size		spaceLocks = LWLockShmemSize();
		char	   *ptr;

		/* Allocate space */
		ptr = (char *) ShmemAlloc(spaceLocks);

		/* Initialize the dynamic-allocation counter for tranches */
		LWLockCounter = (int *) ptr;
		*LWLockCounter = LWTRANCHE_FIRST_USER_DEFINED;
		ptr += MAXALIGN(sizeof(int));

		/* Initialize tranche names */
		LWLockTrancheNames = (char **) ptr;
		ptr += MAX_NAMED_TRANCHES * sizeof(char *);
		for (int i = 0; i < MAX_NAMED_TRANCHES; i++)
		{
			LWLockTrancheNames[i] = ptr;
			ptr += NAMEDATALEN;
		}

		/*
		 * Move named tranche requests to shared memory.  This is done for
		 * the benefit of EXEC_BACKEND builds, which otherwise wouldn't be
		 * able to call GetNamedLWLockTranche() outside postmaster.
		 */
		if (NamedLWLockTrancheRequests > 0)
		{
			/*
			 * Save the pointer to the request array in postmaster's local
			 * memory.  We'll need it if we ever need to re-initialize shared
			 * memory after a crash.
			 */
			LocalNamedLWLockTrancheRequestArray = NamedLWLockTrancheRequestArray;

			NamedLWLockTrancheRequestArray = (NamedLWLockTrancheRequest *) ptr;
			memcpy(NamedLWLockTrancheRequestArray,
				   LocalNamedLWLockTrancheRequestArray,
				   NamedLWLockTrancheRequests * sizeof(NamedLWLockTrancheRequest));
			ptr += NamedLWLockTrancheRequests * sizeof(NamedLWLockTrancheRequest);
		}

		/* Ensure desired alignment of LWLock array */
		ptr += LWLOCK_PADDED_SIZE - ((uintptr_t) ptr) % LWLOCK_PADDED_SIZE;
		MainLWLockArray = (LWLockPadded *) ptr;

		/* Initialize all LWLocks */
		InitializeLWLocks();
	}
}

References i, InitializeLWLocks(), IsUnderPostmaster, LocalNamedLWLockTrancheRequestArray, LWLOCK_PADDED_SIZE, LWLockCounter, LWLockShmemSize(), LWLockTrancheNames, LWTRANCHE_FIRST_USER_DEFINED, MainLWLockArray, MAX_NAMED_TRANCHES, MAXALIGN, NAMEDATALEN, NamedLWLockTrancheRequestArray, NamedLWLockTrancheRequests, and ShmemAlloc().

Referenced by CreateOrAttachShmemStructs().

◆ ForEachLWLockHeldByMe()

void ForEachLWLockHeldByMe ( void(*)(LWLock *, LWLockMode, void *)  callback,
void *  context 
)

Definition at line 1966 of file lwlock.c.

void
ForEachLWLockHeldByMe(void (*callback) (LWLock *, LWLockMode, void *),
					  void *context)
{
	int			i;

	for (i = 0; i < num_held_lwlocks; i++)
		callback(held_lwlocks[i].lock, held_lwlocks[i].mode, context);
}

References callback(), held_lwlocks, i, mode, and num_held_lwlocks.

◆ GetLWLockIdentifier()

const char * GetLWLockIdentifier ( uint32  classId,
uint16  eventId 
)

Definition at line 773 of file lwlock.c.

const char *
GetLWLockIdentifier(uint32 classId, uint16 eventId)
{
	Assert(classId == PG_WAIT_LWLOCK);
	/* The event IDs are just tranche numbers. */
	return GetLWTrancheName(eventId);
}

References Assert(), GetLWTrancheName(), and PG_WAIT_LWLOCK.

Referenced by pgstat_get_wait_event().

◆ GetLWTrancheName()

static const char * GetLWTrancheName ( uint16  trancheId)
static

Definition at line 737 of file lwlock.c.

static const char *
GetLWTrancheName(uint16 trancheId)
{
	/* Built-in tranche or individual LWLock? */
	if (trancheId < LWTRANCHE_FIRST_USER_DEFINED)
		return BuiltinTrancheNames[trancheId];

	/*
	 * We only ever add new entries to LWLockTrancheNames, so most lookups can
	 * avoid taking the spinlock as long as the backend-local counter
	 * (LocalLWLockCounter) is greater than the requested tranche ID.  Else,
	 * we need to first update the backend-local counter with ShmemLock held
	 * before attempting the lookup again.  In practice, the latter case is
	 * probably rare.
	 */
	if (trancheId >= LocalLWLockCounter)
	{
		SpinLockAcquire(ShmemLock);
		LocalLWLockCounter = *LWLockCounter;
		SpinLockRelease(ShmemLock);

		if (trancheId >= LocalLWLockCounter)
			elog(ERROR, "tranche %d is not registered", trancheId);
	}

	/*
	 * It's an extension tranche, so look in LWLockTrancheNames.
	 */
	trancheId -= LWTRANCHE_FIRST_USER_DEFINED;

	return LWLockTrancheNames[trancheId];
}

References BuiltinTrancheNames, elog, ERROR, LocalLWLockCounter, LWLockCounter, LWLockTrancheNames, LWTRANCHE_FIRST_USER_DEFINED, ShmemLock, SpinLockAcquire, and SpinLockRelease.

Referenced by GetLWLockIdentifier(), and LWLockInitialize().

◆ GetNamedLWLockTranche()

LWLockPadded * GetNamedLWLockTranche ( const char *  tranche_name)

Definition at line 566 of file lwlock.c.

LWLockPadded *
GetNamedLWLockTranche(const char *tranche_name)
{
	int			lock_pos;
	int			i;

	/*
	 * Obtain the position of base address of LWLock belonging to requested
	 * tranche_name in MainLWLockArray.  LWLocks for named tranches are placed
	 * in MainLWLockArray after fixed locks.
	 */
	lock_pos = NUM_FIXED_LWLOCKS;
	for (i = 0; i < NamedLWLockTrancheRequests; i++)
	{
		if (strcmp(NamedLWLockTrancheRequestArray[i].tranche_name,
				   tranche_name) == 0)
			return &MainLWLockArray[lock_pos];

		lock_pos += NamedLWLockTrancheRequestArray[i].num_lwlocks;
	}

	elog(ERROR, "requested tranche is not registered");

	/* just to keep compiler quiet */
	return NULL;
}

References elog, ERROR, i, MainLWLockArray, NamedLWLockTrancheRequestArray, NamedLWLockTrancheRequests, NUM_FIXED_LWLOCKS, and NamedLWLockTrancheRequest::num_lwlocks.

Referenced by pgss_shmem_startup(), and test_lwlock_tranche_lookup().
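
Extensions normally pair this with RequestNamedLWLockTranche(): reserve the locks from a shmem_request_hook, then look them up once shared memory is set up. A minimal sketch, where the extension name "my_ext" and the lock count are assumptions for illustration:

/* In _PG_init(): install these as shmem_request_hook / shmem_startup_hook. */
static LWLockPadded *my_ext_locks;

static void
my_ext_shmem_request(void)
{
	/* reserve four LWLocks under a named tranche */
	RequestNamedLWLockTranche("my_ext", 4);
}

static void
my_ext_shmem_startup(void)
{
	/* fetch the base address of the tranche's locks */
	my_ext_locks = GetNamedLWLockTranche("my_ext");
}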

◆ InitializeLWLocks()

static void InitializeLWLocks ( void  )
static

Definition at line 498 of file lwlock.c.

static void
InitializeLWLocks(void)
{
	int			id;
	int			i;
	int			j;
	LWLockPadded *lock;

	/* Initialize all individual LWLocks in main array */
	for (id = 0, lock = MainLWLockArray; id < NUM_INDIVIDUAL_LWLOCKS; id++, lock++)
		LWLockInitialize(&lock->lock, id);

	/* Initialize buffer mapping LWLocks in main array */
	lock = MainLWLockArray + BUFFER_MAPPING_LWLOCK_OFFSET;
	for (id = 0; id < NUM_BUFFER_PARTITIONS; id++, lock++)
		LWLockInitialize(&lock->lock, LWTRANCHE_BUFFER_MAPPING);

	/* Initialize lmgrs' LWLocks in main array */
	lock = MainLWLockArray + LOCK_MANAGER_LWLOCK_OFFSET;
	for (id = 0; id < NUM_LOCK_PARTITIONS; id++, lock++)
		LWLockInitialize(&lock->lock, LWTRANCHE_LOCK_MANAGER);

	/* Initialize predicate lmgrs' LWLocks in main array */
	lock = MainLWLockArray + PREDICATELOCK_MANAGER_LWLOCK_OFFSET;
	for (id = 0; id < NUM_PREDICATELOCK_PARTITIONS; id++, lock++)
		LWLockInitialize(&lock->lock, LWTRANCHE_PREDICATE_LOCK_MANAGER);

	/*
	 * Copy the info about any named tranches into shared memory (so that
	 * other processes can see it), and initialize the requested LWLocks.
	 */
	if (NamedLWLockTrancheRequests > 0)
	{
		lock = MainLWLockArray + NUM_FIXED_LWLOCKS;

		for (i = 0; i < NamedLWLockTrancheRequests; i++)
		{
			NamedLWLockTrancheRequest *request;
			int			tranche;

			request = &NamedLWLockTrancheRequestArray[i];
			tranche = LWLockNewTrancheId(request->tranche_name);

			for (j = 0; j < request->num_lwlocks; j++, lock++)
				LWLockInitialize(&lock->lock, tranche);
		}
	}
}

References BUFFER_MAPPING_LWLOCK_OFFSET, i, j, LWLockPadded::lock, LOCK_MANAGER_LWLOCK_OFFSET, LWLockInitialize(), LWLockNewTrancheId(), MainLWLockArray, NamedLWLockTrancheRequestArray, NamedLWLockTrancheRequests, NUM_BUFFER_PARTITIONS, NUM_FIXED_LWLOCKS, NUM_LOCK_PARTITIONS, NamedLWLockTrancheRequest::num_lwlocks, NUM_PREDICATELOCK_PARTITIONS, PREDICATELOCK_MANAGER_LWLOCK_OFFSET, and NamedLWLockTrancheRequest::tranche_name.

Referenced by CreateLWLocks().

◆ InitLWLockAccess()

void InitLWLockAccess ( void  )

Definition at line 550 of file lwlock.c.

void
InitLWLockAccess(void)
{
#ifdef LWLOCK_STATS
	init_lwlock_stats();
#endif
}

Referenced by InitAuxiliaryProcess(), and InitProcess().

◆ LWLockAcquire()

bool LWLockAcquire ( LWLock *  lock,
LWLockMode  mode 
)

Definition at line 1178 of file lwlock.c.

bool
LWLockAcquire(LWLock *lock, LWLockMode mode)
{
	PGPROC	   *proc = MyProc;
	bool		result = true;
	int			extraWaits = 0;
#ifdef LWLOCK_STATS
	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);
#endif

	Assert(mode == LW_SHARED || mode == LW_EXCLUSIVE);

	PRINT_LWDEBUG("LWLockAcquire", lock, mode);

#ifdef LWLOCK_STATS
	/* Count lock acquisition attempts */
	if (mode == LW_EXCLUSIVE)
		lwstats->ex_acquire_count++;
	else
		lwstats->sh_acquire_count++;
#endif							/* LWLOCK_STATS */

	/*
	 * We can't wait if we haven't got a PGPROC.  This should only occur
	 * during bootstrap or shared memory initialization.  Put an Assert here
	 * to catch unsafe coding practices.
	 */
	Assert(!(proc == NULL && IsUnderPostmaster));

	/* Ensure we will have room to remember the lock */
	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
		elog(ERROR, "too many LWLocks taken");

	/*
	 * Lock out cancel/die interrupts until we exit the code section protected
	 * by the LWLock.  This ensures that interrupts will not interfere with
	 * manipulations of data structures in shared memory.
	 */
	HOLD_INTERRUPTS();

	/*
	 * Loop here to try to acquire lock after each time we are signaled by
	 * LWLockRelease.
	 *
	 * NOTE: it might seem better to have LWLockRelease actually grant us the
	 * lock, rather than retrying and possibly having to go back to sleep. But
	 * in practice that is no good because it means a process swap for every
	 * lock acquisition when two or more processes are contending for the same
	 * lock.  Since LWLocks are normally used to protect not-very-long
	 * sections of computation, a process needs to be able to acquire and
	 * release the same lock many times during a single CPU time slice, even
	 * in the presence of contention.  The efficiency of being able to do that
	 * outweighs the inefficiency of sometimes wasting a process dispatch
	 * cycle because the lock is not free when a released waiter finally gets
	 * to run.  See pgsql-hackers archives for 29-Dec-01.
	 */
	for (;;)
	{
		bool		mustwait;

		/*
		 * Try to grab the lock the first time, we're not in the waitqueue
		 * yet/anymore.
		 */
		mustwait = LWLockAttemptLock(lock, mode);

		if (!mustwait)
		{
			LOG_LWDEBUG("LWLockAcquire", lock, "immediately acquired lock");
			break;				/* got the lock */
		}

		/*
		 * Ok, at this point we couldn't grab the lock on the first try.  We
		 * cannot simply queue ourselves to the end of the list and wait to be
		 * woken up because by now the lock could long have been released.
		 * Instead add us to the queue and try to grab the lock again.  If we
		 * succeed we need to revert the queuing and be happy, otherwise we
		 * recheck the lock.  If we still couldn't grab it, we know that the
		 * other locker will see our queue entries when releasing since they
		 * existed before we checked for the lock.
		 */

		/* add to the queue */
		LWLockQueueSelf(lock, mode);

		/* we're now guaranteed to be woken up if necessary */
		mustwait = LWLockAttemptLock(lock, mode);

		/* ok, grabbed the lock the second time round, need to undo queueing */
		if (!mustwait)
		{
			LOG_LWDEBUG("LWLockAcquire", lock, "acquired, undoing queue");

			LWLockDequeueSelf(lock);
			break;
		}

		/*
		 * Wait until awakened.
		 *
		 * It is possible that we get awakened for a reason other than being
		 * signaled by LWLockRelease.  If so, loop back and wait again.  Once
		 * we've gotten the LWLock, re-increment the sema by the number of
		 * additional signals received.
		 */
		LOG_LWDEBUG("LWLockAcquire", lock, "waiting");

#ifdef LWLOCK_STATS
		lwstats->block_count++;
#endif

		LWLockReportWaitStart(lock);
		if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);

		for (;;)
		{
			PGSemaphoreLock(proc->sem);
			if (proc->lwWaiting == LW_WS_NOT_WAITING)
				break;
			extraWaits++;
		}

		/* Retrying, allow LWLockRelease to release waiters again. */
		pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_RELEASE_OK);

#ifdef LOCK_DEBUG
		{
			/* not waiting anymore */
			uint32		nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);

			Assert(nwaiters < MAX_BACKENDS);
		}
#endif

		if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);
		LWLockReportWaitEnd();

		LOG_LWDEBUG("LWLockAcquire", lock, "awakened");

		/* Now loop back and try to acquire lock again. */
		result = false;
	}

	if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_ENABLED())
		TRACE_POSTGRESQL_LWLOCK_ACQUIRE(T_NAME(lock), mode);

	/* Add lock to list of locks held by this backend */
	held_lwlocks[num_held_lwlocks].lock = lock;
	held_lwlocks[num_held_lwlocks++].mode = mode;

	/*
	 * Fix the process wait semaphore's count for any absorbed wakeups.
	 */
	while (extraWaits-- > 0)
		PGSemaphoreUnlock(proc->sem);

	return result;
}

References Assert(), elog, ERROR, held_lwlocks, HOLD_INTERRUPTS, IsUnderPostmaster, LWLockHandle::lock, LOG_LWDEBUG, LW_EXCLUSIVE, LW_FLAG_RELEASE_OK, LW_SHARED, LW_WS_NOT_WAITING, LWLockAttemptLock(), LWLockDequeueSelf(), LWLockQueueSelf(), LWLockReportWaitEnd(), LWLockReportWaitStart(), PGPROC::lwWaiting, MAX_BACKENDS, MAX_SIMUL_LWLOCKS, LWLockHandle::mode, mode, MyProc, num_held_lwlocks, pg_atomic_fetch_or_u32(), pg_atomic_fetch_sub_u32(), PG_USED_FOR_ASSERTS_ONLY, PGSemaphoreLock(), PGSemaphoreUnlock(), PRINT_LWDEBUG, PGPROC::sem, LWLock::state, and T_NAME.

Referenced by _bt_end_vacuum(), _bt_parallel_done(), _bt_parallel_primscan_schedule(), _bt_parallel_release(), _bt_parallel_seize(), _bt_start_vacuum(), _bt_vacuum_cycleid(), AbsorbSyncRequests(), ActivateCommitTs(), addLSNWaiter(), AdvanceNextFullTransactionIdPastXid(), AdvanceOldestClogXid(), AdvanceOldestCommitTsXid(), AdvanceXLInsertBuffer(), alloc_object(), AlterSystemSetConfigFile(), ApplyLauncherMain(), apw_detach_shmem(), apw_dump_now(), apw_load_buffers(), AsyncNotifyFreezeXids(), asyncQueueAddEntries(), asyncQueueAdvanceTail(), asyncQueueReadAllNotifications(), asyncQueueUnregister(), AtAbort_Twophase(), AtEOXact_LogicalRepWorkers(), AtPrepare_PredicateLocks(), attach_internal(), autoprewarm_main(), autoprewarm_start_worker(), AutoVacLauncherMain(), AutoVacuumRequestWork(), AutoVacWorkerMain(), BackendPidGetProc(), BackendXidGetPid(), BecomeLockGroupLeader(), BecomeLockGroupMember(), btparallelrescan(), BufferAlloc(), CancelDBBackends(), check_for_freed_segments(), CheckDeadLock(), CheckForSerializableConflictOut(), CheckPointPredicate(), CheckPointRelationMap(), CheckPointReplicationOrigin(), CheckPointReplicationSlots(), CheckPointTwoPhase(), CheckTableForSerializableConflictIn(), CheckTargetForConflictsIn(), choose_next_subplan_for_leader(), choose_next_subplan_for_worker(), CleanupInvalidationState(), ClearOldPredicateLocks(), ComputeXidHorizons(), consume_xids_shortcut(), copy_replication_slot(), CountDBBackends(), CountDBConnections(), CountOtherDBBackends(), CountUserBackends(), CreateCheckPoint(), CreateEndOfRecoveryRecord(), CreateInitDecodingContext(), CreatePredicateLock(), CreateRestartPoint(), DeactivateCommitTs(), DeleteChildTargetLocks(), DeleteLockTarget(), deleteLSNWaiter(), destroy_superblock(), do_autovacuum(), do_pg_backup_start(), do_pg_backup_stop(), do_start_worker(), DropAllPredicateLocksFromTable(), DropTableSpace(), dsa_allocate_extended(), dsa_dump(), dsa_free(), dsa_get_total_size(), dsa_get_total_size_from_handle(), dsa_pin(), dsa_release_in_place(), dsa_set_size_limit(), dsa_trim(), dsa_unpin(), dshash_delete_key(), dshash_dump(), dshash_find(), dshash_find_or_insert(), dshash_seq_next(), dsm_attach(), dsm_create(), dsm_detach(), dsm_pin_segment(), dsm_unpin_segment(), ensure_active_superblock(), entry_reset(), Exec_ListenPreCommit(), ExecParallelHashMergeCounters(), ExecParallelHashPopChunkQueue(), ExecParallelHashTupleAlloc(), ExecParallelHashTuplePrealloc(), ExpireAllKnownAssignedTransactionIds(), ExpireOldKnownAssignedTransactionIds(), ExpireTreeKnownAssignedTransactionIds(), ExtendBufferedRelShared(), ExtendCLOG(), ExtendCommitTs(), ExtendMultiXactMember(), ExtendMultiXactOffset(), ExtendSUBTRANS(), FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), FindAndDropRelationBuffers(), FindDeletedTupleInLocalRel(), FinishPreparedTransaction(), FlushUnlockedBuffer(), ForceTransactionIdLimitUpdate(), ForwardSyncRequest(), FreeWorkerInfo(), get_local_synced_slots(), get_val_in_shmem(), get_xid_status(), GetBackgroundWorkerPid(), GetBackgroundWorkerTypeByPid(), GetBlockerStatusData(), GetConflictingVirtualXIDs(), GetCurrentVirtualXIDs(), GetLastImportantRecPtr(), GetLastSegSwitchData(), GetLatestCommitTsData(), GetLeaderApplyWorkerPid(), GetLockConflicts(), GetLockStatusData(), GetMultiXactIdMembers(), GetMultiXactInfo(), GetNewMultiXactId(), GetNewObjectId(), GetNewTransactionId(), GetOldestActiveTransactionId(), GetOldestMultiXactId(), GetOldestRestartPoint(), GetOldestSafeDecodingTransactionId(), GetOldestUnsummarizedLSN(), 
GetPredicateLockStatusData(), GetPreparedTransactionList(), GetRunningTransactionData(), GetRunningTransactionLocks(), GetSafeSnapshot(), GetSafeSnapshotBlockingPids(), GetSerializableTransactionSnapshotInt(), GetSnapshotData(), GetStrictOldestNonRemovableTransactionId(), GetVirtualXIDsDelayingChkpt(), GetWaitEventCustomIdentifier(), GetWaitEventCustomNames(), GetWalSummarizerState(), HaveVirtualXIDsDelayingChkpt(), init_conflict_slot_xmin(), init_dsm_registry(), InitWalSender(), injection_shmem_startup(), InjectionPointAttach(), InjectionPointDetach(), InjectionPointList(), InstallXLogFileSegment(), InvalidateBuffer(), InvalidateObsoleteReplicationSlots(), InvalidatePossiblyObsoleteSlot(), InvalidateVictimBuffer(), IoWorkerMain(), IsInstallXLogFileSegmentActive(), KnownAssignedXidsCompress(), KnownAssignedXidsReset(), lock_twophase_recover(), LockAcquireExtended(), LockBuffer(), LockErrorCleanup(), LockGXact(), LockHasWaiters(), LockRefindAndRelease(), LockRelease(), LockReleaseAll(), LockWaiterCount(), logicalrep_launcher_attach_dshmem(), logicalrep_pa_worker_stop(), logicalrep_reset_seqsync_start_time(), logicalrep_worker_attach(), logicalrep_worker_detach(), logicalrep_worker_launch(), logicalrep_worker_stop(), logicalrep_worker_stop_internal(), logicalrep_worker_wakeup(), logicalrep_workers_find(), LookupGXact(), LookupGXactBySubid(), MarkAsPrepared(), MarkAsPreparing(), MarkDirtyUnpinnedBufferInternal(), multixact_redo(), MultiXactAdvanceNextMXact(), MultiXactGetCheckptMulti(), MultiXactIdSetOldestMember(), MultiXactIdSetOldestVisible(), MultiXactSetNextMXact(), PageIsPredicateLocked(), perform_relmap_update(), pg_control_checkpoint(), pg_control_init(), pg_control_recovery(), pg_control_system(), pg_get_replication_slots(), pg_get_shmem_allocations(), pg_get_shmem_allocations_numa(), pg_notification_queue_usage(), pg_show_replication_origin_status(), pg_stat_get_subscription(), pg_stat_statements_internal(), pg_xact_status(), pgaio_worker_die(), pgaio_worker_register(), pgaio_worker_submit_internal(), pgss_shmem_startup(), pgss_store(), pgstat_archiver_reset_all_cb(), pgstat_archiver_snapshot_cb(), pgstat_bgwriter_reset_all_cb(), pgstat_bgwriter_snapshot_cb(), pgstat_build_snapshot(), pgstat_checkpointer_reset_all_cb(), pgstat_checkpointer_snapshot_cb(), pgstat_fetch_replslot(), pgstat_io_flush_cb(), pgstat_io_reset_all_cb(), pgstat_io_snapshot_cb(), pgstat_lock_entry(), pgstat_lock_entry_shared(), pgstat_reset_matching_entries(), pgstat_reset_replslot(), pgstat_reset_slru_counter_internal(), pgstat_slru_flush_cb(), pgstat_slru_snapshot_cb(), pgstat_wal_flush_cb(), pgstat_wal_reset_all_cb(), pgstat_wal_snapshot_cb(), PostPrepare_Locks(), PostPrepare_MultiXact(), PostPrepare_Twophase(), PreCommit_CheckForSerializationFailure(), PreCommit_Notify(), predicatelock_twophase_recover(), PredicateLockPageSplit(), PredicateLockTwoPhaseFinish(), PrefetchSharedBuffer(), PrescanPreparedTransactions(), ProcArrayAdd(), ProcArrayApplyRecoveryInfo(), ProcArrayApplyXidAssignment(), ProcArrayClearTransaction(), ProcArrayEndTransaction(), ProcArrayGetReplicationSlotXmin(), ProcArrayGroupClearXid(), ProcArrayInstallImportedXmin(), ProcArrayInstallRestoredXmin(), ProcArrayRemove(), ProcArraySetReplicationSlotXmin(), ProcessSequencesForSync(), ProcessSyncingTablesForApply(), ProcKill(), ProcNumberGetTransactionIds(), ProcSleep(), ReachedEndOfBackup(), read_relmap_file(), ReadMultiXactIdRange(), ReadNextFullTransactionId(), ReadNextMultiXactId(), ReadReplicationSlot(), RecordNewMultiXact(), 
RecoverPreparedTransactions(), RegisterDynamicBackgroundWorker(), RegisterPredicateLockingXid(), RelationCacheInitFilePreInvalidate(), RelationMapCopy(), RelationMapFinishBootstrap(), ReleaseOneSerializableXact(), ReleasePredicateLocks(), relmap_redo(), RemoveScratchTarget(), ReplicationOriginExitCleanup(), ReplicationSlotAcquire(), ReplicationSlotCleanup(), ReplicationSlotCreate(), ReplicationSlotDropPtr(), ReplicationSlotName(), ReplicationSlotRelease(), ReplicationSlotReserveWal(), ReplicationSlotsComputeLogicalRestartLSN(), ReplicationSlotsComputeRequiredLSN(), ReplicationSlotsComputeRequiredXmin(), ReplicationSlotsCountDBSlots(), ReplicationSlotsDropDBSlots(), replorigin_advance(), replorigin_get_progress(), replorigin_session_advance(), replorigin_session_get_progress(), replorigin_session_reset(), replorigin_session_setup(), replorigin_state_clear(), ResetInstallXLogFileSegmentActive(), resize(), RestoreScratchTarget(), restoreTwoPhaseData(), SaveSlotToPath(), SearchNamedReplicationSlot(), SerialAdd(), SerialGetMinConflictCommitSeqNo(), SerialInit(), SerialSetActiveSerXmin(), set_indexsafe_procflags(), set_val_in_shmem(), SetCommitTsLimit(), SetInstallXLogFileSegmentActive(), SetMultiXactIdLimit(), SetNextObjectId(), SetOldestOffset(), SetTransactionIdLimit(), SetXidCommitTsInPage(), SharedInvalBackendInit(), ShmemInitStruct(), SICleanupQueue(), SIGetDataEntries(), SignalBackends(), SignalVirtualTransaction(), SIInsertDataEntries(), SimpleLruReadPage(), SimpleLruReadPage_ReadOnly(), SimpleLruTruncate(), SimpleLruWaitIO(), SimpleLruWriteAll(), SimpleLruZeroAndWritePage(), SlruDeleteSegment(), SlruInternalWritePage(), SnapBuildInitialSnapshot(), ss_get_location(), StandbyRecoverPreparedTransactions(), StandbySlotsHaveCaughtup(), StartupDecodingContext(), StartupSUBTRANS(), StartupXLOG(), sts_parallel_scan_next(), SubTransSetParent(), SummarizeOldestCommittedSxact(), SummarizeWAL(), SwitchIntoArchiveRecovery(), synchronize_one_slot(), SyncRepCancelWait(), SyncRepCleanupAtProcExit(), SyncRepReleaseWaiters(), SyncRepUpdateSyncStandbysDefined(), SyncRepWaitForLSN(), TablespaceCreateDbspace(), tbm_shared_iterate(), TerminateBackgroundWorker(), TerminateOtherDBBackends(), test_aio_shmem_startup(), test_custom_stats_fixed_reset_all_cb(), test_custom_stats_fixed_snapshot_cb(), test_custom_stats_fixed_update(), test_slru_page_exists(), test_slru_page_read(), test_slru_page_write(), TransactionGroupUpdateXidStatus(), TransactionIdGetCommitTsData(), TransactionIdIsInProgress(), TransactionIdSetPageStatus(), TransactionTreeSetCommitTsData(), TransferPredicateLocksToNewTarget(), TrimCLOG(), TrimMultiXact(), TruncateMultiXact(), TwoPhaseGetGXact(), TwoPhaseGetOldestXidInCommit(), TwoPhaseGetXidByVirtualXID(), update_cached_xid_range(), update_synced_slots_inactive_since(), UpdateMinRecoveryPoint(), vac_truncate_clog(), vacuum_rel(), VacuumUpdateCosts(), VirtualXactLock(), VirtualXactLockTableCleanup(), VirtualXactLockTableInsert(), wait_for_table_state_change(), wait_for_worker_state_change(), WaitEventCustomNew(), WaitForReplicationWorkerAttach(), WaitForWalSummarization(), wakeupWaiters(), WakeupWalSummarizer(), WALInsertLockAcquire(), WALInsertLockAcquireExclusive(), WalSummarizerMain(), WalSummarizerShutdown(), write_relcache_init_file(), xact_redo(), XidCacheRemoveRunningXids(), xlog_redo(), XLogBackgroundFlush(), and XLogReportParameters().
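
A typical call site simply brackets a short critical section, as in this minimal sketch (the SomeSharedState structure and bump_counter() are hypothetical examples, not part of lwlock.c):

/* Hypothetical shared-memory structure protected by an embedded LWLock. */
typedef struct SomeSharedState
{
	LWLock		lock;
	int			counter;
} SomeSharedState;

static void
bump_counter(SomeSharedState *state)
{
	LWLockAcquire(&state->lock, LW_EXCLUSIVE);
	state->counter++;			/* shared-memory update under the lock */
	LWLockRelease(&state->lock);
}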

◆ LWLockAcquireOrWait()

bool LWLockAcquireOrWait ( LWLock *  lock,
LWLockMode  mode 
)

Definition at line 1406 of file lwlock.c.

bool
LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
{
	PGPROC	   *proc = MyProc;
	bool		mustwait;
	int			extraWaits = 0;
#ifdef LWLOCK_STATS
	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);
#endif

	Assert(mode == LW_SHARED || mode == LW_EXCLUSIVE);

	PRINT_LWDEBUG("LWLockAcquireOrWait", lock, mode);

	/* Ensure we will have room to remember the lock */
	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
		elog(ERROR, "too many LWLocks taken");

	/*
	 * Lock out cancel/die interrupts until we exit the code section protected
	 * by the LWLock.  This ensures that interrupts will not interfere with
	 * manipulations of data structures in shared memory.
	 */
	HOLD_INTERRUPTS();

	/*
	 * NB: We're using nearly the same twice-in-a-row lock acquisition
	 * protocol as LWLockAcquire().  Check its comments for details.
	 */
	mustwait = LWLockAttemptLock(lock, mode);

	if (mustwait)
	{
		LWLockQueueSelf(lock, LW_WAIT_UNTIL_FREE);

		mustwait = LWLockAttemptLock(lock, mode);

		if (mustwait)
		{
			/*
			 * Wait until awakened.  Like in LWLockAcquire, be prepared for
			 * bogus wakeups.
			 */
			LOG_LWDEBUG("LWLockAcquireOrWait", lock, "waiting");

#ifdef LWLOCK_STATS
			lwstats->block_count++;
#endif

			LWLockReportWaitStart(lock);
			if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
				TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);

			for (;;)
			{
				PGSemaphoreLock(proc->sem);
				if (proc->lwWaiting == LW_WS_NOT_WAITING)
					break;
				extraWaits++;
			}

#ifdef LOCK_DEBUG
			{
				/* not waiting anymore */
				uint32		nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);

				Assert(nwaiters < MAX_BACKENDS);
			}
#endif
			if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
				TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);
			LWLockReportWaitEnd();

			LOG_LWDEBUG("LWLockAcquireOrWait", lock, "awakened");
		}
		else
		{
			LOG_LWDEBUG("LWLockAcquireOrWait", lock, "acquired, undoing queue");

			/*
			 * Got lock in the second attempt, undo queueing.  We need to
			 * treat this as having successfully acquired the lock, otherwise
			 * we'd not necessarily wake up people we've prevented from
			 * acquiring the lock.
			 */
			LWLockDequeueSelf(lock);
		}
	}

	/*
	 * Fix the process wait semaphore's count for any absorbed wakeups.
	 */
	while (extraWaits-- > 0)
		PGSemaphoreUnlock(proc->sem);

	if (mustwait)
	{
		/* Failed to get lock, so release interrupt holdoff */
		RESUME_INTERRUPTS();
		LOG_LWDEBUG("LWLockAcquireOrWait", lock, "failed");
		if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL(T_NAME(lock), mode);
	}
	else
	{
		LOG_LWDEBUG("LWLockAcquireOrWait", lock, "succeeded");
		/* Add lock to list of locks held by this backend */
		held_lwlocks[num_held_lwlocks].lock = lock;
		held_lwlocks[num_held_lwlocks++].mode = mode;
		if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT(T_NAME(lock), mode);
	}

	return !mustwait;
}

References Assert(), elog, ERROR, held_lwlocks, HOLD_INTERRUPTS, LWLockHandle::lock, LOG_LWDEBUG, LW_EXCLUSIVE, LW_SHARED, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LWLockAttemptLock(), LWLockDequeueSelf(), LWLockQueueSelf(), LWLockReportWaitEnd(), LWLockReportWaitStart(), PGPROC::lwWaiting, MAX_BACKENDS, MAX_SIMUL_LWLOCKS, LWLockHandle::mode, mode, MyProc, num_held_lwlocks, pg_atomic_fetch_sub_u32(), PG_USED_FOR_ASSERTS_ONLY, PGSemaphoreLock(), PGSemaphoreUnlock(), PRINT_LWDEBUG, RESUME_INTERRUPTS, PGPROC::sem, and T_NAME.

Referenced by XLogFlush().
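
The return value tells the caller whether it acquired the lock (true) or merely waited until the lock was free (false); XLogFlush() uses this to recheck whether another backend already flushed the requested range. An illustrative sketch of the pattern, with do_work() and work_already_done() as hypothetical stand-ins:

if (LWLockAcquireOrWait(lock, LW_EXCLUSIVE))
{
	do_work();					/* we hold the lock */
	LWLockRelease(lock);
}
else if (!work_already_done())
{
	/* lock was held but has since been released; recheck and retry */
}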

◆ LWLockAnyHeldByMe()

bool LWLockAnyHeldByMe ( LWLock *  lock,
int  nlocks,
size_t  stride 
)

Definition at line 1999 of file lwlock.c.

bool
LWLockAnyHeldByMe(LWLock *lock, int nlocks, size_t stride)
{
	char	   *held_lock_addr;
	char	   *begin;
	char	   *end;
	int			i;

	begin = (char *) lock;
	end = begin + nlocks * stride;
	for (i = 0; i < num_held_lwlocks; i++)
	{
		held_lock_addr = (char *) held_lwlocks[i].lock;
		if (held_lock_addr >= begin &&
			held_lock_addr < end &&
			(held_lock_addr - begin) % stride == 0)
			return true;
	}
	return false;
}

References held_lwlocks, i, and num_held_lwlocks.
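
The stride parameter lets callers test an array of structures that embed LWLocks, checking every embedded lock with a single call. A minimal sketch, where MyEntry is a hypothetical shared-memory element type:

/* Hypothetical array element embedding an LWLock. */
typedef struct MyEntry
{
	LWLock		lock;
	int			payload;
} MyEntry;

static bool
holding_any_entry_lock(MyEntry *entries, int nentries)
{
	/* true if this backend holds the lock of any array element */
	return LWLockAnyHeldByMe(&entries[0].lock, nentries, sizeof(MyEntry));
}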

◆ LWLockAttemptLock()

static bool LWLockAttemptLock ( LWLock *  lock,
LWLockMode  mode 
)
static

Definition at line 790 of file lwlock.c.

static bool
LWLockAttemptLock(LWLock *lock, LWLockMode mode)
{
	uint32		old_state;

	Assert(mode == LW_SHARED || mode == LW_EXCLUSIVE);

	/*
	 * Read once outside the loop, later iterations will get the newer value
	 * via compare & exchange.
	 */
	old_state = pg_atomic_read_u32(&lock->state);

	/* loop until we've determined whether we could acquire the lock or not */
	while (true)
	{
		uint32		desired_state;
		bool		lock_free;

		desired_state = old_state;

		if (mode == LW_EXCLUSIVE)
		{
			lock_free = (old_state & LW_LOCK_MASK) == 0;
			if (lock_free)
				desired_state += LW_VAL_EXCLUSIVE;
		}
		else
		{
			lock_free = (old_state & LW_VAL_EXCLUSIVE) == 0;
			if (lock_free)
				desired_state += LW_VAL_SHARED;
		}

		/*
		 * Attempt to swap in the state we are expecting.  If we didn't see
		 * the lock as free, that's just the old value.  If we saw it as free,
		 * we'll attempt to mark it acquired.  The reason that we always swap
		 * in the value is that this doubles as a memory barrier.  We could
		 * try to be smarter and only swap in values if we saw the lock as
		 * free, but benchmarks haven't shown it as beneficial so far.
		 *
		 * Retry if the value changed since we last looked at it.
		 */
		if (pg_atomic_compare_exchange_u32(&lock->state,
										   &old_state, desired_state))
		{
			if (lock_free)
			{
				/* Great!  Got the lock. */
#ifdef LOCK_DEBUG
				if (mode == LW_EXCLUSIVE)
					lock->owner = MyProc;
#endif
				return false;
			}
			else
				return true;	/* somebody else has the lock */
		}
	}
	pg_unreachable();
}

References Assert(), LW_EXCLUSIVE, LW_LOCK_MASK, LW_SHARED, LW_VAL_EXCLUSIVE, LW_VAL_SHARED, mode, MyProc, pg_atomic_compare_exchange_u32(), pg_atomic_read_u32(), pg_unreachable, and LWLock::state.

Referenced by LWLockAcquire(), LWLockAcquireOrWait(), and LWLockConditionalAcquire().

◆ LWLockConditionalAcquire()

bool LWLockConditionalAcquire ( LWLock *  lock,
LWLockMode  mode 
)

Definition at line 1349 of file lwlock.c.

bool
LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
{
	bool		mustwait;

	Assert(mode == LW_SHARED || mode == LW_EXCLUSIVE);

	PRINT_LWDEBUG("LWLockConditionalAcquire", lock, mode);

	/* Ensure we will have room to remember the lock */
	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
		elog(ERROR, "too many LWLocks taken");

	/*
	 * Lock out cancel/die interrupts until we exit the code section protected
	 * by the LWLock.  This ensures that interrupts will not interfere with
	 * manipulations of data structures in shared memory.
	 */
	HOLD_INTERRUPTS();

	/* Check for the lock */
	mustwait = LWLockAttemptLock(lock, mode);

	if (mustwait)
	{
		/* Failed to get lock, so release interrupt holdoff */
		RESUME_INTERRUPTS();

		LOG_LWDEBUG("LWLockConditionalAcquire", lock, "failed");
		if (TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(T_NAME(lock), mode);
	}
	else
	{
		/* Add lock to list of locks held by this backend */
		held_lwlocks[num_held_lwlocks].lock = lock;
		held_lwlocks[num_held_lwlocks++].mode = mode;
		if (TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(T_NAME(lock), mode);
	}
	return !mustwait;
}

References Assert(), elog, ERROR, held_lwlocks, HOLD_INTERRUPTS, LWLockHandle::lock, LOG_LWDEBUG, LW_EXCLUSIVE, LW_SHARED, LWLockAttemptLock(), MAX_SIMUL_LWLOCKS, LWLockHandle::mode, mode, num_held_lwlocks, PRINT_LWDEBUG, RESUME_INTERRUPTS, and T_NAME.

Referenced by ConditionalLockBuffer(), GetVictimBuffer(), pgstat_io_flush_cb(), pgstat_lock_entry(), pgstat_lock_entry_shared(), pgstat_slru_flush_cb(), pgstat_wal_flush_cb(), ProcArrayEndTransaction(), SimpleLruWaitIO(), ss_report_location(), TransactionIdSetPageStatus(), and XLogNeedsFlush().
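
Callers use this to take a fast path when the lock is free and fall back to other work instead of sleeping, as in this illustrative sketch (update_shared_state() is a hypothetical stand-in):

if (LWLockConditionalAcquire(lock, LW_EXCLUSIVE))
{
	/* fast path: got the lock without blocking */
	update_shared_state();
	LWLockRelease(lock);
}
else
{
	/* lock busy: defer or skip the work rather than wait */
}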

◆ LWLockConflictsWithVar()

static bool LWLockConflictsWithVar ( LWLock *  lock,
pg_atomic_uint64 *  valptr,
uint64  oldval,
uint64 *  newval,
bool *  result 
)
static

Definition at line 1533 of file lwlock.c.

static bool
LWLockConflictsWithVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval,
					   uint64 *newval, bool *result)
{
	bool		mustwait;
	uint64		value;

	/*
	 * Test first to see if the slot is free right now.
	 *
	 * XXX: the unique caller of this routine, WaitXLogInsertionsToFinish()
	 * via LWLockWaitForVar(), uses an implied barrier with a spinlock before
	 * this, so we don't need a memory barrier here as far as the current
	 * usage is concerned.  But that might not be safe in general.
	 */
	mustwait = (pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE) != 0;

	if (!mustwait)
	{
		*result = true;
		return false;
	}

	*result = false;

	/*
	 * Reading this value atomically is safe even on platforms where uint64
	 * cannot be read without observing a torn value.
	 */
	value = pg_atomic_read_u64(valptr);

	if (value != oldval)
	{
		mustwait = false;
		*newval = value;
	}
	else
	{
		mustwait = true;
	}

	return mustwait;
}

References LW_VAL_EXCLUSIVE, newval, pg_atomic_read_u32(), pg_atomic_read_u64(), LWLock::state, and value.

Referenced by LWLockWaitForVar().

◆ LWLockDequeueSelf()

static void LWLockDequeueSelf ( LWLock *  lock)
static

Definition at line 1089 of file lwlock.c.

static void
LWLockDequeueSelf(LWLock *lock)
{
	bool		on_waitlist;

#ifdef LWLOCK_STATS
	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);

	lwstats->dequeue_self_count++;
#endif

	LWLockWaitListLock(lock);

	/*
	 * Remove ourselves from the waitlist, unless we've already been removed.
	 * The removal happens with the wait list lock held, so there's no race in
	 * this check.
	 */
	on_waitlist = MyProc->lwWaiting == LW_WS_WAITING;
	if (on_waitlist)
		proclist_delete(&lock->waiters, MyProcNumber, lwWaitLink);

	if (proclist_is_empty(&lock->waiters) &&
		(pg_atomic_read_u32(&lock->state) & LW_FLAG_HAS_WAITERS) != 0)
	{
		pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_HAS_WAITERS);
	}

	/* XXX: combine with fetch_and above? */
	LWLockWaitListUnlock(lock);

	/* clear waiting state again, nice for debugging */
	if (on_waitlist)
		MyProc->lwWaiting = LW_WS_NOT_WAITING;
	else
	{
		int			extraWaits = 0;

		/*
		 * Somebody else dequeued us and has or will wake us up. Deal with the
		 * superfluous absorption of a wakeup.
		 */

		/*
		 * Reset RELEASE_OK flag if somebody woke us before we removed
		 * ourselves - they'll have set it to false.
		 */
		pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_RELEASE_OK);

		/*
		 * Now wait for the scheduled wakeup, otherwise our ->lwWaiting would
		 * get reset at some inconvenient point later. Most of the time this
		 * will immediately return.
		 */
		for (;;)
		{
			PGSemaphoreLock(MyProc->sem);
			if (MyProc->lwWaiting == LW_WS_NOT_WAITING)
				break;
			extraWaits++;
		}

		/*
		 * Fix the process wait semaphore's count for any absorbed wakeups.
		 */
		while (extraWaits-- > 0)
			PGSemaphoreUnlock(MyProc->sem);
	}

#ifdef LOCK_DEBUG
	{
		/* not waiting anymore */
		uint32		nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);

		Assert(nwaiters < MAX_BACKENDS);
	}
#endif
}

References Assert(), LW_FLAG_HAS_WAITERS, LW_FLAG_RELEASE_OK, LW_WS_NOT_WAITING, LW_WS_WAITING, LWLockWaitListLock(), LWLockWaitListUnlock(), PGPROC::lwWaiting, MAX_BACKENDS, MyProc, MyProcNumber, pg_atomic_fetch_and_u32(), pg_atomic_fetch_or_u32(), pg_atomic_fetch_sub_u32(), pg_atomic_read_u32(), PG_USED_FOR_ASSERTS_ONLY, PGSemaphoreLock(), PGSemaphoreUnlock(), proclist_delete, proclist_is_empty(), PGPROC::sem, LWLock::state, and LWLock::waiters.

Referenced by LWLockAcquire(), LWLockAcquireOrWait(), and LWLockWaitForVar().

◆ LWLockDisown()

void LWLockDisown ( LWLock *  lock)

Definition at line 1887 of file lwlock.c.

void
LWLockDisown(LWLock *lock)
{
	LWLockDisownInternal(lock);

	RESUME_INTERRUPTS();
}

References LWLockDisownInternal(), and RESUME_INTERRUPTS.

Referenced by buffer_stage_common().
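
Disowning keeps the lock held but drops it from this backend's held_lwlocks bookkeeping (and re-allows interrupts), so that it can later be released with LWLockReleaseDisowned(), possibly from a different context, as the AIO staging path does. An illustrative sequence:

LWLockAcquire(lock, LW_EXCLUSIVE);
LWLockDisown(lock);					/* still locked, no longer tracked here */
/* ... later, whoever completes the operation ... */
LWLockReleaseDisowned(lock, LW_EXCLUSIVE);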

◆ LWLockDisownInternal()

static LWLockMode LWLockDisownInternal ( LWLock *  lock)
inlinestatic

Definition at line 1804 of file lwlock.c.

static inline LWLockMode
LWLockDisownInternal(LWLock *lock)
{
	LWLockMode	mode;
	int			i;

	/*
	 * Remove lock from list of locks held.  Usually, but not always, it will
	 * be the latest-acquired lock; so search array backwards.
	 */
	for (i = num_held_lwlocks; --i >= 0;)
		if (lock == held_lwlocks[i].lock)
			break;

	if (i < 0)
		elog(ERROR, "lock %s is not held", T_NAME(lock));

	mode = held_lwlocks[i].mode;

	num_held_lwlocks--;
	for (; i < num_held_lwlocks; i++)
		held_lwlocks[i] = held_lwlocks[i + 1];

	return mode;
}

References elog, ERROR, held_lwlocks, i, LWLockHandle::mode, mode, num_held_lwlocks, and T_NAME.

Referenced by LWLockDisown(), and LWLockRelease().

◆ LWLockHeldByMe()

◆ LWLockHeldByMeInMode()

◆ LWLockInitialize()

◆ LWLockNewTrancheId()

int LWLockNewTrancheId ( const char *  name)

Definition at line 596 of file lwlock.c.

int
LWLockNewTrancheId(const char *name)
{
	int			result;

	if (!name)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_NAME),
				 errmsg("tranche name cannot be NULL")));

	if (strlen(name) >= NAMEDATALEN)
		ereport(ERROR,
				(errcode(ERRCODE_NAME_TOO_LONG),
				 errmsg("tranche name too long"),
				 errdetail("LWLock tranche names must be no longer than %d bytes.",
						   NAMEDATALEN - 1)));

	/*
	 * We use the ShmemLock spinlock to protect LWLockCounter and
	 * LWLockTrancheNames.
	 */
	SpinLockAcquire(ShmemLock);

	if (*LWLockCounter - LWTRANCHE_FIRST_USER_DEFINED >= MAX_NAMED_TRANCHES)
	{
		SpinLockRelease(ShmemLock);
		ereport(ERROR,
				(errmsg("maximum number of tranches already registered"),
				 errdetail("No more than %d tranches may be registered.",
						   MAX_NAMED_TRANCHES)));
	}

	result = (*LWLockCounter)++;
	LocalLWLockCounter = *LWLockCounter;
	strlcpy(LWLockTrancheNames[result - LWTRANCHE_FIRST_USER_DEFINED], name,
			NAMEDATALEN);

	SpinLockRelease(ShmemLock);

	return result;
}

References ereport, errcode(), errdetail(), errmsg(), ERROR, LocalLWLockCounter, LWLockCounter, LWLockTrancheNames, LWTRANCHE_FIRST_USER_DEFINED, MAX_NAMED_TRANCHES, name, NAMEDATALEN, ShmemLock, SpinLockAcquire, SpinLockRelease, and strlcpy().

Referenced by apw_init_state(), GetNamedDSA(), GetNamedDSHash(), init_tdr_dsm(), init_tranche(), InitializeLWLocks(), test_basic(), test_create(), test_empty(), test_lwlock_tranche_creation(), test_lwlock_tranches(), test_random(), and test_slru_shmem_startup().
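
Extensions that place LWLocks in memory they manage themselves (for example a DSM segment) typically pair this with LWLockInitialize(). A minimal sketch, with all names hypothetical:

static int	my_tranche_id;
static LWLock *my_lock;				/* points into extension shared memory */

static void
my_ext_init_lock(void)
{
	/* register the tranche name, then label the lock with its tranche */
	my_tranche_id = LWLockNewTrancheId("my_ext_tranche");
	LWLockInitialize(my_lock, my_tranche_id);
}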

◆ LWLockQueueSelf()

static void LWLockQueueSelf ( LWLock *  lock,
LWLockMode  mode 
)
static

Definition at line 1046 of file lwlock.c.

static void
LWLockQueueSelf(LWLock *lock, LWLockMode mode)
{
	/*
	 * If we don't have a PGPROC structure, there's no way to wait. This
	 * should never occur, since MyProc should only be null during shared
	 * memory initialization.
	 */
	if (MyProc == NULL)
		elog(PANIC, "cannot wait without a PGPROC structure");

	if (MyProc->lwWaiting != LW_WS_NOT_WAITING)
		elog(PANIC, "queueing for lock while waiting on another one");

	LWLockWaitListLock(lock);

	/* setting the flag is protected by the spinlock */
	pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_HAS_WAITERS);

	MyProc->lwWaiting = LW_WS_WAITING;
	MyProc->lwWaitMode = mode;

	/* LW_WAIT_UNTIL_FREE waiters are always at the front of the queue */
	if (mode == LW_WAIT_UNTIL_FREE)
		proclist_push_head(&lock->waiters, MyProcNumber, lwWaitLink);
	else
		proclist_push_tail(&lock->waiters, MyProcNumber, lwWaitLink);

	/* Can release the mutex now */
	LWLockWaitListUnlock(lock);

#ifdef LOCK_DEBUG
	pg_atomic_fetch_add_u32(&lock->nwaiters, 1);
#endif
}

References elog, LW_FLAG_HAS_WAITERS, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LW_WS_WAITING, LWLockWaitListLock(), LWLockWaitListUnlock(), PGPROC::lwWaiting, PGPROC::lwWaitMode, mode, MyProc, MyProcNumber, PANIC, pg_atomic_fetch_add_u32(), pg_atomic_fetch_or_u32(), proclist_push_head, proclist_push_tail, LWLock::state, and LWLock::waiters.

Referenced by LWLockAcquire(), LWLockAcquireOrWait(), and LWLockWaitForVar().

◆ LWLockRelease()

void LWLockRelease ( LWLock *  lock)

Definition at line 1898 of file lwlock.c.

void
LWLockRelease(LWLock *lock)
{
	LWLockMode	mode;

	mode = LWLockDisownInternal(lock);

	PRINT_LWDEBUG("LWLockRelease", lock, mode);

	LWLockReleaseInternal(lock, mode);

	/*
	 * Now okay to allow cancel/die interrupts.
	 */
	RESUME_INTERRUPTS();
}

References LWLockDisownInternal(), LWLockReleaseInternal(), mode, PRINT_LWDEBUG, and RESUME_INTERRUPTS.

Referenced by _bt_end_vacuum(), _bt_parallel_done(), _bt_parallel_primscan_schedule(), _bt_parallel_release(), _bt_parallel_seize(), _bt_start_vacuum(), _bt_vacuum_cycleid(), AbsorbSyncRequests(), ActivateCommitTs(), addLSNWaiter(), AdvanceNextFullTransactionIdPastXid(), AdvanceOldestClogXid(), AdvanceOldestCommitTsXid(), AdvanceXLInsertBuffer(), alloc_object(), AlterSystemSetConfigFile(), ApplyLauncherMain(), apw_detach_shmem(), apw_dump_now(), apw_load_buffers(), AsyncNotifyFreezeXids(), asyncQueueAddEntries(), asyncQueueAdvanceTail(), asyncQueueProcessPageEntries(), asyncQueueReadAllNotifications(), asyncQueueUnregister(), AtAbort_Twophase(), AtEOXact_LogicalRepWorkers(), AtPrepare_PredicateLocks(), attach_internal(), autoprewarm_main(), autoprewarm_start_worker(), AutoVacLauncherMain(), AutoVacuumRequestWork(), AutoVacWorkerMain(), BackendPidGetProc(), BackendXidGetPid(), BecomeLockGroupLeader(), BecomeLockGroupMember(), btparallelrescan(), BufferAlloc(), CancelDBBackends(), check_for_freed_segments(), CheckDeadLock(), CheckForSerializableConflictOut(), CheckPointPredicate(), CheckPointRelationMap(), CheckPointReplicationOrigin(), CheckPointReplicationSlots(), CheckPointTwoPhase(), CheckTableForSerializableConflictIn(), CheckTargetForConflictsIn(), choose_next_subplan_for_leader(), choose_next_subplan_for_worker(), CleanupInvalidationState(), ClearOldPredicateLocks(), ComputeXidHorizons(), consume_xids_shortcut(), copy_replication_slot(), CountDBBackends(), CountDBConnections(), CountOtherDBBackends(), CountUserBackends(), CreateCheckPoint(), CreateEndOfRecoveryRecord(), CreateInitDecodingContext(), CreatePredicateLock(), CreateRestartPoint(), DeactivateCommitTs(), DeleteChildTargetLocks(), DeleteLockTarget(), deleteLSNWaiter(), destroy_superblock(), do_autovacuum(), do_pg_backup_start(), do_pg_backup_stop(), do_start_worker(), DropAllPredicateLocksFromTable(), DropTableSpace(), dsa_allocate_extended(), dsa_dump(), dsa_free(), dsa_get_total_size(), dsa_get_total_size_from_handle(), dsa_pin(), dsa_release_in_place(), dsa_set_size_limit(), dsa_trim(), dsa_unpin(), dshash_delete_entry(), dshash_delete_key(), dshash_dump(), dshash_find(), dshash_find_or_insert(), dshash_release_lock(), dshash_seq_next(), dshash_seq_term(), dsm_attach(), dsm_create(), dsm_detach(), dsm_pin_segment(), dsm_unpin_segment(), ensure_active_superblock(), entry_reset(), Exec_ListenPreCommit(), ExecParallelHashMergeCounters(), ExecParallelHashPopChunkQueue(), ExecParallelHashTupleAlloc(), ExecParallelHashTuplePrealloc(), ExpireAllKnownAssignedTransactionIds(), ExpireOldKnownAssignedTransactionIds(), ExpireTreeKnownAssignedTransactionIds(), ExtendBufferedRelShared(), ExtendCLOG(), ExtendCommitTs(), ExtendMultiXactMember(), ExtendMultiXactOffset(), ExtendSUBTRANS(), FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), find_multixact_start(), FindAndDropRelationBuffers(), FindDeletedTupleInLocalRel(), FinishPreparedTransaction(), FlushUnlockedBuffer(), ForceTransactionIdLimitUpdate(), ForwardSyncRequest(), FreeWorkerInfo(), get_local_synced_slots(), get_val_in_shmem(), get_xid_status(), GetBackgroundWorkerPid(), GetBackgroundWorkerTypeByPid(), GetBlockerStatusData(), GetConflictingVirtualXIDs(), GetCurrentVirtualXIDs(), GetLastImportantRecPtr(), GetLastSegSwitchData(), GetLatestCommitTsData(), GetLeaderApplyWorkerPid(), GetLockConflicts(), GetLockStatusData(), GetMultiXactIdMembers(), GetMultiXactInfo(), GetNewMultiXactId(), GetNewObjectId(), GetNewTransactionId(), GetOldestActiveTransactionId(), 
GetOldestMultiXactId(), GetOldestRestartPoint(), GetOldestSafeDecodingTransactionId(), GetOldestUnsummarizedLSN(), GetPredicateLockStatusData(), GetPreparedTransactionList(), GetRunningTransactionLocks(), GetSafeSnapshot(), GetSafeSnapshotBlockingPids(), GetSerializableTransactionSnapshotInt(), GetSnapshotData(), GetStrictOldestNonRemovableTransactionId(), GetVictimBuffer(), GetVirtualXIDsDelayingChkpt(), GetWaitEventCustomIdentifier(), GetWaitEventCustomNames(), GetWalSummarizerState(), HaveVirtualXIDsDelayingChkpt(), init_conflict_slot_xmin(), init_dsm_registry(), InitWalSender(), injection_shmem_startup(), InjectionPointAttach(), InjectionPointDetach(), InjectionPointList(), InstallXLogFileSegment(), InvalidateBuffer(), InvalidateObsoleteReplicationSlots(), InvalidatePossiblyObsoleteSlot(), InvalidateVictimBuffer(), IoWorkerMain(), IsInstallXLogFileSegmentActive(), KnownAssignedXidsCompress(), KnownAssignedXidsReset(), lock_twophase_recover(), LockAcquireExtended(), LockBuffer(), LockErrorCleanup(), LockGXact(), LockHasWaiters(), LockRefindAndRelease(), LockRelease(), LockReleaseAll(), LockWaiterCount(), logicalrep_launcher_attach_dshmem(), logicalrep_pa_worker_stop(), logicalrep_reset_seqsync_start_time(), logicalrep_worker_attach(), logicalrep_worker_detach(), logicalrep_worker_launch(), logicalrep_worker_stop(), logicalrep_worker_stop_internal(), logicalrep_worker_wakeup(), logicalrep_workers_find(), LogStandbySnapshot(), LookupGXact(), LookupGXactBySubid(), LWLockReleaseAll(), LWLockReleaseClearVar(), MarkAsPrepared(), MarkAsPreparing(), MarkDirtyUnpinnedBufferInternal(), multixact_redo(), MultiXactAdvanceNextMXact(), MultiXactGetCheckptMulti(), MultiXactIdSetOldestMember(), MultiXactIdSetOldestVisible(), MultiXactSetNextMXact(), OnConflict_CheckForSerializationFailure(), PageIsPredicateLocked(), perform_relmap_update(), pg_control_checkpoint(), pg_control_init(), pg_control_recovery(), pg_control_system(), pg_get_replication_slots(), pg_get_shmem_allocations(), pg_get_shmem_allocations_numa(), pg_notification_queue_usage(), pg_show_replication_origin_status(), pg_stat_get_subscription(), pg_stat_statements_internal(), pg_xact_status(), pgaio_worker_die(), pgaio_worker_register(), pgaio_worker_submit_internal(), pgss_shmem_startup(), pgss_store(), pgstat_archiver_reset_all_cb(), pgstat_archiver_snapshot_cb(), pgstat_bgwriter_reset_all_cb(), pgstat_bgwriter_snapshot_cb(), pgstat_build_snapshot(), pgstat_checkpointer_reset_all_cb(), pgstat_checkpointer_snapshot_cb(), pgstat_fetch_replslot(), pgstat_io_flush_cb(), pgstat_io_reset_all_cb(), pgstat_io_snapshot_cb(), pgstat_reset_matching_entries(), pgstat_reset_replslot(), pgstat_reset_slru_counter_internal(), pgstat_slru_flush_cb(), pgstat_slru_snapshot_cb(), pgstat_unlock_entry(), pgstat_wal_flush_cb(), pgstat_wal_reset_all_cb(), pgstat_wal_snapshot_cb(), PostPrepare_Locks(), PostPrepare_MultiXact(), PostPrepare_Twophase(), PreCommit_CheckForSerializationFailure(), PreCommit_Notify(), predicatelock_twophase_recover(), PredicateLockPageSplit(), PredicateLockTwoPhaseFinish(), PrefetchSharedBuffer(), PrescanPreparedTransactions(), ProcArrayAdd(), ProcArrayApplyRecoveryInfo(), ProcArrayApplyXidAssignment(), ProcArrayClearTransaction(), ProcArrayEndTransaction(), ProcArrayGetReplicationSlotXmin(), ProcArrayGroupClearXid(), ProcArrayInstallImportedXmin(), ProcArrayInstallRestoredXmin(), ProcArrayRemove(), ProcArraySetReplicationSlotXmin(), ProcessSequencesForSync(), ProcessSyncingTablesForApply(), ProcKill(), ProcNumberGetTransactionIds(), 
ProcSleep(), ReachedEndOfBackup(), read_relmap_file(), ReadMultiXactIdRange(), ReadNextFullTransactionId(), ReadNextMultiXactId(), ReadReplicationSlot(), RecordNewMultiXact(), RecoverPreparedTransactions(), RegisterDynamicBackgroundWorker(), RegisterPredicateLockingXid(), RelationCacheInitFilePostInvalidate(), RelationMapCopy(), RelationMapFinishBootstrap(), ReleaseOneSerializableXact(), ReleasePredicateLocks(), relmap_redo(), RemoveScratchTarget(), ReplicationOriginExitCleanup(), ReplicationSlotAcquire(), ReplicationSlotCleanup(), ReplicationSlotCreate(), ReplicationSlotDropPtr(), ReplicationSlotName(), ReplicationSlotRelease(), ReplicationSlotReserveWal(), ReplicationSlotsComputeLogicalRestartLSN(), ReplicationSlotsComputeRequiredLSN(), ReplicationSlotsComputeRequiredXmin(), ReplicationSlotsCountDBSlots(), ReplicationSlotsDropDBSlots(), replorigin_advance(), replorigin_get_progress(), replorigin_session_advance(), replorigin_session_get_progress(), replorigin_session_reset(), replorigin_session_setup(), replorigin_state_clear(), ResetInstallXLogFileSegmentActive(), resize(), RestoreScratchTarget(), restoreTwoPhaseData(), SaveSlotToPath(), SearchNamedReplicationSlot(), SerialAdd(), SerialGetMinConflictCommitSeqNo(), SerialInit(), SerialSetActiveSerXmin(), set_indexsafe_procflags(), set_val_in_shmem(), SetCommitTsLimit(), SetInstallXLogFileSegmentActive(), SetMultiXactIdLimit(), SetNextObjectId(), SetOldestOffset(), SetTransactionIdLimit(), SetXidCommitTsInPage(), SharedInvalBackendInit(), ShmemInitStruct(), SICleanupQueue(), SIGetDataEntries(), SignalBackends(), SignalVirtualTransaction(), SIInsertDataEntries(), SimpleLruReadPage(), SimpleLruReadPage_ReadOnly(), SimpleLruTruncate(), SimpleLruWaitIO(), SimpleLruWriteAll(), SimpleLruZeroAndWritePage(), SlruDeleteSegment(), SlruInternalWritePage(), SnapBuildInitialSnapshot(), ss_get_location(), ss_report_location(), StandbyRecoverPreparedTransactions(), StandbySlotsHaveCaughtup(), StartupDecodingContext(), StartupSUBTRANS(), StartupXLOG(), sts_parallel_scan_next(), SubTransGetParent(), SubTransSetParent(), SummarizeOldestCommittedSxact(), SummarizeWAL(), SwitchIntoArchiveRecovery(), synchronize_one_slot(), SyncRepCancelWait(), SyncRepCleanupAtProcExit(), SyncRepReleaseWaiters(), SyncRepUpdateSyncStandbysDefined(), SyncRepWaitForLSN(), TablespaceCreateDbspace(), tbm_shared_iterate(), TerminateBackgroundWorker(), TerminateOtherDBBackends(), test_aio_shmem_startup(), test_custom_stats_fixed_reset_all_cb(), test_custom_stats_fixed_snapshot_cb(), test_custom_stats_fixed_update(), test_slru_page_exists(), test_slru_page_read(), test_slru_page_readonly(), test_slru_page_write(), TransactionGroupUpdateXidStatus(), TransactionIdGetCommitTsData(), TransactionIdGetStatus(), TransactionIdIsInProgress(), TransactionIdSetPageStatus(), TransactionTreeSetCommitTsData(), TransferPredicateLocksToNewTarget(), TrimCLOG(), TrimMultiXact(), TruncateMultiXact(), TwoPhaseGetGXact(), TwoPhaseGetOldestXidInCommit(), TwoPhaseGetXidByVirtualXID(), update_cached_xid_range(), update_synced_slots_inactive_since(), UpdateMinRecoveryPoint(), vac_truncate_clog(), vacuum_rel(), VacuumUpdateCosts(), VirtualXactLock(), VirtualXactLockTableCleanup(), VirtualXactLockTableInsert(), wait_for_table_state_change(), wait_for_worker_state_change(), WaitEventCustomNew(), WaitForReplicationWorkerAttach(), WaitForWalSummarization(), wakeupWaiters(), WakeupWalSummarizer(), WalSummarizerMain(), WalSummarizerShutdown(), write_relcache_init_file(), xact_redo(), XidCacheRemoveRunningXids(), 
xlog_redo(), XLogBackgroundFlush(), XLogFlush(), XLogNeedsFlush(), and XLogReportParameters().

◆ LWLockReleaseAll()

void LWLockReleaseAll ( void  )
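
This extract does not reproduce the function body. As a hedged sketch of the documented behavior (based on the held_lwlocks bookkeeping this file maintains, not a verbatim copy of the definition), the function pops and releases every LWLock the backend still holds, which is how error recovery cleans up after elog(ERROR):

void
LWLockReleaseAll(void)
{
    while (num_held_lwlocks > 0)
    {
        HOLD_INTERRUPTS();      /* match the upcoming RESUME_INTERRUPTS */

        LWLockRelease(held_lwlocks[num_held_lwlocks - 1].lock);
    }
}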

◆ LWLockReleaseClearVar()

void LWLockReleaseClearVar ( LWLock lock,
pg_atomic_uint64 valptr,
uint64  val 
)

Definition at line 1927 of file lwlock.c.

1928{
1929 /*
1930 * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
1931 * that the variable is updated before releasing the lock.
1932 */
1933 pg_atomic_exchange_u64(valptr, val);
1934
1935 LWLockRelease(lock);
1936}
static uint64 pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
Definition: atomics.h:513
long val
Definition: informix.c:689

References LWLockRelease(), pg_atomic_exchange_u64(), and val.

Referenced by WALInsertLockRelease().
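
A minimal usage sketch (the shared structure and field names here are hypothetical, not part of lwlock.c): a holder that published a position in a lock-protected pg_atomic_uint64 resets it while releasing, so that concurrent LWLockWaitForVar() callers observe the cleared value no later than the lock release.

/* Hypothetical caller: reset the protected variable and release together. */
LWLockReleaseClearVar(&shared->pos_lock,      /* LWLock we hold */
                      &shared->current_pos,   /* variable it protects */
                      0);                     /* cleared value */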

◆ LWLockReleaseDisowned()

void LWLockReleaseDisowned ( LWLock lock,
LWLockMode  mode 
)

Definition at line 1918 of file lwlock.c.

1919{
1920 LWLockReleaseInternal(lock, mode);
1921}

References LWLockReleaseInternal(), and mode.
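
A hedged sketch of the disown protocol (structure names hypothetical): the acquiring backend disowns the lock so that it is no longer tracked in held_lwlocks (and thus is not released by error recovery), and another process later releases it, passing the mode the acquirer used.

/* Acquiring backend: take the lock, then disown it. */
LWLockAcquire(&shared->io_lock, LW_EXCLUSIVE);
LWLockDisown(&shared->io_lock);

/* Elsewhere, possibly in a different process, once the work is done: */
LWLockReleaseDisowned(&shared->io_lock, LW_EXCLUSIVE);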

◆ LWLockReleaseInternal()

static void LWLockReleaseInternal ( LWLock lock,
LWLockMode  mode 
)
static

Definition at line 1834 of file lwlock.c.

1835{
1836 uint32 oldstate;
1837 bool check_waiters;
1838
1839 /*
1840 * Release my hold on lock, after that it can immediately be acquired by
1841 * others, even if we still have to wakeup other waiters.
1842 */
1843 if (mode == LW_EXCLUSIVE)
1844 oldstate = pg_atomic_sub_fetch_u32(&lock->state, LW_VAL_EXCLUSIVE);
1845 else
1846 oldstate = pg_atomic_sub_fetch_u32(&lock->state, LW_VAL_SHARED);
1847
1848 /* nobody else can have that kind of lock */
1849 Assert(!(oldstate & LW_VAL_EXCLUSIVE));
1850
1851 if (TRACE_POSTGRESQL_LWLOCK_RELEASE_ENABLED())
1852 TRACE_POSTGRESQL_LWLOCK_RELEASE(T_NAME(lock));
1853
1854 /*
1855 * We're still waiting for backends to get scheduled, don't wake them up
1856 * again.
1857 */
1858 if ((oldstate & (LW_FLAG_HAS_WAITERS | LW_FLAG_RELEASE_OK)) ==
1859 (LW_FLAG_HAS_WAITERS | LW_FLAG_RELEASE_OK) &&
1860 (oldstate & LW_LOCK_MASK) == 0)
1861 check_waiters = true;
1862 else
1863 check_waiters = false;
1864
1865 /*
1866 * As waking up waiters requires the spinlock to be acquired, only do so
1867 * if necessary.
1868 */
1869 if (check_waiters)
1870 {
1871 /* XXX: remove before commit? */
1872 LOG_LWDEBUG("LWLockRelease", lock, "releasing waiters");
1873 LWLockWakeup(lock);
1874 }
1875}
static uint32 pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
Definition: atomics.h:439
static void LWLockWakeup(LWLock *lock)
Definition: lwlock.c:930

References Assert(), LOG_LWDEBUG, LW_EXCLUSIVE, LW_FLAG_HAS_WAITERS, LW_FLAG_RELEASE_OK, LW_LOCK_MASK, LW_VAL_EXCLUSIVE, LW_VAL_SHARED, LWLockWakeup(), mode, pg_atomic_sub_fetch_u32(), LWLock::state, and T_NAME.

Referenced by LWLockRelease(), and LWLockReleaseDisowned().

◆ LWLockReportWaitEnd()

static void LWLockReportWaitEnd ( void  )
inlinestatic

Definition at line 728 of file lwlock.c.

729{
730 pgstat_report_wait_end();
731}
static void pgstat_report_wait_end(void)
Definition: wait_event.h:85

References pgstat_report_wait_end().

Referenced by LWLockAcquire(), LWLockAcquireOrWait(), and LWLockWaitForVar().

◆ LWLockReportWaitStart()

static void LWLockReportWaitStart ( LWLock lock)
inlinestatic

Definition at line 719 of file lwlock.c.

720{
721 pgstat_report_wait_start(PG_WAIT_LWLOCK | lock->tranche);
722}
static void pgstat_report_wait_start(uint32 wait_event_info)
Definition: wait_event.h:69

References PG_WAIT_LWLOCK, pgstat_report_wait_start(), and LWLock::tranche.

Referenced by LWLockAcquire(), LWLockAcquireOrWait(), and LWLockWaitForVar().
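
Both helpers are thin wrappers around the cumulative wait-event reporting machinery; the wait_event_info they report is PG_WAIT_LWLOCK combined with the lock's tranche. A hedged sketch of the same reporting pattern in extension code (the event name is hypothetical, and this assumes the WaitEventExtensionNew() API for allocating custom wait events):

static uint32 my_wait_event = 0;

static void
my_blocking_operation(void)
{
    if (my_wait_event == 0)
        my_wait_event = WaitEventExtensionNew("MyExtensionSleep");

    pgstat_report_wait_start(my_wait_event);
    /* ... perform the blocking call here ... */
    pgstat_report_wait_end();
}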

◆ LWLockShmemSize()

Size LWLockShmemSize ( void  )

Definition at line 397 of file lwlock.c.

398{
399 Size size;
400 int numLocks = NUM_FIXED_LWLOCKS;
401
402 /*
403 * If re-initializing shared memory, the request array will no longer be
404 * accessible, so switch to the copy in postmaster's local memory. We'll
405 * copy it back into shared memory later when CreateLWLocks() is called
406 * again.
407 */
408 if (LocalNamedLWLockTrancheRequestArray != NULL)
409 NamedLWLockTrancheRequestArray = LocalNamedLWLockTrancheRequestArray;
410
411 /* Calculate total number of locks needed in the main array. */
412 numLocks += NumLWLocksForNamedTranches();
413
414 /* Space for dynamic allocation counter. */
415 size = MAXALIGN(sizeof(int));
416
417 /* Space for named tranches. */
418 size = add_size(size, mul_size(MAX_NAMED_TRANCHES, sizeof(char *)));
419 size = add_size(size, mul_size(MAX_NAMED_TRANCHES, NAMEDATALEN));
420
421 /*
422 * Make space for named tranche requests. This is done for the benefit of
423 * EXEC_BACKEND builds, which otherwise wouldn't be able to call
424 * GetNamedLWLockTranche() outside postmaster.
425 */
426 size = add_size(size, mul_size(NamedLWLockTrancheRequests,
427 sizeof(NamedLWLockTrancheRequest)));
428
429 /* Space for the LWLock array, plus room for cache line alignment. */
430 size = add_size(size, LWLOCK_PADDED_SIZE);
431 size = add_size(size, mul_size(numLocks, sizeof(LWLockPadded)));
432
433 return size;
434}
static int NumLWLocksForNamedTranches(void)
Definition: lwlock.c:382
Size add_size(Size s1, Size s2)
Definition: shmem.c:495
Size mul_size(Size s1, Size s2)
Definition: shmem.c:510

References add_size(), LocalNamedLWLockTrancheRequestArray, LWLOCK_PADDED_SIZE, MAX_NAMED_TRANCHES, MAXALIGN, mul_size(), NAMEDATALEN, NamedLWLockTrancheRequestArray, NamedLWLockTrancheRequests, NUM_FIXED_LWLOCKS, and NumLWLocksForNamedTranches().

Referenced by CalculateShmemSize(), and CreateLWLocks().
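
The function illustrates the standard overflow-aware shmem sizing idiom: start from a MAXALIGN'ed fixed part and accumulate with add_size()/mul_size(), which ereport() on overflow instead of silently wrapping. A hedged sketch of the same idiom for a hypothetical extension structure (MySharedHeader and MySlot are not real types):

/* Hypothetical sizing helper using the same add_size()/mul_size() idiom. */
static Size
my_shmem_size(int nslots)
{
    Size        size;

    size = MAXALIGN(sizeof(MySharedHeader));                 /* fixed part */
    size = add_size(size, mul_size(nslots, sizeof(MySlot))); /* per-slot part */

    return size;
}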

◆ LWLockUpdateVar()

void LWLockUpdateVar ( LWLock lock,
pg_atomic_uint64 valptr,
uint64  val 
)

Definition at line 1730 of file lwlock.c.

1731{
1732 proclist_head wakeup;
1733 proclist_mutable_iter iter;
1734
1735 PRINT_LWDEBUG("LWLockUpdateVar", lock, LW_EXCLUSIVE);
1736
1737 /*
1738 * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
1739 * that the variable is updated before waking up waiters.
1740 */
1741 pg_atomic_exchange_u64(valptr, val);
1742
1743 proclist_init(&wakeup);
1744
1745 LWLockWaitListLock(lock);
1746
1747 Assert(pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE);
1748
1749 /*
1750 * See if there are any LW_WAIT_UNTIL_FREE waiters that need to be woken
1751 * up. They are always in the front of the queue.
1752 */
1753 proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
1754 {
1755 PGPROC *waiter = GetPGProcByNumber(iter.cur);
1756
1757 if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
1758 break;
1759
1760 proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
1761 proclist_push_tail(&wakeup, iter.cur, lwWaitLink);
1762
1763 /* see LWLockWakeup() */
1764 Assert(waiter->lwWaiting == LW_WS_WAITING);
1765 waiter->lwWaiting = LW_WS_PENDING_WAKEUP;
1766 }
1767
1768 /* We are done updating shared state of the lock itself. */
1769 LWLockWaitListUnlock(lock);
1770
1771 /*
1772 * Awaken any waiters I removed from the queue.
1773 */
1774 proclist_foreach_modify(iter, &wakeup, lwWaitLink)
1775 {
1776 PGPROC *waiter = GetPGProcByNumber(iter.cur);
1777
1778 proclist_delete(&wakeup, iter.cur, lwWaitLink);
1779 /* check comment in LWLockWakeup() about this barrier */
1780 pg_write_barrier();
1781 waiter->lwWaiting = LW_WS_NOT_WAITING;
1782 PGSemaphoreUnlock(waiter->sem);
1783 }
1784}
#define pg_write_barrier()
Definition: atomics.h:155
@ LW_WS_PENDING_WAKEUP
Definition: lwlock.h:32
#define GetPGProcByNumber(n)
Definition: proc.h:440
#define proclist_foreach_modify(iter, lhead, link_member)
Definition: proclist.h:206
static TimestampTz wakeup[NUM_WALRCV_WAKEUPS]
Definition: walreceiver.c:130

References Assert(), proclist_mutable_iter::cur, GetPGProcByNumber, LW_EXCLUSIVE, LW_VAL_EXCLUSIVE, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LW_WS_PENDING_WAKEUP, LW_WS_WAITING, LWLockWaitListLock(), LWLockWaitListUnlock(), PGPROC::lwWaiting, PGPROC::lwWaitMode, pg_atomic_exchange_u64(), pg_atomic_read_u32(), pg_write_barrier, PGSemaphoreUnlock(), PRINT_LWDEBUG, proclist_delete, proclist_foreach_modify, proclist_init(), proclist_push_tail, PGPROC::sem, LWLock::state, val, LWLock::waiters, and wakeup.

Referenced by WALInsertLockAcquireExclusive(), and WALInsertLockUpdateInsertingAt().
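
A hedged usage sketch mirroring the WAL-insert callers (structure and field names hypothetical): the exclusive holder advances the value protected by the lock, which atomically publishes it and wakes any LW_WAIT_UNTIL_FREE waiters queued by LWLockWaitForVar().

/* Caller must hold shared->pos_lock exclusively (note the Assert above). */
LWLockUpdateVar(&shared->pos_lock,
                &shared->current_pos,   /* pg_atomic_uint64 protected by pos_lock */
                new_pos);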

◆ LWLockWaitForVar()

bool LWLockWaitForVar ( LWLock lock,
pg_atomic_uint64 valptr,
uint64  oldval,
uint64 newval 
)

Definition at line 1594 of file lwlock.c.

1596{
1597 PGPROC *proc = MyProc;
1598 int extraWaits = 0;
1599 bool result = false;
1600#ifdef LWLOCK_STATS
1601 lwlock_stats *lwstats;
1602
1603 lwstats = get_lwlock_stats_entry(lock);
1604#endif
1605
1606 PRINT_LWDEBUG("LWLockWaitForVar", lock, LW_WAIT_UNTIL_FREE);
1607
1608 /*
1609 * Lock out cancel/die interrupts while we sleep on the lock. There is no
1610 * cleanup mechanism to remove us from the wait queue if we got
1611 * interrupted.
1612 */
1613 HOLD_INTERRUPTS();
1614
1615 /*
1616 * Loop here to check the lock's status after each time we are signaled.
1617 */
1618 for (;;)
1619 {
1620 bool mustwait;
1621
1622 mustwait = LWLockConflictsWithVar(lock, valptr, oldval, newval,
1623 &result);
1624
1625 if (!mustwait)
1626 break; /* the lock was free or value didn't match */
1627
1628 /*
1629 * Add myself to wait queue. Note that this is racy, somebody else
1630 * could wakeup before we're finished queuing. NB: We're using nearly
1631 * the same twice-in-a-row lock acquisition protocol as
1632 * LWLockAcquire(). Check its comments for details. The only
1633 * difference is that we also have to check the variable's values when
1634 * checking the state of the lock.
1635 */
1636 LWLockQueueSelf(lock, LW_WAIT_UNTIL_FREE);
1637
1638 /*
1639 * Set RELEASE_OK flag, to make sure we get woken up as soon as the
1640 * lock is released.
1641 */
1642 pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_RELEASE_OK);
1643
1644 /*
1645 * We're now guaranteed to be woken up if necessary. Recheck the lock
1646 * and variables state.
1647 */
1648 mustwait = LWLockConflictsWithVar(lock, valptr, oldval, newval,
1649 &result);
1650
1651 /* Ok, no conflict after we queued ourselves. Undo queueing. */
1652 if (!mustwait)
1653 {
1654 LOG_LWDEBUG("LWLockWaitForVar", lock, "free, undoing queue");
1655
1656 LWLockDequeueSelf(lock);
1657 break;
1658 }
1659
1660 /*
1661 * Wait until awakened.
1662 *
1663 * It is possible that we get awakened for a reason other than being
1664 * signaled by LWLockRelease. If so, loop back and wait again. Once
1665 * we've gotten the LWLock, re-increment the sema by the number of
1666 * additional signals received.
1667 */
1668 LOG_LWDEBUG("LWLockWaitForVar", lock, "waiting");
1669
1670#ifdef LWLOCK_STATS
1671 lwstats->block_count++;
1672#endif
1673
1674 LWLockReportWaitStart(lock);
1675 if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
1676 TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), LW_EXCLUSIVE);
1677
1678 for (;;)
1679 {
1680 PGSemaphoreLock(proc->sem);
1681 if (proc->lwWaiting == LW_WS_NOT_WAITING)
1682 break;
1683 extraWaits++;
1684 }
1685
1686#ifdef LOCK_DEBUG
1687 {
1688 /* not waiting anymore */
1689 uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
1690
1691 Assert(nwaiters < MAX_BACKENDS);
1692 }
1693#endif
1694
1695 if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
1696 TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), LW_EXCLUSIVE);
1697 LWLockReportWaitEnd();
1698
1699 LOG_LWDEBUG("LWLockWaitForVar", lock, "awakened");
1700
1701 /* Now loop back and check the status of the lock again. */
1702 }
1703
1704 /*
1705 * Fix the process wait semaphore's count for any absorbed wakeups.
1706 */
1707 while (extraWaits-- > 0)
1708 PGSemaphoreUnlock(proc->sem);
1709
1710 /*
1711 * Now okay to allow cancel/die interrupts.
1712 */
1713 RESUME_INTERRUPTS();
1714
1715 return result;
1716}
static bool LWLockConflictsWithVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval, bool *result)
Definition: lwlock.c:1533

References Assert(), HOLD_INTERRUPTS, LOG_LWDEBUG, LW_EXCLUSIVE, LW_FLAG_RELEASE_OK, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LWLockConflictsWithVar(), LWLockDequeueSelf(), LWLockQueueSelf(), LWLockReportWaitEnd(), LWLockReportWaitStart(), PGPROC::lwWaiting, MAX_BACKENDS, MyProc, newval, pg_atomic_fetch_or_u32(), pg_atomic_fetch_sub_u32(), PG_USED_FOR_ASSERTS_ONLY, PGSemaphoreLock(), PGSemaphoreUnlock(), PRINT_LWDEBUG, RESUME_INTERRUPTS, PGPROC::sem, LWLock::state, and T_NAME.

Referenced by WaitXLogInsertionsToFinish().
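
A hedged sketch of the waiter side, patterned after WaitXLogInsertionsToFinish() (names hypothetical): loop until either the lock is free (return value true) or the protected variable has advanced past the position we care about.

uint64      seen = 0;           /* last value of current_pos we observed */

while (seen < target_pos)
{
    if (LWLockWaitForVar(&shared->pos_lock,
                         &shared->current_pos,
                         seen, &seen))
        break;                  /* lock is free: nothing in progress */
    /* otherwise current_pos changed; re-check against target_pos */
}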

◆ LWLockWaitListLock()

static void LWLockWaitListLock ( LWLock lock)
static

Definition at line 861 of file lwlock.c.

862{
863 uint32 old_state;
864#ifdef LWLOCK_STATS
865 lwlock_stats *lwstats;
866 uint32 delays = 0;
867
868 lwstats = get_lwlock_stats_entry(lock);
869#endif
870
871 while (true)
872 {
873 /*
874 * Always try once to acquire the lock directly, without setting up
875 * the spin-delay infrastructure. The work necessary for that shows up
876 * in profiles and is rarely necessary.
877 */
878 old_state = pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_LOCKED);
879 if (likely(!(old_state & LW_FLAG_LOCKED)))
880 break; /* got lock */
881
882 /* and then spin without atomic operations until lock is released */
883 {
884 SpinDelayStatus delayStatus;
885
886 init_local_spin_delay(&delayStatus);
887
888 while (old_state & LW_FLAG_LOCKED)
889 {
890 perform_spin_delay(&delayStatus);
891 old_state = pg_atomic_read_u32(&lock->state);
892 }
893#ifdef LWLOCK_STATS
894 delays += delayStatus.delays;
895#endif
896 finish_spin_delay(&delayStatus);
897 }
898
899 /*
900 * Retry. The lock might obviously already be re-acquired by the time
901 * we're attempting to get it again.
902 */
903 }
904
905#ifdef LWLOCK_STATS
906 lwstats->spin_delay_count += delays;
907#endif
908}
#define likely(x)
Definition: c.h:417
#define LW_FLAG_LOCKED
Definition: lwlock.c:96
void perform_spin_delay(SpinDelayStatus *status)
Definition: s_lock.c:126
void finish_spin_delay(SpinDelayStatus *status)
Definition: s_lock.c:186
#define init_local_spin_delay(status)
Definition: s_lock.h:733

References SpinDelayStatus::delays, finish_spin_delay(), init_local_spin_delay, likely, LW_FLAG_LOCKED, perform_spin_delay(), pg_atomic_fetch_or_u32(), pg_atomic_read_u32(), and LWLock::state.

Referenced by LWLockDequeueSelf(), LWLockQueueSelf(), LWLockUpdateVar(), and LWLockWakeup().
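
The structure above, one cheap atomic attempt before touching the spin-delay machinery, is the generic s_lock.c back-off idiom. A hedged sketch of the same idiom applied to a hypothetical flag bit in an extension's atomic state word:

static void
lock_my_flag(pg_atomic_uint32 *state)
{
    uint32      old_state;

    /* cheap first attempt, as in LWLockWaitListLock() */
    old_state = pg_atomic_fetch_or_u32(state, 1);
    while (old_state & 1)
    {
        SpinDelayStatus delayStatus;

        init_local_spin_delay(&delayStatus);
        while (old_state & 1)
        {
            perform_spin_delay(&delayStatus);
            old_state = pg_atomic_read_u32(state);
        }
        finish_spin_delay(&delayStatus);

        /* the bit was observed clear; retry the atomic acquisition */
        old_state = pg_atomic_fetch_or_u32(state, 1);
    }
}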

◆ LWLockWaitListUnlock()

static void LWLockWaitListUnlock ( LWLock lock)
static

Definition at line 917 of file lwlock.c.

918{
919 uint32 old_state PG_USED_FOR_ASSERTS_ONLY;
920
921 old_state = pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_LOCKED);
922
923 Assert(old_state & LW_FLAG_LOCKED);
924}

References Assert(), LW_FLAG_LOCKED, pg_atomic_fetch_and_u32(), PG_USED_FOR_ASSERTS_ONLY, and LWLock::state.

Referenced by LWLockDequeueSelf(), LWLockQueueSelf(), and LWLockUpdateVar().

◆ LWLockWakeup()

static void LWLockWakeup ( LWLock lock)
static

Definition at line 930 of file lwlock.c.

931{
932 bool new_release_ok;
933 bool wokeup_somebody = false;
934 proclist_head wakeup;
935 proclist_mutable_iter iter;
936
937 proclist_init(&wakeup);
938
939 new_release_ok = true;
940
941 /* lock wait list while collecting backends to wake up */
942 LWLockWaitListLock(lock);
943
944 proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
945 {
946 PGPROC *waiter = GetPGProcByNumber(iter.cur);
947
948 if (wokeup_somebody && waiter->lwWaitMode == LW_EXCLUSIVE)
949 continue;
950
951 proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
952 proclist_push_tail(&wakeup, iter.cur, lwWaitLink);
953
954 if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
955 {
956 /*
957 * Prevent additional wakeups until retryer gets to run. Backends
958 * that are just waiting for the lock to become free don't retry
959 * automatically.
960 */
961 new_release_ok = false;
962
963 /*
964 * Don't wakeup (further) exclusive locks.
965 */
966 wokeup_somebody = true;
967 }
968
969 /*
970 * Signal that the process isn't on the wait list anymore. This allows
971 * LWLockDequeueSelf() to remove itself of the waitlist with a
972 * proclist_delete(), rather than having to check if it has been
973 * removed from the list.
974 */
975 Assert(waiter->lwWaiting == LW_WS_WAITING);
976 waiter->lwWaiting = LW_WS_PENDING_WAKEUP;
977
978 /*
979 * Once we've woken up an exclusive lock, there's no point in waking
980 * up anybody else.
981 */
982 if (waiter->lwWaitMode == LW_EXCLUSIVE)
983 break;
984 }
985
986 Assert(proclist_is_empty(&wakeup) || pg_atomic_read_u32(&lock->state) & LW_FLAG_HAS_WAITERS);
987
988 /* unset required flags, and release lock, in one fell swoop */
989 {
990 uint32 old_state;
991 uint32 desired_state;
992
993 old_state = pg_atomic_read_u32(&lock->state);
994 while (true)
995 {
996 desired_state = old_state;
997
998 /* compute desired flags */
999
1000 if (new_release_ok)
1001 desired_state |= LW_FLAG_RELEASE_OK;
1002 else
1003 desired_state &= ~LW_FLAG_RELEASE_OK;
1004
1005 if (proclist_is_empty(&lock->waiters))
1006 desired_state &= ~LW_FLAG_HAS_WAITERS;
1007
1008 desired_state &= ~LW_FLAG_LOCKED; /* release lock */
1009
1010 if (pg_atomic_compare_exchange_u32(&lock->state, &old_state,
1011 desired_state))
1012 break;
1013 }
1014 }
1015
1016 /* Awaken any waiters I removed from the queue. */
1017 proclist_foreach_modify(iter, &wakeup, lwWaitLink)
1018 {
1019 PGPROC *waiter = GetPGProcByNumber(iter.cur);
1020
1021 LOG_LWDEBUG("LWLockRelease", lock, "release waiter");
1022 proclist_delete(&wakeup, iter.cur, lwWaitLink);
1023
1024 /*
1025 * Guarantee that lwWaiting being unset only becomes visible once the
1026 * unlink from the link has completed. Otherwise the target backend
1027 * could be woken up for other reason and enqueue for a new lock - if
1028 * that happens before the list unlink happens, the list would end up
1029 * being corrupted.
1030 *
1031 * The barrier pairs with the LWLockWaitListLock() when enqueuing for
1032 * another lock.
1033 */
1034 pg_write_barrier();
1035 waiter->lwWaiting = LW_WS_NOT_WAITING;
1036 PGSemaphoreUnlock(waiter->sem);
1037 }
1038}

References Assert(), proclist_mutable_iter::cur, GetPGProcByNumber, LOG_LWDEBUG, LW_EXCLUSIVE, LW_FLAG_HAS_WAITERS, LW_FLAG_RELEASE_OK, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LW_WS_PENDING_WAKEUP, LW_WS_WAITING, LWLockWaitListLock(), PGPROC::lwWaiting, PGPROC::lwWaitMode, pg_atomic_compare_exchange_u32(), pg_atomic_read_u32(), pg_write_barrier, PGSemaphoreUnlock(), proclist_delete, proclist_foreach_modify, proclist_init(), proclist_is_empty(), proclist_push_tail, PGPROC::sem, LWLock::state, LWLock::waiters, and wakeup.

Referenced by LWLockReleaseInternal().

◆ NumLWLocksForNamedTranches()

static int NumLWLocksForNamedTranches ( void  )
static

Definition at line 382 of file lwlock.c.

383{
384 int numLocks = 0;
385 int i;
386
387 for (i = 0; i < NamedLWLockTrancheRequests; i++)
388 numLocks += NamedLWLockTrancheRequestArray[i].num_lwlocks;
389
390 return numLocks;
391}

References i, NamedLWLockTrancheRequestArray, and NamedLWLockTrancheRequests.

Referenced by LWLockShmemSize().

◆ RequestNamedLWLockTranche()

void RequestNamedLWLockTranche ( const char *  tranche_name,
int  num_lwlocks 
)

Definition at line 649 of file lwlock.c.

650{
651 NamedLWLockTrancheRequest *request;
652 static int NamedLWLockTrancheRequestsAllocated;
653
654 if (!process_shmem_requests_in_progress)
655 elog(FATAL, "cannot request additional LWLocks outside shmem_request_hook");
656
657 if (!tranche_name)
658 ereport(ERROR,
659 (errcode(ERRCODE_INVALID_NAME),
660 errmsg("tranche name cannot be NULL")));
661
662 if (strlen(tranche_name) >= NAMEDATALEN)
663 ereport(ERROR,
664 (errcode(ERRCODE_NAME_TOO_LONG),
665 errmsg("tranche name too long"),
666 errdetail("LWLock tranche names must be no longer than %d bytes.",
667 NAMEDATALEN - 1)));
668
669 if (NamedLWLockTrancheRequestArray == NULL)
670 {
671 NamedLWLockTrancheRequestsAllocated = 16;
672 NamedLWLockTrancheRequestArray = (NamedLWLockTrancheRequest *)
673 MemoryContextAlloc(TopMemoryContext,
674 NamedLWLockTrancheRequestsAllocated
675 * sizeof(NamedLWLockTrancheRequest));
676 }
677
678 if (NamedLWLockTrancheRequests >= NamedLWLockTrancheRequestsAllocated)
679 {
680 int i = pg_nextpower2_32(NamedLWLockTrancheRequests + 1);
681
682 NamedLWLockTrancheRequestArray = (NamedLWLockTrancheRequest *)
683 repalloc(NamedLWLockTrancheRequestArray,
684 i * sizeof(NamedLWLockTrancheRequest));
685 NamedLWLockTrancheRequestsAllocated = i;
686 }
687
688 request = &NamedLWLockTrancheRequestArray[NamedLWLockTrancheRequests];
689 strlcpy(request->tranche_name, tranche_name, NAMEDATALEN);
690 request->num_lwlocks = num_lwlocks;
691 NamedLWLockTrancheRequests++;
692}
#define FATAL
Definition: elog.h:41
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1232
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1632
MemoryContext TopMemoryContext
Definition: mcxt.c:166
bool process_shmem_requests_in_progress
Definition: miscinit.c:1790
static uint32 pg_nextpower2_32(uint32 num)
Definition: pg_bitutils.h:189

References elog, ereport, errcode(), errdetail(), errmsg(), ERROR, FATAL, i, MemoryContextAlloc(), NAMEDATALEN, NamedLWLockTrancheRequestArray, NamedLWLockTrancheRequests, NamedLWLockTrancheRequest::num_lwlocks, pg_nextpower2_32(), process_shmem_requests_in_progress, repalloc(), strlcpy(), TopMemoryContext, and NamedLWLockTrancheRequest::tranche_name.

Referenced by pgss_shmem_request(), and test_lwlock_tranches_shmem_request().
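
The two callers above show the intended pattern: the request is made from a shmem_request_hook, and the locks are fetched later (in a shmem_startup_hook) with GetNamedLWLockTranche(). A hedged sketch of a minimal extension setup (the tranche name and lock count are hypothetical):

static shmem_request_hook_type prev_shmem_request_hook = NULL;

static void
my_shmem_request(void)
{
    if (prev_shmem_request_hook)
        prev_shmem_request_hook();

    RequestNamedLWLockTranche("my_extension", 4);
}

void
_PG_init(void)
{
    prev_shmem_request_hook = shmem_request_hook;
    shmem_request_hook = my_shmem_request;
}

/* Later, from a shmem_startup_hook:
 *     LWLockPadded *locks = GetNamedLWLockTranche("my_extension");
 */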

◆ StaticAssertDecl() [1/4]

StaticAssertDecl(((MAX_BACKENDS + 1) & MAX_BACKENDS) == 0,
                 "MAX_BACKENDS + 1 needs to be a power of 2")

◆ StaticAssertDecl() [2/4]

StaticAssertDecl((LW_VAL_EXCLUSIVE & LW_FLAG_MASK) == 0,
                 "LW_VAL_EXCLUSIVE and LW_FLAG_MASK overlap")

◆ StaticAssertDecl() [3/4]

StaticAssertDecl((MAX_BACKENDS & LW_FLAG_MASK) == 0,
                 "MAX_BACKENDS and LW_FLAG_MASK overlap")

◆ StaticAssertDecl() [4/4]

StaticAssertDecl(lengthof(BuiltinTrancheNames) == LWTRANCHE_FIRST_USER_DEFINED,
                 "missing entries in BuiltinTrancheNames[]")
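
Together these assertions pin down the layout of LWLock::state: the low bits hold the shared-holder count (at most MAX_BACKENDS), LW_VAL_EXCLUSIVE is the single bit just above that count, and the flag bits sit at the top of the word. A hedged worked example, assuming the usual MAX_BACKENDS value of 0x3FFFF (2^18 - 1):

/* Worked layout example; 0x3FFFF is an assumption about MAX_BACKENDS. */
#define EX_MAX_BACKENDS   0x3FFFF                  /* 2^18 - 1 shared holders max */
#define EX_VAL_EXCLUSIVE  (EX_MAX_BACKENDS + 1)    /* 0x40000: one bit above them */

/* (0x3FFFF + 1) & 0x3FFFF == 0, so EX_VAL_EXCLUSIVE is a power of two:
 * because the shared count never exceeds EX_MAX_BACKENDS, no number of
 * shared holders (each adding LW_VAL_SHARED == 1) can collide with the
 * exclusive bit or carry into the flag bits. */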

Variable Documentation

◆ BuiltinTrancheNames

const char* const BuiltinTrancheNames[]
static
Initial value:
= {
#define PG_LWLOCK(id, lockname) [id] = CppAsString(lockname),
#define PG_LWLOCKTRANCHE(id, lockname) [LWTRANCHE_##id] = CppAsString(lockname),
}

Definition at line 135 of file lwlock.c.

Referenced by GetLWTrancheName().

◆ held_lwlocks

◆ LocalLWLockCounter

int LocalLWLockCounter
static

Definition at line 202 of file lwlock.c.

Referenced by GetLWTrancheName(), and LWLockNewTrancheId().

◆ LocalNamedLWLockTrancheRequestArray

NamedLWLockTrancheRequest* LocalNamedLWLockTrancheRequestArray = NULL
static

Definition at line 196 of file lwlock.c.

Referenced by CreateLWLocks(), and LWLockShmemSize().

◆ LWLockCounter

int* LWLockCounter = NULL

Definition at line 199 of file lwlock.c.

Referenced by CreateLWLocks(), GetLWTrancheName(), and LWLockNewTrancheId().

◆ LWLockTrancheNames

char** LWLockTrancheNames = NULL

Definition at line 154 of file lwlock.c.

Referenced by CreateLWLocks(), GetLWTrancheName(), and LWLockNewTrancheId().

◆ MainLWLockArray

◆ NamedLWLockTrancheRequestArray

◆ NamedLWLockTrancheRequests

int NamedLWLockTrancheRequests = 0

◆ num_held_lwlocks