summaryrefslogtreecommitdiff
path: root/src/backend/storage/lmgr
diff options
context:
space:
mode:
author: Thomas Munro, 2024-07-30 09:45:01 +0000
committer: Thomas Munro, 2024-07-30 10:58:37 +0000
commite25626677f8076eb3ce94586136c5464ee154381 (patch)
treef03f87af97b998e2dd5aadcfe8a93a83a1ae716b /src/backend/storage/lmgr
parent1330843bb78e9d2422af2f2b9909b80732bd6fc0 (diff)
Remove --disable-spinlocks.
A later change will require atomic support, so it wouldn't make sense for a hypothetical new system not to be able to implement spinlocks.

Reviewed-by: Heikki Linnakangas <[email protected]>
Reviewed-by: Tom Lane <[email protected]> (concept, not the patch)
Reviewed-by: Andres Freund <[email protected]> (concept, not the patch)
Discussion: https://postgr.es/m/3351991.1697728588%40sss.pgh.pa.us
Diffstat (limited to 'src/backend/storage/lmgr')
-rw-r--r--src/backend/storage/lmgr/Makefile1
-rw-r--r--src/backend/storage/lmgr/meson.build1
-rw-r--r--src/backend/storage/lmgr/s_lock.c2
-rw-r--r--src/backend/storage/lmgr/spin.c180
4 files changed, 1 insertion, 183 deletions
diff --git a/src/backend/storage/lmgr/Makefile b/src/backend/storage/lmgr/Makefile
index 3f89548bde6..6cbaf23b855 100644
--- a/src/backend/storage/lmgr/Makefile
+++ b/src/backend/storage/lmgr/Makefile
@@ -21,7 +21,6 @@ OBJS = \
predicate.o \
proc.o \
s_lock.o \
- spin.o
include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/storage/lmgr/meson.build b/src/backend/storage/lmgr/meson.build
index 05ac41e809a..d43511925e1 100644
--- a/src/backend/storage/lmgr/meson.build
+++ b/src/backend/storage/lmgr/meson.build
@@ -9,5 +9,4 @@ backend_sources += files(
'predicate.c',
'proc.c',
's_lock.c',
- 'spin.c',
)
diff --git a/src/backend/storage/lmgr/s_lock.c b/src/backend/storage/lmgr/s_lock.c
index cba48b3e778..69549a65dba 100644
--- a/src/backend/storage/lmgr/s_lock.c
+++ b/src/backend/storage/lmgr/s_lock.c
@@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* s_lock.c
- * Hardware-dependent implementation of spinlocks.
+ * Implementation of spinlocks.
*
* When waiting for a contended spinlock we loop tightly for awhile, then
* delay using pg_usleep() and try again. Preferably, "awhile" should be a
diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c
deleted file mode 100644
index 50cb99cd3b6..00000000000
--- a/src/backend/storage/lmgr/spin.c
+++ /dev/null
@@ -1,180 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * spin.c
- * Hardware-independent implementation of spinlocks.
- *
- *
- * For machines that have test-and-set (TAS) instructions, s_lock.h/.c
- * define the spinlock implementation. This file contains only a stub
- * implementation for spinlocks using PGSemaphores. Unless semaphores
- * are implemented in a way that doesn't involve a kernel call, this
- * is too slow to be very useful :-(
- *
- *
- * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- * src/backend/storage/lmgr/spin.c
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include "storage/pg_sema.h"
-#include "storage/shmem.h"
-#include "storage/spin.h"
-
-
-#ifndef HAVE_SPINLOCKS
-
-/*
- * No TAS, so spinlocks are implemented as PGSemaphores.
- */
-
-#ifndef HAVE_ATOMICS
-#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES)
-#else
-#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES)
-#endif /* HAVE_ATOMICS */
-
-PGSemaphore *SpinlockSemaArray;
-
-#else /* !HAVE_SPINLOCKS */
-
-#define NUM_EMULATION_SEMAPHORES 0
-
-#endif /* HAVE_SPINLOCKS */
-
-/*
- * Report the amount of shared memory needed to store semaphores for spinlock
- * support.
- */
-Size
-SpinlockSemaSize(void)
-{
- return NUM_EMULATION_SEMAPHORES * sizeof(PGSemaphore);
-}
-
-/*
- * Report number of semaphores needed to support spinlocks.
- */
-int
-SpinlockSemas(void)
-{
- return NUM_EMULATION_SEMAPHORES;
-}
-
-#ifndef HAVE_SPINLOCKS
-
-/*
- * Initialize spinlock emulation.
- *
- * This must be called after PGReserveSemaphores().
- */
-void
-SpinlockSemaInit(void)
-{
- PGSemaphore *spinsemas;
- int nsemas = SpinlockSemas();
- int i;
-
- /*
- * We must use ShmemAllocUnlocked(), since the spinlock protecting
- * ShmemAlloc() obviously can't be ready yet.
- */
- spinsemas = (PGSemaphore *) ShmemAllocUnlocked(SpinlockSemaSize());
- for (i = 0; i < nsemas; ++i)
- spinsemas[i] = PGSemaphoreCreate();
- SpinlockSemaArray = spinsemas;
-}
-
-/*
- * s_lock.h hardware-spinlock emulation using semaphores
- *
- * We map all spinlocks onto NUM_EMULATION_SEMAPHORES semaphores. It's okay to
- * map multiple spinlocks onto one semaphore because no process should ever
- * hold more than one at a time. We just need enough semaphores so that we
- * aren't adding too much extra contention from that.
- *
- * There is one exception to the restriction of only holding one spinlock at a
- * time, which is that it's ok if emulated atomic operations are nested inside
- * spinlocks. To avoid the danger of spinlocks and atomic using the same sema,
- * we make sure "normal" spinlocks and atomics backed by spinlocks use
- * distinct semaphores (see the nested argument to s_init_lock_sema).
- *
- * slock_t is just an int for this implementation; it holds the spinlock
- * number from 1..NUM_EMULATION_SEMAPHORES. We intentionally ensure that 0
- * is not a valid value, so that testing with this code can help find
- * failures to initialize spinlocks.
- */
-
-static inline void
-s_check_valid(int lockndx)
-{
- if (unlikely(lockndx <= 0 || lockndx > NUM_EMULATION_SEMAPHORES))
- elog(ERROR, "invalid spinlock number: %d", lockndx);
-}
-
-void
-s_init_lock_sema(volatile slock_t *lock, bool nested)
-{
- static uint32 counter = 0;
- uint32 offset;
- uint32 sema_total;
- uint32 idx;
-
- if (nested)
- {
- /*
- * To allow nesting atomics inside spinlocked sections, use a
- * different spinlock. See comment above.
- */
- offset = 1 + NUM_SPINLOCK_SEMAPHORES;
- sema_total = NUM_ATOMICS_SEMAPHORES;
- }
- else
- {
- offset = 1;
- sema_total = NUM_SPINLOCK_SEMAPHORES;
- }
-
- idx = (counter++ % sema_total) + offset;
-
- /* double check we did things correctly */
- s_check_valid(idx);
-
- *lock = idx;
-}
-
-void
-s_unlock_sema(volatile slock_t *lock)
-{
- int lockndx = *lock;
-
- s_check_valid(lockndx);
-
- PGSemaphoreUnlock(SpinlockSemaArray[lockndx - 1]);
-}
-
-bool
-s_lock_free_sema(volatile slock_t *lock)
-{
- /* We don't currently use S_LOCK_FREE anyway */
- elog(ERROR, "spin.c does not support S_LOCK_FREE()");
- return false;
-}
-
-int
-tas_sema(volatile slock_t *lock)
-{
- int lockndx = *lock;
-
- s_check_valid(lockndx);
-
- /* Note that TAS macros return 0 if *success* */
- return !PGSemaphoreTryLock(SpinlockSemaArray[lockndx - 1]);
-}
-
-#endif /* !HAVE_SPINLOCKS */