Fix re-distributing previously distributed invalidation messages during logical decoding.
author: Masahiko Sawada <[email protected]>
Tue, 17 Jun 2025 00:35:50 +0000 (17:35 -0700)
committer: Masahiko Sawada <[email protected]>
Tue, 17 Jun 2025 00:35:50 +0000 (17:35 -0700)
Commit 4909b38af0 introduced logic to distribute invalidation messages
from catalog-modifying transactions to all concurrent in-progress
transactions. However, since each transaction distributes not only its
original invalidation messages but also previously distributed
messages to other transactions, this leads to an exponential increase
in allocation request size for invalidation messages, ultimately
causing memory allocation failure.

This commit fixes this issue by tracking distributed invalidation
messages separately per decoded transaction and not redistributing
these messages to other in-progress transactions. The maximum size of
distributed invalidation messages that one transaction can store is
limited to MAX_DISTR_INVAL_MSG_PER_TXN (8MB). Once the size of the
distributed invalidation messages exceeds this threshold, we
invalidate all caches in locations where distributed invalidation
messages need to be executed.

Back-patch to all supported branches where the distribution logic was
introduced by commit 4909b38af0.

Note that this commit adds two new fields to ReorderBufferTXN to store
the distributed invalidation messages. This change breaks ABI compatibility in
back branches, affecting third-party extensions that depend on the
size of the ReorderBufferTXN struct, though this scenario seems
unlikely.

Additionally, it adds a new flag to the txn_flags field of
ReorderBufferTXN to indicate distributed invalidation message
overflow. This should not affect existing implementations, as it is
unlikely that third-party extensions use unused bits in the txn_flags
field.

Bug: #18938 #18942
Author: vignesh C <[email protected]>
Reported-by: Duncan Sands <[email protected]>
Reported-by: John Hutchins <[email protected]>
Reported-by: Laurence Parry <[email protected]>
Reported-by: Max Madden <[email protected]>
Reported-by: Braulio Fdo Gonzalez <[email protected]>
Reviewed-by: Masahiko Sawada <[email protected]>
Reviewed-by: Amit Kapila <[email protected]>
Reviewed-by: Hayato Kuroda <[email protected]>
Discussion: https://postgr.es/m/680bdaf6-f7d1-4536-b580-05c2760c67c6@deepbluecap.com
Discussion: https://postgr.es/m/18942-0ab1e5ae156613ad@postgresql.org
Discussion: https://postgr.es/m/18938-57c9a1c463b68ce0@postgresql.org
Discussion: https://postgr.es/m/CAD1FGCT2sYrP_70RTuo56QTizyc+J3wJdtn2gtO3VttQFpdMZg@mail.gmail.com
Discussion: https://postgr.es/m/CANO2=B=2BT1hSYCE=nuuTnVTnjidMg0+-FfnRnqM6kd23qoygg@mail.gmail.com
Backpatch-through: 13

contrib/test_decoding/expected/invalidation_distribution.out
contrib/test_decoding/specs/invalidation_distribution.spec
src/backend/replication/logical/reorderbuffer.c
src/backend/replication/logical/snapbuild.c
src/include/replication/reorderbuffer.h

index c701e290bb9bed76d0512d04453d4da80cea879c..f2bce5ce75c628384cfe9152b238e4d7e7e34ac0 100644 (file)
@@ -1,4 +1,4 @@
-Parsed test spec with 2 sessions
+Parsed test spec with 3 sessions
 
 starting permutation: s1_insert_tbl1 s1_begin s1_insert_tbl1 s2_alter_pub_add_tbl s1_commit s1_insert_tbl1 s2_get_binary_changes
 step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
@@ -18,3 +18,24 @@ count
 stop    
 (1 row)
 
+
+starting permutation: s1_begin s1_insert_tbl1 s3_begin s3_insert_tbl1 s2_alter_pub_add_tbl s1_insert_tbl1 s1_commit s3_commit s2_get_binary_changes
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s3_begin: BEGIN;
+step s3_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (2, 2);
+step s2_alter_pub_add_tbl: ALTER PUBLICATION pub ADD TABLE tbl1;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s1_commit: COMMIT;
+step s3_commit: COMMIT;
+step s2_get_binary_changes: SELECT count(data) FROM pg_logical_slot_get_binary_changes('isolation_slot', NULL, NULL, 'proto_version', '2', 'publication_names', 'pub') WHERE get_byte(data, 0) = 73;
+count
+-----
+    1
+(1 row)
+
+?column?
+--------
+stop    
+(1 row)
+
index b8b14e333a1ae01d2308a66b5fb3f0949f08dee9..d9b5cd75aada285ed90795375e2c30fc4aa73f68 100644 (file)
@@ -28,5 +28,16 @@ setup { SET synchronous_commit=on; }
 step "s2_alter_pub_add_tbl" { ALTER PUBLICATION pub ADD TABLE tbl1; }
 step "s2_get_binary_changes" { SELECT count(data) FROM pg_logical_slot_get_binary_changes('isolation_slot', NULL, NULL, 'proto_version', '2', 'publication_names', 'pub') WHERE get_byte(data, 0) = 73; }
 
+session "s3"
+setup { SET synchronous_commit=on; }
+step "s3_begin" { BEGIN; }
+step "s3_insert_tbl1" { INSERT INTO tbl1 (val1, val2) VALUES (2, 2); }
+step "s3_commit" { COMMIT; }
+
 # Expect to get one insert change. LOGICAL_REP_MSG_INSERT = 'I'
 permutation "s1_insert_tbl1" "s1_begin" "s1_insert_tbl1" "s2_alter_pub_add_tbl" "s1_commit" "s1_insert_tbl1" "s2_get_binary_changes"
+
+# Expect to get one insert change with LOGICAL_REP_MSG_INSERT = 'I' from
+# the second "s1_insert_tbl1" executed after adding the table tbl1 to the
+# publication in "s2_alter_pub_add_tbl".
+permutation "s1_begin" "s1_insert_tbl1" "s3_begin" "s3_insert_tbl1" "s2_alter_pub_add_tbl" "s1_insert_tbl1" "s1_commit" "s3_commit" "s2_get_binary_changes"
index a52b51a5e7dd74220322eb2663ed70730d49bdf6..b708877d965fd1803589fc19cfed37ddaaee3d71 100644 (file)
 #include "storage/sinval.h"
 #include "utils/builtins.h"
 #include "utils/combocid.h"
+#include "utils/inval.h"
 #include "utils/memdebug.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 #include "utils/relfilenodemap.h"
 
 
+/*
+ * Each transaction has an 8MB limit for invalidation messages distributed from
+ * other transactions. This limit is set considering scenarios with many
+ * concurrent logical decoding operations. When the distributed invalidation
+ * messages reach this threshold, the transaction is marked as
+ * RBTXN_DISTR_INVAL_OVERFLOWED to invalidate the complete cache as we have lost
+ * some inval messages and hence don't know what needs to be invalidated.
+ */
+#define MAX_DISTR_INVAL_MSG_PER_TXN \
+   ((8 * 1024 * 1024) / sizeof(SharedInvalidationMessage))
+
 /* entry for a hash table we use to map from xid to our transaction state */
 typedef struct ReorderBufferTXNByIdEnt
 {
@@ -453,6 +465,12 @@ ReorderBufferReturnTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
        txn->invalidations = NULL;
    }
 
+   if (txn->invalidations_distributed)
+   {
+       pfree(txn->invalidations_distributed);
+       txn->invalidations_distributed = NULL;
+   }
+
    /* Reset the toast hash */
    ReorderBufferToastReset(rb, txn);
 
@@ -2500,7 +2518,17 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn,
        AbortCurrentTransaction();
 
        /* make sure there's no cache pollution */
-       ReorderBufferExecuteInvalidations(txn->ninvalidations, txn->invalidations);
+       if (rbtxn_distr_inval_overflowed(txn))
+       {
+           Assert(txn->ninvalidations_distributed == 0);
+           InvalidateSystemCaches();
+       }
+       else
+       {
+           ReorderBufferExecuteInvalidations(txn->ninvalidations, txn->invalidations);
+           ReorderBufferExecuteInvalidations(txn->ninvalidations_distributed,
+                                             txn->invalidations_distributed);
+       }
 
        if (using_subtxn)
            RollbackAndReleaseCurrentSubTransaction();
@@ -2546,8 +2574,17 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn,
        AbortCurrentTransaction();
 
        /* make sure there's no cache pollution */
-       ReorderBufferExecuteInvalidations(txn->ninvalidations,
-                                         txn->invalidations);
+       if (rbtxn_distr_inval_overflowed(txn))
+       {
+           Assert(txn->ninvalidations_distributed == 0);
+           InvalidateSystemCaches();
+       }
+       else
+       {
+           ReorderBufferExecuteInvalidations(txn->ninvalidations, txn->invalidations);
+           ReorderBufferExecuteInvalidations(txn->ninvalidations_distributed,
+                                             txn->invalidations_distributed);
+       }
 
        if (using_subtxn)
            RollbackAndReleaseCurrentSubTransaction();
@@ -2873,7 +2910,8 @@ ReorderBufferAbort(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn)
         * We might have decoded changes for this transaction that could load
         * the cache as per the current transaction's view (consider DDL's
         * happened in this transaction). We don't want the decoding of future
-        * transactions to use those cache entries so execute invalidations.
+        * transactions to use those cache entries so execute only the inval
+        * messages in this transaction.
         */
        if (txn->ninvalidations > 0)
            ReorderBufferImmediateInvalidation(rb, txn->ninvalidations,
@@ -2961,9 +2999,10 @@ ReorderBufferForget(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn)
    txn->final_lsn = lsn;
 
    /*
-    * Process cache invalidation messages if there are any. Even if we're not
-    * interested in the transaction's contents, it could have manipulated the
-    * catalog and we need to update the caches according to that.
+    * Process only cache invalidation messages in this transaction if there
+    * are any. Even if we're not interested in the transaction's contents, it
+    * could have manipulated the catalog and we need to update the caches
+    * according to that.
     */
    if (txn->base_snapshot != NULL && txn->ninvalidations > 0)
        ReorderBufferImmediateInvalidation(rb, txn->ninvalidations,
@@ -3218,6 +3257,57 @@ ReorderBufferAddNewTupleCids(ReorderBuffer *rb, TransactionId xid,
    txn->ntuplecids++;
 }
 
+/*
+ * Add new invalidation messages to the reorder buffer queue.
+ */
+static void
+ReorderBufferQueueInvalidations(ReorderBuffer *rb, TransactionId xid,
+                               XLogRecPtr lsn, Size nmsgs,
+                               SharedInvalidationMessage *msgs)
+{
+   ReorderBufferChange *change;
+
+   change = ReorderBufferGetChange(rb);
+   change->action = REORDER_BUFFER_CHANGE_INVALIDATION;
+   change->data.inval.ninvalidations = nmsgs;
+   change->data.inval.invalidations = (SharedInvalidationMessage *)
+       palloc(sizeof(SharedInvalidationMessage) * nmsgs);
+   memcpy(change->data.inval.invalidations, msgs,
+          sizeof(SharedInvalidationMessage) * nmsgs);
+
+   ReorderBufferQueueChange(rb, xid, lsn, change, false);
+}
+
+/*
+ * A helper function for ReorderBufferAddInvalidations() and
+ * ReorderBufferAddDistributedInvalidations() to accumulate the invalidation
+ * messages to the **invals_out.
+ */
+static void
+ReorderBufferAccumulateInvalidations(SharedInvalidationMessage **invals_out,
+                                    uint32 *ninvals_out,
+                                    SharedInvalidationMessage *msgs_new,
+                                    Size nmsgs_new)
+{
+   if (*ninvals_out == 0)
+   {
+       *ninvals_out = nmsgs_new;
+       *invals_out = (SharedInvalidationMessage *)
+           palloc(sizeof(SharedInvalidationMessage) * nmsgs_new);
+       memcpy(*invals_out, msgs_new, sizeof(SharedInvalidationMessage) * nmsgs_new);
+   }
+   else
+   {
+       /* Enlarge the array of inval messages */
+       *invals_out = (SharedInvalidationMessage *)
+           repalloc(*invals_out, sizeof(SharedInvalidationMessage) *
+                    (*ninvals_out + nmsgs_new));
+       memcpy(*invals_out + *ninvals_out, msgs_new,
+              nmsgs_new * sizeof(SharedInvalidationMessage));
+       *ninvals_out += nmsgs_new;
+   }
+}
+
 /*
  * Accumulate the invalidations for executing them later.
  *
@@ -3238,7 +3328,6 @@ ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid,
 {
    ReorderBufferTXN *txn;
    MemoryContext oldcontext;
-   ReorderBufferChange *change;
 
    txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
 
@@ -3254,35 +3343,77 @@ ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid,
 
    Assert(nmsgs > 0);
 
-   /* Accumulate invalidations. */
-   if (txn->ninvalidations == 0)
-   {
-       txn->ninvalidations = nmsgs;
-       txn->invalidations = (SharedInvalidationMessage *)
-           palloc(sizeof(SharedInvalidationMessage) * nmsgs);
-       memcpy(txn->invalidations, msgs,
-              sizeof(SharedInvalidationMessage) * nmsgs);
-   }
-   else
+   ReorderBufferAccumulateInvalidations(&txn->invalidations,
+                                        &txn->ninvalidations,
+                                        msgs, nmsgs);
+
+   ReorderBufferQueueInvalidations(rb, xid, lsn, nmsgs, msgs);
+
+   MemoryContextSwitchTo(oldcontext);
+}
+
+/*
+ * Accumulate the invalidations distributed by other committed transactions
+ * for executing them later.
+ *
+ * This function is similar to ReorderBufferAddInvalidations() but stores
+ * the given inval messages to the txn->invalidations_distributed with the
+ * overflow check.
+ *
+ * This needs to be called by committed transactions to distribute their
+ * inval messages to in-progress transactions.
+ */
+void
+ReorderBufferAddDistributedInvalidations(ReorderBuffer *rb, TransactionId xid,
+                                        XLogRecPtr lsn, Size nmsgs,
+                                        SharedInvalidationMessage *msgs)
+{
+   ReorderBufferTXN *txn;
+   MemoryContext oldcontext;
+
+   txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
+
+   oldcontext = MemoryContextSwitchTo(rb->context);
+
+   /*
+    * Collect all the invalidations under the top transaction, if available,
+    * so that we can execute them all together.  See comments
+    * ReorderBufferAddInvalidations.
+    */
+   if (txn->toptxn)
+       txn = txn->toptxn;
+
+   Assert(nmsgs > 0);
+
+   if (!rbtxn_distr_inval_overflowed(txn))
    {
-       txn->invalidations = (SharedInvalidationMessage *)
-           repalloc(txn->invalidations, sizeof(SharedInvalidationMessage) *
-                    (txn->ninvalidations + nmsgs));
+       /*
+        * Check the transaction has enough space for storing distributed
+        * invalidation messages.
+        */
+       if (txn->ninvalidations_distributed + nmsgs >= MAX_DISTR_INVAL_MSG_PER_TXN)
+       {
+           /*
+            * Mark the invalidation message as overflowed and free up the
+            * messages accumulated so far.
+            */
+           txn->txn_flags |= RBTXN_DISTR_INVAL_OVERFLOWED;
 
-       memcpy(txn->invalidations + txn->ninvalidations, msgs,
-              nmsgs * sizeof(SharedInvalidationMessage));
-       txn->ninvalidations += nmsgs;
+           if (txn->invalidations_distributed)
+           {
+               pfree(txn->invalidations_distributed);
+               txn->invalidations_distributed = NULL;
+               txn->ninvalidations_distributed = 0;
+           }
+       }
+       else
+           ReorderBufferAccumulateInvalidations(&txn->invalidations_distributed,
+                                                &txn->ninvalidations_distributed,
+                                                msgs, nmsgs);
    }
 
-   change = ReorderBufferGetChange(rb);
-   change->action = REORDER_BUFFER_CHANGE_INVALIDATION;
-   change->data.inval.ninvalidations = nmsgs;
-   change->data.inval.invalidations = (SharedInvalidationMessage *)
-       palloc(sizeof(SharedInvalidationMessage) * nmsgs);
-   memcpy(change->data.inval.invalidations, msgs,
-          sizeof(SharedInvalidationMessage) * nmsgs);
-
-   ReorderBufferQueueChange(rb, xid, lsn, change, false);
+   /* Queue the invalidation messages into the transaction */
+   ReorderBufferQueueInvalidations(rb, xid, lsn, nmsgs, msgs);
 
    MemoryContextSwitchTo(oldcontext);
 }
index 6025f2727aee7a1b3c31ba6bf04159a541e18a88..67f0b9e089afb47d4c86f0245eaf35dc301cefaf 100644 (file)
@@ -916,6 +916,13 @@ SnapBuildDistributeSnapshotAndInval(SnapBuild *builder, XLogRecPtr lsn, Transact
         * contents built by the current transaction even after its decoding,
         * which should have been invalidated due to concurrent catalog
         * changing transaction.
+        *
+        * Distribute only the invalidation messages generated by the current
+        * committed transaction. Invalidation messages received from other
+        * transactions would have already been propagated to the relevant
+        * in-progress transactions. This transaction would have processed
+        * those invalidations, ensuring that subsequent transactions observe
+        * a consistent cache state.
         */
        if (txn->xid != xid)
        {
@@ -929,8 +936,9 @@ SnapBuildDistributeSnapshotAndInval(SnapBuild *builder, XLogRecPtr lsn, Transact
            {
                Assert(msgs != NULL);
 
-               ReorderBufferAddInvalidations(builder->reorder, txn->xid, lsn,
-                                             ninvalidations, msgs);
+               ReorderBufferAddDistributedInvalidations(builder->reorder,
+                                                        txn->xid, lsn,
+                                                        ninvalidations, msgs);
            }
        }
    }
index d399975e8a545b123bdacc1e59aa4091801c3269..cb633315d04da066aee0cd35d34fdb4e8d4b350d 100644 (file)
@@ -168,14 +168,15 @@ typedef struct ReorderBufferChange
 } ReorderBufferChange;
 
 /* ReorderBufferTXN txn_flags */
-#define RBTXN_HAS_CATALOG_CHANGES 0x0001
-#define RBTXN_IS_SUBXACT          0x0002
-#define RBTXN_IS_SERIALIZED       0x0004
-#define RBTXN_IS_SERIALIZED_CLEAR 0x0008
-#define RBTXN_IS_STREAMED         0x0010
-#define RBTXN_HAS_PARTIAL_CHANGE  0x0020
-#define RBTXN_PREPARE             0x0040
-#define RBTXN_SKIPPED_PREPARE    0x0080
+#define RBTXN_HAS_CATALOG_CHANGES      0x0001
+#define RBTXN_IS_SUBXACT               0x0002
+#define RBTXN_IS_SERIALIZED            0x0004
+#define RBTXN_IS_SERIALIZED_CLEAR      0x0008
+#define RBTXN_IS_STREAMED              0x0010
+#define RBTXN_HAS_PARTIAL_CHANGE       0x0020
+#define RBTXN_PREPARE                  0x0040
+#define RBTXN_SKIPPED_PREPARE          0x0080
+#define RBTXN_DISTR_INVAL_OVERFLOWED    0x0100
 
 /* Does the transaction have catalog changes? */
 #define rbtxn_has_catalog_changes(txn) \
@@ -233,6 +234,12 @@ typedef struct ReorderBufferChange
    ((txn)->txn_flags & RBTXN_SKIPPED_PREPARE) != 0 \
 )
 
+/* Is the array of distributed inval messages overflowed? */
+#define rbtxn_distr_inval_overflowed(txn) \
+( \
+   ((txn)->txn_flags & RBTXN_DISTR_INVAL_OVERFLOWED) != 0 \
+)
+
 typedef struct ReorderBufferTXN
 {
    /* See above */
@@ -391,6 +398,12 @@ typedef struct ReorderBufferTXN
     * Private data pointer of the output plugin.
     */
    void       *output_plugin_private;
+
+   /*
+    * Stores cache invalidation messages distributed by other transactions.
+    */
+   uint32      ninvalidations_distributed;
+   SharedInvalidationMessage *invalidations_distributed;
 } ReorderBufferTXN;
 
 /* so we can define the callbacks used inside struct ReorderBuffer itself */
@@ -657,6 +670,9 @@ void        ReorderBufferAddNewTupleCids(ReorderBuffer *, TransactionId, XLogRecPtr ls
                                         CommandId cmin, CommandId cmax, CommandId combocid);
 void       ReorderBufferAddInvalidations(ReorderBuffer *, TransactionId, XLogRecPtr lsn,
                                          Size nmsgs, SharedInvalidationMessage *msgs);
+void       ReorderBufferAddDistributedInvalidations(ReorderBuffer *rb, TransactionId xid,
+                                                    XLogRecPtr lsn, Size nmsgs,
+                                                    SharedInvalidationMessage *msgs);
 void       ReorderBufferImmediateInvalidation(ReorderBuffer *, uint32 ninvalidations,
                                               SharedInvalidationMessage *invalidations);
 void       ReorderBufferProcessXid(ReorderBuffer *, TransactionId xid, XLogRecPtr lsn);