-rw-r--r--  src/backend/executor/nodeGather.c      | 14 ++------------
-rw-r--r--  src/backend/executor/nodeGatherMerge.c | 10 +---------
-rw-r--r--  src/backend/executor/tqueue.c          |  2 ++
3 files changed, 5 insertions, 21 deletions
diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c
index 212612b5351..a44cf8409af 100644
--- a/src/backend/executor/nodeGather.c
+++ b/src/backend/executor/nodeGather.c
@@ -131,7 +131,6 @@ static TupleTableSlot *
 ExecGather(PlanState *pstate)
 {
 	GatherState *node = castNode(GatherState, pstate);
-	TupleTableSlot *fslot = node->funnel_slot;
 	TupleTableSlot *slot;
 	ExprContext *econtext;
 
@@ -205,11 +204,8 @@ ExecGather(PlanState *pstate)
 	/*
 	 * Reset per-tuple memory context to free any expression evaluation
-	 * storage allocated in the previous tuple cycle.  This will also clear
-	 * any previous tuple returned by a TupleQueueReader; to make sure we
-	 * don't leave a dangling pointer around, clear the working slot first.
+	 * storage allocated in the previous tuple cycle.
 	 */
-	ExecClearTuple(fslot);
 	econtext = node->ps.ps_ExprContext;
 	ResetExprContext(econtext);
 
@@ -258,7 +254,6 @@ gather_getnext(GatherState *gatherstate)
 	PlanState  *outerPlan = outerPlanState(gatherstate);
 	TupleTableSlot *outerTupleSlot;
 	TupleTableSlot *fslot = gatherstate->funnel_slot;
-	MemoryContext tupleContext = gatherstate->ps.ps_ExprContext->ecxt_per_tuple_memory;
 	HeapTuple	tup;
 
 	while (gatherstate->nreaders > 0 || gatherstate->need_to_scan_locally)
@@ -267,12 +262,7 @@ gather_getnext(GatherState *gatherstate)
 		if (gatherstate->nreaders > 0)
 		{
-			MemoryContext oldContext;
-
-			/* Run TupleQueueReaders in per-tuple context */
-			oldContext = MemoryContextSwitchTo(tupleContext);
 			tup = gather_readnext(gatherstate);
-			MemoryContextSwitchTo(oldContext);
 
 			if (HeapTupleIsValid(tup))
 			{
 				ExecStoreTuple(tup, /* tuple to store */
 							   fslot,	/* slot in which to store the tuple */
 							   InvalidBuffer,	/* buffer associated with this
 												 * tuple */
-							   false);	/* slot should not pfree tuple */
+							   true);	/* pfree tuple when done with it */
 				return fslot;
 			}
 		}
diff --git a/src/backend/executor/nodeGatherMerge.c b/src/backend/executor/nodeGatherMerge.c
index 166f2064ff7..4a8a59eabf1 100644
--- a/src/backend/executor/nodeGatherMerge.c
+++ b/src/backend/executor/nodeGatherMerge.c
@@ -609,7 +609,7 @@ load_tuple_array(GatherMergeState *gm_state, int reader)
 										  &tuple_buffer->done);
 		if (!HeapTupleIsValid(tuple))
 			break;
-		tuple_buffer->tuple[i] = heap_copytuple(tuple);
+		tuple_buffer->tuple[i] = tuple;
 		tuple_buffer->nTuples++;
 	}
 }
@@ -673,7 +673,6 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
 								  &tuple_buffer->done);
 		if (!HeapTupleIsValid(tup))
 			return false;
-		tup = heap_copytuple(tup);
 
 		/*
 		 * Attempt to read more tuples in nowait mode and store them in the
@@ -703,20 +702,13 @@ gm_readnext_tuple(GatherMergeState *gm_state, int nreader, bool nowait,
 {
 	TupleQueueReader *reader;
 	HeapTuple	tup;
-	MemoryContext oldContext;
-	MemoryContext tupleContext;
 
 	/* Check for async events, particularly messages from workers. */
 	CHECK_FOR_INTERRUPTS();
 
 	/* Attempt to read a tuple. */
 	reader = gm_state->reader[nreader - 1];
-
-	/* Run TupleQueueReaders in per-tuple context */
-	tupleContext = gm_state->ps.ps_ExprContext->ecxt_per_tuple_memory;
-	oldContext = MemoryContextSwitchTo(tupleContext);
 	tup = TupleQueueReaderNext(reader, nowait, done);
-	MemoryContextSwitchTo(oldContext);
 
 	return tup;
 }
diff --git a/src/backend/executor/tqueue.c b/src/backend/executor/tqueue.c
index 4a295c936ba..0dcb911c3c0 100644
--- a/src/backend/executor/tqueue.c
+++ b/src/backend/executor/tqueue.c
@@ -161,6 +161,8 @@ DestroyTupleQueueReader(TupleQueueReader *reader)
  * is set to true when there are no remaining tuples and otherwise to false.
  *
  * The returned tuple, if any, is allocated in CurrentMemoryContext.
+ * Note that this routine must not leak memory!  (We used to allow that,
+ * but not any more.)
  *
 * Even when shm_mq_receive() returns SHM_MQ_WOULD_BLOCK, this can still
 * accumulate bytes from a partially-read message, so it's useful to call
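
For context, here is a minimal sketch (not part of the commit) of the calling convention this patch establishes: TupleQueueReaderNext() returns a tuple palloc'd in CurrentMemoryContext, and the caller takes ownership of it instead of relying on a per-tuple memory context to clean up behind the reader. The helper name fetch_slot_from_queue is hypothetical; the functions it calls are the executor APIs touched by this diff.

/*
 * Hypothetical caller showing the post-patch ownership contract: the
 * tuple returned by TupleQueueReaderNext() must eventually be freed by
 * the caller, not leaked into a long-lived context.
 */
#include "postgres.h"
#include "executor/tqueue.h"
#include "executor/tuptable.h"

static TupleTableSlot *
fetch_slot_from_queue(TupleQueueReader *reader, TupleTableSlot *slot)
{
	bool		done;
	HeapTuple	tup;

	/* Blocking read; the returned tuple lives in CurrentMemoryContext */
	tup = TupleQueueReaderNext(reader, false, &done);
	if (!HeapTupleIsValid(tup))
		return NULL;			/* no more tuples */

	/*
	 * shouldFree = true transfers ownership to the slot, which pfree()s
	 * the tuple when it is cleared or overwritten.  This is what replaces
	 * the removed MemoryContextSwitchTo() bracketing in nodeGather.c and
	 * nodeGatherMerge.c.
	 */
	return ExecStoreTuple(tup, slot, InvalidBuffer, true);
}

GatherMerge already stored buffered tuples into its slots with shouldFree = true; once TupleQueueReaderNext() hands back a caller-owned tuple, the intermediate heap_copytuple() calls become redundant, which is why the diff drops them.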