author     Thomas Munro    2018-06-10 08:30:25 +0000
committer  Thomas Munro    2018-06-10 08:30:25 +0000
commit     86a2218eb00eb6f97898945967c5f9c95c72b4c6 (patch)
tree       2b5d6558dd721921e5b1d57349765bf4fae63a40 /src/backend/executor/nodeHash.c
parent     f6b95ff434bff28c0d9b390d5a0ff316847c4fb7 (diff)
Limit Parallel Hash's bucket array to MaxAllocSize.
Make sure that we don't exceed MaxAllocSize when increasing the number of
buckets.  Perhaps later we'll remove that limit and use DSA_ALLOC_HUGE, but
for now just prevent further increases like the non-parallel code.  This
change avoids the error from bug report #15225.

Author: Thomas Munro
Reviewed-By: Tom Lane
Reported-by: Frits Jalvingh
Discussion: https://postgr.es/m/152802081668.26724.16985037679312485972%40wrigleys.postgresql.org
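For context, a minimal standalone sketch of the arithmetic behind the new cap.
The MaxAllocSize value of 0x3fffffff, the 8-byte dsa_pointer_atomic, and the
example bucket count are assumptions for illustration (typical of a 64-bit
build), not something stated in this diff:

#include <stdio.h>

/* Assumed values: MaxAllocSize is 0x3fffffff, and dsa_pointer_atomic is
 * taken to be 8 bytes wide. */
#define MAX_ALLOC_SIZE  ((size_t) 0x3fffffff)
#define BUCKET_ELEM_SIZE 8

int main(void)
{
	/* The shared bucket array needs nbuckets * BUCKET_ELEM_SIZE bytes, so the
	 * largest bucket count one MaxAllocSize-limited allocation can hold is: */
	size_t max_buckets = MAX_ALLOC_SIZE / BUCKET_ELEM_SIZE;

	/* The patched condition only doubles nbuckets while the doubled array
	 * would still fit within that limit. */
	int nbuckets = 67108864;	/* 2^26, a hypothetical current bucket count */
	int can_double = (size_t) nbuckets * 2 <= max_buckets;

	printf("max buckets per allocation: %zu\n", max_buckets);	/* 134217727 */
	printf("can double from %d: %s\n", nbuckets, can_double ? "yes" : "no");
	return 0;
}

With these assumed numbers the doubled array would be exactly 1 GB, one byte
over MaxAllocSize, so bucket growth stops there; per the commit message, this
mirrors the cap already applied by the non-parallel code.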
Diffstat (limited to 'src/backend/executor/nodeHash.c')
-rw-r--r--  src/backend/executor/nodeHash.c  5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 4f069d17fd8..6ffaa751f23 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -2818,9 +2818,12 @@ ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
 		{
 			hashtable->batches[0].shared->ntuples += hashtable->batches[0].ntuples;
 			hashtable->batches[0].ntuples = 0;
+			/* Guard against integer overflow and alloc size overflow */
 			if (hashtable->batches[0].shared->ntuples + 1 >
 				hashtable->nbuckets * NTUP_PER_BUCKET &&
-				hashtable->nbuckets < (INT_MAX / 2))
+				hashtable->nbuckets < (INT_MAX / 2) &&
+				hashtable->nbuckets * 2 <=
+				MaxAllocSize / sizeof(dsa_pointer_atomic))
 			{
 				pstate->growth = PHJ_GROWTH_NEED_MORE_BUCKETS;
 				LWLockRelease(&pstate->lock);
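A side note on the shape of the new test (my reading, not something stated in
the commit): keeping the element size on the divisor side, as
MaxAllocSize / sizeof(dsa_pointer_atomic), means neither operand of the
comparison can overflow, since the preceding INT_MAX / 2 clause already bounds
nbuckets * 2.  A small sketch of the contrast, deliberately using a 32-bit
unsigned type to stand in for a narrow size type:

#include <stdint.h>
#include <stdio.h>

#define MAX_ALLOC_SIZE 0x3fffffffU	/* assumed MaxAllocSize */
#define ELEM_SIZE 8U				/* assumed sizeof(dsa_pointer_atomic) */

int main(void)
{
	uint32_t nbuckets = 536870912;	/* 2^29, still below INT_MAX / 2 */

	/* Division form, as in the patch: both operands stay small. */
	int grow_ok_div = nbuckets * 2 <= MAX_ALLOC_SIZE / ELEM_SIZE;

	/* Multiplication form: with a 32-bit size type, 2^29 * 2 * 8 wraps to 0
	 * and the check would wrongly allow the growth. */
	uint32_t bytes = nbuckets * 2 * ELEM_SIZE;
	int grow_ok_mul = bytes <= MAX_ALLOC_SIZE;

	printf("division form allows doubling: %s\n", grow_ok_div ? "yes" : "no");
	printf("multiplication form: bytes=%u, allows doubling: %s\n",
		   (unsigned) bytes, grow_ok_mul ? "yes (wrong)" : "no");
	return 0;
}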