Diffstat (limited to 'src/bin')
-rw-r--r--  src/bin/initdb/initdb.c                        |   4
-rw-r--r--  src/bin/initdb/t/001_initdb.pl                 |  36
-rw-r--r--  src/bin/pg_amcheck/pg_amcheck.c                |  10
-rw-r--r--  src/bin/pg_amcheck/t/002_nonesuch.pl           |  35
-rw-r--r--  src/bin/pg_amcheck/t/005_opclass_damage.pl     |   3
-rw-r--r--  src/bin/pg_basebackup/bbstreamer_file.c        |   4
-rw-r--r--  src/bin/pg_basebackup/bbstreamer_gzip.c        |  27
-rw-r--r--  src/bin/pg_basebackup/bbstreamer_lz4.c         | 116
-rw-r--r--  src/bin/pg_basebackup/pg_basebackup.c          |  89
-rw-r--r--  src/bin/pg_basebackup/streamutil.c             |   2
-rw-r--r--  src/bin/pg_basebackup/t/010_pg_basebackup.pl   | 104
-rw-r--r--  src/bin/pg_basebackup/t/020_pg_receivewal.pl   |   4
-rw-r--r--  src/bin/pg_basebackup/t/030_pg_recvlogical.pl  |  17
-rw-r--r--  src/bin/pg_ctl/pg_ctl.c                        |   6
-rw-r--r--  src/bin/pg_ctl/t/002_status.pl                 |   2
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.c           |   8
-rw-r--r--  src/bin/pg_dump/pg_backup_custom.c             |  10
-rw-r--r--  src/bin/pg_dump/pg_dump.c                      |  19
-rw-r--r--  src/bin/pg_dump/pg_dumpall.c                   |   2
-rw-r--r--  src/bin/pg_dump/t/001_basic.pl                 |   2
-rw-r--r--  src/bin/pg_dump/t/002_pg_dump.pl               |  46
-rw-r--r--  src/bin/pg_dump/t/003_pg_dump_with_server.pl   |   2
-rw-r--r--  src/bin/pg_dump/t/010_dump_connstr.pl          |   9
-rw-r--r--  src/bin/pg_rewind/filemap.c                    |   6
-rw-r--r--  src/bin/pg_rewind/t/004_pg_xlog_symlink.pl     |   3
-rw-r--r--  src/bin/pg_rewind/t/009_growing_files.pl       |   5
-rw-r--r--  src/bin/pg_rewind/t/RewindTest.pm              |  13
-rw-r--r--  src/bin/pg_upgrade/t/002_pg_upgrade.pl         |   5
-rw-r--r--  src/bin/pg_upgrade/util.c                      |   1
-rw-r--r--  src/bin/pg_verifybackup/t/003_corruption.pl    |   2
-rw-r--r--  src/bin/pg_verifybackup/t/004_options.pl       |   3
-rw-r--r--  src/bin/pg_verifybackup/t/005_bad_manifest.pl  |   6
-rw-r--r--  src/bin/pg_verifybackup/t/007_wal.pl           |   6
-rw-r--r--  src/bin/pg_verifybackup/t/008_untar.pl         |  75
-rw-r--r--  src/bin/pg_verifybackup/t/009_extract.pl       |  46
-rw-r--r--  src/bin/pg_verifybackup/t/010_client_untar.pl  |  91
-rw-r--r--  src/bin/pg_waldump/pg_waldump.c                |   2
-rw-r--r--  src/bin/pgbench/pgbench.c                      | 172
-rw-r--r--  src/bin/pgbench/t/001_pgbench_with_server.pl   |  36
-rw-r--r--  src/bin/pgbench/t/002_pgbench_no_server.pl     |   6
-rw-r--r--  src/bin/psql/common.c                          |  59
-rw-r--r--  src/bin/psql/describe.c                        |  10
-rw-r--r--  src/bin/psql/t/001_basic.pl                    |  57
-rw-r--r--  src/bin/psql/t/010_tab_completion.pl           |  27
-rw-r--r--  src/bin/psql/t/020_cancel.pl                   |  29
-rw-r--r--  src/bin/psql/tab-complete.c                    |   3
-rw-r--r--  src/bin/scripts/t/020_createdb.pl              |  14
47 files changed, 683 insertions, 551 deletions
diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c
index fcef651c2fc..ed6de7ca941 100644
--- a/src/bin/initdb/initdb.c
+++ b/src/bin/initdb/initdb.c
@@ -1808,8 +1808,8 @@ make_template0(FILE *cmdfd)
* the new cluster should be the result of a fresh initdb.)
*
* We use "STRATEGY = file_copy" here because checkpoints during initdb
- * are cheap. "STRATEGY = wal_log" would generate more WAL, which would
- * be a little bit slower and make the new cluster a little bit bigger.
+ * are cheap. "STRATEGY = wal_log" would generate more WAL, which would be
+ * a little bit slower and make the new cluster a little bit bigger.
*/
static const char *const template0_setup[] = {
"CREATE DATABASE template0 IS_TEMPLATE = true ALLOW_CONNECTIONS = false"
diff --git a/src/bin/initdb/t/001_initdb.pl b/src/bin/initdb/t/001_initdb.pl
index a3397777cf2..a37f6dd9b33 100644
--- a/src/bin/initdb/t/001_initdb.pl
+++ b/src/bin/initdb/t/001_initdb.pl
@@ -97,27 +97,45 @@ SKIP:
if ($ENV{with_icu} eq 'yes')
{
- command_fails_like(['initdb', '--no-sync', '--locale-provider=icu', "$tempdir/data2"],
+ command_fails_like(
+ [ 'initdb', '--no-sync', '--locale-provider=icu', "$tempdir/data2" ],
qr/initdb: error: ICU locale must be specified/,
'locale provider ICU requires --icu-locale');
- command_ok(['initdb', '--no-sync', '--locale-provider=icu', '--icu-locale=en', "$tempdir/data3"],
+ command_ok(
+ [
+ 'initdb', '--no-sync',
+ '--locale-provider=icu', '--icu-locale=en',
+ "$tempdir/data3"
+ ],
'option --icu-locale');
- command_fails_like(['initdb', '--no-sync', '--locale-provider=icu', '--icu-locale=@colNumeric=lower', "$tempdir/dataX"],
+ command_fails_like(
+ [
+ 'initdb', '--no-sync',
+ '--locale-provider=icu', '--icu-locale=@colNumeric=lower',
+ "$tempdir/dataX"
+ ],
qr/FATAL: could not open collator for locale/,
'fails for invalid ICU locale');
}
else
{
- command_fails(['initdb', '--no-sync', '--locale-provider=icu', "$tempdir/data2"],
- 'locale provider ICU fails since no ICU support');
+ command_fails(
+ [ 'initdb', '--no-sync', '--locale-provider=icu', "$tempdir/data2" ],
+ 'locale provider ICU fails since no ICU support');
}
-command_fails(['initdb', '--no-sync', '--locale-provider=xyz', "$tempdir/dataX"],
- 'fails for invalid locale provider');
+command_fails(
+ [ 'initdb', '--no-sync', '--locale-provider=xyz', "$tempdir/dataX" ],
+ 'fails for invalid locale provider');
-command_fails(['initdb', '--no-sync', '--locale-provider=libc', '--icu-locale=en', "$tempdir/dataX"],
- 'fails for invalid option combination');
+command_fails(
+ [
+ 'initdb', '--no-sync',
+ '--locale-provider=libc', '--icu-locale=en',
+ "$tempdir/dataX"
+ ],
+ 'fails for invalid option combination');
done_testing();
diff --git a/src/bin/pg_amcheck/pg_amcheck.c b/src/bin/pg_amcheck/pg_amcheck.c
index 48cee8c1c4e..f0b818e987a 100644
--- a/src/bin/pg_amcheck/pg_amcheck.c
+++ b/src/bin/pg_amcheck/pg_amcheck.c
@@ -1074,17 +1074,17 @@ verify_btree_slot_handler(PGresult *res, PGconn *conn, void *context)
if (PQresultStatus(res) == PGRES_TUPLES_OK)
{
- int ntups = PQntuples(res);
+ int ntups = PQntuples(res);
if (ntups > 1)
{
/*
* We expect the btree checking functions to return one void row
* each, or zero rows if the check was skipped due to the object
- * being in the wrong state to be checked, so we should output some
- * sort of warning if we get anything more, not because it
- * indicates corruption, but because it suggests a mismatch between
- * amcheck and pg_amcheck versions.
+ * being in the wrong state to be checked, so we should output
+ * some sort of warning if we get anything more, not because it
+ * indicates corruption, but because it suggests a mismatch
+ * between amcheck and pg_amcheck versions.
*
* In conjunction with --progress, anything written to stderr at
* this time would present strangely to the user without an extra
diff --git a/src/bin/pg_amcheck/t/002_nonesuch.pl b/src/bin/pg_amcheck/t/002_nonesuch.pl
index 6c0f97027dd..0c07016aa0c 100644
--- a/src/bin/pg_amcheck/t/002_nonesuch.pl
+++ b/src/bin/pg_amcheck/t/002_nonesuch.pl
@@ -155,8 +155,7 @@ $node->command_checks_all(
[
qr/pg_amcheck: error: improper qualified name \(too many dotted names\): localhost\.postgres/
],
- 'multipart database patterns are rejected'
-);
+ 'multipart database patterns are rejected');
# Check that a three-part schema name is rejected
$node->command_checks_all(
@@ -166,8 +165,7 @@ $node->command_checks_all(
[
qr/pg_amcheck: error: improper qualified name \(too many dotted names\): localhost\.postgres\.pg_catalog/
],
- 'three part schema patterns are rejected'
-);
+ 'three part schema patterns are rejected');
# Check that a four-part table name is rejected
$node->command_checks_all(
@@ -177,39 +175,44 @@ $node->command_checks_all(
[
qr/pg_amcheck: error: improper relation name \(too many dotted names\): localhost\.postgres\.pg_catalog\.pg_class/
],
- 'four part table patterns are rejected'
-);
+ 'four part table patterns are rejected');
# Check that too many dotted names still draws an error under --no-strict-names
# That flag means that it is ok for the object to be missing, not that it is ok
# for the object name to be ungrammatical
$node->command_checks_all(
- [ 'pg_amcheck', '--no-strict-names', '-t', 'this.is.a.really.long.dotted.string' ],
+ [
+ 'pg_amcheck', '--no-strict-names',
+ '-t', 'this.is.a.really.long.dotted.string'
+ ],
2,
[qr/^$/],
[
qr/pg_amcheck: error: improper relation name \(too many dotted names\): this\.is\.a\.really\.long\.dotted\.string/
],
- 'ungrammatical table names still draw errors under --no-strict-names'
-);
+ 'ungrammatical table names still draw errors under --no-strict-names');
$node->command_checks_all(
- [ 'pg_amcheck', '--no-strict-names', '-s', 'postgres.long.dotted.string' ],
+ [
+ 'pg_amcheck', '--no-strict-names', '-s',
+ 'postgres.long.dotted.string'
+ ],
2,
[qr/^$/],
[
qr/pg_amcheck: error: improper qualified name \(too many dotted names\): postgres\.long\.dotted\.string/
],
- 'ungrammatical schema names still draw errors under --no-strict-names'
-);
+ 'ungrammatical schema names still draw errors under --no-strict-names');
$node->command_checks_all(
- [ 'pg_amcheck', '--no-strict-names', '-d', 'postgres.long.dotted.string' ],
+ [
+ 'pg_amcheck', '--no-strict-names', '-d',
+ 'postgres.long.dotted.string'
+ ],
2,
[qr/^$/],
[
qr/pg_amcheck: error: improper qualified name \(too many dotted names\): postgres\.long\.dotted\.string/
],
- 'ungrammatical database names still draw errors under --no-strict-names'
-);
+ 'ungrammatical database names still draw errors under --no-strict-names');
# Likewise for exclusion patterns
$node->command_checks_all(
@@ -262,7 +265,7 @@ $node->command_checks_all(
'-r', 'postgres.none.none',
'-r', 'postgres.pg_catalog.none',
'-r', 'postgres.none.pg_class',
- '-t', 'postgres.pg_catalog.pg_class', # This exists
+ '-t', 'postgres.pg_catalog.pg_class', # This exists
],
0,
[qr/^$/],
diff --git a/src/bin/pg_amcheck/t/005_opclass_damage.pl b/src/bin/pg_amcheck/t/005_opclass_damage.pl
index a5e82082700..ce376f239cf 100644
--- a/src/bin/pg_amcheck/t/005_opclass_damage.pl
+++ b/src/bin/pg_amcheck/t/005_opclass_damage.pl
@@ -33,8 +33,7 @@ $node->safe_psql(
));
# We have not yet broken the index, so we should get no corruption
-$node->command_like(
- [ 'pg_amcheck', '-p', $node->port, 'postgres' ],
+$node->command_like([ 'pg_amcheck', '-p', $node->port, 'postgres' ],
qr/^$/,
'pg_amcheck all schemas, tables and indexes reports no corruption');
diff --git a/src/bin/pg_basebackup/bbstreamer_file.c b/src/bin/pg_basebackup/bbstreamer_file.c
index 393e9f340ce..1a94fb2796c 100644
--- a/src/bin/pg_basebackup/bbstreamer_file.c
+++ b/src/bin/pg_basebackup/bbstreamer_file.c
@@ -34,7 +34,7 @@ typedef struct bbstreamer_extractor
void (*report_output_file) (const char *);
char filename[MAXPGPATH];
FILE *file;
-} bbstreamer_extractor;
+} bbstreamer_extractor;
static void bbstreamer_plain_writer_content(bbstreamer *streamer,
bbstreamer_member *member,
@@ -356,7 +356,7 @@ static void
bbstreamer_extractor_finalize(bbstreamer *streamer)
{
bbstreamer_extractor *mystreamer PG_USED_FOR_ASSERTS_ONLY
- = (bbstreamer_extractor *) streamer;
+ = (bbstreamer_extractor *) streamer;
Assert(mystreamer->file == NULL);
}
diff --git a/src/bin/pg_basebackup/bbstreamer_gzip.c b/src/bin/pg_basebackup/bbstreamer_gzip.c
index b3bfcd62ac3..e7261910d81 100644
--- a/src/bin/pg_basebackup/bbstreamer_gzip.c
+++ b/src/bin/pg_basebackup/bbstreamer_gzip.c
@@ -28,7 +28,7 @@ typedef struct bbstreamer_gzip_writer
bbstreamer base;
char *pathname;
gzFile gzfile;
-} bbstreamer_gzip_writer;
+} bbstreamer_gzip_writer;
typedef struct bbstreamer_gzip_decompressor
{
@@ -52,9 +52,9 @@ const bbstreamer_ops bbstreamer_gzip_writer_ops = {
};
static void bbstreamer_gzip_decompressor_content(bbstreamer *streamer,
- bbstreamer_member *member,
- const char *data, int len,
- bbstreamer_archive_context context);
+ bbstreamer_member *member,
+ const char *data, int len,
+ bbstreamer_archive_context context);
static void bbstreamer_gzip_decompressor_finalize(bbstreamer *streamer);
static void bbstreamer_gzip_decompressor_free(bbstreamer *streamer);
static void *gzip_palloc(void *opaque, unsigned items, unsigned size);
@@ -214,8 +214,8 @@ bbstreamer *
bbstreamer_gzip_decompressor_new(bbstreamer *next)
{
#ifdef HAVE_LIBZ
- bbstreamer_gzip_decompressor *streamer;
- z_stream *zs;
+ bbstreamer_gzip_decompressor *streamer;
+ z_stream *zs;
Assert(next != NULL);
@@ -261,12 +261,12 @@ bbstreamer_gzip_decompressor_new(bbstreamer *next)
*/
static void
bbstreamer_gzip_decompressor_content(bbstreamer *streamer,
- bbstreamer_member *member,
- const char *data, int len,
- bbstreamer_archive_context context)
+ bbstreamer_member *member,
+ const char *data, int len,
+ bbstreamer_archive_context context)
{
bbstreamer_gzip_decompressor *mystreamer;
- z_stream *zs;
+ z_stream *zs;
mystreamer = (bbstreamer_gzip_decompressor *) streamer;
@@ -277,7 +277,7 @@ bbstreamer_gzip_decompressor_content(bbstreamer *streamer,
/* Process the current chunk */
while (zs->avail_in > 0)
{
- int res;
+ int res;
Assert(mystreamer->bytes_written < mystreamer->base.bbs_buffer.maxlen);
@@ -288,8 +288,9 @@ bbstreamer_gzip_decompressor_content(bbstreamer *streamer,
/*
* This call decompresses data starting at zs->next_in and updates
- * zs->next_in * and zs->avail_in. It generates output data starting at
- * zs->next_out and updates zs->next_out and zs->avail_out accordingly.
+ * zs->next_in * and zs->avail_in. It generates output data starting
+ * at zs->next_out and updates zs->next_out and zs->avail_out
+ * accordingly.
*/
res = inflate(zs, Z_NO_FLUSH);
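For context, the loop being re-wrapped here drives zlib's standard streaming interface. A minimal standalone sketch of that pattern (not part of this commit; decompress_chunk and the buffer handling are illustrative, and the z_stream is assumed to have been set up with inflateInit()):

    #include <zlib.h>

    /* Decompress one input chunk, draining the output it produces. */
    static int
    decompress_chunk(z_stream *zs, const char *data, int len,
                     char *out, int outcap)
    {
        zs->next_in = (Bytef *) data;
        zs->avail_in = len;

        while (zs->avail_in > 0)
        {
            int     res;

            zs->next_out = (Bytef *) out;
            zs->avail_out = outcap;

            /*
             * inflate() consumes input from next_in/avail_in and writes
             * output at next_out/avail_out, updating all four fields.
             */
            res = inflate(zs, Z_NO_FLUSH);
            if (res != Z_OK && res != Z_STREAM_END)
                return -1;

            /* ... hand off the outcap - zs->avail_out bytes produced ... */

            if (res == Z_STREAM_END)
                break;
        }
        return 0;
    }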
diff --git a/src/bin/pg_basebackup/bbstreamer_lz4.c b/src/bin/pg_basebackup/bbstreamer_lz4.c
index 6070a72cdb5..b9752354c91 100644
--- a/src/bin/pg_basebackup/bbstreamer_lz4.c
+++ b/src/bin/pg_basebackup/bbstreamer_lz4.c
@@ -27,9 +27,9 @@ typedef struct bbstreamer_lz4_frame
{
bbstreamer base;
- LZ4F_compressionContext_t cctx;
- LZ4F_decompressionContext_t dctx;
- LZ4F_preferences_t prefs;
+ LZ4F_compressionContext_t cctx;
+ LZ4F_decompressionContext_t dctx;
+ LZ4F_preferences_t prefs;
size_t bytes_written;
bool header_written;
@@ -70,9 +70,9 @@ bbstreamer *
bbstreamer_lz4_compressor_new(bbstreamer *next, pg_compress_specification *compress)
{
#ifdef USE_LZ4
- bbstreamer_lz4_frame *streamer;
- LZ4F_errorCode_t ctxError;
- LZ4F_preferences_t *prefs;
+ bbstreamer_lz4_frame *streamer;
+ LZ4F_errorCode_t ctxError;
+ LZ4F_preferences_t *prefs;
Assert(next != NULL);
@@ -119,12 +119,12 @@ bbstreamer_lz4_compressor_content(bbstreamer *streamer,
const char *data, int len,
bbstreamer_archive_context context)
{
- bbstreamer_lz4_frame *mystreamer;
- uint8 *next_in,
- *next_out;
- size_t out_bound,
- compressed_size,
- avail_out;
+ bbstreamer_lz4_frame *mystreamer;
+ uint8 *next_in,
+ *next_out;
+ size_t out_bound,
+ compressed_size,
+ avail_out;
mystreamer = (bbstreamer_lz4_frame *) streamer;
next_in = (uint8 *) data;
@@ -146,8 +146,8 @@ bbstreamer_lz4_compressor_content(bbstreamer *streamer,
}
/*
- * Update the offset and capacity of output buffer based on number of bytes
- * written to output buffer.
+ * Update the offset and capacity of output buffer based on number of
+ * bytes written to output buffer.
*/
next_out = (uint8 *) mystreamer->base.bbs_buffer.data + mystreamer->bytes_written;
avail_out = mystreamer->base.bbs_buffer.maxlen - mystreamer->bytes_written;
@@ -160,18 +160,18 @@ bbstreamer_lz4_compressor_content(bbstreamer *streamer,
out_bound = LZ4F_compressBound(len, &mystreamer->prefs);
if (avail_out < out_bound)
{
- bbstreamer_content(mystreamer->base.bbs_next, member,
- mystreamer->base.bbs_buffer.data,
- mystreamer->bytes_written,
- context);
-
- /* Enlarge buffer if it falls short of out bound. */
- if (mystreamer->base.bbs_buffer.maxlen < out_bound)
- enlargeStringInfo(&mystreamer->base.bbs_buffer, out_bound);
-
- avail_out = mystreamer->base.bbs_buffer.maxlen;
- mystreamer->bytes_written = 0;
- next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
+ bbstreamer_content(mystreamer->base.bbs_next, member,
+ mystreamer->base.bbs_buffer.data,
+ mystreamer->bytes_written,
+ context);
+
+ /* Enlarge buffer if it falls short of out bound. */
+ if (mystreamer->base.bbs_buffer.maxlen < out_bound)
+ enlargeStringInfo(&mystreamer->base.bbs_buffer, out_bound);
+
+ avail_out = mystreamer->base.bbs_buffer.maxlen;
+ mystreamer->bytes_written = 0;
+ next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
}
/*
@@ -199,11 +199,11 @@ bbstreamer_lz4_compressor_content(bbstreamer *streamer,
static void
bbstreamer_lz4_compressor_finalize(bbstreamer *streamer)
{
- bbstreamer_lz4_frame *mystreamer;
- uint8 *next_out;
- size_t footer_bound,
- compressed_size,
- avail_out;
+ bbstreamer_lz4_frame *mystreamer;
+ uint8 *next_out;
+ size_t footer_bound,
+ compressed_size,
+ avail_out;
mystreamer = (bbstreamer_lz4_frame *) streamer;
@@ -212,18 +212,18 @@ bbstreamer_lz4_compressor_finalize(bbstreamer *streamer)
if ((mystreamer->base.bbs_buffer.maxlen - mystreamer->bytes_written) <
footer_bound)
{
- bbstreamer_content(mystreamer->base.bbs_next, NULL,
- mystreamer->base.bbs_buffer.data,
- mystreamer->bytes_written,
- BBSTREAMER_UNKNOWN);
-
- /* Enlarge buffer if it falls short of footer bound. */
- if (mystreamer->base.bbs_buffer.maxlen < footer_bound)
- enlargeStringInfo(&mystreamer->base.bbs_buffer, footer_bound);
-
- avail_out = mystreamer->base.bbs_buffer.maxlen;
- mystreamer->bytes_written = 0;
- next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
+ bbstreamer_content(mystreamer->base.bbs_next, NULL,
+ mystreamer->base.bbs_buffer.data,
+ mystreamer->bytes_written,
+ BBSTREAMER_UNKNOWN);
+
+ /* Enlarge buffer if it falls short of footer bound. */
+ if (mystreamer->base.bbs_buffer.maxlen < footer_bound)
+ enlargeStringInfo(&mystreamer->base.bbs_buffer, footer_bound);
+
+ avail_out = mystreamer->base.bbs_buffer.maxlen;
+ mystreamer->bytes_written = 0;
+ next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
}
else
{
@@ -258,7 +258,7 @@ bbstreamer_lz4_compressor_finalize(bbstreamer *streamer)
static void
bbstreamer_lz4_compressor_free(bbstreamer *streamer)
{
- bbstreamer_lz4_frame *mystreamer;
+ bbstreamer_lz4_frame *mystreamer;
mystreamer = (bbstreamer_lz4_frame *) streamer;
bbstreamer_free(streamer->bbs_next);
@@ -276,8 +276,8 @@ bbstreamer *
bbstreamer_lz4_decompressor_new(bbstreamer *next)
{
#ifdef USE_LZ4
- bbstreamer_lz4_frame *streamer;
- LZ4F_errorCode_t ctxError;
+ bbstreamer_lz4_frame *streamer;
+ LZ4F_errorCode_t ctxError;
Assert(next != NULL);
@@ -313,11 +313,11 @@ bbstreamer_lz4_decompressor_content(bbstreamer *streamer,
const char *data, int len,
bbstreamer_archive_context context)
{
- bbstreamer_lz4_frame *mystreamer;
- uint8 *next_in,
- *next_out;
- size_t avail_in,
- avail_out;
+ bbstreamer_lz4_frame *mystreamer;
+ uint8 *next_in,
+ *next_out;
+ size_t avail_in,
+ avail_out;
mystreamer = (bbstreamer_lz4_frame *) streamer;
next_in = (uint8 *) data;
@@ -327,9 +327,9 @@ bbstreamer_lz4_decompressor_content(bbstreamer *streamer,
while (avail_in > 0)
{
- size_t ret,
- read_size,
- out_size;
+ size_t ret,
+ read_size,
+ out_size;
read_size = avail_in;
out_size = avail_out;
@@ -362,8 +362,8 @@ bbstreamer_lz4_decompressor_content(bbstreamer *streamer,
mystreamer->bytes_written += out_size;
/*
- * If output buffer is full then forward the content to next streamer and
- * update the output buffer.
+ * If output buffer is full then forward the content to next streamer
+ * and update the output buffer.
*/
if (mystreamer->bytes_written >= mystreamer->base.bbs_buffer.maxlen)
{
@@ -390,7 +390,7 @@ bbstreamer_lz4_decompressor_content(bbstreamer *streamer,
static void
bbstreamer_lz4_decompressor_finalize(bbstreamer *streamer)
{
- bbstreamer_lz4_frame *mystreamer;
+ bbstreamer_lz4_frame *mystreamer;
mystreamer = (bbstreamer_lz4_frame *) streamer;
@@ -412,7 +412,7 @@ bbstreamer_lz4_decompressor_finalize(bbstreamer *streamer)
static void
bbstreamer_lz4_decompressor_free(bbstreamer *streamer)
{
- bbstreamer_lz4_frame *mystreamer;
+ bbstreamer_lz4_frame *mystreamer;
mystreamer = (bbstreamer_lz4_frame *) streamer;
bbstreamer_free(streamer->bbs_next);
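For context, the flush-or-enlarge logic reindented above follows the LZ4 frame API contract: LZ4F_compressUpdate() must be handed at least LZ4F_compressBound() bytes of output space for the chunk. A minimal standalone sketch of that pattern (not part of this commit; compress_chunk and flush_output are illustrative names):

    #include <lz4frame.h>

    /* Compress one chunk into out[], flushing the buffer when too full. */
    static size_t
    compress_chunk(LZ4F_cctx *cctx, const LZ4F_preferences_t *prefs,
                   const void *in, size_t inlen,
                   char *out, size_t outcap, size_t *outused)
    {
        size_t  bound = LZ4F_compressBound(inlen, prefs);
        size_t  n;

        /* Not enough room for the worst case?  Flush what we have. */
        if (outcap - *outused < bound)
        {
            /* flush_output(out, *outused);  -- forward to next consumer */
            *outused = 0;       /* assumes outcap >= bound after flushing */
        }

        n = LZ4F_compressUpdate(cctx, out + *outused, outcap - *outused,
                                in, inlen, NULL);
        if (LZ4F_isError(n))
            return n;           /* caller checks with LZ4F_isError() */

        *outused += n;
        return n;
    }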
diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index 6be04544763..4adb170d464 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -58,7 +58,7 @@ typedef struct TablespaceList
typedef struct ArchiveStreamState
{
int tablespacenum;
- pg_compress_specification *compress;
+ pg_compress_specification *compress;
bbstreamer *streamer;
bbstreamer *manifest_inject_streamer;
PQExpBuffer manifest_buffer;
@@ -173,6 +173,7 @@ static int bgpipe[2] = {-1, -1};
/* Handle to child process */
static pid_t bgchild = -1;
static bool in_log_streamer = false;
+
/* Flag to indicate if child process exited unexpectedly */
static volatile sig_atomic_t bgchild_exited = false;
@@ -567,8 +568,8 @@ LogStreamerMain(logstreamer_param *param)
*/
#ifdef WIN32
/*
- * In order to signal the main thread of an ungraceful exit we
- * set the same flag that we use on Unix to signal SIGCHLD.
+ * In order to signal the main thread of an ungraceful exit we set the
+ * same flag that we use on Unix to signal SIGCHLD.
*/
bgchild_exited = true;
#endif
@@ -1010,7 +1011,7 @@ parse_compress_options(char *option, char **algorithm, char **detail,
}
else
{
- char *alg;
+ char *alg;
alg = palloc((sep - option) + 1);
memcpy(alg, option, sep - option);
@@ -1133,11 +1134,11 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
/*
* We have to parse the archive if (1) we're suppose to extract it, or if
- * (2) we need to inject backup_manifest or recovery configuration into it.
- * However, we only know how to parse tar archives.
+ * (2) we need to inject backup_manifest or recovery configuration into
+ * it. However, we only know how to parse tar archives.
*/
must_parse_archive = (format == 'p' || inject_manifest ||
- (spclocation == NULL && writerecoveryconf));
+ (spclocation == NULL && writerecoveryconf));
/* At present, we only know how to parse tar archives. */
if (must_parse_archive && !is_tar && !is_compressed_tar)
@@ -1178,8 +1179,8 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
/*
* In tar format, we just write the archive without extracting it.
* Normally, we write it to the archive name provided by the caller,
- * but when the base directory is "-" that means we need to write
- * to standard output.
+ * but when the base directory is "-" that means we need to write to
+ * standard output.
*/
if (strcmp(basedir, "-") == 0)
{
@@ -1233,16 +1234,16 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
}
/*
- * If we're supposed to inject the backup manifest into the results,
- * it should be done here, so that the file content can be injected
- * directly, without worrying about the details of the tar format.
+ * If we're supposed to inject the backup manifest into the results, it
+ * should be done here, so that the file content can be injected directly,
+ * without worrying about the details of the tar format.
*/
if (inject_manifest)
manifest_inject_streamer = streamer;
/*
- * If this is the main tablespace and we're supposed to write
- * recovery information, arrange to do that.
+ * If this is the main tablespace and we're supposed to write recovery
+ * information, arrange to do that.
*/
if (spclocation == NULL && writerecoveryconf)
{
@@ -1253,11 +1254,10 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
}
/*
- * If we're doing anything that involves understanding the contents of
- * the archive, we'll need to parse it. If not, we can skip parsing it,
- * but old versions of the server send improperly terminated tarfiles,
- * so if we're talking to such a server we'll need to add the terminator
- * here.
+ * If we're doing anything that involves understanding the contents of the
+ * archive, we'll need to parse it. If not, we can skip parsing it, but
+ * old versions of the server send improperly terminated tarfiles, so if
+ * we're talking to such a server we'll need to add the terminator here.
*/
if (must_parse_archive)
streamer = bbstreamer_tar_parser_new(streamer);
@@ -1265,8 +1265,8 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
streamer = bbstreamer_tar_terminator_new(streamer);
/*
- * If the user has requested a server compressed archive along with archive
- * extraction at client then we need to decompress it.
+ * If the user has requested a server compressed archive along with
+ * archive extraction at client then we need to decompress it.
*/
if (format == 'p')
{
@@ -1848,17 +1848,17 @@ BaseBackup(char *compression_algorithm, char *compression_detail,
}
if (maxrate > 0)
AppendIntegerCommandOption(&buf, use_new_option_syntax, "MAX_RATE",
- maxrate);
+ maxrate);
if (format == 't')
AppendPlainCommandOption(&buf, use_new_option_syntax, "TABLESPACE_MAP");
if (!verify_checksums)
{
if (use_new_option_syntax)
AppendIntegerCommandOption(&buf, use_new_option_syntax,
- "VERIFY_CHECKSUMS", 0);
+ "VERIFY_CHECKSUMS", 0);
else
AppendPlainCommandOption(&buf, use_new_option_syntax,
- "NOVERIFY_CHECKSUMS");
+ "NOVERIFY_CHECKSUMS");
}
if (manifest)
@@ -1992,8 +1992,8 @@ BaseBackup(char *compression_algorithm, char *compression_detail,
* we do anything anyway.
*
* Note that this is skipped for tar format backups and backups that
- * the server is storing to a target location, since in that case
- * we won't be storing anything into these directories and thus should
+ * the server is storing to a target location, since in that case we
+ * won't be storing anything into these directories and thus should
* not create them.
*/
if (backup_target == NULL && format == 'p' && !PQgetisnull(res, i, 1))
@@ -2019,8 +2019,8 @@ BaseBackup(char *compression_algorithm, char *compression_detail,
*/
if (includewal == STREAM_WAL)
{
- pg_compress_algorithm wal_compress_algorithm;
- int wal_compress_level;
+ pg_compress_algorithm wal_compress_algorithm;
+ int wal_compress_level;
if (verbose)
pg_log_info("starting background WAL receiver");
@@ -2315,8 +2315,8 @@ main(int argc, char **argv)
int option_index;
char *compression_algorithm = "none";
char *compression_detail = NULL;
- CompressionLocation compressloc = COMPRESS_LOCATION_UNSPECIFIED;
- pg_compress_specification client_compress;
+ CompressionLocation compressloc = COMPRESS_LOCATION_UNSPECIFIED;
+ pg_compress_specification client_compress;
pg_logging_init(argv[0]);
progname = get_progname(argv[0]);
@@ -2539,8 +2539,8 @@ main(int argc, char **argv)
/*
* If the user has not specified where to perform backup compression,
- * default to the client, unless the user specified --target, in which case
- * the server is the only choice.
+ * default to the client, unless the user specified --target, in which
+ * case the server is the only choice.
*/
if (compressloc == COMPRESS_LOCATION_UNSPECIFIED)
{
@@ -2551,14 +2551,14 @@ main(int argc, char **argv)
}
/*
- * If any compression that we're doing is happening on the client side,
- * we must try to parse the compression algorithm and detail, but if it's
- * all on the server side, then we're just going to pass through whatever
- * was requested and let the server decide what to do.
+ * If any compression that we're doing is happening on the client side, we
+ * must try to parse the compression algorithm and detail, but if it's all
+ * on the server side, then we're just going to pass through whatever was
+ * requested and let the server decide what to do.
*/
if (compressloc == COMPRESS_LOCATION_CLIENT)
{
- pg_compress_algorithm alg;
+ pg_compress_algorithm alg;
char *error_detail;
if (!parse_compress_algorithm(compression_algorithm, &alg))
@@ -2579,8 +2579,8 @@ main(int argc, char **argv)
}
/*
- * Can't perform client-side compression if the backup is not being
- * sent to the client.
+ * Can't perform client-side compression if the backup is not being sent
+ * to the client.
*/
if (backup_target != NULL && compressloc == COMPRESS_LOCATION_CLIENT)
{
@@ -2724,13 +2724,14 @@ main(int argc, char **argv)
atexit(disconnect_atexit);
#ifndef WIN32
+
/*
* Trap SIGCHLD to be able to handle the WAL stream process exiting. There
- * is no SIGCHLD on Windows, there we rely on the background thread setting
- * the signal variable on unexpected but graceful exit. If the WAL stream
- * thread crashes on Windows it will bring down the entire process as it's
- * a thread, so there is nothing to catch should that happen. A crash on
- * UNIX will be caught by the signal handler.
+ * is no SIGCHLD on Windows, there we rely on the background thread
+ * setting the signal variable on unexpected but graceful exit. If the WAL
+ * stream thread crashes on Windows it will bring down the entire process
+ * as it's a thread, so there is nothing to catch should that happen. A
+ * crash on UNIX will be caught by the signal handler.
*/
pqsignal(SIGCHLD, sigchld_handler);
#endif
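For context, the Unix half of the arrangement described in the comment above is the classic flag-only signal handler: the handler does nothing but set a volatile sig_atomic_t, which the main code polls. A minimal standalone sketch using plain sigaction() (pg_basebackup itself installs the handler through pqsignal()):

    #include <signal.h>

    static volatile sig_atomic_t bgchild_exited = 0;

    static void
    sigchld_handler(int signum)
    {
        /* Only async-signal-safe work here: set the flag and return. */
        (void) signum;
        bgchild_exited = 1;
    }

    static void
    install_sigchld_handler(void)
    {
        struct sigaction sa;

        sa.sa_handler = sigchld_handler;
        sigemptyset(&sa.sa_mask);
        sa.sa_flags = 0;
        sigaction(SIGCHLD, &sa, NULL);
        /* ... main loop periodically checks bgchild_exited ... */
    }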
diff --git a/src/bin/pg_basebackup/streamutil.c b/src/bin/pg_basebackup/streamutil.c
index 86c0493a949..299b9b76213 100644
--- a/src/bin/pg_basebackup/streamutil.c
+++ b/src/bin/pg_basebackup/streamutil.c
@@ -619,7 +619,7 @@ CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin,
/* pg_recvlogical doesn't use an exported snapshot, so suppress */
if (use_new_option_syntax)
AppendStringCommandOption(query, use_new_option_syntax,
- "SNAPSHOT", "nothing");
+ "SNAPSHOT", "nothing");
else
AppendPlainCommandOption(query, use_new_option_syntax,
"NOEXPORT_SNAPSHOT");
diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
index 056fcf35976..87a211315f0 100644
--- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl
+++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
@@ -28,8 +28,9 @@ my @pg_basebackup_defs = ('pg_basebackup', '--no-sync', '-cfast');
umask(0077);
# Initialize node without replication settings
-$node->init(extra => ['--data-checksums'],
- auth_extra => [ '--create-role', 'backupuser' ]);
+$node->init(
+ extra => ['--data-checksums'],
+ auth_extra => [ '--create-role', 'backupuser' ]);
$node->start;
my $pgdata = $node->data_dir;
@@ -85,10 +86,9 @@ $node->restart;
# Now that we have a server that supports replication commands, test whether
# certain invalid compression commands fail on the client side with client-side
# compression and on the server side with server-side compression.
-my $client_fails =
- 'pg_basebackup: error: ';
+my $client_fails = 'pg_basebackup: error: ';
my $server_fails =
- 'pg_basebackup: error: could not initiate base backup: ERROR: ';
+ 'pg_basebackup: error: could not initiate base backup: ERROR: ';
my @compression_failure_tests = (
[
'extrasquishy',
@@ -134,8 +134,7 @@ my @compression_failure_tests = (
'gzip:workers=3',
'invalid compression specification: compression algorithm "gzip" does not accept a worker count',
'failure on worker count for gzip'
- ],
-);
+ ],);
for my $cft (@compression_failure_tests)
{
my $cfail = quotemeta($client_fails . $cft->[1]);
@@ -143,10 +142,13 @@ for my $cft (@compression_failure_tests)
$node->command_fails_like(
[ 'pg_basebackup', '-D', "$tempdir/backup", '--compress', $cft->[0] ],
qr/$cfail/,
- 'client '. $cft->[2]);
+ 'client ' . $cft->[2]);
$node->command_fails_like(
- [ 'pg_basebackup', '-D', "$tempdir/backup", '--compress',
- 'server-' . $cft->[0] ],
+ [
+ 'pg_basebackup', '-D',
+ "$tempdir/backup", '--compress',
+ 'server-' . $cft->[0]
+ ],
qr/$sfail/,
'server ' . $cft->[2]);
}
@@ -189,7 +191,8 @@ foreach my $filename (@tempRelationFiles)
}
# Run base backup.
-$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/backup", '-X', 'none' ],
+$node->command_ok(
+ [ @pg_basebackup_defs, '-D', "$tempdir/backup", '-X', 'none' ],
'pg_basebackup runs');
ok(-f "$tempdir/backup/PG_VERSION", 'backup was created');
ok(-f "$tempdir/backup/backup_manifest", 'backup manifest included');
@@ -326,12 +329,12 @@ $node->start;
# to our physical temp location. That way we can use shorter names
# for the tablespace directories, which hopefully won't run afoul of
# the 99 character length limit.
-my $sys_tempdir = PostgreSQL::Test::Utils::tempdir_short;
+my $sys_tempdir = PostgreSQL::Test::Utils::tempdir_short;
my $real_sys_tempdir = "$sys_tempdir/tempdir";
dir_symlink "$tempdir", $real_sys_tempdir;
mkdir "$tempdir/tblspc1";
-my $realTsDir = "$real_sys_tempdir/tblspc1";
+my $realTsDir = "$real_sys_tempdir/tblspc1";
$node->safe_psql('postgres',
"CREATE TABLESPACE tblspc1 LOCATION '$realTsDir';");
$node->safe_psql('postgres',
@@ -368,7 +371,8 @@ SKIP:
my $repTsDir = "$tempdir/tblspc1replica";
my $realRepTsDir = "$real_sys_tempdir/tblspc1replica";
mkdir $repTsDir;
- PostgreSQL::Test::Utils::system_or_bail($tar, 'xf', $tblspc_tars[0], '-C', $repTsDir);
+ PostgreSQL::Test::Utils::system_or_bail($tar, 'xf', $tblspc_tars[0],
+ '-C', $repTsDir);
# Update tablespace map to point to new directory.
# XXX Ideally pg_basebackup would handle this.
@@ -503,7 +507,8 @@ mkdir "$tempdir/$superlongname";
$realTsDir = "$real_sys_tempdir/$superlongname";
$node->safe_psql('postgres',
"CREATE TABLESPACE tblspc3 LOCATION '$realTsDir';");
-$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l3", '-Ft' ],
+$node->command_ok(
+ [ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l3", '-Ft' ],
'pg_basebackup tar with long symlink target');
$node->safe_psql('postgres', "DROP TABLESPACE tblspc3;");
rmtree("$tempdir/tarbackup_l3");
@@ -541,7 +546,10 @@ ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxs/pg_wal")),
'WAL files copied');
rmtree("$tempdir/backupxs");
$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxst", '-X', 'stream', '-Ft' ],
+ [
+ @pg_basebackup_defs, '-D', "$tempdir/backupxst", '-X', 'stream',
+ '-Ft'
+ ],
'pg_basebackup -X stream runs in tar mode');
ok(-f "$tempdir/backupxst/pg_wal.tar", "tar file was created");
rmtree("$tempdir/backupxst");
@@ -570,7 +578,10 @@ $node->command_fails_like(
qr/unrecognized target/,
'backup target unrecognized');
$node->command_fails_like(
- [ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'none', '-D', "$tempdir/blackhole" ],
+ [
+ @pg_basebackup_defs, '--target', 'blackhole', '-X',
+ 'none', '-D', "$tempdir/blackhole"
+ ],
qr/cannot specify both output directory and backup target/,
'backup target and output directory');
$node->command_fails_like(
@@ -581,7 +592,11 @@ $node->command_ok(
[ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'none' ],
'backup target blackhole');
$node->command_ok(
- [ @pg_basebackup_defs, '--target', "server:$tempdir/backuponserver", '-X', 'none' ],
+ [
+ @pg_basebackup_defs, '--target',
+ "server:$tempdir/backuponserver", '-X',
+ 'none'
+ ],
'backup target server');
ok(-f "$tempdir/backuponserver/base.tar", 'backup tar was created');
rmtree("$tempdir/backuponserver");
@@ -590,9 +605,14 @@ $node->command_ok(
[qw(createuser --replication --role=pg_write_server_files backupuser)],
'create backup user');
$node->command_ok(
- [ @pg_basebackup_defs, '-U', 'backupuser', '--target', "server:$tempdir/backuponserver", '-X', 'none' ],
+ [
+ @pg_basebackup_defs, '-U', 'backupuser', '--target',
+ "server:$tempdir/backuponserver",
+ '-X', 'none'
+ ],
'backup target server');
-ok(-f "$tempdir/backuponserver/base.tar", 'backup tar was created as non-superuser');
+ok( -f "$tempdir/backuponserver/base.tar",
+ 'backup tar was created as non-superuser');
rmtree("$tempdir/backuponserver");
$node->command_fails(
@@ -617,7 +637,10 @@ $node->command_fails(
],
'pg_basebackup fails with -C -S --no-slot');
$node->command_fails_like(
- [ @pg_basebackup_defs, '--target', 'blackhole', '-D', "$tempdir/blackhole" ],
+ [
+ @pg_basebackup_defs, '--target', 'blackhole', '-D',
+ "$tempdir/blackhole"
+ ],
qr/cannot specify both output directory and backup target/,
'backup target and output directory');
@@ -648,7 +671,11 @@ $node->command_fails(
'pg_basebackup fails with -C -S --no-slot');
$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot", '-C', '-S', 'slot0' ],
+ [
+ @pg_basebackup_defs, '-D',
+ "$tempdir/backupxs_slot", '-C',
+ '-S', 'slot0'
+ ],
'pg_basebackup -C runs');
rmtree("$tempdir/backupxs_slot");
@@ -667,7 +694,11 @@ isnt(
'restart LSN of new slot is not null');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot1", '-C', '-S', 'slot0' ],
+ [
+ @pg_basebackup_defs, '-D',
+ "$tempdir/backupxs_slot1", '-C',
+ '-S', 'slot0'
+ ],
'pg_basebackup fails with -C -S and a previously existing slot');
$node->safe_psql('postgres',
@@ -677,7 +708,10 @@ my $lsn = $node->safe_psql('postgres',
);
is($lsn, '', 'restart LSN of new slot is null');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/fail", '-S', 'slot1', '-X', 'none' ],
+ [
+ @pg_basebackup_defs, '-D', "$tempdir/fail", '-S',
+ 'slot1', '-X', 'none'
+ ],
'pg_basebackup with replication slot fails without WAL streaming');
$node->command_ok(
[
@@ -843,8 +877,10 @@ my $sigchld_bb_timeout =
my ($sigchld_bb_stdin, $sigchld_bb_stdout, $sigchld_bb_stderr) = ('', '', '');
my $sigchld_bb = IPC::Run::start(
[
- @pg_basebackup_defs, '--wal-method=stream', '-D', "$tempdir/sigchld",
- '--max-rate=32', '-d', $node->connstr('postgres')
+ @pg_basebackup_defs, '--wal-method=stream',
+ '-D', "$tempdir/sigchld",
+ '--max-rate=32', '-d',
+ $node->connstr('postgres')
],
'<',
\$sigchld_bb_stdin,
@@ -854,16 +890,18 @@ my $sigchld_bb = IPC::Run::start(
\$sigchld_bb_stderr,
$sigchld_bb_timeout);
-is($node->poll_query_until('postgres',
- "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE " .
- "application_name = '010_pg_basebackup.pl' AND wait_event = 'WalSenderMain' " .
- "AND backend_type = 'walsender' AND query ~ 'START_REPLICATION'"),
+is( $node->poll_query_until(
+ 'postgres',
+ "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE "
+ . "application_name = '010_pg_basebackup.pl' AND wait_event = 'WalSenderMain' "
+ . "AND backend_type = 'walsender' AND query ~ 'START_REPLICATION'"),
"1",
"Walsender killed");
-ok(pump_until($sigchld_bb, $sigchld_bb_timeout, \$sigchld_bb_stderr,
- qr/background process terminated unexpectedly/),
- 'background process exit message');
+ok( pump_until(
+ $sigchld_bb, $sigchld_bb_timeout,
+ \$sigchld_bb_stderr, qr/background process terminated unexpectedly/),
+ 'background process exit message');
$sigchld_bb->finish();
done_testing();
diff --git a/src/bin/pg_basebackup/t/020_pg_receivewal.pl b/src/bin/pg_basebackup/t/020_pg_receivewal.pl
index 465394404fd..4f07bb89078 100644
--- a/src/bin/pg_basebackup/t/020_pg_receivewal.pl
+++ b/src/bin/pg_basebackup/t/020_pg_receivewal.pl
@@ -45,7 +45,7 @@ $primary->command_ok(
'creating a replication slot');
my $slot = $primary->slot($slot_name);
is($slot->{'slot_type'}, 'physical', 'physical replication slot was created');
-is($slot->{'restart_lsn'}, '', 'restart LSN of new slot is null');
+is($slot->{'restart_lsn'}, '', 'restart LSN of new slot is null');
$primary->command_ok([ 'pg_receivewal', '--slot', $slot_name, '--drop-slot' ],
'dropping a replication slot');
is($primary->slot($slot_name)->{'slot_type'},
@@ -281,7 +281,7 @@ $standby->psql(
$primary->wait_for_catchup($standby);
# Get a walfilename from before the promotion to make sure it is archived
# after promotion
-my $standby_slot = $standby->slot($archive_slot);
+my $standby_slot = $standby->slot($archive_slot);
my $replication_slot_lsn = $standby_slot->{'restart_lsn'};
# pg_walfile_name() is not supported while in recovery, so use the primary
diff --git a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
index 201196f9573..38576c2e008 100644
--- a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
+++ b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
@@ -78,7 +78,8 @@ $node->command_ok(
[
'pg_recvlogical', '-S',
'test', '-d',
- $node->connstr('postgres'), '--create-slot', '--two-phase'
+ $node->connstr('postgres'), '--create-slot',
+ '--two-phase'
],
'slot with two-phase created');
@@ -87,16 +88,18 @@ isnt($slot->{'restart_lsn'}, '', 'restart lsn is defined for new slot');
$node->safe_psql('postgres',
"BEGIN; INSERT INTO test_table values (11); PREPARE TRANSACTION 'test'");
-$node->safe_psql('postgres',
- "COMMIT PREPARED 'test'");
-$nextlsn =
- $node->safe_psql('postgres', 'SELECT pg_current_wal_insert_lsn()');
+$node->safe_psql('postgres', "COMMIT PREPARED 'test'");
+$nextlsn = $node->safe_psql('postgres', 'SELECT pg_current_wal_insert_lsn()');
chomp($nextlsn);
$node->command_fails(
[
- 'pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'),
- '--start', '--endpos', "$nextlsn", '--two-phase', '--no-loop', '-f', '-'
+ 'pg_recvlogical', '-S',
+ 'test', '-d',
+ $node->connstr('postgres'), '--start',
+ '--endpos', "$nextlsn",
+ '--two-phase', '--no-loop',
+ '-f', '-'
],
'incorrect usage');
diff --git a/src/bin/pg_ctl/pg_ctl.c b/src/bin/pg_ctl/pg_ctl.c
index f605e02da88..dd78e5bc660 100644
--- a/src/bin/pg_ctl/pg_ctl.c
+++ b/src/bin/pg_ctl/pg_ctl.c
@@ -1750,7 +1750,7 @@ typedef BOOL (WINAPI * __QueryInformationJobObject) (HANDLE, JOBOBJECTINFOCLASS,
* achieves the goal of postmaster running in a similar environment as pg_ctl.
*/
static void
-InheritStdHandles(STARTUPINFO* si)
+InheritStdHandles(STARTUPINFO *si)
{
si->dwFlags |= STARTF_USESTDHANDLES;
si->hStdInput = GetStdHandle(STD_INPUT_HANDLE);
@@ -1802,8 +1802,8 @@ CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo, bool as_ser
si.cb = sizeof(si);
/*
- * Set stdin/stdout/stderr handles to be inherited in the child
- * process. That allows postmaster and the processes it starts to perform
+ * Set stdin/stdout/stderr handles to be inherited in the child process.
+ * That allows postmaster and the processes it starts to perform
* additional checks to see if running in a service (otherwise they get
* the default console handles - which point to "somewhere").
*/
diff --git a/src/bin/pg_ctl/t/002_status.pl b/src/bin/pg_ctl/t/002_status.pl
index 2503d74a76d..ab26ee686ca 100644
--- a/src/bin/pg_ctl/t/002_status.pl
+++ b/src/bin/pg_ctl/t/002_status.pl
@@ -8,7 +8,7 @@ use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
-my $tempdir = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
command_exit_is([ 'pg_ctl', 'status', '-D', "$tempdir/nonexistent" ],
4, 'pg_ctl status with nonexistent directory');
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 24e42fa5d7d..77fe51a3a53 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -2580,12 +2580,12 @@ ReadToc(ArchiveHandle *AH)
is_supported = false;
else
{
- tmp = ReadStr(AH);
+ tmp = ReadStr(AH);
- if (strcmp(tmp, "true") == 0)
- is_supported = false;
+ if (strcmp(tmp, "true") == 0)
+ is_supported = false;
- free(tmp);
+ free(tmp);
}
if (!is_supported)
diff --git a/src/bin/pg_dump/pg_backup_custom.c b/src/bin/pg_dump/pg_backup_custom.c
index c3b9c365d5c..3443eef6b0e 100644
--- a/src/bin/pg_dump/pg_backup_custom.c
+++ b/src/bin/pg_dump/pg_backup_custom.c
@@ -956,11 +956,11 @@ _readBlockHeader(ArchiveHandle *AH, int *type, int *id)
int byt;
/*
- * Note: if we are at EOF with a pre-1.3 input file, we'll pg_fatal() inside
- * ReadInt rather than returning EOF. It doesn't seem worth jumping
- * through hoops to deal with that case better, because no such files are
- * likely to exist in the wild: only some 7.1 development versions of
- * pg_dump ever generated such files.
+ * Note: if we are at EOF with a pre-1.3 input file, we'll pg_fatal()
+ * inside ReadInt rather than returning EOF. It doesn't seem worth
+ * jumping through hoops to deal with that case better, because no such
+ * files are likely to exist in the wild: only some 7.1 development
+ * versions of pg_dump ever generated such files.
*/
if (AH->version < K_VERS_1_3)
*type = BLK_DATA;
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 786d592e2ba..7cc9c72e492 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -1318,8 +1318,8 @@ expand_schema_name_patterns(Archive *fout,
for (cell = patterns->head; cell; cell = cell->next)
{
- PQExpBufferData dbbuf;
- int dotcnt;
+ PQExpBufferData dbbuf;
+ int dotcnt;
appendPQExpBufferStr(query,
"SELECT oid FROM pg_catalog.pg_namespace n\n");
@@ -1376,7 +1376,7 @@ expand_extension_name_patterns(Archive *fout,
*/
for (cell = patterns->head; cell; cell = cell->next)
{
- int dotcnt;
+ int dotcnt;
appendPQExpBufferStr(query,
"SELECT oid FROM pg_catalog.pg_extension e\n");
@@ -1429,7 +1429,7 @@ expand_foreign_server_name_patterns(Archive *fout,
for (cell = patterns->head; cell; cell = cell->next)
{
- int dotcnt;
+ int dotcnt;
appendPQExpBufferStr(query,
"SELECT oid FROM pg_catalog.pg_foreign_server s\n");
@@ -1481,8 +1481,8 @@ expand_table_name_patterns(Archive *fout,
for (cell = patterns->head; cell; cell = cell->next)
{
- PQExpBufferData dbbuf;
- int dotcnt;
+ PQExpBufferData dbbuf;
+ int dotcnt;
/*
* Query must remain ABSOLUTELY devoid of unqualified names. This
@@ -4342,7 +4342,8 @@ dumpPublicationTable(Archive *fout, const PublicationRelInfo *pubrinfo)
{
/*
* It's necessary to add parentheses around the expression because
- * pg_get_expr won't supply the parentheses for things like WHERE TRUE.
+ * pg_get_expr won't supply the parentheses for things like WHERE
+ * TRUE.
*/
appendPQExpBuffer(query, " WHERE (%s)", pubrinfo->pubrelqual);
}
@@ -4858,8 +4859,8 @@ binary_upgrade_set_pg_class_oids(Archive *fout,
/*
* Not every relation has storage. Also, in a pre-v12 database,
- * partitioned tables have a relfilenode, which should not be preserved
- * when upgrading.
+ * partitioned tables have a relfilenode, which should not be
+ * preserved when upgrading.
*/
if (OidIsValid(relfilenode) && relkind != RELKIND_PARTITIONED_TABLE)
appendPQExpBuffer(upgrade_buffer,
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 52f9f7c4d66..ae41a652d79 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -1269,7 +1269,7 @@ expand_dbname_patterns(PGconn *conn,
for (SimpleStringListCell *cell = patterns->head; cell; cell = cell->next)
{
- int dotcnt;
+ int dotcnt;
appendPQExpBufferStr(query,
"SELECT datname FROM pg_catalog.pg_database n\n");
diff --git a/src/bin/pg_dump/t/001_basic.pl b/src/bin/pg_dump/t/001_basic.pl
index 65e6c01fed7..a583c8a6d24 100644
--- a/src/bin/pg_dump/t/001_basic.pl
+++ b/src/bin/pg_dump/t/001_basic.pl
@@ -8,7 +8,7 @@ use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
-my $tempdir = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
#########################################
# Basic checks
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index 3b31e13f62b..1f08716f690 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -8,7 +8,7 @@ use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
-my $tempdir = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
###############################################################
# Definition of the pg_dump runs to make.
@@ -2439,7 +2439,7 @@ my %tests = (
'CREATE PUBLICATION pub3' => {
create_order => 50,
create_sql => 'CREATE PUBLICATION pub3;',
- regexp => qr/^
+ regexp => qr/^
\QCREATE PUBLICATION pub3 WITH (publish = 'insert, update, delete, truncate');\E
/xm,
like => { %full_runs, section_post_data => 1, },
@@ -2448,7 +2448,7 @@ my %tests = (
'CREATE PUBLICATION pub4' => {
create_order => 50,
create_sql => 'CREATE PUBLICATION pub4;',
- regexp => qr/^
+ regexp => qr/^
\QCREATE PUBLICATION pub4 WITH (publish = 'insert, update, delete, truncate');\E
/xm,
like => { %full_runs, section_post_data => 1, },
@@ -2501,7 +2501,8 @@ my %tests = (
unlike => { exclude_dump_test_schema => 1, },
},
- 'ALTER PUBLICATION pub1 ADD TABLE test_seventh_table (col3, col2) WHERE (col1 = 1)' => {
+ 'ALTER PUBLICATION pub1 ADD TABLE test_seventh_table (col3, col2) WHERE (col1 = 1)'
+ => {
create_order => 52,
create_sql =>
'ALTER PUBLICATION pub1 ADD TABLE dump_test.test_seventh_table (col3, col2) WHERE (col1 = 1);',
@@ -2510,7 +2511,7 @@ my %tests = (
/xm,
like => { %full_runs, section_post_data => 1, },
unlike => { exclude_dump_test_schema => 1, },
- },
+ },
'ALTER PUBLICATION pub3 ADD ALL TABLES IN SCHEMA dump_test' => {
create_order => 51,
@@ -2519,7 +2520,7 @@ my %tests = (
regexp => qr/^
\QALTER PUBLICATION pub3 ADD ALL TABLES IN SCHEMA dump_test;\E
/xm,
- like => { %full_runs, section_post_data => 1, },
+ like => { %full_runs, section_post_data => 1, },
unlike => { exclude_dump_test_schema => 1, },
},
@@ -2540,14 +2541,15 @@ my %tests = (
regexp => qr/^
\QALTER PUBLICATION pub4 ADD TABLE ONLY dump_test.test_table WHERE ((col1 > 0));\E
/xm,
- like => { %full_runs, section_post_data => 1, },
+ like => { %full_runs, section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
exclude_test_table => 1,
},
},
- 'ALTER PUBLICATION pub4 ADD TABLE test_second_table WHERE (col2 = \'test\');' => {
+ 'ALTER PUBLICATION pub4 ADD TABLE test_second_table WHERE (col2 = \'test\');'
+ => {
create_order => 52,
create_sql =>
'ALTER PUBLICATION pub4 ADD TABLE dump_test.test_second_table WHERE (col2 = \'test\');',
@@ -2556,7 +2558,7 @@ my %tests = (
/xm,
like => { %full_runs, section_post_data => 1, },
unlike => { exclude_dump_test_schema => 1, },
- },
+ },
'CREATE SCHEMA public' => {
regexp => qr/^CREATE SCHEMA public;/m,
@@ -3979,14 +3981,12 @@ command_fails_like(
$node->command_fails_like(
[ 'pg_dumpall', '--exclude-database', '.' ],
qr/pg_dumpall: error: improper qualified name \(too many dotted names\): \./,
- 'pg_dumpall: option --exclude-database rejects multipart pattern "."'
-);
+ 'pg_dumpall: option --exclude-database rejects multipart pattern "."');
$node->command_fails_like(
[ 'pg_dumpall', '--exclude-database', 'myhost.mydb' ],
qr/pg_dumpall: error: improper qualified name \(too many dotted names\): myhost\.mydb/,
- 'pg_dumpall: option --exclude-database rejects multipart database names'
-);
+ 'pg_dumpall: option --exclude-database rejects multipart database names');
#########################################
# Test valid database exclusion patterns
@@ -4002,20 +4002,17 @@ $node->command_ok(
$node->command_fails_like(
[ 'pg_dump', '--schema', 'myhost.mydb.myschema' ],
qr/pg_dump: error: improper qualified name \(too many dotted names\): myhost\.mydb\.myschema/,
- 'pg_dump: option --schema rejects three-part schema names'
-);
+ 'pg_dump: option --schema rejects three-part schema names');
$node->command_fails_like(
[ 'pg_dump', '--schema', 'otherdb.myschema' ],
qr/pg_dump: error: cross-database references are not implemented: otherdb\.myschema/,
- 'pg_dump: option --schema rejects cross-database multipart schema names'
-);
+ 'pg_dump: option --schema rejects cross-database multipart schema names');
$node->command_fails_like(
[ 'pg_dump', '--schema', '.' ],
qr/pg_dump: error: cross-database references are not implemented: \./,
- 'pg_dump: option --schema rejects degenerate two-part schema name: "."'
-);
+ 'pg_dump: option --schema rejects degenerate two-part schema name: "."');
$node->command_fails_like(
[ 'pg_dump', '--schema', '"some.other.db".myschema' ],
@@ -4035,17 +4032,18 @@ $node->command_fails_like(
$node->command_fails_like(
[ 'pg_dump', '--table', 'myhost.mydb.myschema.mytable' ],
qr/pg_dump: error: improper relation name \(too many dotted names\): myhost\.mydb\.myschema\.mytable/,
- 'pg_dump: option --table rejects four-part table names'
-);
+ 'pg_dump: option --table rejects four-part table names');
$node->command_fails_like(
[ 'pg_dump', '--table', 'otherdb.pg_catalog.pg_class' ],
qr/pg_dump: error: cross-database references are not implemented: otherdb\.pg_catalog\.pg_class/,
- 'pg_dump: option --table rejects cross-database three part table names'
-);
+ 'pg_dump: option --table rejects cross-database three part table names');
command_fails_like(
- [ 'pg_dump', '-p', "$port", '--table', '"some.other.db".pg_catalog.pg_class' ],
+ [
+ 'pg_dump', '-p', "$port", '--table',
+ '"some.other.db".pg_catalog.pg_class'
+ ],
qr/pg_dump: error: cross-database references are not implemented: "some\.other\.db"\.pg_catalog\.pg_class/,
'pg_dump: option --table rejects cross-database three part table names with embedded dots'
);
diff --git a/src/bin/pg_dump/t/003_pg_dump_with_server.pl b/src/bin/pg_dump/t/003_pg_dump_with_server.pl
index c2848663264..a0b23aae0fb 100644
--- a/src/bin/pg_dump/t/003_pg_dump_with_server.pl
+++ b/src/bin/pg_dump/t/003_pg_dump_with_server.pl
@@ -8,7 +8,7 @@ use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
-my $tempdir = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
my $node = PostgreSQL::Test::Cluster->new('main');
my $port = $node->port;
diff --git a/src/bin/pg_dump/t/010_dump_connstr.pl b/src/bin/pg_dump/t/010_dump_connstr.pl
index 7a745ade0fb..6e497447c35 100644
--- a/src/bin/pg_dump/t/010_dump_connstr.pl
+++ b/src/bin/pg_dump/t/010_dump_connstr.pl
@@ -30,8 +30,10 @@ my $dbname1 =
. generate_ascii_string(1, 9)
. generate_ascii_string(11, 12)
. generate_ascii_string(14, 33)
- . ($PostgreSQL::Test::Utils::windows_os ? '' : '"x"') # IPC::Run mishandles '"' on Windows
- . generate_ascii_string(35, 43) # skip ','
+ . ($PostgreSQL::Test::Utils::windows_os
+ ? ''
+ : '"x"') # IPC::Run mishandles '"' on Windows
+ . generate_ascii_string(35, 43) # skip ','
. generate_ascii_string(45, 54);
my $dbname2 = 'regression' . generate_ascii_string(55, 65) # skip 'B'-'W'
. generate_ascii_string(88, 99) # skip 'd'-'w'
@@ -171,7 +173,8 @@ system_log('cat', $plain);
my ($stderr, $result);
my $restore_super = qq{regress_a'b\\c=d\\ne"f};
$restore_super =~ s/"//g
- if $PostgreSQL::Test::Utils::windows_os; # IPC::Run mishandles '"' on Windows
+ if
+ $PostgreSQL::Test::Utils::windows_os; # IPC::Run mishandles '"' on Windows
# Restore full dump through psql using environment variables for
diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c
index d61067f6b2e..62529310415 100644
--- a/src/bin/pg_rewind/filemap.c
+++ b/src/bin/pg_rewind/filemap.c
@@ -139,9 +139,9 @@ static const struct exclude_list_item excludeFiles[] =
{"pg_internal.init", true}, /* defined as RELCACHE_INIT_FILENAME */
/*
- * If there is a backup_label or tablespace_map file, it indicates that
- * a recovery failed and this cluster probably can't be rewound, but
- * exclude them anyway if they are found.
+ * If there is a backup_label or tablespace_map file, it indicates that a
+ * recovery failed and this cluster probably can't be rewound, but exclude
+ * them anyway if they are found.
*/
{"backup_label", false}, /* defined as BACKUP_LABEL_FILE */
{"tablespace_map", false}, /* defined as TABLESPACE_MAP */
diff --git a/src/bin/pg_rewind/t/004_pg_xlog_symlink.pl b/src/bin/pg_rewind/t/004_pg_xlog_symlink.pl
index 805935c6fd5..5aafe586e14 100644
--- a/src/bin/pg_rewind/t/004_pg_xlog_symlink.pl
+++ b/src/bin/pg_rewind/t/004_pg_xlog_symlink.pl
@@ -20,7 +20,8 @@ sub run_test
{
my $test_mode = shift;
- my $primary_xlogdir = "${PostgreSQL::Test::Utils::tmp_check}/xlog_primary";
+ my $primary_xlogdir =
+ "${PostgreSQL::Test::Utils::tmp_check}/xlog_primary";
rmtree($primary_xlogdir);
RewindTest::setup_cluster($test_mode);
diff --git a/src/bin/pg_rewind/t/009_growing_files.pl b/src/bin/pg_rewind/t/009_growing_files.pl
index a5a58dbe060..9422828712a 100644
--- a/src/bin/pg_rewind/t/009_growing_files.pl
+++ b/src/bin/pg_rewind/t/009_growing_files.pl
@@ -51,12 +51,13 @@ append_to_file "$standby_pgdata/tst_both_dir/file1", 'a';
# copy operation and the result will be an error.
my $ret = run_log(
[
- 'pg_rewind', '--debug',
+ 'pg_rewind', '--debug',
'--source-pgdata', $standby_pgdata,
'--target-pgdata', $primary_pgdata,
'--no-sync',
],
- '2>>', "$standby_pgdata/tst_both_dir/file1");
+ '2>>',
+ "$standby_pgdata/tst_both_dir/file1");
ok(!$ret, 'Error out on copying growing file');
# Ensure that the files are of different size, the final error message should
diff --git a/src/bin/pg_rewind/t/RewindTest.pm b/src/bin/pg_rewind/t/RewindTest.pm
index 8fd1f4b9de4..98b66b01f82 100644
--- a/src/bin/pg_rewind/t/RewindTest.pm
+++ b/src/bin/pg_rewind/t/RewindTest.pm
@@ -101,8 +101,8 @@ sub check_query
],
'>', \$stdout, '2>', \$stderr;
- is($result, 1, "$test_name: psql exit code");
- is($stderr, '', "$test_name: psql no stderr");
+ is($result, 1, "$test_name: psql exit code");
+ is($stderr, '', "$test_name: psql no stderr");
is($stdout, $expected_stdout, "$test_name: query result matches");
return;
@@ -115,7 +115,8 @@ sub setup_cluster
# Initialize primary, data checksums are mandatory
$node_primary =
- PostgreSQL::Test::Cluster->new('primary' . ($extra_name ? "_${extra_name}" : ''));
+ PostgreSQL::Test::Cluster->new(
+ 'primary' . ($extra_name ? "_${extra_name}" : ''));
# Set up pg_hba.conf and pg_ident.conf for the role running
# pg_rewind. This role is used for all the tests, and has
@@ -163,7 +164,8 @@ sub create_standby
my $extra_name = shift;
$node_standby =
- PostgreSQL::Test::Cluster->new('standby' . ($extra_name ? "_${extra_name}" : ''));
+ PostgreSQL::Test::Cluster->new(
+ 'standby' . ($extra_name ? "_${extra_name}" : ''));
$node_primary->backup('my_backup');
$node_standby->init_from_backup($node_primary, 'my_backup');
my $connstr_primary = $node_primary->connstr();
@@ -305,7 +307,8 @@ sub run_pg_rewind
# segments from the old primary to the archives. These
# will be used by pg_rewind.
rmtree($node_primary->archive_dir);
- PostgreSQL::Test::RecursiveCopy::copypath($node_primary->data_dir . "/pg_wal",
+ PostgreSQL::Test::RecursiveCopy::copypath(
+ $node_primary->data_dir . "/pg_wal",
$node_primary->archive_dir);
# Fast way to remove entire directory content
diff --git a/src/bin/pg_upgrade/t/002_pg_upgrade.pl b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
index 76b8dab4b73..8372a85e6ef 100644
--- a/src/bin/pg_upgrade/t/002_pg_upgrade.pl
+++ b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
@@ -51,7 +51,8 @@ if ( (defined($ENV{olddump}) && !defined($ENV{oldinstall}))
my $tempdir = PostgreSQL::Test::Utils::tempdir;
# Initialize node to upgrade
-my $oldnode = PostgreSQL::Test::Cluster->new('old_node',
+my $oldnode =
+ PostgreSQL::Test::Cluster->new('old_node',
install_path => $ENV{oldinstall});
# To increase coverage of non-standard segment size and group access without
@@ -132,7 +133,7 @@ if (defined($ENV{oldinstall}))
$oldnode->command_ok(
[
'psql', '-X',
- '-f', "$srcdir/src/bin/pg_upgrade/upgrade_adapt.sql",
+ '-f', "$srcdir/src/bin/pg_upgrade/upgrade_adapt.sql",
'regression'
]);
}
diff --git a/src/bin/pg_upgrade/util.c b/src/bin/pg_upgrade/util.c
index 414de063496..9edfe7c3605 100644
--- a/src/bin/pg_upgrade/util.c
+++ b/src/bin/pg_upgrade/util.c
@@ -143,6 +143,7 @@ pg_log_v(eLogType type, const char *fmt, va_list ap)
break;
case PG_STATUS:
+
/*
* For output to a display, do leading truncation. Append \r so
* that the next message is output at the start of the line.
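[Editor's note: the PG_STATUS branch above describes the display trick only in words. A self-contained sketch of leading truncation with a trailing \r, assuming a fixed display width; the status_line() helper is illustrative, not pg_upgrade's actual code.]

#include <stdio.h>
#include <string.h>

static void
status_line(const char *msg, int width)
{
	int			len = (int) strlen(msg);

	if (len > width)
		msg += len - width;		/* keep the tail; truncate the front */
	printf("%-*s\r", width, msg);	/* \r: next message overwrites this one */
	fflush(stdout);
}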
diff --git a/src/bin/pg_verifybackup/t/003_corruption.pl b/src/bin/pg_verifybackup/t/003_corruption.pl
index 843016ad80c..3dba7d8a698 100644
--- a/src/bin/pg_verifybackup/t/003_corruption.pl
+++ b/src/bin/pg_verifybackup/t/003_corruption.pl
@@ -16,7 +16,7 @@ $primary->start;
# Include a user-defined tablespace in the hopes of detecting problems in that
# area.
-my $source_ts_path =PostgreSQL::Test::Utils::tempdir_short();
+my $source_ts_path = PostgreSQL::Test::Utils::tempdir_short();
my $source_ts_prefix = $source_ts_path;
$source_ts_prefix =~ s!(^[A-Z]:/[^/]*)/.*!$1!;
diff --git a/src/bin/pg_verifybackup/t/004_options.pl b/src/bin/pg_verifybackup/t/004_options.pl
index 6fdd74e5eea..8cda66ca001 100644
--- a/src/bin/pg_verifybackup/t/004_options.pl
+++ b/src/bin/pg_verifybackup/t/004_options.pl
@@ -15,7 +15,8 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
$primary->init(allows_streaming => 1);
$primary->start;
my $backup_path = $primary->backup_dir . '/test_options';
-$primary->command_ok([ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
+$primary->command_ok(
+ [ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
"base backup ok");
# Verify that pg_verifybackup -q succeeds and produces no output.
diff --git a/src/bin/pg_verifybackup/t/005_bad_manifest.pl b/src/bin/pg_verifybackup/t/005_bad_manifest.pl
index 48fecfa3152..b9573c57426 100644
--- a/src/bin/pg_verifybackup/t/005_bad_manifest.pl
+++ b/src/bin/pg_verifybackup/t/005_bad_manifest.pl
@@ -12,10 +12,8 @@ use Test::More;
my $tempdir = PostgreSQL::Test::Utils::tempdir;
-test_bad_manifest(
- 'input string ended unexpectedly',
- qr/could not parse backup manifest: parsing failed/,
- <<EOM);
+test_bad_manifest('input string ended unexpectedly',
+ qr/could not parse backup manifest: parsing failed/, <<EOM);
{
EOM
diff --git a/src/bin/pg_verifybackup/t/007_wal.pl b/src/bin/pg_verifybackup/t/007_wal.pl
index bef2701ef75..6e9fafcd55a 100644
--- a/src/bin/pg_verifybackup/t/007_wal.pl
+++ b/src/bin/pg_verifybackup/t/007_wal.pl
@@ -15,7 +15,8 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
$primary->init(allows_streaming => 1);
$primary->start;
my $backup_path = $primary->backup_dir . '/test_wal';
-$primary->command_ok([ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
+$primary->command_ok(
+ [ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
"base backup ok");
# Rename pg_wal.
@@ -69,7 +70,8 @@ $primary->safe_psql('postgres', 'SELECT pg_switch_wal()');
my $backup_path2 = $primary->backup_dir . '/test_tli';
# The base backup run below does a checkpoint that removes the first segment
# of the current timeline.
-$primary->command_ok([ 'pg_basebackup', '-D', $backup_path2, '--no-sync', '-cfast' ],
+$primary->command_ok(
+ [ 'pg_basebackup', '-D', $backup_path2, '--no-sync', '-cfast' ],
"base backup 2 ok");
command_ok(
[ 'pg_verifybackup', $backup_path2 ],
diff --git a/src/bin/pg_verifybackup/t/008_untar.pl b/src/bin/pg_verifybackup/t/008_untar.pl
index 915249a19de..4c4959516dd 100644
--- a/src/bin/pg_verifybackup/t/008_untar.pl
+++ b/src/bin/pg_verifybackup/t/008_untar.pl
@@ -16,89 +16,90 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
$primary->init(allows_streaming => 1);
$primary->start;
-my $backup_path = $primary->backup_dir . '/server-backup';
+my $backup_path = $primary->backup_dir . '/server-backup';
my $extract_path = $primary->backup_dir . '/extracted-backup';
my @test_configuration = (
{
'compression_method' => 'none',
- 'backup_flags' => [],
- 'backup_archive' => 'base.tar',
- 'enabled' => 1
+ 'backup_flags' => [],
+ 'backup_archive' => 'base.tar',
+ 'enabled' => 1
},
{
'compression_method' => 'gzip',
- 'backup_flags' => ['--compress', 'server-gzip'],
- 'backup_archive' => 'base.tar.gz',
+ 'backup_flags' => [ '--compress', 'server-gzip' ],
+ 'backup_archive' => 'base.tar.gz',
'decompress_program' => $ENV{'GZIP_PROGRAM'},
- 'decompress_flags' => [ '-d' ],
- 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
},
{
'compression_method' => 'lz4',
- 'backup_flags' => ['--compress', 'server-lz4'],
- 'backup_archive' => 'base.tar.lz4',
+ 'backup_flags' => [ '--compress', 'server-lz4' ],
+ 'backup_archive' => 'base.tar.lz4',
'decompress_program' => $ENV{'LZ4'},
- 'decompress_flags' => [ '-d', '-m'],
- 'enabled' => check_pg_config("#define USE_LZ4 1")
+ 'decompress_flags' => [ '-d', '-m' ],
+ 'enabled' => check_pg_config("#define USE_LZ4 1")
},
{
'compression_method' => 'zstd',
- 'backup_flags' => ['--compress', 'server-zstd'],
- 'backup_archive' => 'base.tar.zst',
+ 'backup_flags' => [ '--compress', 'server-zstd' ],
+ 'backup_archive' => 'base.tar.zst',
'decompress_program' => $ENV{'ZSTD'},
- 'decompress_flags' => [ '-d' ],
- 'enabled' => check_pg_config("#define USE_ZSTD 1")
- }
-);
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1")
+ });
for my $tc (@test_configuration)
{
my $method = $tc->{'compression_method'};
- SKIP: {
+ SKIP:
+ {
skip "$method compression not supported by this build", 3
- if ! $tc->{'enabled'};
+ if !$tc->{'enabled'};
skip "no decompressor available for $method", 3
if exists $tc->{'decompress_program'}
&& (!defined $tc->{'decompress_program'}
- || $tc->{'decompress_program'} eq '');
+ || $tc->{'decompress_program'} eq '');
# Take a server-side backup.
my @backup = (
- 'pg_basebackup', '--no-sync', '-cfast', '--target',
- "server:$backup_path", '-Xfetch'
- );
- push @backup, @{$tc->{'backup_flags'}};
+ 'pg_basebackup', '--no-sync',
+ '-cfast', '--target',
+ "server:$backup_path", '-Xfetch');
+ push @backup, @{ $tc->{'backup_flags'} };
$primary->command_ok(\@backup,
- "server side backup, compression $method");
+ "server side backup, compression $method");
	# Verify that we got the files we expected.
my $backup_files = join(',',
sort grep { $_ ne '.' && $_ ne '..' } slurp_dir($backup_path));
- my $expected_backup_files = join(',',
- sort ('backup_manifest', $tc->{'backup_archive'}));
- is($backup_files,$expected_backup_files,
+ my $expected_backup_files =
+ join(',', sort ('backup_manifest', $tc->{'backup_archive'}));
+ is($backup_files, $expected_backup_files,
"found expected backup files, compression $method");
# Decompress.
if (exists $tc->{'decompress_program'})
{
my @decompress = ($tc->{'decompress_program'});
- push @decompress, @{$tc->{'decompress_flags'}}
- if $tc->{'decompress_flags'};
+ push @decompress, @{ $tc->{'decompress_flags'} }
+ if $tc->{'decompress_flags'};
push @decompress, $backup_path . '/' . $tc->{'backup_archive'};
system_or_bail(@decompress);
}
- SKIP: {
+ SKIP:
+ {
my $tar = $ENV{TAR};
# don't check for a working tar here, to accommodate various odd
		# cases such as AIX. If tar doesn't work, the init_from_backup below
# will fail.
skip "no tar program available", 1
- if (!defined $tar || $tar eq '');
+ if (!defined $tar || $tar eq '');
# Untar.
mkdir($extract_path);
@@ -106,8 +107,12 @@ for my $tc (@test_configuration)
'-C', $extract_path);
# Verify.
- $primary->command_ok([ 'pg_verifybackup', '-n',
- '-m', "$backup_path/backup_manifest", '-e', $extract_path ],
+ $primary->command_ok(
+ [
+ 'pg_verifybackup', '-n',
+ '-m', "$backup_path/backup_manifest",
+ '-e', $extract_path
+ ],
"verify backup, compression $method");
}
diff --git a/src/bin/pg_verifybackup/t/009_extract.pl b/src/bin/pg_verifybackup/t/009_extract.pl
index d6f11b95535..56889e1ece9 100644
--- a/src/bin/pg_verifybackup/t/009_extract.pl
+++ b/src/bin/pg_verifybackup/t/009_extract.pl
@@ -17,46 +17,47 @@ $primary->start;
my @test_configuration = (
{
'compression_method' => 'none',
- 'backup_flags' => [],
- 'enabled' => 1
+ 'backup_flags' => [],
+ 'enabled' => 1
},
{
'compression_method' => 'gzip',
- 'backup_flags' => ['--compress', 'server-gzip:5'],
- 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+ 'backup_flags' => [ '--compress', 'server-gzip:5' ],
+ 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
},
{
'compression_method' => 'lz4',
- 'backup_flags' => ['--compress', 'server-lz4:5'],
- 'enabled' => check_pg_config("#define USE_LZ4 1")
+ 'backup_flags' => [ '--compress', 'server-lz4:5' ],
+ 'enabled' => check_pg_config("#define USE_LZ4 1")
},
{
'compression_method' => 'zstd',
- 'backup_flags' => ['--compress', 'server-zstd:5'],
- 'enabled' => check_pg_config("#define USE_ZSTD 1")
+ 'backup_flags' => [ '--compress', 'server-zstd:5' ],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1")
},
{
'compression_method' => 'parallel zstd',
- 'backup_flags' => ['--compress', 'server-zstd:workers=3'],
- 'enabled' => check_pg_config("#define USE_ZSTD 1"),
- 'possibly_unsupported' => qr/could not set compression worker count to 3: Unsupported parameter/
- }
-);
+ 'backup_flags' => [ '--compress', 'server-zstd:workers=3' ],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1"),
+ 'possibly_unsupported' =>
+ qr/could not set compression worker count to 3: Unsupported parameter/
+ });
for my $tc (@test_configuration)
{
my $backup_path = $primary->backup_dir . '/' . 'extract_backup';
- my $method = $tc->{'compression_method'};
+ my $method = $tc->{'compression_method'};
- SKIP: {
+ SKIP:
+ {
skip "$method compression not supported by this build", 2
- if ! $tc->{'enabled'};
+ if !$tc->{'enabled'};
# Take backup with server compression enabled.
- my @backup = (
+ my @backup = (
'pg_basebackup', '-D', $backup_path,
'-Xfetch', '--no-sync', '-cfast', '-Fp');
- push @backup, @{$tc->{'backup_flags'}};
+ push @backup, @{ $tc->{'backup_flags'} };
my @verify = ('pg_verifybackup', '-e', $backup_path);
@@ -64,7 +65,7 @@ for my $tc (@test_configuration)
my $backup_stdout = '';
my $backup_stderr = '';
my $backup_result = $primary->run_log(\@backup, '>', \$backup_stdout,
- '2>', \$backup_stderr);
+ '2>', \$backup_stderr);
if ($backup_stdout ne '')
{
print "# standard output was:\n$backup_stdout";
@@ -73,8 +74,9 @@ for my $tc (@test_configuration)
{
print "# standard error was:\n$backup_stderr";
}
- if (! $backup_result && $tc->{'possibly_unsupported'} &&
- $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
+ if ( !$backup_result
+ && $tc->{'possibly_unsupported'}
+ && $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
{
skip "compression with $method not supported by this build", 2;
}
@@ -85,7 +87,7 @@ for my $tc (@test_configuration)
# Make sure that it verifies OK.
$primary->command_ok(\@verify,
- "backup verified, compression method \"$method\"");
+ "backup verified, compression method \"$method\"");
}
# Remove backup immediately to save disk space.
diff --git a/src/bin/pg_verifybackup/t/010_client_untar.pl b/src/bin/pg_verifybackup/t/010_client_untar.pl
index c1cd12cb065..77cb503784c 100644
--- a/src/bin/pg_verifybackup/t/010_client_untar.pl
+++ b/src/bin/pg_verifybackup/t/010_client_untar.pl
@@ -15,73 +15,74 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
$primary->init(allows_streaming => 1);
$primary->start;
-my $backup_path = $primary->backup_dir . '/client-backup';
+my $backup_path = $primary->backup_dir . '/client-backup';
my $extract_path = $primary->backup_dir . '/extracted-backup';
my @test_configuration = (
{
'compression_method' => 'none',
- 'backup_flags' => [],
- 'backup_archive' => 'base.tar',
- 'enabled' => 1
+ 'backup_flags' => [],
+ 'backup_archive' => 'base.tar',
+ 'enabled' => 1
},
{
'compression_method' => 'gzip',
- 'backup_flags' => ['--compress', 'client-gzip:5'],
- 'backup_archive' => 'base.tar.gz',
+ 'backup_flags' => [ '--compress', 'client-gzip:5' ],
+ 'backup_archive' => 'base.tar.gz',
'decompress_program' => $ENV{'GZIP_PROGRAM'},
- 'decompress_flags' => [ '-d' ],
- 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
},
{
'compression_method' => 'lz4',
- 'backup_flags' => ['--compress', 'client-lz4:5'],
- 'backup_archive' => 'base.tar.lz4',
+ 'backup_flags' => [ '--compress', 'client-lz4:5' ],
+ 'backup_archive' => 'base.tar.lz4',
'decompress_program' => $ENV{'LZ4'},
- 'decompress_flags' => [ '-d' ],
- 'output_file' => 'base.tar',
- 'enabled' => check_pg_config("#define USE_LZ4 1")
+ 'decompress_flags' => ['-d'],
+ 'output_file' => 'base.tar',
+ 'enabled' => check_pg_config("#define USE_LZ4 1")
},
{
'compression_method' => 'zstd',
- 'backup_flags' => ['--compress', 'client-zstd:5'],
- 'backup_archive' => 'base.tar.zst',
+ 'backup_flags' => [ '--compress', 'client-zstd:5' ],
+ 'backup_archive' => 'base.tar.zst',
'decompress_program' => $ENV{'ZSTD'},
- 'decompress_flags' => [ '-d' ],
- 'enabled' => check_pg_config("#define USE_ZSTD 1")
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1")
},
{
'compression_method' => 'parallel zstd',
- 'backup_flags' => ['--compress', 'client-zstd:workers=3'],
- 'backup_archive' => 'base.tar.zst',
+ 'backup_flags' => [ '--compress', 'client-zstd:workers=3' ],
+ 'backup_archive' => 'base.tar.zst',
'decompress_program' => $ENV{'ZSTD'},
- 'decompress_flags' => [ '-d' ],
- 'enabled' => check_pg_config("#define USE_ZSTD 1"),
- 'possibly_unsupported' => qr/could not set compression worker count to 3: Unsupported parameter/
- }
-);
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1"),
+ 'possibly_unsupported' =>
+ qr/could not set compression worker count to 3: Unsupported parameter/
+ });
for my $tc (@test_configuration)
{
my $method = $tc->{'compression_method'};
- SKIP: {
+ SKIP:
+ {
skip "$method compression not supported by this build", 3
- if ! $tc->{'enabled'};
+ if !$tc->{'enabled'};
skip "no decompressor available for $method", 3
if exists $tc->{'decompress_program'}
&& (!defined $tc->{'decompress_program'}
- || $tc->{'decompress_program'} eq '');
+ || $tc->{'decompress_program'} eq '');
# Take a client-side backup.
- my @backup = (
+ my @backup = (
'pg_basebackup', '-D', $backup_path,
'-Xfetch', '--no-sync', '-cfast', '-Ft');
- push @backup, @{$tc->{'backup_flags'}};
+ push @backup, @{ $tc->{'backup_flags'} };
my $backup_stdout = '';
my $backup_stderr = '';
my $backup_result = $primary->run_log(\@backup, '>', \$backup_stdout,
- '2>', \$backup_stderr);
+ '2>', \$backup_stderr);
if ($backup_stdout ne '')
{
print "# standard output was:\n$backup_stdout";
@@ -90,8 +91,9 @@ for my $tc (@test_configuration)
{
print "# standard error was:\n$backup_stderr";
}
- if (! $backup_result && $tc->{'possibly_unsupported'} &&
- $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
+ if ( !$backup_result
+ && $tc->{'possibly_unsupported'}
+ && $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
{
skip "compression with $method not supported by this build", 3;
}
@@ -103,30 +105,31 @@ for my $tc (@test_configuration)
	# Verify that we got the files we expected.
my $backup_files = join(',',
sort grep { $_ ne '.' && $_ ne '..' } slurp_dir($backup_path));
- my $expected_backup_files = join(',',
- sort ('backup_manifest', $tc->{'backup_archive'}));
- is($backup_files,$expected_backup_files,
+ my $expected_backup_files =
+ join(',', sort ('backup_manifest', $tc->{'backup_archive'}));
+ is($backup_files, $expected_backup_files,
"found expected backup files, compression $method");
# Decompress.
if (exists $tc->{'decompress_program'})
{
my @decompress = ($tc->{'decompress_program'});
- push @decompress, @{$tc->{'decompress_flags'}}
- if $tc->{'decompress_flags'};
+ push @decompress, @{ $tc->{'decompress_flags'} }
+ if $tc->{'decompress_flags'};
push @decompress, $backup_path . '/' . $tc->{'backup_archive'};
push @decompress, $backup_path . '/' . $tc->{'output_file'}
- if $tc->{'output_file'};
+ if $tc->{'output_file'};
system_or_bail(@decompress);
}
- SKIP: {
+ SKIP:
+ {
my $tar = $ENV{TAR};
# don't check for a working tar here, to accommodate various odd
		# cases such as AIX. If tar doesn't work, the init_from_backup below
# will fail.
skip "no tar program available", 1
- if (!defined $tar || $tar eq '');
+ if (!defined $tar || $tar eq '');
# Untar.
mkdir($extract_path);
@@ -134,8 +137,12 @@ for my $tc (@test_configuration)
'-C', $extract_path);
# Verify.
- $primary->command_ok([ 'pg_verifybackup', '-n',
- '-m', "$backup_path/backup_manifest", '-e', $extract_path ],
+ $primary->command_ok(
+ [
+ 'pg_verifybackup', '-n',
+ '-m', "$backup_path/backup_manifest",
+ '-e', $extract_path
+ ],
"verify backup, compression $method");
}
diff --git a/src/bin/pg_waldump/pg_waldump.c b/src/bin/pg_waldump/pg_waldump.c
index 4f265ef5460..3151cb5562b 100644
--- a/src/bin/pg_waldump/pg_waldump.c
+++ b/src/bin/pg_waldump/pg_waldump.c
@@ -695,7 +695,7 @@ main(int argc, char **argv)
XLogReaderState *xlogreader_state;
XLogDumpPrivate private;
XLogDumpConfig config;
- XLogStats stats;
+ XLogStats stats;
XLogRecord *record;
XLogRecPtr first_record;
char *waldir = NULL;
diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index 02f250f5119..79c0cd374d3 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -277,9 +277,9 @@ bool progress_timestamp = false; /* progress report with Unix time */
int nclients = 1; /* number of clients */
int nthreads = 1; /* number of threads */
bool is_connect; /* establish connection for each transaction */
-bool report_per_command = false; /* report per-command latencies, retries
- * after errors and failures (errors
- * without retrying) */
+bool report_per_command = false; /* report per-command latencies,
+ * retries after errors and failures
+ * (errors without retrying) */
int main_pid; /* main process id used in log filename */
/*
@@ -302,8 +302,8 @@ int main_pid; /* main process id used in log filename */
*/
uint32 max_tries = 1;
-bool failures_detailed = false; /* whether to group failures in reports
- * or logs by basic types */
+bool failures_detailed = false; /* whether to group failures in
+ * reports or logs by basic types */
const char *pghost = NULL;
const char *pgport = NULL;
@@ -349,8 +349,8 @@ typedef struct
/*
* The maximum number of variables that we can currently store in 'vars'
- * without having to reallocate more space. We must always have max_vars >=
- * nvars.
+ * without having to reallocate more space. We must always have max_vars
+ * >= nvars.
*/
int max_vars;
@@ -390,17 +390,17 @@ typedef struct StatsData
{
pg_time_usec_t start_time; /* interval start time, for aggregates */
- /*
- * Transactions are counted depending on their execution and outcome. First
- * a transaction may have started or not: skipped transactions occur under
- * --rate and --latency-limit when the client is too late to execute them.
- * Secondly, a started transaction may ultimately succeed or fail, possibly
- * after some retries when --max-tries is not one. Thus
+ /*----------
+ * Transactions are counted depending on their execution and outcome.
+ * First a transaction may have started or not: skipped transactions occur
+ * under --rate and --latency-limit when the client is too late to execute
+ * them. Secondly, a started transaction may ultimately succeed or fail,
+ * possibly after some retries when --max-tries is not one. Thus
*
* the number of all transactions =
* 'skipped' (it was too late to execute them) +
* 'cnt' (the number of successful transactions) +
- * failed (the number of failed transactions).
+ * 'failed' (the number of failed transactions).
*
* A successful transaction can have several unsuccessful tries before a
* successful run. Thus
@@ -419,11 +419,11 @@ typedef struct StatsData
* failed (the number of failed transactions) =
* 'serialization_failures' (they got a serialization error and were not
* successfully retried) +
- * 'deadlock_failures' (they got a deadlock error and were not successfully
- * retried).
+ * 'deadlock_failures' (they got a deadlock error and were not
+ * successfully retried).
*
- * If the transaction was retried after a serialization or a deadlock error
- * this does not guarantee that this retry was successful. Thus
+ * If the transaction was retried after a serialization or a deadlock
+ * error this does not guarantee that this retry was successful. Thus
*
* 'retries' (number of retries) =
* number of retries in all retried transactions =
@@ -433,18 +433,20 @@ typedef struct StatsData
* 'retried' (number of all retried transactions) =
* successfully retried transactions +
* failed transactions.
+ *----------
*/
int64 cnt; /* number of successful transactions, not
* including 'skipped' */
int64 skipped; /* number of transactions skipped under --rate
* and --latency-limit */
- int64 retries; /* number of retries after a serialization or a
- * deadlock error in all the transactions */
- int64 retried; /* number of all transactions that were retried
- * after a serialization or a deadlock error
- * (perhaps the last try was unsuccessful) */
- int64 serialization_failures; /* number of transactions that were not
- * successfully retried after a
+ int64 retries; /* number of retries after a serialization or
+ * a deadlock error in all the transactions */
+ int64 retried; /* number of all transactions that were
+ * retried after a serialization or a deadlock
+ * error (perhaps the last try was
+ * unsuccessful) */
+ int64 serialization_failures; /* number of transactions that were
+ * not successfully retried after a
* serialization error */
int64 deadlock_failures; /* number of transactions that were not
* successfully retried after a deadlock
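[Editor's note: as a worked restatement of the accounting comment above, the identities reduce to two helpers. A minimal sketch, assuming only the StatsData fields shown in this hunk; both function names are hypothetical, though pgbench's own getFailures(), seen further down in this diff, computes the same failed term.]

static int64
failed_transactions(const StatsData *s)
{
	/* the two terminal failure kinds, per the comment above */
	return s->serialization_failures + s->deadlock_failures;
}

static int64
total_transactions(const StatsData *s)
{
	/* each transaction is counted exactly once: skipped, ok, or failed */
	return s->skipped + s->cnt + failed_transactions(s);
}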
@@ -559,16 +561,15 @@ typedef enum
* States for failed commands.
*
* If the SQL/meta command fails, in CSTATE_ERROR clean up after an error:
- * - clear the conditional stack;
- * - if we have an unterminated (possibly failed) transaction block, send
- * the rollback command to the server and wait for the result in
- * CSTATE_WAIT_ROLLBACK_RESULT. If something goes wrong with rolling back,
- * go to CSTATE_ABORTED.
+ * (1) clear the conditional stack; (2) if we have an unterminated
+ * (possibly failed) transaction block, send the rollback command to the
+ * server and wait for the result in CSTATE_WAIT_ROLLBACK_RESULT. If
+ * something goes wrong with rolling back, go to CSTATE_ABORTED.
*
- * But if everything is ok we are ready for future transactions: if this is
- * a serialization or deadlock error and we can re-execute the transaction
- * from the very beginning, go to CSTATE_RETRY; otherwise go to
- * CSTATE_FAILURE.
+ * But if everything is ok we are ready for future transactions: if this
+ * is a serialization or deadlock error and we can re-execute the
+ * transaction from the very beginning, go to CSTATE_RETRY; otherwise go
+ * to CSTATE_FAILURE.
*
* In CSTATE_RETRY report an error, set the same parameters for the
* transaction execution as in the previous tries and process the first
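[Editor's note: a condensed, self-contained sketch of the recovery flow the comment above describes. The state names mirror pgbench's, but the handle_error() helper is illustrative only; the real transitions live in advanceConnectionState().]

#include <stdbool.h>

typedef enum
{
	CSTATE_RETRY, CSTATE_FAILURE, CSTATE_ABORTED, CSTATE_WAIT_ROLLBACK_RESULT
} RecoveryState;

static RecoveryState
handle_error(bool in_tx_block, bool can_retry)
{
	/* (1) the conditional stack is cleared first (not shown) */
	if (in_tx_block)
		return CSTATE_WAIT_ROLLBACK_RESULT; /* (2) send ROLLBACK and wait */
	return can_retry ? CSTATE_RETRY : CSTATE_FAILURE;
}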
@@ -622,7 +623,7 @@ typedef struct
int command; /* command number in script */
/* client variables */
- Variables variables;
+ Variables variables;
/* various times about current transaction in microseconds */
pg_time_usec_t txn_scheduled; /* scheduled start time of transaction */
@@ -633,19 +634,20 @@ typedef struct
bool prepared[MAX_SCRIPTS]; /* whether client prepared the script */
/*
- * For processing failures and repeating transactions with serialization or
- * deadlock errors:
+ * For processing failures and repeating transactions with serialization
+ * or deadlock errors:
*/
- EStatus estatus; /* the error status of the current transaction
- * execution; this is ESTATUS_NO_ERROR if there were
- * no errors */
- pg_prng_state random_state; /* random state */
- uint32 tries; /* how many times have we already tried the
+ EStatus estatus; /* the error status of the current transaction
+ * execution; this is ESTATUS_NO_ERROR if
+ * there were no errors */
+ pg_prng_state random_state; /* random state */
+ uint32 tries; /* how many times have we already tried the
* current transaction? */
/* per client collected stats */
- int64 cnt; /* client transaction count, for -t; skipped and
- * failed transactions are also counted here */
+ int64 cnt; /* client transaction count, for -t; skipped
+ * and failed transactions are also counted
+ * here */
} CState;
/*
@@ -771,7 +773,7 @@ static ParsedScript sql_script[MAX_SCRIPTS]; /* SQL script files */
static int num_scripts; /* number of scripts in sql_script[] */
static int64 total_weight = 0;
-static bool verbose_errors = false; /* print verbose messages of all errors */
+static bool verbose_errors = false; /* print verbose messages of all errors */
/* Builtin test scripts */
typedef struct BuiltinScript
@@ -3050,7 +3052,7 @@ commandError(CState *st, const char *message)
{
Assert(sql_script[st->use_file].commands[st->command]->type == SQL_COMMAND);
pg_log_info("client %d got an error in command %d (SQL) of script %d; %s",
- st->id, st->command, st->use_file, message);
+ st->id, st->command, st->use_file, message);
}
/* return a script number with a weighted choice. */
@@ -3289,8 +3291,8 @@ readCommandResponse(CState *st, MetaCommand meta, char *varprefix)
case PGRES_NONFATAL_ERROR:
case PGRES_FATAL_ERROR:
- st->estatus = getSQLErrorStatus(
- PQresultErrorField(res, PG_DIAG_SQLSTATE));
+ st->estatus = getSQLErrorStatus(PQresultErrorField(res,
+ PG_DIAG_SQLSTATE));
if (canRetryError(st->estatus))
{
if (verbose_errors)
@@ -3397,13 +3399,15 @@ doRetry(CState *st, pg_time_usec_t *now)
Assert(max_tries || latency_limit || duration > 0);
/*
- * We cannot retry the error if we have reached the maximum number of tries.
+ * We cannot retry the error if we have reached the maximum number of
+ * tries.
*/
if (max_tries && st->tries >= max_tries)
return false;
/*
- * We cannot retry the error if we spent too much time on this transaction.
+ * We cannot retry the error if we spent too much time on this
+ * transaction.
*/
if (latency_limit)
{
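[Editor's note: taken together, the two conditions above form a simple gate. A sketch under the assumption that the caller has already computed whether the latency limit was exceeded; the can_retry_error() name is hypothetical.]

#include <stdbool.h>
#include <stdint.h>

static bool
can_retry_error(uint32_t tries, uint32_t max_tries, bool over_latency_limit)
{
	if (max_tries && tries >= max_tries)
		return false;			/* configured number of tries exhausted */
	if (over_latency_limit)
		return false;			/* transaction already too slow to retry */
	return true;
}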
@@ -3432,14 +3436,15 @@ discardUntilSync(CState *st)
if (!PQpipelineSync(st->con))
{
pg_log_error("client %d aborted: failed to send a pipeline sync",
- st->id);
+ st->id);
return 0;
}
/* receive PGRES_PIPELINE_SYNC and null following it */
- for(;;)
+ for (;;)
{
- PGresult *res = PQgetResult(st->con);
+ PGresult *res = PQgetResult(st->con);
+
if (PQresultStatus(res) == PGRES_PIPELINE_SYNC)
{
PQclear(res);
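[Editor's note: the loop above drains results until the sync marker. A rough sketch of that libpq pattern, assuming an open PGconn *con in pipeline mode with a sync already sent; illustrative only, since pgbench's real loop also counts discarded results and handles error states.]

for (;;)
{
	PGresult   *res = PQgetResult(con);

	if (res != NULL && PQresultStatus(res) == PGRES_PIPELINE_SYNC)
	{
		PQclear(res);
		(void) PQgetResult(con);	/* consume the NULL after the sync */
		break;
	}
	PQclear(res);				/* discard everything before the sync */
}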
@@ -3484,9 +3489,10 @@ getTransactionStatus(PGconn *con)
/* fall through */
case PQTRANS_ACTIVE:
default:
+
/*
- * We cannot find out whether we are in a transaction block or not.
- * Internal error which should never occur.
+ * We cannot find out whether we are in a transaction block or
+ * not. Internal error which should never occur.
*/
pg_log_error("unexpected transaction status %d", tx_status);
return TSTATUS_OTHER_ERROR;
@@ -3513,8 +3519,8 @@ printVerboseErrorMessages(CState *st, pg_time_usec_t *now, bool is_retry)
printfPQExpBuffer(buf, "client %d ", st->id);
appendPQExpBuffer(buf, "%s",
(is_retry ?
- "repeats the transaction after the error" :
- "ends the failed transaction"));
+ "repeats the transaction after the error" :
+ "ends the failed transaction"));
appendPQExpBuffer(buf, " (try %u", st->tries);
	/* Print max_tries if it is not unlimited. */
@@ -3522,8 +3528,8 @@ printVerboseErrorMessages(CState *st, pg_time_usec_t *now, bool is_retry)
appendPQExpBuffer(buf, "/%u", max_tries);
/*
- * If the latency limit is used, print a percentage of the current transaction
- * latency from the latency limit.
+	 * If the latency limit is used, print the current transaction latency
+	 * as a percentage of the latency limit.
*/
if (latency_limit)
{
@@ -3619,8 +3625,8 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
/*
* It is the first try to run this transaction. Remember the
- * random state: maybe it will get an error and we will need to
- * run it again.
+ * random state: maybe it will get an error and we will need
+ * to run it again.
*/
st->random_state = st->cs_func_rs;
@@ -3998,8 +4004,8 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
}
/*
- * Check if we have a (failed) transaction block or not, and
- * roll it back if any.
+ * Check if we have a (failed) transaction block or not,
+ * and roll it back if any.
*/
tstatus = getTransactionStatus(st->con);
if (tstatus == TSTATUS_IN_BLOCK)
@@ -4017,9 +4023,9 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
else if (tstatus == TSTATUS_IDLE)
{
/*
- * If time is over, we're done;
- * otherwise, check if we can retry the error.
- */
+ * If time is over, we're done; otherwise, check if we
+ * can retry the error.
+ */
st->state = timer_exceeded ? CSTATE_FINISHED :
doRetry(st, &now) ? CSTATE_RETRY : CSTATE_FAILURE;
}
@@ -4039,7 +4045,7 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
*/
case CSTATE_WAIT_ROLLBACK_RESULT:
{
- PGresult *res;
+ PGresult *res;
pg_log_debug("client %d receiving", st->id);
if (!PQconsumeInput(st->con))
@@ -4050,7 +4056,7 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
break;
}
if (PQisBusy(st->con))
- return; /* don't have the whole result yet */
+ return; /* don't have the whole result yet */
/*
* Read and discard the query result;
@@ -4066,8 +4072,8 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
Assert(res == NULL);
/*
- * If time is over, we're done;
- * otherwise, check if we can retry the error.
+ * If time is over, we're done; otherwise, check
+ * if we can retry the error.
*/
st->state = timer_exceeded ? CSTATE_FINISHED :
doRetry(st, &now) ? CSTATE_RETRY : CSTATE_FAILURE;
@@ -4089,7 +4095,8 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
command = sql_script[st->use_file].commands[st->command];
/*
- * Inform that the transaction will be retried after the error.
+ * Inform that the transaction will be retried after the
+ * error.
*/
if (verbose_errors)
printVerboseErrorMessages(st, &now, true);
@@ -4099,8 +4106,8 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
command->retries++;
/*
- * Reset the random state as they were at the beginning
- * of the transaction.
+					 * Reset the random state as it was at the beginning of the
+ * transaction.
*/
st->cs_func_rs = st->random_state;
@@ -4188,8 +4195,9 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
st->state = CSTATE_CHOOSE_SCRIPT;
/*
- * Ensure that we always return on this point, so as to avoid
- * an infinite loop if the script only contains meta commands.
+					 * Ensure that we always return at this point, so as to
+ * avoid an infinite loop if the script only contains meta
+ * commands.
*/
return;
}
@@ -4518,10 +4526,10 @@ doLog(TState *thread, CState *st,
lag_max = agg->lag.max;
}
fprintf(logfile, " %.0f %.0f %.0f %.0f",
- lag_sum,
- lag_sum2,
- lag_min,
- lag_max);
+ lag_sum,
+ lag_sum2,
+ lag_min,
+ lag_max);
if (latency_limit)
skipped = agg->skipped;
@@ -4588,7 +4596,7 @@ processXactStats(TState *thread, CState *st, pg_time_usec_t *now,
double latency = 0.0,
lag = 0.0;
bool detailed = progress || throttle_delay || latency_limit ||
- use_log || per_script_stats;
+ use_log || per_script_stats;
if (detailed && !skipped && st->estatus == ESTATUS_NO_ERROR)
{
@@ -4838,7 +4846,7 @@ initGenerateDataClientSide(PGconn *con)
PGresult *res;
int i;
int64 k;
- char *copy_statement;
+ char *copy_statement;
/* used to track elapsed time and estimate of the remaining time */
pg_time_usec_t start;
@@ -6365,7 +6373,7 @@ printResults(StatsData *total,
StatsData *sstats = &sql_script[i].stats;
int64 script_failures = getFailures(sstats);
int64 script_total_cnt =
- sstats->cnt + sstats->skipped + script_failures;
+ sstats->cnt + sstats->skipped + script_failures;
printf("SQL script %d: %s\n"
" - weight: %d (targets %.1f%% of total)\n"
diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl
index ca71f968dc4..2c0dc369652 100644
--- a/src/bin/pgbench/t/001_pgbench_with_server.pl
+++ b/src/bin/pgbench/t/001_pgbench_with_server.pl
@@ -1202,17 +1202,21 @@ check_pgbench_logs($bdir, '001_pgbench_log_3', 1, 10, 10,
# abort of the client if the script contains an incomplete transaction block
$node->pgbench(
- '--no-vacuum', 2, [ qr{processed: 1/10} ],
- [ qr{client 0 aborted: end of script reached without completing the last transaction} ],
+ '--no-vacuum',
+ 2,
+ [qr{processed: 1/10}],
+ [
+ qr{client 0 aborted: end of script reached without completing the last transaction}
+ ],
'incomplete transaction block',
{ '001_pgbench_incomplete_transaction_block' => q{BEGIN;SELECT 1;} });
# Test the concurrent update in the table row and deadlocks.
$node->safe_psql('postgres',
- 'CREATE UNLOGGED TABLE first_client_table (value integer); '
- . 'CREATE UNLOGGED TABLE xy (x integer, y integer); '
- . 'INSERT INTO xy VALUES (1, 2);');
+ 'CREATE UNLOGGED TABLE first_client_table (value integer); '
+ . 'CREATE UNLOGGED TABLE xy (x integer, y integer); '
+ . 'INSERT INTO xy VALUES (1, 2);');
# Serialization error and retry
@@ -1221,7 +1225,7 @@ local $ENV{PGOPTIONS} = "-c default_transaction_isolation=repeatable\\ read";
# Check that we have a serialization error and the same random value of the
# delta variable in the next try
my $err_pattern =
- "(client (0|1) sending UPDATE xy SET y = y \\+ -?\\d+\\b).*"
+ "(client (0|1) sending UPDATE xy SET y = y \\+ -?\\d+\\b).*"
. "client \\2 got an error in command 3 \\(SQL\\) of script 0; "
. "ERROR: could not serialize access due to concurrent update\\b.*"
. "\\1";
@@ -1229,9 +1233,12 @@ my $err_pattern =
$node->pgbench(
"-n -c 2 -t 1 -d --verbose-errors --max-tries 2",
0,
- [ qr{processed: 2/2\b}, qr{number of transactions retried: 1\b},
- qr{total number of retries: 1\b} ],
- [ qr/$err_pattern/s ],
+ [
+ qr{processed: 2/2\b},
+ qr{number of transactions retried: 1\b},
+ qr{total number of retries: 1\b}
+ ],
+ [qr/$err_pattern/s],
'concurrent update with retrying',
{
'001_pgbench_serialization' => q{
@@ -1304,15 +1311,18 @@ local $ENV{PGOPTIONS} = "-c default_transaction_isolation=read\\ committed";
# Check that we have a deadlock error
$err_pattern =
- "client (0|1) got an error in command (3|5) \\(SQL\\) of script 0; "
+ "client (0|1) got an error in command (3|5) \\(SQL\\) of script 0; "
. "ERROR: deadlock detected\\b";
$node->pgbench(
"-n -c 2 -t 1 --max-tries 2 --verbose-errors",
0,
- [ qr{processed: 2/2\b}, qr{number of transactions retried: 1\b},
- qr{total number of retries: 1\b} ],
- [ qr{$err_pattern} ],
+ [
+ qr{processed: 2/2\b},
+ qr{number of transactions retried: 1\b},
+ qr{total number of retries: 1\b}
+ ],
+ [qr{$err_pattern}],
'deadlock with retrying',
{
'001_pgbench_deadlock' => q{
diff --git a/src/bin/pgbench/t/002_pgbench_no_server.pl b/src/bin/pgbench/t/002_pgbench_no_server.pl
index a5074c70d9d..50bde7dd0fc 100644
--- a/src/bin/pgbench/t/002_pgbench_no_server.pl
+++ b/src/bin/pgbench/t/002_pgbench_no_server.pl
@@ -37,7 +37,7 @@ sub pgbench_scripts
local $Test::Builder::Level = $Test::Builder::Level + 1;
my ($opts, $stat, $out, $err, $name, $files) = @_;
- my @cmd = ('pgbench', split /\s+/, $opts);
+ my @cmd = ('pgbench', split /\s+/, $opts);
my @filenames = ();
if (defined $files)
{
@@ -196,7 +196,9 @@ my @options = (
[
'an infinite number of tries',
'--max-tries 0',
- [qr{an unlimited number of transaction tries can only be used with --latency-limit or a duration}]
+ [
+ qr{an unlimited number of transaction tries can only be used with --latency-limit or a duration}
+ ]
],
# logging sub-options
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index feb1d547d4d..9b140badeb9 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -32,8 +32,8 @@
static bool DescribeQuery(const char *query, double *elapsed_msec);
static bool ExecQueryUsingCursor(const char *query, double *elapsed_msec);
-static int ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_gone_p,
- bool is_watch, const printQueryOpt *opt, FILE *printQueryFout);
+static int ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_gone_p,
+ bool is_watch, const printQueryOpt *opt, FILE *printQueryFout);
static bool command_no_begin(const char *query);
static bool is_select_command(const char *query);
@@ -482,7 +482,7 @@ ClearOrSaveResult(PGresult *result)
static void
ClearOrSaveAllResults(void)
{
- PGresult *result;
+ PGresult *result;
while ((result = PQgetResult(pset.db)) != NULL)
ClearOrSaveResult(result);
@@ -697,7 +697,8 @@ PrintQueryTuples(const PGresult *result, const printQueryOpt *opt, FILE *printQu
}
else
{
- FILE *fout = printQueryFout ? printQueryFout : pset.queryFout;
+ FILE *fout = printQueryFout ? printQueryFout : pset.queryFout;
+
printQuery(result, opt ? opt : &pset.popt, fout, false, pset.logfile);
if (ferror(fout))
{
@@ -907,9 +908,9 @@ HandleCopyResult(PGresult **resultp)
&& (copystream != NULL);
/*
- * Suppress status printing if the report would go to the same
- * place as the COPY data just went. Note this doesn't
- * prevent error reporting, since handleCopyOut did that.
+ * Suppress status printing if the report would go to the same place
+ * as the COPY data just went. Note this doesn't prevent error
+ * reporting, since handleCopyOut did that.
*/
if (copystream == pset.queryFout)
{
@@ -943,8 +944,8 @@ HandleCopyResult(PGresult **resultp)
ResetCancelConn();
/*
- * Replace the PGRES_COPY_OUT/IN result with COPY command's exit
- * status, or with NULL if we want to suppress printing anything.
+ * Replace the PGRES_COPY_OUT/IN result with COPY command's exit status,
+ * or with NULL if we want to suppress printing anything.
*/
PQclear(*resultp);
*resultp = copy_result;
@@ -1069,7 +1070,7 @@ PrintQueryResult(PGresult *result, bool last, bool is_watch, const printQueryOpt
*/
struct t_notice_messages
{
- PQExpBufferData messages[2];
+ PQExpBufferData messages[2];
int current;
};
@@ -1080,6 +1081,7 @@ static void
AppendNoticeMessage(void *arg, const char *msg)
{
struct t_notice_messages *notices = arg;
+
appendPQExpBufferStr(&notices->messages[notices->current], msg);
}
@@ -1089,7 +1091,8 @@ AppendNoticeMessage(void *arg, const char *msg)
static void
ShowNoticeMessage(struct t_notice_messages *notices)
{
- PQExpBufferData *current = &notices->messages[notices->current];
+ PQExpBufferData *current = &notices->messages[notices->current];
+
if (*current->data != '\0')
pg_log_info("%s", current->data);
resetPQExpBuffer(current);
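[Editor's note: the two-slot buffer above lets psql accumulate notices for one result while the previous batch is printed. A self-contained sketch of that double-buffering idea, assuming a zero-initialized struct; illustrative only, since psql's real flow flips 'current' at specific points during result processing.]

#include <stdio.h>
#include <string.h>

struct notice_buf
{
	char		messages[2][1024];	/* must start zero-initialized */
	int			current;
};

static void
append_notice(struct notice_buf *nb, const char *msg)
{
	strncat(nb->messages[nb->current], msg,
			sizeof(nb->messages[0]) - strlen(nb->messages[nb->current]) - 1);
}

static void
show_notices(struct notice_buf *nb)
{
	char	   *cur = nb->messages[nb->current];

	if (*cur != '\0')
		fputs(cur, stderr);
	*cur = '\0';				/* reset the slot just shown */
	nb->current ^= 1;			/* accumulate into the other slot next */
}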
@@ -1234,6 +1237,7 @@ SendQuery(const char *query)
break;
case PQTRANS_INTRANS:
+
/*
* Release our savepoint, but do nothing if they are messing
* with savepoints themselves
@@ -1472,7 +1476,7 @@ DescribeQuery(const char *query, double *elapsed_msec)
*/
static int
ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_gone_p,
- bool is_watch, const printQueryOpt *opt, FILE *printQueryFout)
+ bool is_watch, const printQueryOpt *opt, FILE *printQueryFout)
{
bool timing = pset.timing;
bool success;
@@ -1527,8 +1531,8 @@ ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_g
if (!AcceptResult(result, false))
{
/*
- * Some error occured, either a server-side failure or
- * a failure to submit the command string. Record that.
+		 * Some error occurred, either a server-side failure or a failure
+ * to submit the command string. Record that.
*/
const char *error = PQresultErrorMessage(result);
@@ -1551,10 +1555,12 @@ ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_g
if (result_status == PGRES_COPY_BOTH ||
result_status == PGRES_COPY_OUT ||
result_status == PGRES_COPY_IN)
+
/*
- * For some obscure reason PQgetResult does *not* return a NULL in copy
- * cases despite the result having been cleared, but keeps returning an
- * "empty" result that we have to ignore manually.
+ * For some obscure reason PQgetResult does *not* return a
+ * NULL in copy cases despite the result having been cleared,
+ * but keeps returning an "empty" result that we have to
+ * ignore manually.
*/
result = NULL;
else
@@ -1565,12 +1571,13 @@ ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_g
else if (svpt_gone_p && !*svpt_gone_p)
{
/*
- * Check if the user ran any command that would destroy our internal
- * savepoint: If the user did COMMIT AND CHAIN, RELEASE or ROLLBACK, our
- * savepoint is gone. If they issued a SAVEPOINT, releasing ours would
- * remove theirs.
+ * Check if the user ran any command that would destroy our
+ * internal savepoint: If the user did COMMIT AND CHAIN, RELEASE
+ * or ROLLBACK, our savepoint is gone. If they issued a SAVEPOINT,
+ * releasing ours would remove theirs.
*/
const char *cmd = PQcmdStatus(result);
+
*svpt_gone_p = (strcmp(cmd, "COMMIT") == 0 ||
strcmp(cmd, "SAVEPOINT") == 0 ||
strcmp(cmd, "RELEASE") == 0 ||
@@ -1614,11 +1621,11 @@ ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_g
/*
* Get timing measure before printing the last result.
*
- * It will include the display of previous results, if any.
- * This cannot be helped because the server goes on processing
- * further queries anyway while the previous ones are being displayed.
- * The parallel execution of the client display hides the server time
- * when it is shorter.
+ * It will include the display of previous results, if any. This
+ * cannot be helped because the server goes on processing further
+ * queries anyway while the previous ones are being displayed. The
+ * parallel execution of the client display hides the server time when
+ * it is shorter.
*
* With combined queries, timing must be understood as an upper bound
* of the time spent processing them.
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index 31df8b759cd..1a5d924a23f 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -109,9 +109,9 @@ describeAggregates(const char *pattern, bool verbose, bool showSystem)
" AND n.nspname <> 'information_schema'\n");
if (!validateSQLNamePattern(&buf, pattern, true, false,
- "n.nspname", "p.proname", NULL,
- "pg_catalog.pg_function_is_visible(p.oid)",
- NULL, 3))
+ "n.nspname", "p.proname", NULL,
+ "pg_catalog.pg_function_is_visible(p.oid)",
+ NULL, 3))
return false;
appendPQExpBufferStr(&buf, "ORDER BY 1, 2, 4;");
@@ -6002,7 +6002,7 @@ validateSQLNamePattern(PQExpBuffer buf, const char *pattern, bool have_where,
const char *visibilityrule, bool *added_clause,
int maxparts)
{
- PQExpBufferData dbbuf;
+ PQExpBufferData dbbuf;
int dotcnt;
bool added;
@@ -6021,7 +6021,7 @@ validateSQLNamePattern(PQExpBuffer buf, const char *pattern, bool have_where,
return false;
}
- if (maxparts > 1 && dotcnt == maxparts-1)
+ if (maxparts > 1 && dotcnt == maxparts - 1)
{
if (PQdb(pset.db) == NULL)
{
diff --git a/src/bin/psql/t/001_basic.pl b/src/bin/psql/t/001_basic.pl
index 98996d9a379..90e69d7cdba 100644
--- a/src/bin/psql/t/001_basic.pl
+++ b/src/bin/psql/t/001_basic.pl
@@ -36,9 +36,8 @@ sub psql_fails_like
my ($node, $sql, $expected_stderr, $test_name) = @_;
# Use the context of a WAL sender, some of the tests rely on that.
- my ($ret, $stdout, $stderr) = $node->psql(
- 'postgres', $sql,
- replication => 'database');
+ my ($ret, $stdout, $stderr) =
+ $node->psql('postgres', $sql, replication => 'database');
isnt($ret, 0, "$test_name: exit code not 0");
like($stderr, $expected_stderr, "$test_name: matches");
@@ -69,9 +68,9 @@ max_wal_senders = 4
});
$node->start;
-psql_like($node, '\copyright', qr/Copyright/, '\copyright');
-psql_like($node, '\help', qr/ALTER/, '\help without arguments');
-psql_like($node, '\help SELECT', qr/SELECT/, '\help with argument');
+psql_like($node, '\copyright', qr/Copyright/, '\copyright');
+psql_like($node, '\help', qr/ALTER/, '\help without arguments');
+psql_like($node, '\help SELECT', qr/SELECT/, '\help with argument');
# Test clean handling of unsupported replication command responses
psql_fails_like(
@@ -116,16 +115,16 @@ NOTIFY foo, 'bar';",
'notification with payload');
# test behavior and output on server crash
-my ($ret, $out, $err) = $node->psql(
- 'postgres',
- "SELECT 'before' AS running;\n" .
- "SELECT pg_terminate_backend(pg_backend_pid());\n" .
- "SELECT 'AFTER' AS not_running;\n");
+my ($ret, $out, $err) = $node->psql('postgres',
+ "SELECT 'before' AS running;\n"
+ . "SELECT pg_terminate_backend(pg_backend_pid());\n"
+ . "SELECT 'AFTER' AS not_running;\n");
is($ret, 2, 'server crash: psql exit code');
like($out, qr/before/, 'server crash: output before crash');
ok($out !~ qr/AFTER/, 'server crash: no output after crash');
-is($err, 'psql:<stdin>:2: FATAL: terminating connection due to administrator command
+is( $err,
+ 'psql:<stdin>:2: FATAL: terminating connection due to administrator command
psql:<stdin>:2: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -149,34 +148,46 @@ psql_like(
# \errverbose: The normal way, using a cursor by setting FETCH_COUNT,
# and using \gdesc. Test them all.
-like(($node->psql('postgres', "SELECT error;\n\\errverbose", on_error_stop => 0))[2],
- qr/\A^psql:<stdin>:1: ERROR: .*$
+like(
+ ( $node->psql(
+ 'postgres',
+ "SELECT error;\n\\errverbose",
+ on_error_stop => 0))[2],
+ qr/\A^psql:<stdin>:1: ERROR: .*$
^LINE 1: SELECT error;$
^ *^.*$
^psql:<stdin>:2: error: ERROR: [0-9A-Z]{5}: .*$
^LINE 1: SELECT error;$
^ *^.*$
^LOCATION: .*$/m,
- '\errverbose after normal query with error');
-
-like(($node->psql('postgres', "\\set FETCH_COUNT 1\nSELECT error;\n\\errverbose", on_error_stop => 0))[2],
- qr/\A^psql:<stdin>:2: ERROR: .*$
+ '\errverbose after normal query with error');
+
+like(
+ ( $node->psql(
+ 'postgres',
+ "\\set FETCH_COUNT 1\nSELECT error;\n\\errverbose",
+ on_error_stop => 0))[2],
+ qr/\A^psql:<stdin>:2: ERROR: .*$
^LINE 2: SELECT error;$
^ *^.*$
^psql:<stdin>:3: error: ERROR: [0-9A-Z]{5}: .*$
^LINE 2: SELECT error;$
^ *^.*$
^LOCATION: .*$/m,
- '\errverbose after FETCH_COUNT query with error');
-
-like(($node->psql('postgres', "SELECT error\\gdesc\n\\errverbose", on_error_stop => 0))[2],
- qr/\A^psql:<stdin>:1: ERROR: .*$
+ '\errverbose after FETCH_COUNT query with error');
+
+like(
+ ( $node->psql(
+ 'postgres',
+ "SELECT error\\gdesc\n\\errverbose",
+ on_error_stop => 0))[2],
+ qr/\A^psql:<stdin>:1: ERROR: .*$
^LINE 1: SELECT error$
^ *^.*$
^psql:<stdin>:2: error: ERROR: [0-9A-Z]{5}: .*$
^LINE 1: SELECT error$
^ *^.*$
^LOCATION: .*$/m,
- '\errverbose after \gdesc with error');
+ '\errverbose after \gdesc with error');
done_testing();
diff --git a/src/bin/psql/t/010_tab_completion.pl b/src/bin/psql/t/010_tab_completion.pl
index 2711935a2cc..2eea515e871 100644
--- a/src/bin/psql/t/010_tab_completion.pl
+++ b/src/bin/psql/t/010_tab_completion.pl
@@ -212,10 +212,7 @@ check_completion(
clear_line();
# check case folding
-check_completion(
- "select * from TAB\t",
- qr/tab1 /,
- "automatically fold case");
+check_completion("select * from TAB\t", qr/tab1 /, "automatically fold case");
clear_query();
@@ -228,15 +225,10 @@ check_completion("\\DRD\t", qr/drds /, "complete \\DRD<tab> to \\drds");
clear_line();
# check completion of a schema-qualified name
-check_completion(
- "select * from pub\t",
- qr/public\./,
- "complete schema when relevant");
+check_completion("select * from pub\t",
+ qr/public\./, "complete schema when relevant");
-check_completion(
- "tab\t",
- qr/tab1 /,
- "complete schema-qualified name");
+check_completion("tab\t", qr/tab1 /, "complete schema-qualified name");
clear_query();
@@ -339,15 +331,10 @@ check_completion(
clear_line();
# check timezone name completion
-check_completion(
- "SET timezone TO am\t",
- qr|'America/|,
- "offer partial timezone name");
+check_completion("SET timezone TO am\t",
+ qr|'America/|, "offer partial timezone name");
-check_completion(
- "new_\t",
- qr|New_York|,
- "complete partial timezone name");
+check_completion("new_\t", qr|New_York|, "complete partial timezone name");
clear_line();
diff --git a/src/bin/psql/t/020_cancel.pl b/src/bin/psql/t/020_cancel.pl
index d57d3429521..f4dbd36c391 100644
--- a/src/bin/psql/t/020_cancel.pl
+++ b/src/bin/psql/t/020_cancel.pl
@@ -21,7 +21,8 @@ $node->start;
# the process from IPC::Run. As a workaround, we have psql print its
# own PID (which is the parent of the shell launched by psql) to a
# file.
-SKIP: {
+SKIP:
+{
skip "cancel test requires a Unix shell", 2 if $windows_os;
local %ENV = $node->_get_env();
@@ -31,31 +32,38 @@ SKIP: {
# Test whether shell supports $PPID. It's part of POSIX, but some
# pre-/non-POSIX shells don't support it (e.g., NetBSD).
$stdin = "\\! echo \$PPID";
- IPC::Run::run(['psql', '-X', '-v', 'ON_ERROR_STOP=1'], '<', \$stdin, '>', \$stdout, '2>', \$stderr);
+ IPC::Run::run([ 'psql', '-X', '-v', 'ON_ERROR_STOP=1' ],
+ '<', \$stdin, '>', \$stdout, '2>', \$stderr);
$stdout =~ /^\d+$/ or skip "shell apparently does not support \$PPID", 2;
# Now start the real test
- my $h = IPC::Run::start(['psql', '-X', '-v', 'ON_ERROR_STOP=1'], \$stdin, \$stdout, \$stderr);
+ my $h = IPC::Run::start([ 'psql', '-X', '-v', 'ON_ERROR_STOP=1' ],
+ \$stdin, \$stdout, \$stderr);
# Get the PID
$stdout = '';
$stderr = '';
- $stdin = "\\! echo \$PPID >$tempdir/psql.pid\n";
+ $stdin = "\\! echo \$PPID >$tempdir/psql.pid\n";
pump $h while length $stdin;
my $count;
my $psql_pid;
- until (-s "$tempdir/psql.pid" and ($psql_pid = PostgreSQL::Test::Utils::slurp_file("$tempdir/psql.pid")) =~ /^\d+\n/s)
+ until (
+ -s "$tempdir/psql.pid"
+ and ($psql_pid =
+ PostgreSQL::Test::Utils::slurp_file("$tempdir/psql.pid")) =~
+ /^\d+\n/s)
{
($count++ < 100 * $PostgreSQL::Test::Utils::timeout_default)
or die "pid file did not appear";
- usleep(10_000)
+ usleep(10_000);
}
# Send sleep command and wait until the server has registered it
$stdin = "select pg_sleep($PostgreSQL::Test::Utils::timeout_default);\n";
pump $h while length $stdin;
- $node->poll_query_until('postgres', q{SELECT (SELECT count(*) FROM pg_stat_activity WHERE query ~ '^select pg_sleep') > 0;})
- or die "timed out";
+ $node->poll_query_until('postgres',
+ q{SELECT (SELECT count(*) FROM pg_stat_activity WHERE query ~ '^select pg_sleep') > 0;}
+ ) or die "timed out";
# Send cancel request
kill 'INT', $psql_pid;
@@ -63,7 +71,10 @@ SKIP: {
my $result = finish $h;
ok(!$result, 'query failed as expected');
- like($stderr, qr/canceling statement due to user request/, 'query was canceled');
+ like(
+ $stderr,
+ qr/canceling statement due to user request/,
+ 'query was canceled');
}
done_testing();
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index 588c0841fee..55af9eb04e4 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -826,7 +826,7 @@ static const SchemaQuery Query_for_list_of_mergetargets = {
.selcondition =
"c.relkind IN (" CppAsString2(RELKIND_RELATION) ", "
CppAsString2(RELKIND_PARTITIONED_TABLE) ") ",
- .viscondition = "pg_catalog.pg_table_is_visible(c.oid)",
+ .viscondition = "pg_catalog.pg_table_is_visible(c.oid)",
.namespace = "c.relnamespace",
.result = "c.relname",
};
@@ -1827,6 +1827,7 @@ psql_completion(const char *text, int start, int end)
(HeadMatches("ALTER", "PUBLICATION", MatchAny, "ADD|SET", "TABLE") &&
ends_with(prev_wd, ',')))
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables);
+
/*
* "ALTER PUBLICATION <name> SET TABLE <name> WHERE (" - complete with
* table attributes
diff --git a/src/bin/scripts/t/020_createdb.pl b/src/bin/scripts/t/020_createdb.pl
index 18f6e313d57..78733f64d25 100644
--- a/src/bin/scripts/t/020_createdb.pl
+++ b/src/bin/scripts/t/020_createdb.pl
@@ -35,12 +35,19 @@ if ($ENV{with_icu} eq 'yes')
'create database with ICU fails without ICU locale specified');
$node->issues_sql_like(
- [ 'createdb', '-T', 'template0', '--locale-provider=icu', '--icu-locale=en', 'foobar5' ],
+ [
+ 'createdb', '-T',
+ 'template0', '--locale-provider=icu',
+ '--icu-locale=en', 'foobar5'
+ ],
qr/statement: CREATE DATABASE foobar5 .* LOCALE_PROVIDER icu ICU_LOCALE 'en'/,
'create database with ICU locale specified');
$node->command_fails(
- [ 'createdb', '-T', 'template0', '--locale-provider=icu', '--icu-locale=@colNumeric=lower', 'foobarX' ],
+ [
+ 'createdb', '-T', 'template0', '--locale-provider=icu',
+ '--icu-locale=@colNumeric=lower', 'foobarX'
+ ],
'fails for invalid ICU locale');
}
else
@@ -53,7 +60,8 @@ else
$node->command_fails([ 'createdb', 'foobar1' ],
'fails if database already exists');
-$node->command_fails([ 'createdb', '-T', 'template0', '--locale-provider=xyz', 'foobarX' ],
+$node->command_fails(
+ [ 'createdb', '-T', 'template0', '--locale-provider=xyz', 'foobarX' ],
'fails for invalid locale provider');
# Check use of templates with shared dependencies copied from the template.