From 9fa8b0ee90c44c0f97d16bf65e94322988c94864 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Tue, 10 Mar 2015 22:33:25 -0400 Subject: Move pg_upgrade from contrib/ to src/bin/ Reviewed-by: Michael Paquier --- contrib/Makefile | 1 - contrib/pg_upgrade/.gitignore | 8 - contrib/pg_upgrade/IMPLEMENTATION | 100 ---- contrib/pg_upgrade/Makefile | 34 -- contrib/pg_upgrade/TESTING | 83 --- contrib/pg_upgrade/check.c | 1016 ------------------------------------- contrib/pg_upgrade/controldata.c | 606 ---------------------- contrib/pg_upgrade/dump.c | 139 ----- contrib/pg_upgrade/exec.c | 379 -------------- contrib/pg_upgrade/file.c | 250 --------- contrib/pg_upgrade/function.c | 240 --------- contrib/pg_upgrade/info.c | 535 ------------------- contrib/pg_upgrade/option.c | 518 ------------------- contrib/pg_upgrade/page.c | 164 ------ contrib/pg_upgrade/parallel.c | 357 ------------- contrib/pg_upgrade/pg_upgrade.c | 616 ---------------------- contrib/pg_upgrade/pg_upgrade.h | 481 ------------------ contrib/pg_upgrade/relfilenode.c | 294 ----------- contrib/pg_upgrade/server.c | 350 ------------- contrib/pg_upgrade/tablespace.c | 124 ----- contrib/pg_upgrade/test.sh | 224 -------- contrib/pg_upgrade/util.c | 298 ----------- contrib/pg_upgrade/version.c | 178 ------- doc/src/sgml/contrib.sgml | 1 - doc/src/sgml/filelist.sgml | 1 - doc/src/sgml/pgupgrade.sgml | 723 -------------------------- doc/src/sgml/ref/allfiles.sgml | 1 + doc/src/sgml/ref/pgupgrade.sgml | 715 ++++++++++++++++++++++++++ doc/src/sgml/reference.sgml | 1 + src/bin/Makefile | 1 + src/bin/pg_upgrade/.gitignore | 8 + src/bin/pg_upgrade/IMPLEMENTATION | 98 ++++ src/bin/pg_upgrade/Makefile | 42 ++ src/bin/pg_upgrade/TESTING | 81 +++ src/bin/pg_upgrade/check.c | 1016 +++++++++++++++++++++++++++++++++++++ src/bin/pg_upgrade/controldata.c | 606 ++++++++++++++++++++++ src/bin/pg_upgrade/dump.c | 139 +++++ src/bin/pg_upgrade/exec.c | 379 ++++++++++++++ src/bin/pg_upgrade/file.c | 250 +++++++++ src/bin/pg_upgrade/function.c | 240 +++++++++ src/bin/pg_upgrade/info.c | 535 +++++++++++++++++++ src/bin/pg_upgrade/option.c | 518 +++++++++++++++++++ src/bin/pg_upgrade/page.c | 164 ++++++ src/bin/pg_upgrade/parallel.c | 357 +++++++++++++ src/bin/pg_upgrade/pg_upgrade.c | 616 ++++++++++++++++++++++ src/bin/pg_upgrade/pg_upgrade.h | 481 ++++++++++++++++++ src/bin/pg_upgrade/relfilenode.c | 294 +++++++++++ src/bin/pg_upgrade/server.c | 350 +++++++++++++ src/bin/pg_upgrade/tablespace.c | 124 +++++ src/bin/pg_upgrade/test.sh | 224 ++++++++ src/bin/pg_upgrade/util.c | 298 +++++++++++ src/bin/pg_upgrade/version.c | 178 +++++++ src/tools/msvc/Mkvcbuild.pm | 12 +- src/tools/msvc/vcregress.pl | 6 +- 54 files changed, 7725 insertions(+), 7729 deletions(-) delete mode 100644 contrib/pg_upgrade/.gitignore delete mode 100644 contrib/pg_upgrade/IMPLEMENTATION delete mode 100644 contrib/pg_upgrade/Makefile delete mode 100644 contrib/pg_upgrade/TESTING delete mode 100644 contrib/pg_upgrade/check.c delete mode 100644 contrib/pg_upgrade/controldata.c delete mode 100644 contrib/pg_upgrade/dump.c delete mode 100644 contrib/pg_upgrade/exec.c delete mode 100644 contrib/pg_upgrade/file.c delete mode 100644 contrib/pg_upgrade/function.c delete mode 100644 contrib/pg_upgrade/info.c delete mode 100644 contrib/pg_upgrade/option.c delete mode 100644 contrib/pg_upgrade/page.c delete mode 100644 contrib/pg_upgrade/parallel.c delete mode 100644 contrib/pg_upgrade/pg_upgrade.c delete mode 100644 contrib/pg_upgrade/pg_upgrade.h delete mode 100644 
contrib/pg_upgrade/relfilenode.c delete mode 100644 contrib/pg_upgrade/server.c delete mode 100644 contrib/pg_upgrade/tablespace.c delete mode 100644 contrib/pg_upgrade/test.sh delete mode 100644 contrib/pg_upgrade/util.c delete mode 100644 contrib/pg_upgrade/version.c delete mode 100644 doc/src/sgml/pgupgrade.sgml create mode 100644 doc/src/sgml/ref/pgupgrade.sgml create mode 100644 src/bin/pg_upgrade/.gitignore create mode 100644 src/bin/pg_upgrade/IMPLEMENTATION create mode 100644 src/bin/pg_upgrade/Makefile create mode 100644 src/bin/pg_upgrade/TESTING create mode 100644 src/bin/pg_upgrade/check.c create mode 100644 src/bin/pg_upgrade/controldata.c create mode 100644 src/bin/pg_upgrade/dump.c create mode 100644 src/bin/pg_upgrade/exec.c create mode 100644 src/bin/pg_upgrade/file.c create mode 100644 src/bin/pg_upgrade/function.c create mode 100644 src/bin/pg_upgrade/info.c create mode 100644 src/bin/pg_upgrade/option.c create mode 100644 src/bin/pg_upgrade/page.c create mode 100644 src/bin/pg_upgrade/parallel.c create mode 100644 src/bin/pg_upgrade/pg_upgrade.c create mode 100644 src/bin/pg_upgrade/pg_upgrade.h create mode 100644 src/bin/pg_upgrade/relfilenode.c create mode 100644 src/bin/pg_upgrade/server.c create mode 100644 src/bin/pg_upgrade/tablespace.c create mode 100644 src/bin/pg_upgrade/test.sh create mode 100644 src/bin/pg_upgrade/util.c create mode 100644 src/bin/pg_upgrade/version.c diff --git a/contrib/Makefile b/contrib/Makefile index 074e39477b1..cc60d680fca 100644 --- a/contrib/Makefile +++ b/contrib/Makefile @@ -36,7 +36,6 @@ SUBDIRS = \ pg_test_fsync \ pg_test_timing \ pg_trgm \ - pg_upgrade \ pgcrypto \ pgrowlocks \ pgstattuple \ diff --git a/contrib/pg_upgrade/.gitignore b/contrib/pg_upgrade/.gitignore deleted file mode 100644 index d24ec60184f..00000000000 --- a/contrib/pg_upgrade/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -/pg_upgrade -# Generated by test suite -/analyze_new_cluster.sh -/delete_old_cluster.sh -/analyze_new_cluster.bat -/delete_old_cluster.bat -/log/ -/tmp_check/ diff --git a/contrib/pg_upgrade/IMPLEMENTATION b/contrib/pg_upgrade/IMPLEMENTATION deleted file mode 100644 index a0cfcf15dac..00000000000 --- a/contrib/pg_upgrade/IMPLEMENTATION +++ /dev/null @@ -1,100 +0,0 @@ -contrib/pg_upgrade/IMPLEMENTATION - ------------------------------------------------------------------------------- -PG_UPGRADE: IN-PLACE UPGRADES FOR POSTGRESQL ------------------------------------------------------------------------------- - -Upgrading a PostgreSQL database from one major release to another can be -an expensive process. For minor upgrades, you can simply install new -executables and forget about upgrading existing data. But for major -upgrades, you have to export all of your data using pg_dump, install the -new release, run initdb to create a new cluster, and then import your -old data. If you have a lot of data, that can take a considerable amount -of time. If you have too much data, you may have to buy more storage -since you need enough room to hold the original data plus the exported -data. pg_upgrade can reduce the amount of time and disk space required -for many upgrades. - -The URL http://momjian.us/main/writings/pgsql/pg_upgrade.pdf contains a -presentation about pg_upgrade internals that mirrors the text -description below. 
-
-------------------------------------------------------------------------------
-WHAT IT DOES
-------------------------------------------------------------------------------
-
-pg_upgrade is a tool that performs an in-place upgrade of existing
-data.  Some upgrades change the on-disk representation of data;
-pg_upgrade cannot help in those upgrades.  However, many upgrades do
-not change the on-disk representation of a user-defined table.  In those
-cases, pg_upgrade can move existing user-defined tables from the old
-database cluster into the new cluster.
-
-There are two factors that determine whether an in-place upgrade is
-practical.
-
-First, every table in a cluster shares the same on-disk representation
-of the table headers and trailers and the on-disk representation of
-tuple headers.  If this changes between the old version of PostgreSQL
-and the new version, pg_upgrade cannot move existing tables to the new
-cluster; you will have to pg_dump the old data and then import that
-data into the new cluster.
-
-Second, all data types should have the same binary representation
-between the two major PostgreSQL versions.
-
-------------------------------------------------------------------------------
-HOW IT WORKS
-------------------------------------------------------------------------------
-
-To use pg_upgrade during an upgrade, start by installing a fresh
-cluster using the newest version in a new directory.  When you've
-finished installation, the new cluster will contain the new executables
-and the usual template0, template1, and postgres databases, but no
-user-defined tables.  At this point, you can shut down the old and new
-postmasters and invoke pg_upgrade.
-
-When pg_upgrade starts, it ensures that all required executables are
-present and contain the expected version numbers.  The verification
-process also checks the old and new $PGDATA directories to ensure that
-the expected files and subdirectories are in place.  If the verification
-process succeeds, pg_upgrade starts the old postmaster and runs
-pg_dumpall --schema-only to capture the metadata contained in the old
-cluster.  The script produced by pg_dumpall will be used in a later step
-to recreate all user-defined objects in the new cluster.
-
-Note that the script produced by pg_dumpall will only recreate
-user-defined objects, not system-defined objects.  The new cluster will
-contain the system-defined objects created by the latest version of
-PostgreSQL.
-
-Once pg_upgrade has extracted the metadata from the old cluster, it
-performs a number of bookkeeping tasks required to 'sync up' the new
-cluster with the existing data.
-
-First, pg_upgrade copies the commit status information and 'next
-transaction ID' from the old cluster to the new cluster.  This step
-ensures that the proper tuples are visible from the new cluster.
-Remember, pg_upgrade does not export/import the content of user-defined
-tables, so the transaction IDs in the new cluster must match the
-transaction IDs in the old data.  pg_upgrade also copies the starting
-address for write-ahead logs from the old cluster to the new cluster.
-
-Now pg_upgrade begins reconstructing the metadata obtained from the old
-cluster using the first part of the pg_dumpall output.
-
-Next, pg_upgrade executes the remainder of the script produced earlier
-by pg_dumpall --- this script effectively recreates the complete
-user-defined metadata from the old cluster in the new cluster.
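To make the 'next transaction ID' step above concrete, here is a minimal
sketch of the kind of call involved (not the literal pg_upgrade source;
exec_prog() and the cluster globals come from pg_upgrade.h elsewhere in
this patch, and next_xid is a hypothetical variable holding the value
parsed from the old cluster's pg_control output):

	/*
	 * Sketch: make the new cluster's transaction counter match the old
	 * data by running the new binaries' pg_resetxlog against the new
	 * data directory.  -f forces the change; -x sets the next XID.
	 */
	exec_prog(UTILITY_LOG_FILE, NULL, true,
			  "\"%s/pg_resetxlog\" -f -x %u \"%s\"",
			  new_cluster.bindir, next_xid, new_cluster.pgdata);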
-The pg_dumpall script preserves the relfilenode numbers, so TOAST and
-other references to relfilenodes in user data are preserved.  (See
-binary-upgrade usage in pg_dump).
-
-Finally, pg_upgrade links or copies each user-defined table and its
-supporting indexes and toast tables from the old cluster to the new
-cluster.
-
-An important feature of the pg_upgrade design is that it leaves the
-original cluster intact --- if a problem occurs during the upgrade, you
-can still run the previous version, after renaming the tablespaces back
-to the original names.
diff --git a/contrib/pg_upgrade/Makefile b/contrib/pg_upgrade/Makefile
deleted file mode 100644
index 87da4b8e834..00000000000
--- a/contrib/pg_upgrade/Makefile
+++ /dev/null
@@ -1,34 +0,0 @@
-# contrib/pg_upgrade/Makefile
-
-PGFILEDESC = "pg_upgrade - an in-place binary upgrade utility"
-PGAPPICON = win32
-
-PROGRAM = pg_upgrade
-OBJS = check.o controldata.o dump.o exec.o file.o function.o info.o \
-       option.o page.o parallel.o pg_upgrade.o relfilenode.o server.o \
-       tablespace.o util.o version.o $(WIN32RES)
-
-PG_CPPFLAGS = -DFRONTEND -DDLSUFFIX=\"$(DLSUFFIX)\" -I$(srcdir) -I$(libpq_srcdir)
-PG_LIBS = $(libpq_pgport)
-
-EXTRA_CLEAN = analyze_new_cluster.sh delete_old_cluster.sh log/ tmp_check/ \
-              pg_upgrade_dump_globals.sql \
-              pg_upgrade_dump_*.custom pg_upgrade_*.log
-
-ifdef USE_PGXS
-PG_CONFIG = pg_config
-PGXS := $(shell $(PG_CONFIG) --pgxs)
-include $(PGXS)
-else
-subdir = contrib/pg_upgrade
-top_builddir = ../..
-include $(top_builddir)/src/Makefile.global
-include $(top_srcdir)/contrib/contrib-global.mk
-endif
-
-check: test.sh all
-	MAKE=$(MAKE) bindir=$(bindir) libdir=$(libdir) EXTRA_REGRESS_OPTS="$(EXTRA_REGRESS_OPTS)" $(SHELL) $< --install
-
-# disabled because it upsets the build farm
-#installcheck: test.sh
-#	MAKE=$(MAKE) bindir=$(bindir) libdir=$(libdir) $(SHELL) $<
diff --git a/contrib/pg_upgrade/TESTING b/contrib/pg_upgrade/TESTING
deleted file mode 100644
index 359688c6645..00000000000
--- a/contrib/pg_upgrade/TESTING
+++ /dev/null
@@ -1,83 +0,0 @@
-contrib/pg_upgrade/TESTING
-
-The most effective way to test pg_upgrade, aside from testing on user
-data, is by upgrading the PostgreSQL regression database.
-
-This testing process first requires the creation of a valid regression
-database dump.  Such files contain most database features and are
-specific to each major version of Postgres.
-
-Here are the steps needed to create a regression database dump file:
-
-1) Create and populate the regression database in the old cluster
-   This database can be created by running 'make installcheck' from
-   src/test/regress.
-
-2) Use pg_dump to dump out the regression database.  Use the new
-   cluster's pg_dump on the old database to minimize whitespace
-   differences in the diff.
-
-3) Adjust the regression database dump file
-
-   a) Perform the load/dump twice
-      This fixes problems with the ordering of COPY columns for
-      inherited tables.
-
-   b) Change CREATE FUNCTION shared object paths to use '$libdir'
-      The old and new clusters will have different shared object paths.
-
-   c) Fix any wrapping format differences
-      Commands like CREATE TRIGGER and ALTER TABLE sometimes have
-      differences.
-
-   d) For pre-9.0, change CREATE OR REPLACE LANGUAGE to CREATE LANGUAGE
-
-   e) For pre-9.0, remove 'regex_flavor'
-
-   f) For pre-9.0, adjust extra_float_digits
-      Postgres 9.0 pg_dump uses extra_float_digits=-2 for pre-9.0
-      databases, and extra_float_digits=-3 for >= 9.0 databases.
- It is necessary to modify 9.0 pg_dump to always use -3, and - modify the pre-9.0 old server to accept extra_float_digits=-3. - -Once the dump is created, it can be repeatedly loaded into the old -database, upgraded, and dumped out of the new database, and then -compared to the original version. To test the dump file, perform these -steps: - -1) Create the old and new clusters in different directories. - -2) Copy the regression shared object files into the appropriate /lib - directory for old and new clusters. - -3) Create the regression database in the old server. - -4) Load the dump file created above into the regression database; - check for errors while loading. - -5) Upgrade the old database to the new major version, as outlined in - the pg_upgrade manual section. - -6) Use pg_dump to dump out the regression database in the new cluster. - -7) Diff the regression database dump file with the regression dump - file loaded into the old server. - -The shell script test.sh in this directory performs more or less this -procedure. You can invoke it by running - - make check - -or by running - - make installcheck - -if "make install" (or "make install-world") were done beforehand. -When invoked without arguments, it will run an upgrade from the -version in this source tree to a new instance of the same version. To -test an upgrade from a different version, invoke it like this: - - make installcheck oldbindir=...otherversion/bin oldsrc=...somewhere/postgresql - -In this case, you will have to manually eyeball the resulting dump -diff for version-specific differences, as explained above. diff --git a/contrib/pg_upgrade/check.c b/contrib/pg_upgrade/check.c deleted file mode 100644 index 6a498c3bd5c..00000000000 --- a/contrib/pg_upgrade/check.c +++ /dev/null @@ -1,1016 +0,0 @@ -/* - * check.c - * - * server checks and output routines - * - * Copyright (c) 2010-2015, PostgreSQL Global Development Group - * contrib/pg_upgrade/check.c - */ - -#include "postgres_fe.h" - -#include "catalog/pg_authid.h" -#include "mb/pg_wchar.h" -#include "pg_upgrade.h" - - -static void check_new_cluster_is_empty(void); -static void check_databases_are_compatible(void); -static void check_locale_and_encoding(DbInfo *olddb, DbInfo *newdb); -static bool equivalent_locale(int category, const char *loca, const char *locb); -static void check_is_install_user(ClusterInfo *cluster); -static void check_for_prepared_transactions(ClusterInfo *cluster); -static void check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster); -static void check_for_reg_data_type_usage(ClusterInfo *cluster); -static void check_for_jsonb_9_4_usage(ClusterInfo *cluster); -static void get_bin_version(ClusterInfo *cluster); -static char *get_canonical_locale_name(int category, const char *locale); - - -/* - * fix_path_separator - * For non-Windows, just return the argument. - * For Windows convert any forward slash to a backslash - * such as is suitable for arguments to builtin commands - * like RMDIR and DEL. 
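 * (On Windows the result is a freshly allocated copy; on other
 * platforms the input pointer is returned unchanged.)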
- */ -static char * -fix_path_separator(char *path) -{ -#ifdef WIN32 - - char *result; - char *c; - - result = pg_strdup(path); - - for (c = result; *c != '\0'; c++) - if (*c == '/') - *c = '\\'; - - return result; -#else - - return path; -#endif -} - -void -output_check_banner(bool live_check) -{ - if (user_opts.check && live_check) - { - pg_log(PG_REPORT, "Performing Consistency Checks on Old Live Server\n"); - pg_log(PG_REPORT, "------------------------------------------------\n"); - } - else - { - pg_log(PG_REPORT, "Performing Consistency Checks\n"); - pg_log(PG_REPORT, "-----------------------------\n"); - } -} - - -void -check_and_dump_old_cluster(bool live_check) -{ - /* -- OLD -- */ - - if (!live_check) - start_postmaster(&old_cluster, true); - - get_pg_database_relfilenode(&old_cluster); - - /* Extract a list of databases and tables from the old cluster */ - get_db_and_rel_infos(&old_cluster); - - init_tablespaces(); - - get_loadable_libraries(); - - - /* - * Check for various failure cases - */ - check_is_install_user(&old_cluster); - check_for_prepared_transactions(&old_cluster); - check_for_reg_data_type_usage(&old_cluster); - check_for_isn_and_int8_passing_mismatch(&old_cluster); - if (GET_MAJOR_VERSION(old_cluster.major_version) == 904 && - old_cluster.controldata.cat_ver < JSONB_FORMAT_CHANGE_CAT_VER) - check_for_jsonb_9_4_usage(&old_cluster); - - /* Pre-PG 9.4 had a different 'line' data type internal format */ - if (GET_MAJOR_VERSION(old_cluster.major_version) <= 903) - old_9_3_check_for_line_data_type_usage(&old_cluster); - - /* Pre-PG 9.0 had no large object permissions */ - if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) - new_9_0_populate_pg_largeobject_metadata(&old_cluster, true); - - /* - * While not a check option, we do this now because this is the only time - * the old server is running. - */ - if (!user_opts.check) - generate_old_dump(); - - if (!live_check) - stop_postmaster(false); -} - - -void -check_new_cluster(void) -{ - get_db_and_rel_infos(&new_cluster); - - check_new_cluster_is_empty(); - check_databases_are_compatible(); - - check_loadable_libraries(); - - if (user_opts.transfer_mode == TRANSFER_MODE_LINK) - check_hard_link(); - - check_is_install_user(&new_cluster); - - check_for_prepared_transactions(&new_cluster); -} - - -void -report_clusters_compatible(void) -{ - if (user_opts.check) - { - pg_log(PG_REPORT, "\n*Clusters are compatible*\n"); - /* stops new cluster */ - stop_postmaster(false); - exit(0); - } - - pg_log(PG_REPORT, "\n" - "If pg_upgrade fails after this point, you must re-initdb the\n" - "new cluster before continuing.\n"); -} - - -void -issue_warnings(void) -{ - /* Create dummy large object permissions for old < PG 9.0? */ - if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) - { - start_postmaster(&new_cluster, true); - new_9_0_populate_pg_largeobject_metadata(&new_cluster, false); - stop_postmaster(false); - } -} - - -void -output_completion_banner(char *analyze_script_file_name, - char *deletion_script_file_name) -{ - /* Did we copy the free space files? 
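 * (Free space map files exist only in 8.4 and later, so for older old
 * clusters free space information must be rebuilt as well as the
 * optimizer statistics.)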
*/ - if (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) - pg_log(PG_REPORT, - "Optimizer statistics are not transferred by pg_upgrade so,\n" - "once you start the new server, consider running:\n" - " %s\n\n", analyze_script_file_name); - else - pg_log(PG_REPORT, - "Optimizer statistics and free space information are not transferred\n" - "by pg_upgrade so, once you start the new server, consider running:\n" - " %s\n\n", analyze_script_file_name); - - - if (deletion_script_file_name) - pg_log(PG_REPORT, - "Running this script will delete the old cluster's data files:\n" - " %s\n", - deletion_script_file_name); - else - pg_log(PG_REPORT, - "Could not create a script to delete the old cluster's data\n" - "files because user-defined tablespaces exist in the old cluster\n" - "directory. The old cluster's contents must be deleted manually.\n"); -} - - -void -check_cluster_versions(void) -{ - prep_status("Checking cluster versions"); - - /* get old and new cluster versions */ - old_cluster.major_version = get_major_server_version(&old_cluster); - new_cluster.major_version = get_major_server_version(&new_cluster); - - /* - * We allow upgrades from/to the same major version for alpha/beta - * upgrades - */ - - if (GET_MAJOR_VERSION(old_cluster.major_version) < 804) - pg_fatal("This utility can only upgrade from PostgreSQL version 8.4 and later.\n"); - - /* Only current PG version is supported as a target */ - if (GET_MAJOR_VERSION(new_cluster.major_version) != GET_MAJOR_VERSION(PG_VERSION_NUM)) - pg_fatal("This utility can only upgrade to PostgreSQL version %s.\n", - PG_MAJORVERSION); - - /* - * We can't allow downgrading because we use the target pg_dump, and - * pg_dump cannot operate on newer database versions, only current and - * older versions. - */ - if (old_cluster.major_version > new_cluster.major_version) - pg_fatal("This utility cannot be used to downgrade to older major PostgreSQL versions.\n"); - - /* get old and new binary versions */ - get_bin_version(&old_cluster); - get_bin_version(&new_cluster); - - /* Ensure binaries match the designated data directories */ - if (GET_MAJOR_VERSION(old_cluster.major_version) != - GET_MAJOR_VERSION(old_cluster.bin_version)) - pg_fatal("Old cluster data and binary directories are from different major versions.\n"); - if (GET_MAJOR_VERSION(new_cluster.major_version) != - GET_MAJOR_VERSION(new_cluster.bin_version)) - pg_fatal("New cluster data and binary directories are from different major versions.\n"); - - check_ok(); -} - - -void -check_cluster_compatibility(bool live_check) -{ - /* get/check pg_control data of servers */ - get_control_data(&old_cluster, live_check); - get_control_data(&new_cluster, false); - check_control_data(&old_cluster.controldata, &new_cluster.controldata); - - /* Is it 9.0 but without tablespace directories? 
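 * (Version-specific subdirectories inside tablespace directories were
 * added during 9.0 development, which is why very early 9.0 catalog
 * versions are rejected below.)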
*/ - if (GET_MAJOR_VERSION(new_cluster.major_version) == 900 && - new_cluster.controldata.cat_ver < TABLE_SPACE_SUBDIRS_CAT_VER) - pg_fatal("This utility can only upgrade to PostgreSQL version 9.0 after 2010-01-11\n" - "because of backend API changes made during development.\n"); - - /* We read the real port number for PG >= 9.1 */ - if (live_check && GET_MAJOR_VERSION(old_cluster.major_version) < 901 && - old_cluster.port == DEF_PGUPORT) - pg_fatal("When checking a pre-PG 9.1 live old server, " - "you must specify the old server's port number.\n"); - - if (live_check && old_cluster.port == new_cluster.port) - pg_fatal("When checking a live server, " - "the old and new port numbers must be different.\n"); -} - - -/* - * check_locale_and_encoding() - * - * Check that locale and encoding of a database in the old and new clusters - * are compatible. - */ -static void -check_locale_and_encoding(DbInfo *olddb, DbInfo *newdb) -{ - if (olddb->db_encoding != newdb->db_encoding) - pg_fatal("encodings for database \"%s\" do not match: old \"%s\", new \"%s\"\n", - olddb->db_name, - pg_encoding_to_char(olddb->db_encoding), - pg_encoding_to_char(newdb->db_encoding)); - if (!equivalent_locale(LC_COLLATE, olddb->db_collate, newdb->db_collate)) - pg_fatal("lc_collate values for database \"%s\" do not match: old \"%s\", new \"%s\"\n", - olddb->db_name, olddb->db_collate, newdb->db_collate); - if (!equivalent_locale(LC_CTYPE, olddb->db_ctype, newdb->db_ctype)) - pg_fatal("lc_ctype values for database \"%s\" do not match: old \"%s\", new \"%s\"\n", - olddb->db_name, olddb->db_ctype, newdb->db_ctype); -} - -/* - * equivalent_locale() - * - * Best effort locale-name comparison. Return false if we are not 100% sure - * the locales are equivalent. - * - * Note: The encoding parts of the names are ignored. This function is - * currently used to compare locale names stored in pg_database, and - * pg_database contains a separate encoding field. That's compared directly - * in check_locale_and_encoding(). - */ -static bool -equivalent_locale(int category, const char *loca, const char *locb) -{ - const char *chara; - const char *charb; - char *canona; - char *canonb; - int lena; - int lenb; - - /* - * If the names are equal, the locales are equivalent. Checking this - * first avoids calling setlocale() in the common case that the names - * are equal. That's a good thing, if setlocale() is buggy, for example. - */ - if (pg_strcasecmp(loca, locb) == 0) - return true; - - /* - * Not identical. Canonicalize both names, remove the encoding parts, - * and try again. - */ - canona = get_canonical_locale_name(category, loca); - chara = strrchr(canona, '.'); - lena = chara ? (chara - canona) : strlen(canona); - - canonb = get_canonical_locale_name(category, locb); - charb = strrchr(canonb, '.'); - lenb = charb ? 
(charb - canonb) : strlen(canonb); - - if (lena == lenb && pg_strncasecmp(canona, canonb, lena) == 0) - return true; - - return false; -} - - -static void -check_new_cluster_is_empty(void) -{ - int dbnum; - - for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++) - { - int relnum; - RelInfoArr *rel_arr = &new_cluster.dbarr.dbs[dbnum].rel_arr; - - for (relnum = 0; relnum < rel_arr->nrels; - relnum++) - { - /* pg_largeobject and its index should be skipped */ - if (strcmp(rel_arr->rels[relnum].nspname, "pg_catalog") != 0) - pg_fatal("New cluster database \"%s\" is not empty\n", - new_cluster.dbarr.dbs[dbnum].db_name); - } - } -} - -/* - * Check that every database that already exists in the new cluster is - * compatible with the corresponding database in the old one. - */ -static void -check_databases_are_compatible(void) -{ - int newdbnum; - int olddbnum; - DbInfo *newdbinfo; - DbInfo *olddbinfo; - - for (newdbnum = 0; newdbnum < new_cluster.dbarr.ndbs; newdbnum++) - { - newdbinfo = &new_cluster.dbarr.dbs[newdbnum]; - - /* Find the corresponding database in the old cluster */ - for (olddbnum = 0; olddbnum < old_cluster.dbarr.ndbs; olddbnum++) - { - olddbinfo = &old_cluster.dbarr.dbs[olddbnum]; - if (strcmp(newdbinfo->db_name, olddbinfo->db_name) == 0) - { - check_locale_and_encoding(olddbinfo, newdbinfo); - break; - } - } - } -} - - -/* - * create_script_for_cluster_analyze() - * - * This incrementally generates better optimizer statistics - */ -void -create_script_for_cluster_analyze(char **analyze_script_file_name) -{ - FILE *script = NULL; - char *user_specification = ""; - - prep_status("Creating script to analyze new cluster"); - - if (os_info.user_specified) - user_specification = psprintf("-U \"%s\" ", os_info.user); - - *analyze_script_file_name = psprintf("%sanalyze_new_cluster.%s", - SCRIPT_PREFIX, SCRIPT_EXT); - - if ((script = fopen_priv(*analyze_script_file_name, "w")) == NULL) - pg_fatal("Could not open file \"%s\": %s\n", - *analyze_script_file_name, getErrorText(errno)); - -#ifndef WIN32 - /* add shebang header */ - fprintf(script, "#!/bin/sh\n\n"); -#else - /* suppress command echoing */ - fprintf(script, "@echo off\n"); -#endif - - fprintf(script, "echo %sThis script will generate minimal optimizer statistics rapidly%s\n", - ECHO_QUOTE, ECHO_QUOTE); - fprintf(script, "echo %sso your system is usable, and then gather statistics twice more%s\n", - ECHO_QUOTE, ECHO_QUOTE); - fprintf(script, "echo %swith increasing accuracy. When it is done, your system will%s\n", - ECHO_QUOTE, ECHO_QUOTE); - fprintf(script, "echo %shave the default level of optimizer statistics.%s\n", - ECHO_QUOTE, ECHO_QUOTE); - fprintf(script, "echo%s\n\n", ECHO_BLANK); - - fprintf(script, "echo %sIf you have used ALTER TABLE to modify the statistics target for%s\n", - ECHO_QUOTE, ECHO_QUOTE); - fprintf(script, "echo %sany tables, you might want to remove them and restore them after%s\n", - ECHO_QUOTE, ECHO_QUOTE); - fprintf(script, "echo %srunning this script because they will delay fast statistics generation.%s\n", - ECHO_QUOTE, ECHO_QUOTE); - fprintf(script, "echo%s\n\n", ECHO_BLANK); - - fprintf(script, "echo %sIf you would like default statistics as quickly as possible, cancel%s\n", - ECHO_QUOTE, ECHO_QUOTE); - fprintf(script, "echo %sthis script and run:%s\n", - ECHO_QUOTE, ECHO_QUOTE); - fprintf(script, "echo %s \"%s/vacuumdb\" %s--all %s%s\n", ECHO_QUOTE, - new_cluster.bindir, user_specification, - /* Did we copy the free space files? */ - (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ? 
- "--analyze-only" : "--analyze", ECHO_QUOTE); - fprintf(script, "echo%s\n\n", ECHO_BLANK); - - fprintf(script, "\"%s/vacuumdb\" %s--all --analyze-in-stages\n", - new_cluster.bindir, user_specification); - /* Did we copy the free space files? */ - if (GET_MAJOR_VERSION(old_cluster.major_version) < 804) - fprintf(script, "\"%s/vacuumdb\" %s--all\n", new_cluster.bindir, - user_specification); - - fprintf(script, "echo%s\n\n", ECHO_BLANK); - fprintf(script, "echo %sDone%s\n", - ECHO_QUOTE, ECHO_QUOTE); - - fclose(script); - -#ifndef WIN32 - if (chmod(*analyze_script_file_name, S_IRWXU) != 0) - pg_fatal("Could not add execute permission to file \"%s\": %s\n", - *analyze_script_file_name, getErrorText(errno)); -#endif - - if (os_info.user_specified) - pg_free(user_specification); - - check_ok(); -} - - -/* - * create_script_for_old_cluster_deletion() - * - * This is particularly useful for tablespace deletion. - */ -void -create_script_for_old_cluster_deletion(char **deletion_script_file_name) -{ - FILE *script = NULL; - int tblnum; - char old_cluster_pgdata[MAXPGPATH]; - - *deletion_script_file_name = psprintf("%sdelete_old_cluster.%s", - SCRIPT_PREFIX, SCRIPT_EXT); - - /* - * Some users (oddly) create tablespaces inside the cluster data - * directory. We can't create a proper old cluster delete script in that - * case. - */ - strlcpy(old_cluster_pgdata, old_cluster.pgdata, MAXPGPATH); - canonicalize_path(old_cluster_pgdata); - for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) - { - char old_tablespace_dir[MAXPGPATH]; - - strlcpy(old_tablespace_dir, os_info.old_tablespaces[tblnum], MAXPGPATH); - canonicalize_path(old_tablespace_dir); - if (path_is_prefix_of_path(old_cluster_pgdata, old_tablespace_dir)) - { - /* Unlink file in case it is left over from a previous run. */ - unlink(*deletion_script_file_name); - pg_free(*deletion_script_file_name); - *deletion_script_file_name = NULL; - return; - } - } - - prep_status("Creating script to delete old cluster"); - - if ((script = fopen_priv(*deletion_script_file_name, "w")) == NULL) - pg_fatal("Could not open file \"%s\": %s\n", - *deletion_script_file_name, getErrorText(errno)); - -#ifndef WIN32 - /* add shebang header */ - fprintf(script, "#!/bin/sh\n\n"); -#endif - - /* delete old cluster's default tablespace */ - fprintf(script, RMDIR_CMD " \"%s\"\n", fix_path_separator(old_cluster.pgdata)); - - /* delete old cluster's alternate tablespaces */ - for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) - { - /* - * Do the old cluster's per-database directories share a directory - * with a new version-specific tablespace? - */ - if (strlen(old_cluster.tablespace_suffix) == 0) - { - /* delete per-database directories */ - int dbnum; - - fprintf(script, "\n"); - /* remove PG_VERSION? */ - if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) - fprintf(script, RM_CMD " %s%cPG_VERSION\n", - fix_path_separator(os_info.old_tablespaces[tblnum]), - PATH_SEPARATOR); - - for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) - fprintf(script, RMDIR_CMD " \"%s%c%d\"\n", - fix_path_separator(os_info.old_tablespaces[tblnum]), - PATH_SEPARATOR, old_cluster.dbarr.dbs[dbnum].db_oid); - } - else - { - char *suffix_path = pg_strdup(old_cluster.tablespace_suffix); - - /* - * Simply delete the tablespace directory, which might be ".old" - * or a version-specific subdirectory. 
- */ - fprintf(script, RMDIR_CMD " \"%s%s\"\n", - fix_path_separator(os_info.old_tablespaces[tblnum]), - fix_path_separator(suffix_path)); - pfree(suffix_path); - } - } - - fclose(script); - -#ifndef WIN32 - if (chmod(*deletion_script_file_name, S_IRWXU) != 0) - pg_fatal("Could not add execute permission to file \"%s\": %s\n", - *deletion_script_file_name, getErrorText(errno)); -#endif - - check_ok(); -} - - -/* - * check_is_install_user() - * - * Check we are the install user, and that the new cluster - * has no other users. - */ -static void -check_is_install_user(ClusterInfo *cluster) -{ - PGresult *res; - PGconn *conn = connectToServer(cluster, "template1"); - - prep_status("Checking database user is the install user"); - - /* Can't use pg_authid because only superusers can view it. */ - res = executeQueryOrDie(conn, - "SELECT rolsuper, oid " - "FROM pg_catalog.pg_roles " - "WHERE rolname = current_user"); - - /* - * We only allow the install user in the new cluster (see comment below) - * and we preserve pg_authid.oid, so this must be the install user in - * the old cluster too. - */ - if (PQntuples(res) != 1 || - atooid(PQgetvalue(res, 0, 1)) != BOOTSTRAP_SUPERUSERID) - pg_fatal("database user \"%s\" is not the install user\n", - os_info.user); - - PQclear(res); - - res = executeQueryOrDie(conn, - "SELECT COUNT(*) " - "FROM pg_catalog.pg_roles "); - - if (PQntuples(res) != 1) - pg_fatal("could not determine the number of users\n"); - - /* - * We only allow the install user in the new cluster because other defined - * users might match users defined in the old cluster and generate an - * error during pg_dump restore. - */ - if (cluster == &new_cluster && atooid(PQgetvalue(res, 0, 0)) != 1) - pg_fatal("Only the install user can be defined in the new cluster.\n"); - - PQclear(res); - - PQfinish(conn); - - check_ok(); -} - - -/* - * check_for_prepared_transactions() - * - * Make sure there are no prepared transactions because the storage format - * might have changed. - */ -static void -check_for_prepared_transactions(ClusterInfo *cluster) -{ - PGresult *res; - PGconn *conn = connectToServer(cluster, "template1"); - - prep_status("Checking for prepared transactions"); - - res = executeQueryOrDie(conn, - "SELECT * " - "FROM pg_catalog.pg_prepared_xacts"); - - if (PQntuples(res) != 0) - pg_fatal("The %s cluster contains prepared transactions\n", - CLUSTER_NAME(cluster)); - - PQclear(res); - - PQfinish(conn); - - check_ok(); -} - - -/* - * check_for_isn_and_int8_passing_mismatch() - * - * contrib/isn relies on data type int8, and in 8.4 int8 can now be passed - * by value. The schema dumps the CREATE TYPE PASSEDBYVALUE setting so - * it must match for the old and new servers. 
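 * (The check below compares pg_control's float8_pass_by_value flag as
 * a proxy, since float8 and int8 passing conventions are configured
 * together at build time.)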
- */ -static void -check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster) -{ - int dbnum; - FILE *script = NULL; - bool found = false; - char output_path[MAXPGPATH]; - - prep_status("Checking for contrib/isn with bigint-passing mismatch"); - - if (old_cluster.controldata.float8_pass_by_value == - new_cluster.controldata.float8_pass_by_value) - { - /* no mismatch */ - check_ok(); - return; - } - - snprintf(output_path, sizeof(output_path), - "contrib_isn_and_int8_pass_by_value.txt"); - - for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) - { - PGresult *res; - bool db_used = false; - int ntups; - int rowno; - int i_nspname, - i_proname; - DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(cluster, active_db->db_name); - - /* Find any functions coming from contrib/isn */ - res = executeQueryOrDie(conn, - "SELECT n.nspname, p.proname " - "FROM pg_catalog.pg_proc p, " - " pg_catalog.pg_namespace n " - "WHERE p.pronamespace = n.oid AND " - " p.probin = '$libdir/isn'"); - - ntups = PQntuples(res); - i_nspname = PQfnumber(res, "nspname"); - i_proname = PQfnumber(res, "proname"); - for (rowno = 0; rowno < ntups; rowno++) - { - found = true; - if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("Could not open file \"%s\": %s\n", - output_path, getErrorText(errno)); - if (!db_used) - { - fprintf(script, "Database: %s\n", active_db->db_name); - db_used = true; - } - fprintf(script, " %s.%s\n", - PQgetvalue(res, rowno, i_nspname), - PQgetvalue(res, rowno, i_proname)); - } - - PQclear(res); - - PQfinish(conn); - } - - if (script) - fclose(script); - - if (found) - { - pg_log(PG_REPORT, "fatal\n"); - pg_fatal("Your installation contains \"contrib/isn\" functions which rely on the\n" - "bigint data type. Your old and new clusters pass bigint values\n" - "differently so this cluster cannot currently be upgraded. You can\n" - "manually upgrade databases that use \"contrib/isn\" facilities and remove\n" - "\"contrib/isn\" from the old cluster and restart the upgrade. A list of\n" - "the problem functions is in the file:\n" - " %s\n\n", output_path); - } - else - check_ok(); -} - - -/* - * check_for_reg_data_type_usage() - * pg_upgrade only preserves these system values: - * pg_class.oid - * pg_type.oid - * pg_enum.oid - * - * Many of the reg* data types reference system catalog info that is - * not preserved, and hence these data types cannot be used in user - * tables upgraded by pg_upgrade. - */ -static void -check_for_reg_data_type_usage(ClusterInfo *cluster) -{ - int dbnum; - FILE *script = NULL; - bool found = false; - char output_path[MAXPGPATH]; - - prep_status("Checking for reg* system OID user data types"); - - snprintf(output_path, sizeof(output_path), "tables_using_reg.txt"); - - for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) - { - PGresult *res; - bool db_used = false; - int ntups; - int rowno; - int i_nspname, - i_relname, - i_attname; - DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(cluster, active_db->db_name); - - /* - * While several relkinds don't store any data, e.g. views, they can - * be used to define data types of other columns, so we check all - * relkinds. 
- */ - res = executeQueryOrDie(conn, - "SELECT n.nspname, c.relname, a.attname " - "FROM pg_catalog.pg_class c, " - " pg_catalog.pg_namespace n, " - " pg_catalog.pg_attribute a " - "WHERE c.oid = a.attrelid AND " - " NOT a.attisdropped AND " - " a.atttypid IN ( " - " 'pg_catalog.regproc'::pg_catalog.regtype, " - " 'pg_catalog.regprocedure'::pg_catalog.regtype, " - " 'pg_catalog.regoper'::pg_catalog.regtype, " - " 'pg_catalog.regoperator'::pg_catalog.regtype, " - /* regclass.oid is preserved, so 'regclass' is OK */ - /* regtype.oid is preserved, so 'regtype' is OK */ - " 'pg_catalog.regconfig'::pg_catalog.regtype, " - " 'pg_catalog.regdictionary'::pg_catalog.regtype) AND " - " c.relnamespace = n.oid AND " - " n.nspname NOT IN ('pg_catalog', 'information_schema')"); - - ntups = PQntuples(res); - i_nspname = PQfnumber(res, "nspname"); - i_relname = PQfnumber(res, "relname"); - i_attname = PQfnumber(res, "attname"); - for (rowno = 0; rowno < ntups; rowno++) - { - found = true; - if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("Could not open file \"%s\": %s\n", - output_path, getErrorText(errno)); - if (!db_used) - { - fprintf(script, "Database: %s\n", active_db->db_name); - db_used = true; - } - fprintf(script, " %s.%s.%s\n", - PQgetvalue(res, rowno, i_nspname), - PQgetvalue(res, rowno, i_relname), - PQgetvalue(res, rowno, i_attname)); - } - - PQclear(res); - - PQfinish(conn); - } - - if (script) - fclose(script); - - if (found) - { - pg_log(PG_REPORT, "fatal\n"); - pg_fatal("Your installation contains one of the reg* data types in user tables.\n" - "These data types reference system OIDs that are not preserved by\n" - "pg_upgrade, so this cluster cannot currently be upgraded. You can\n" - "remove the problem tables and restart the upgrade. A list of the problem\n" - "columns is in the file:\n" - " %s\n\n", output_path); - } - else - check_ok(); -} - - -/* - * check_for_jsonb_9_4_usage() - * - * JSONB changed its storage format during 9.4 beta, so check for it. - */ -static void -check_for_jsonb_9_4_usage(ClusterInfo *cluster) -{ - int dbnum; - FILE *script = NULL; - bool found = false; - char output_path[MAXPGPATH]; - - prep_status("Checking for JSONB user data types"); - - snprintf(output_path, sizeof(output_path), "tables_using_jsonb.txt"); - - for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) - { - PGresult *res; - bool db_used = false; - int ntups; - int rowno; - int i_nspname, - i_relname, - i_attname; - DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(cluster, active_db->db_name); - - /* - * While several relkinds don't store any data, e.g. views, they can - * be used to define data types of other columns, so we check all - * relkinds. 
- */ - res = executeQueryOrDie(conn, - "SELECT n.nspname, c.relname, a.attname " - "FROM pg_catalog.pg_class c, " - " pg_catalog.pg_namespace n, " - " pg_catalog.pg_attribute a " - "WHERE c.oid = a.attrelid AND " - " NOT a.attisdropped AND " - " a.atttypid = 'pg_catalog.jsonb'::pg_catalog.regtype AND " - " c.relnamespace = n.oid AND " - /* exclude possible orphaned temp tables */ - " n.nspname !~ '^pg_temp_' AND " - " n.nspname NOT IN ('pg_catalog', 'information_schema')"); - - ntups = PQntuples(res); - i_nspname = PQfnumber(res, "nspname"); - i_relname = PQfnumber(res, "relname"); - i_attname = PQfnumber(res, "attname"); - for (rowno = 0; rowno < ntups; rowno++) - { - found = true; - if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("Could not open file \"%s\": %s\n", - output_path, getErrorText(errno)); - if (!db_used) - { - fprintf(script, "Database: %s\n", active_db->db_name); - db_used = true; - } - fprintf(script, " %s.%s.%s\n", - PQgetvalue(res, rowno, i_nspname), - PQgetvalue(res, rowno, i_relname), - PQgetvalue(res, rowno, i_attname)); - } - - PQclear(res); - - PQfinish(conn); - } - - if (script) - fclose(script); - - if (found) - { - pg_log(PG_REPORT, "fatal\n"); - pg_fatal("Your installation contains one of the JSONB data types in user tables.\n" - "The internal format of JSONB changed during 9.4 beta so this cluster cannot currently\n" - "be upgraded. You can remove the problem tables and restart the upgrade. A list\n" - "of the problem columns is in the file:\n" - " %s\n\n", output_path); - } - else - check_ok(); -} - - -static void -get_bin_version(ClusterInfo *cluster) -{ - char cmd[MAXPGPATH], - cmd_output[MAX_STRING]; - FILE *output; - int pre_dot, - post_dot; - - snprintf(cmd, sizeof(cmd), "\"%s/pg_ctl\" --version", cluster->bindir); - - if ((output = popen(cmd, "r")) == NULL || - fgets(cmd_output, sizeof(cmd_output), output) == NULL) - pg_fatal("Could not get pg_ctl version data using %s: %s\n", - cmd, getErrorText(errno)); - - pclose(output); - - /* Remove trailing newline */ - if (strchr(cmd_output, '\n') != NULL) - *strchr(cmd_output, '\n') = '\0'; - - if (sscanf(cmd_output, "%*s %*s %d.%d", &pre_dot, &post_dot) != 2) - pg_fatal("could not get version from %s\n", cmd); - - cluster->bin_version = (pre_dot * 100 + post_dot) * 100; -} - - -/* - * get_canonical_locale_name - * - * Send the locale name to the system, and hope we get back a canonical - * version. This should match the backend's check_locale() function. - */ -static char * -get_canonical_locale_name(int category, const char *locale) -{ - char *save; - char *res; - - /* get the current setting, so we can restore it. */ - save = setlocale(category, NULL); - if (!save) - pg_fatal("failed to get the current locale\n"); - - /* 'save' may be pointing at a modifiable scratch variable, so copy it. */ - save = pg_strdup(save); - - /* set the locale with setlocale, to see if it accepts it. */ - res = setlocale(category, locale); - - if (!res) - pg_fatal("failed to get system locale name for \"%s\"\n", locale); - - res = pg_strdup(res); - - /* restore old value. 
*/
-	if (!setlocale(category, save))
-		pg_fatal("failed to restore old locale \"%s\"\n", save);
-
-	pg_free(save);
-
-	return res;
-}
diff --git a/contrib/pg_upgrade/controldata.c b/contrib/pg_upgrade/controldata.c
deleted file mode 100644
index 0e70b6f80b4..00000000000
--- a/contrib/pg_upgrade/controldata.c
+++ /dev/null
@@ -1,606 +0,0 @@
-/*
- *	controldata.c
- *
- *	controldata functions
- *
- *	Copyright (c) 2010-2015, PostgreSQL Global Development Group
- *	contrib/pg_upgrade/controldata.c
- */
-
-#include "postgres_fe.h"
-
-#include "pg_upgrade.h"
-
-#include <ctype.h>
-
-/*
- * get_control_data()
- *
- * gets pg_control information in "ctrl".  Assumes that bindir and
- * datadir are valid absolute paths to postgresql bin and pgdata
- * directories respectively *and* pg_resetxlog is version compatible
- * with datadir.  The main purpose of this function is to get pg_control
- * data in a version independent manner.
- *
- * The approach taken here is to invoke pg_resetxlog with -n option
- * and then pipe its output.  With a little string parsing we get the
- * pg_control data.  pg_resetxlog cannot be run while the server is running
- * so we use pg_controldata;  pg_controldata doesn't provide all the fields
- * we need to actually perform the upgrade, but it provides enough for
- * check mode.  We do not implement pg_resetxlog -n because it is hard to
- * return valid xid data for a running server.
- */
-void
-get_control_data(ClusterInfo *cluster, bool live_check)
-{
-	char		cmd[MAXPGPATH];
-	char		bufin[MAX_STRING];
-	FILE	   *output;
-	char	   *p;
-	bool		got_xid = false;
-	bool		got_oid = false;
-	bool		got_nextxlogfile = false;
-	bool		got_multi = false;
-	bool		got_mxoff = false;
-	bool		got_oldestmulti = false;
-	bool		got_log_id = false;
-	bool		got_log_seg = false;
-	bool		got_tli = false;
-	bool		got_align = false;
-	bool		got_blocksz = false;
-	bool		got_largesz = false;
-	bool		got_walsz = false;
-	bool		got_walseg = false;
-	bool		got_ident = false;
-	bool		got_index = false;
-	bool		got_toast = false;
-	bool		got_large_object = false;
-	bool		got_date_is_int = false;
-	bool		got_float8_pass_by_value = false;
-	bool		got_data_checksum_version = false;
-	char	   *lc_collate = NULL;
-	char	   *lc_ctype = NULL;
-	char	   *lc_monetary = NULL;
-	char	   *lc_numeric = NULL;
-	char	   *lc_time = NULL;
-	char	   *lang = NULL;
-	char	   *language = NULL;
-	char	   *lc_all = NULL;
-	char	   *lc_messages = NULL;
-	uint32		logid = 0;
-	uint32		segno = 0;
-	uint32		tli = 0;
-
-
-	/*
-	 * Because we test the pg_resetxlog output as strings, it has to be in
-	 * English.  Copied from pg_regress.c.
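	 * (The current settings are saved first and restored via pg_putenv(),
	 * then freed, once the output has been parsed.)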
- */ - if (getenv("LC_COLLATE")) - lc_collate = pg_strdup(getenv("LC_COLLATE")); - if (getenv("LC_CTYPE")) - lc_ctype = pg_strdup(getenv("LC_CTYPE")); - if (getenv("LC_MONETARY")) - lc_monetary = pg_strdup(getenv("LC_MONETARY")); - if (getenv("LC_NUMERIC")) - lc_numeric = pg_strdup(getenv("LC_NUMERIC")); - if (getenv("LC_TIME")) - lc_time = pg_strdup(getenv("LC_TIME")); - if (getenv("LANG")) - lang = pg_strdup(getenv("LANG")); - if (getenv("LANGUAGE")) - language = pg_strdup(getenv("LANGUAGE")); - if (getenv("LC_ALL")) - lc_all = pg_strdup(getenv("LC_ALL")); - if (getenv("LC_MESSAGES")) - lc_messages = pg_strdup(getenv("LC_MESSAGES")); - - pg_putenv("LC_COLLATE", NULL); - pg_putenv("LC_CTYPE", NULL); - pg_putenv("LC_MONETARY", NULL); - pg_putenv("LC_NUMERIC", NULL); - pg_putenv("LC_TIME", NULL); - pg_putenv("LANG", -#ifndef WIN32 - NULL); -#else - /* On Windows the default locale cannot be English, so force it */ - "en"); -#endif - pg_putenv("LANGUAGE", NULL); - pg_putenv("LC_ALL", NULL); - pg_putenv("LC_MESSAGES", "C"); - - snprintf(cmd, sizeof(cmd), "\"%s/%s \"%s\"", - cluster->bindir, - live_check ? "pg_controldata\"" : "pg_resetxlog\" -n", - cluster->pgdata); - fflush(stdout); - fflush(stderr); - - if ((output = popen(cmd, "r")) == NULL) - pg_fatal("Could not get control data using %s: %s\n", - cmd, getErrorText(errno)); - - /* Only in <= 9.2 */ - if (GET_MAJOR_VERSION(cluster->major_version) <= 902) - { - cluster->controldata.data_checksum_version = 0; - got_data_checksum_version = true; - } - - /* we have the result of cmd in "output". so parse it line by line now */ - while (fgets(bufin, sizeof(bufin), output)) - { - pg_log(PG_VERBOSE, "%s", bufin); - - if ((p = strstr(bufin, "pg_control version number:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: pg_resetxlog problem\n", __LINE__); - - p++; /* remove ':' char */ - cluster->controldata.ctrl_ver = str2uint(p); - } - else if ((p = strstr(bufin, "Catalog version number:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - cluster->controldata.cat_ver = str2uint(p); - } - else if ((p = strstr(bufin, "First log segment after reset:")) != NULL) - { - /* Skip the colon and any whitespace after it */ - p = strchr(p, ':'); - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - p = strpbrk(p, "01234567890ABCDEF"); - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - /* Make sure it looks like a valid WAL file name */ - if (strspn(p, "0123456789ABCDEF") != 24) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - strlcpy(cluster->controldata.nextxlogfile, p, 25); - got_nextxlogfile = true; - } - else if ((p = strstr(bufin, "First log file ID after reset:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - logid = str2uint(p); - got_log_id = true; - } - else if ((p = strstr(bufin, "First log file segment after reset:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - segno = str2uint(p); - got_log_seg = true; - } - else if ((p = strstr(bufin, "Latest checkpoint's TimeLineID:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - 
pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - cluster->controldata.chkpnt_tli = str2uint(p); - got_tli = true; - } - else if ((p = strstr(bufin, "Latest checkpoint's NextXID:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - cluster->controldata.chkpnt_nxtepoch = str2uint(p); - - p = strchr(p, '/'); - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove '/' char */ - cluster->controldata.chkpnt_nxtxid = str2uint(p); - got_xid = true; - } - else if ((p = strstr(bufin, "Latest checkpoint's NextOID:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - cluster->controldata.chkpnt_nxtoid = str2uint(p); - got_oid = true; - } - else if ((p = strstr(bufin, "Latest checkpoint's NextMultiXactId:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - cluster->controldata.chkpnt_nxtmulti = str2uint(p); - got_multi = true; - } - else if ((p = strstr(bufin, "Latest checkpoint's oldestMultiXid:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - cluster->controldata.chkpnt_oldstMulti = str2uint(p); - got_oldestmulti = true; - } - else if ((p = strstr(bufin, "Latest checkpoint's NextMultiOffset:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - cluster->controldata.chkpnt_nxtmxoff = str2uint(p); - got_mxoff = true; - } - else if ((p = strstr(bufin, "Maximum data alignment:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - cluster->controldata.align = str2uint(p); - got_align = true; - } - else if ((p = strstr(bufin, "Database block size:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - cluster->controldata.blocksz = str2uint(p); - got_blocksz = true; - } - else if ((p = strstr(bufin, "Blocks per segment of large relation:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - cluster->controldata.largesz = str2uint(p); - got_largesz = true; - } - else if ((p = strstr(bufin, "WAL block size:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - cluster->controldata.walsz = str2uint(p); - got_walsz = true; - } - else if ((p = strstr(bufin, "Bytes per WAL segment:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - cluster->controldata.walseg = str2uint(p); - got_walseg = true; - } - else if ((p = strstr(bufin, "Maximum length of identifiers:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval 
problem\n", __LINE__); - - p++; /* remove ':' char */ - cluster->controldata.ident = str2uint(p); - got_ident = true; - } - else if ((p = strstr(bufin, "Maximum columns in an index:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - cluster->controldata.index = str2uint(p); - got_index = true; - } - else if ((p = strstr(bufin, "Maximum size of a TOAST chunk:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - cluster->controldata.toast = str2uint(p); - got_toast = true; - } - else if ((p = strstr(bufin, "Size of a large-object chunk:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - cluster->controldata.large_object = str2uint(p); - got_large_object = true; - } - else if ((p = strstr(bufin, "Date/time type storage:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - cluster->controldata.date_is_int = strstr(p, "64-bit integers") != NULL; - got_date_is_int = true; - } - else if ((p = strstr(bufin, "Float8 argument passing:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - /* used later for contrib check */ - cluster->controldata.float8_pass_by_value = strstr(p, "by value") != NULL; - got_float8_pass_by_value = true; - } - else if ((p = strstr(bufin, "checksum")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* remove ':' char */ - /* used later for contrib check */ - cluster->controldata.data_checksum_version = str2uint(p); - got_data_checksum_version = true; - } - } - - if (output) - pclose(output); - - /* - * Restore environment variables - */ - pg_putenv("LC_COLLATE", lc_collate); - pg_putenv("LC_CTYPE", lc_ctype); - pg_putenv("LC_MONETARY", lc_monetary); - pg_putenv("LC_NUMERIC", lc_numeric); - pg_putenv("LC_TIME", lc_time); - pg_putenv("LANG", lang); - pg_putenv("LANGUAGE", language); - pg_putenv("LC_ALL", lc_all); - pg_putenv("LC_MESSAGES", lc_messages); - - pg_free(lc_collate); - pg_free(lc_ctype); - pg_free(lc_monetary); - pg_free(lc_numeric); - pg_free(lc_time); - pg_free(lang); - pg_free(language); - pg_free(lc_all); - pg_free(lc_messages); - - /* - * Before 9.3, pg_resetxlog reported the xlogid and segno of the first log - * file after reset as separate lines. Starting with 9.3, it reports the - * WAL file name. If the old cluster is older than 9.3, we construct the - * WAL file name from the xlogid and segno. 
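 * (The reconstructed name is three 8-hex-digit fields: timeline ID,
 * xlogid, and segno, matching the snprintf below.)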
- */ - if (GET_MAJOR_VERSION(cluster->major_version) <= 902) - { - if (got_log_id && got_log_seg) - { - snprintf(cluster->controldata.nextxlogfile, 25, "%08X%08X%08X", - tli, logid, segno); - got_nextxlogfile = true; - } - } - - /* verify that we got all the mandatory pg_control data */ - if (!got_xid || !got_oid || - !got_multi || !got_mxoff || - (!got_oldestmulti && - cluster->controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER) || - (!live_check && !got_nextxlogfile) || - !got_tli || - !got_align || !got_blocksz || !got_largesz || !got_walsz || - !got_walseg || !got_ident || !got_index || !got_toast || - (!got_large_object && - cluster->controldata.ctrl_ver >= LARGE_OBJECT_SIZE_PG_CONTROL_VER) || - !got_date_is_int || !got_float8_pass_by_value || !got_data_checksum_version) - { - pg_log(PG_REPORT, - "The %s cluster lacks some required control information:\n", - CLUSTER_NAME(cluster)); - - if (!got_xid) - pg_log(PG_REPORT, " checkpoint next XID\n"); - - if (!got_oid) - pg_log(PG_REPORT, " latest checkpoint next OID\n"); - - if (!got_multi) - pg_log(PG_REPORT, " latest checkpoint next MultiXactId\n"); - - if (!got_mxoff) - pg_log(PG_REPORT, " latest checkpoint next MultiXactOffset\n"); - - if (!got_oldestmulti && - cluster->controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER) - pg_log(PG_REPORT, " latest checkpoint oldest MultiXactId\n"); - - if (!live_check && !got_nextxlogfile) - pg_log(PG_REPORT, " first WAL segment after reset\n"); - - if (!got_tli) - pg_log(PG_REPORT, " latest checkpoint timeline ID\n"); - - if (!got_align) - pg_log(PG_REPORT, " maximum alignment\n"); - - if (!got_blocksz) - pg_log(PG_REPORT, " block size\n"); - - if (!got_largesz) - pg_log(PG_REPORT, " large relation segment size\n"); - - if (!got_walsz) - pg_log(PG_REPORT, " WAL block size\n"); - - if (!got_walseg) - pg_log(PG_REPORT, " WAL segment size\n"); - - if (!got_ident) - pg_log(PG_REPORT, " maximum identifier length\n"); - - if (!got_index) - pg_log(PG_REPORT, " maximum number of indexed columns\n"); - - if (!got_toast) - pg_log(PG_REPORT, " maximum TOAST chunk size\n"); - - if (!got_large_object && - cluster->controldata.ctrl_ver >= LARGE_OBJECT_SIZE_PG_CONTROL_VER) - pg_log(PG_REPORT, " large-object chunk size\n"); - - if (!got_date_is_int) - pg_log(PG_REPORT, " dates/times are integers?\n"); - - if (!got_float8_pass_by_value) - pg_log(PG_REPORT, " float8 argument passing method\n"); - - /* value added in Postgres 9.3 */ - if (!got_data_checksum_version) - pg_log(PG_REPORT, " data checksum version\n"); - - pg_fatal("Cannot continue without required control information, terminating\n"); - } -} - - -/* - * check_control_data() - * - * check to make sure the control data settings are compatible - */ -void -check_control_data(ControlData *oldctrl, - ControlData *newctrl) -{ - if (oldctrl->align == 0 || oldctrl->align != newctrl->align) - pg_fatal("old and new pg_controldata alignments are invalid or do not match\n" - "Likely one cluster is a 32-bit install, the other 64-bit\n"); - - if (oldctrl->blocksz == 0 || oldctrl->blocksz != newctrl->blocksz) - pg_fatal("old and new pg_controldata block sizes are invalid or do not match\n"); - - if (oldctrl->largesz == 0 || oldctrl->largesz != newctrl->largesz) - pg_fatal("old and new pg_controldata maximum relation segment sizes are invalid or do not match\n"); - - if (oldctrl->walsz == 0 || oldctrl->walsz != newctrl->walsz) - pg_fatal("old and new pg_controldata WAL block sizes are invalid or do not match\n"); - - if (oldctrl->walseg == 0 || oldctrl->walseg !=
newctrl->walseg) - pg_fatal("old and new pg_controldata WAL segment sizes are invalid or do not match\n"); - - if (oldctrl->ident == 0 || oldctrl->ident != newctrl->ident) - pg_fatal("old and new pg_controldata maximum identifier lengths are invalid or do not match\n"); - - if (oldctrl->index == 0 || oldctrl->index != newctrl->index) - pg_fatal("old and new pg_controldata maximum indexed columns are invalid or do not match\n"); - - if (oldctrl->toast == 0 || oldctrl->toast != newctrl->toast) - pg_fatal("old and new pg_controldata maximum TOAST chunk sizes are invalid or do not match\n"); - - /* large_object added in 9.5, so it might not exist in the old cluster */ - if (oldctrl->large_object != 0 && - oldctrl->large_object != newctrl->large_object) - pg_fatal("old and new pg_controldata large-object chunk sizes are invalid or do not match\n"); - - if (oldctrl->date_is_int != newctrl->date_is_int) - pg_fatal("old and new pg_controldata date/time storage types do not match\n"); - - /* - * We might eventually allow upgrades from checksum to no-checksum - * clusters. - */ - if (oldctrl->data_checksum_version == 0 && - newctrl->data_checksum_version != 0) - pg_fatal("old cluster does not use data checksums but the new one does\n"); - else if (oldctrl->data_checksum_version != 0 && - newctrl->data_checksum_version == 0) - pg_fatal("old cluster uses data checksums but the new one does not\n"); - else if (oldctrl->data_checksum_version != newctrl->data_checksum_version) - pg_fatal("old and new cluster pg_controldata checksum versions do not match\n"); -} - - -void -disable_old_cluster(void) -{ - char old_path[MAXPGPATH], - new_path[MAXPGPATH]; - - /* rename pg_control so old server cannot be accidentally started */ - prep_status("Adding \".old\" suffix to old global/pg_control"); - - snprintf(old_path, sizeof(old_path), "%s/global/pg_control", old_cluster.pgdata); - snprintf(new_path, sizeof(new_path), "%s/global/pg_control.old", old_cluster.pgdata); - if (pg_mv_file(old_path, new_path) != 0) - pg_fatal("Unable to rename %s to %s.\n", old_path, new_path); - check_ok(); - - pg_log(PG_REPORT, "\n" - "If you want to start the old cluster, you will need to remove\n" - "the \".old\" suffix from %s/global/pg_control.old.\n" - "Because \"link\" mode was used, the old cluster cannot be safely\n" - "started once the new cluster has been started.\n\n", old_cluster.pgdata); -} diff --git a/contrib/pg_upgrade/dump.c b/contrib/pg_upgrade/dump.c deleted file mode 100644 index 906e85f2b53..00000000000 --- a/contrib/pg_upgrade/dump.c +++ /dev/null @@ -1,139 +0,0 @@ -/* - * dump.c - * - * dump functions - * - * Copyright (c) 2010-2015, PostgreSQL Global Development Group - * contrib/pg_upgrade/dump.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include -#include "catalog/binary_upgrade.h" - - -void -generate_old_dump(void) -{ - int dbnum; - mode_t old_umask; - - prep_status("Creating dump of global objects"); - - /* run new pg_dumpall binary for globals */ - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/pg_dumpall\" %s --globals-only --quote-all-identifiers " - "--binary-upgrade %s -f %s", - new_cluster.bindir, cluster_conn_opts(&old_cluster), - log_opts.verbose ? "--verbose" : "", - GLOBALS_DUMP_FILE); - check_ok(); - - prep_status("Creating dump of database schemas\n"); - - /* - * Set umask for this function, all functions it calls, and all - * subprocesses/threads it creates. We can't use fopen_priv() as Windows - * uses threads and umask is process-global. 
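 *
 * A numeric sketch (an annotation, not part of the original comment):
 * masking the group/other bits is umask 0077, so every file created
 * while it is in effect comes out mode 0600 (rw-------):
 *
 *     mode_t prev = umask(S_IRWXG | S_IRWXO);   /* 0070 | 0007 == 0077 */
 *     /* ... dump files now created as 0666 & ~0077 == 0600 ... */
 *     umask(prev);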
- */ - old_umask = umask(S_IRWXG | S_IRWXO); - - /* create per-db dump files */ - for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) - { - char sql_file_name[MAXPGPATH], - log_file_name[MAXPGPATH]; - DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum]; - - pg_log(PG_STATUS, "%s", old_db->db_name); - snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid); - snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid); - - parallel_exec_prog(log_file_name, NULL, - "\"%s/pg_dump\" %s --schema-only --quote-all-identifiers " - "--binary-upgrade --format=custom %s --file=\"%s\" \"%s\"", - new_cluster.bindir, cluster_conn_opts(&old_cluster), - log_opts.verbose ? "--verbose" : "", - sql_file_name, old_db->db_name); - } - - /* reap all children */ - while (reap_child(true) == true) - ; - - umask(old_umask); - - end_progress_output(); - check_ok(); -} - - -/* - * It is possible for there to be a mismatch in the need for TOAST tables - * between the old and new servers, e.g. some pre-9.1 tables didn't need - * TOAST tables but will need them in 9.1+. (There are also opposite cases, - * but these are handled by setting binary_upgrade_next_toast_pg_class_oid.) - * - * We can't allow the TOAST table to be created by pg_dump with a - * pg_dump-assigned oid because it might conflict with a later table that - * uses that oid, causing a "file exists" error for pg_class conflicts, and - * a "duplicate oid" error for pg_type conflicts. (TOAST tables need pg_type - * entries.) - * - * Therefore, a backend in binary-upgrade mode will not create a TOAST - * table unless an OID was passed in via pg_upgrade_support functions. - * This function is called after the restore and uses ALTER TABLE to - * auto-create any needed TOAST tables which will not conflict with - * restored oids.
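 *
 * A sketch of the statement pair issued per affected table below (the
 * schema, table name, and OID are illustrative, not from this patch):
 *
 *     SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid(
 *                '16439'::pg_catalog.oid);
 *     ALTER TABLE "public"."t" RESET (binary_upgrade_dummy_option);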
- */ -void -optionally_create_toast_tables(void) -{ - int dbnum; - - prep_status("Creating newly-required TOAST tables"); - - for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++) - { - PGresult *res; - int ntups; - int rowno; - int i_nspname, - i_relname; - DbInfo *active_db = &new_cluster.dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(&new_cluster, active_db->db_name); - - res = executeQueryOrDie(conn, - "SELECT n.nspname, c.relname " - "FROM pg_catalog.pg_class c, " - " pg_catalog.pg_namespace n " - "WHERE c.relnamespace = n.oid AND " - " n.nspname NOT IN ('pg_catalog', 'information_schema') AND " - "c.relkind IN ('r', 'm') AND " - "c.reltoastrelid = 0"); - - ntups = PQntuples(res); - i_nspname = PQfnumber(res, "nspname"); - i_relname = PQfnumber(res, "relname"); - for (rowno = 0; rowno < ntups; rowno++) - { - /* enable auto-oid-numbered TOAST creation if needed */ - PQclear(executeQueryOrDie(conn, "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%d'::pg_catalog.oid);", - OPTIONALLY_CREATE_TOAST_OID)); - - /* dummy command that also triggers check for required TOAST table */ - PQclear(executeQueryOrDie(conn, "ALTER TABLE %s.%s RESET (binary_upgrade_dummy_option);", - quote_identifier(PQgetvalue(res, rowno, i_nspname)), - quote_identifier(PQgetvalue(res, rowno, i_relname)))); - } - - PQclear(res); - - PQfinish(conn); - } - - check_ok(); -} diff --git a/contrib/pg_upgrade/exec.c b/contrib/pg_upgrade/exec.c deleted file mode 100644 index bf87419b187..00000000000 --- a/contrib/pg_upgrade/exec.c +++ /dev/null @@ -1,379 +0,0 @@ -/* - * exec.c - * - * execution functions - * - * Copyright (c) 2010-2015, PostgreSQL Global Development Group - * contrib/pg_upgrade/exec.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include -#include - -static void check_data_dir(const char *pg_data); -static void check_bin_dir(ClusterInfo *cluster); -static void validate_exec(const char *dir, const char *cmdName); - -#ifdef WIN32 -static int win32_check_directory_write_permissions(void); -#endif - - -/* - * exec_prog() - * Execute an external program with stdout/stderr redirected, and report - * errors - * - * Formats a command from the given argument list, logs it to the log file, - * and attempts to execute that command. If the command executes - * successfully, exec_prog() returns true. - * - * If the command fails, an error message is saved to the specified log_file. - * If throw_error is true, this raises a PG_FATAL error and pg_upgrade - * terminates; otherwise it is just reported as PG_REPORT and exec_prog() - * returns false. - * - * The code requires it be called first from the primary thread on Windows. - */ -bool -exec_prog(const char *log_file, const char *opt_log_file, - bool throw_error, const char *fmt,...) 
-{ - int result = 0; - int written; - -#define MAXCMDLEN (2 * MAXPGPATH) - char cmd[MAXCMDLEN]; - FILE *log; - va_list ap; - -#ifdef WIN32 - static DWORD mainThreadId = 0; - - /* We assume we are called from the primary thread first */ - if (mainThreadId == 0) - mainThreadId = GetCurrentThreadId(); -#endif - - written = 0; - va_start(ap, fmt); - written += vsnprintf(cmd + written, MAXCMDLEN - written, fmt, ap); - va_end(ap); - if (written >= MAXCMDLEN) - pg_fatal("command too long\n"); - written += snprintf(cmd + written, MAXCMDLEN - written, - " >> \"%s\" 2>&1", log_file); - if (written >= MAXCMDLEN) - pg_fatal("command too long\n"); - - pg_log(PG_VERBOSE, "%s\n", cmd); - -#ifdef WIN32 - - /* - * For some reason, Windows issues a file-in-use error if we write data to - * the log file from a non-primary thread just before we create a - * subprocess that also writes to the same log file. One fix is to sleep - * for 100ms. A cleaner fix is to write to the log file _after_ the - * subprocess has completed, so we do this only when writing from a - * non-primary thread. fflush(), running system() twice, and pre-creating - * the file do not seem to help. - */ - if (mainThreadId != GetCurrentThreadId()) - result = system(cmd); -#endif - - log = fopen(log_file, "a"); - -#ifdef WIN32 - { - /* - * "pg_ctl -w stop" might have reported that the server has stopped - * because the postmaster.pid file has been removed, but "pg_ctl -w - * start" might still be in the process of closing and might still be - * holding its stdout and -l log file descriptors open. Therefore, - * try to open the log file a few more times. - */ - int iter; - - for (iter = 0; iter < 4 && log == NULL; iter++) - { - pg_usleep(1000000); /* 1 sec */ - log = fopen(log_file, "a"); - } - } -#endif - - if (log == NULL) - pg_fatal("cannot write to log file %s\n", log_file); - -#ifdef WIN32 - /* Are we printing "command:" before its output? */ - if (mainThreadId == GetCurrentThreadId()) - fprintf(log, "\n\n"); -#endif - fprintf(log, "command: %s\n", cmd); -#ifdef WIN32 - /* Are we printing "command:" after its output? */ - if (mainThreadId != GetCurrentThreadId()) - fprintf(log, "\n\n"); -#endif - - /* - * In Windows, we must close the log file at this point so the file is not - * open while the command is running, or we get a share violation. - */ - fclose(log); - -#ifdef WIN32 - /* see comment above */ - if (mainThreadId == GetCurrentThreadId()) -#endif - result = system(cmd); - - if (result != 0) - { - /* we might be partway through a progress status line, so go to the next line */ - report_status(PG_REPORT, "\n*failure*"); - fflush(stdout); - - pg_log(PG_VERBOSE, "There were problems executing \"%s\"\n", cmd); - if (opt_log_file) - pg_log(throw_error ? PG_FATAL : PG_REPORT, - "Consult the last few lines of \"%s\" or \"%s\" for\n" - "the probable cause of the failure.\n", - log_file, opt_log_file); - else - pg_log(throw_error ? PG_FATAL : PG_REPORT, - "Consult the last few lines of \"%s\" for\n" - "the probable cause of the failure.\n", - log_file); - } - -#ifndef WIN32 - - /* - * We can't do this on Windows because it will keep the "pg_ctl start" - * output filename open until the server stops, so we do the \n\n above on - * that platform. We use a unique filename for "pg_ctl start" that is - * never reused while the server is running, so it works fine. We could - * log these commands to a third file, but that just adds complexity.
- */ - if ((log = fopen(log_file, "a")) == NULL) - pg_fatal("cannot write to log file %s\n", log_file); - fprintf(log, "\n\n"); - fclose(log); -#endif - - return result == 0; -} - - -/* - * pid_lock_file_exists() - * - * Checks whether the postmaster.pid file exists. - */ -bool -pid_lock_file_exists(const char *datadir) -{ - char path[MAXPGPATH]; - int fd; - - snprintf(path, sizeof(path), "%s/postmaster.pid", datadir); - - if ((fd = open(path, O_RDONLY, 0)) < 0) - { - /* ENOTDIR means we will throw a more useful error later */ - if (errno != ENOENT && errno != ENOTDIR) - pg_fatal("could not open file \"%s\" for reading: %s\n", - path, getErrorText(errno)); - - return false; - } - - close(fd); - return true; -} - - -/* - * verify_directories() - * - * does all the hectic work of verifying directories and executables - * of old and new server. - * - * NOTE: May update the values of all parameters - */ -void -verify_directories(void) -{ -#ifndef WIN32 - if (access(".", R_OK | W_OK | X_OK) != 0) -#else - if (win32_check_directory_write_permissions() != 0) -#endif - pg_fatal("You must have read and write access in the current directory.\n"); - - check_bin_dir(&old_cluster); - check_data_dir(old_cluster.pgdata); - check_bin_dir(&new_cluster); - check_data_dir(new_cluster.pgdata); -} - - -#ifdef WIN32 -/* - * win32_check_directory_write_permissions() - * - * access() on WIN32 can't check directory permissions, so we have to - * optionally create, then delete a file to check. - * http://msdn.microsoft.com/en-us/library/1w06ktdy%28v=vs.80%29.aspx - */ -static int -win32_check_directory_write_permissions(void) -{ - int fd; - - /* - * We open a file we would normally create anyway. We do this even in - * 'check' mode, which isn't ideal, but this is the best we can do. - */ - if ((fd = open(GLOBALS_DUMP_FILE, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR)) < 0) - return -1; - close(fd); - - return unlink(GLOBALS_DUMP_FILE); -} -#endif - - -/* - * check_data_dir() - * - * This function validates the given cluster directory - we search for a - * small set of subdirectories that we expect to find in a valid $PGDATA - * directory. If any of the subdirectories are missing (or secured against - * us) we display an error message and exit() - * - */ -static void -check_data_dir(const char *pg_data) -{ - char subDirName[MAXPGPATH]; - int subdirnum; - - /* start check with top-most directory */ - const char *requiredSubdirs[] = {"", "base", "global", "pg_clog", - "pg_multixact", "pg_subtrans", "pg_tblspc", "pg_twophase", - "pg_xlog"}; - - for (subdirnum = 0; - subdirnum < sizeof(requiredSubdirs) / sizeof(requiredSubdirs[0]); - ++subdirnum) - { - struct stat statBuf; - - snprintf(subDirName, sizeof(subDirName), "%s%s%s", pg_data, - /* Win32 can't stat() a directory with a trailing slash. */ - *requiredSubdirs[subdirnum] ? "/" : "", - requiredSubdirs[subdirnum]); - - if (stat(subDirName, &statBuf) != 0) - report_status(PG_FATAL, "check for \"%s\" failed: %s\n", - subDirName, getErrorText(errno)); - else if (!S_ISDIR(statBuf.st_mode)) - report_status(PG_FATAL, "%s is not a directory\n", - subDirName); - } -} - - -/* - * check_bin_dir() - * - * This function searches for the executables that we expect to find - * in the binaries directory. If we find that a required executable - * is missing (or secured against us), we display an error message and - * exit(). 
- */ -static void -check_bin_dir(ClusterInfo *cluster) -{ - struct stat statBuf; - - /* check bindir */ - if (stat(cluster->bindir, &statBuf) != 0) - report_status(PG_FATAL, "check for \"%s\" failed: %s\n", - cluster->bindir, getErrorText(errno)); - else if (!S_ISDIR(statBuf.st_mode)) - report_status(PG_FATAL, "%s is not a directory\n", - cluster->bindir); - - validate_exec(cluster->bindir, "postgres"); - validate_exec(cluster->bindir, "pg_ctl"); - validate_exec(cluster->bindir, "pg_resetxlog"); - if (cluster == &new_cluster) - { - /* these are only needed in the new cluster */ - validate_exec(cluster->bindir, "psql"); - validate_exec(cluster->bindir, "pg_dump"); - validate_exec(cluster->bindir, "pg_dumpall"); - } -} - - -/* - * validate_exec() - * - * validate "path" as an executable file - */ -static void -validate_exec(const char *dir, const char *cmdName) -{ - char path[MAXPGPATH]; - struct stat buf; - - snprintf(path, sizeof(path), "%s/%s", dir, cmdName); - -#ifdef WIN32 - /* Windows requires a .exe suffix for stat() */ - if (strlen(path) <= strlen(EXE_EXT) || - pg_strcasecmp(path + strlen(path) - strlen(EXE_EXT), EXE_EXT) != 0) - strlcat(path, EXE_EXT, sizeof(path)); -#endif - - /* - * Ensure that the file exists and is a regular file. - */ - if (stat(path, &buf) < 0) - pg_fatal("check for \"%s\" failed: %s\n", - path, getErrorText(errno)); - else if (!S_ISREG(buf.st_mode)) - pg_fatal("check for \"%s\" failed: not an executable file\n", - path); - - /* - * Ensure that the file is both executable and readable (required for - * dynamic loading). - */ -#ifndef WIN32 - if (access(path, R_OK) != 0) -#else - if ((buf.st_mode & S_IRUSR) == 0) -#endif - pg_fatal("check for \"%s\" failed: cannot read file (permission denied)\n", - path); - -#ifndef WIN32 - if (access(path, X_OK) != 0) -#else - if ((buf.st_mode & S_IXUSR) == 0) -#endif - pg_fatal("check for \"%s\" failed: cannot execute (permission denied)\n", - path); -} diff --git a/contrib/pg_upgrade/file.c b/contrib/pg_upgrade/file.c deleted file mode 100644 index 5a8d17ae0f4..00000000000 --- a/contrib/pg_upgrade/file.c +++ /dev/null @@ -1,250 +0,0 @@ -/* - * file.c - * - * file system operations - * - * Copyright (c) 2010-2015, PostgreSQL Global Development Group - * contrib/pg_upgrade/file.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include - - - -#ifndef WIN32 -static int copy_file(const char *fromfile, const char *tofile, bool force); -#else -static int win32_pghardlink(const char *src, const char *dst); -#endif - - -/* - * copyAndUpdateFile() - * - * Copies a relation file from src to dst. If pageConverter is non-NULL, this function - * uses that pageConverter to do a page-by-page conversion. - */ -const char * -copyAndUpdateFile(pageCnvCtx *pageConverter, - const char *src, const char *dst, bool force) -{ - if (pageConverter == NULL) - { - if (pg_copy_file(src, dst, force) == -1) - return getErrorText(errno); - else - return NULL; - } - else - { - /* - * We have a pageConverter object - that implies that the - * PageLayoutVersion differs between the two clusters so we have to - * perform a page-by-page conversion. - * - * If the pageConverter can convert the entire file at once, invoke - * that plugin function, otherwise, read each page in the relation - * file and call the convertPage plugin function. 
- */ - -#ifdef PAGE_CONVERSION - if (pageConverter->convertFile) - return pageConverter->convertFile(pageConverter->pluginData, - dst, src); - else -#endif - { - int src_fd; - int dstfd; - char buf[BLCKSZ]; - ssize_t bytesRead; - const char *msg = NULL; - - if ((src_fd = open(src, O_RDONLY, 0)) < 0) - return "could not open source file"; - - if ((dstfd = open(dst, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR)) < 0) - { - close(src_fd); - return "could not create destination file"; - } - - while ((bytesRead = read(src_fd, buf, BLCKSZ)) == BLCKSZ) - { -#ifdef PAGE_CONVERSION - if ((msg = pageConverter->convertPage(pageConverter->pluginData, buf, buf)) != NULL) - break; -#endif - if (write(dstfd, buf, BLCKSZ) != BLCKSZ) - { - msg = "could not write new page to destination"; - break; - } - } - - close(src_fd); - close(dstfd); - - if (msg) - return msg; - else if (bytesRead != 0) - return "found partial page in source file"; - else - return NULL; - } - } -} - - -/* - * linkAndUpdateFile() - * - * Creates a hard link between the given relation files. We use - * this function to perform a true in-place update. If the on-disk - * format of the new cluster is bit-for-bit compatible with the on-disk - * format of the old cluster, we can simply link each relation - * instead of copying the data from the old cluster to the new cluster. - */ -const char * -linkAndUpdateFile(pageCnvCtx *pageConverter, - const char *src, const char *dst) -{ - if (pageConverter != NULL) - return "Cannot in-place update this cluster, page-by-page conversion is required"; - - if (pg_link_file(src, dst) == -1) - return getErrorText(errno); - else - return NULL; -} - - -#ifndef WIN32 -static int -copy_file(const char *srcfile, const char *dstfile, bool force) -{ -#define COPY_BUF_SIZE (50 * BLCKSZ) - - int src_fd; - int dest_fd; - char *buffer; - int ret = 0; - int save_errno = 0; - - if ((srcfile == NULL) || (dstfile == NULL)) - { - errno = EINVAL; - return -1; - } - - if ((src_fd = open(srcfile, O_RDONLY, 0)) < 0) - return -1; - - if ((dest_fd = open(dstfile, O_RDWR | O_CREAT | (force ? 
0 : O_EXCL), S_IRUSR | S_IWUSR)) < 0) - { - save_errno = errno; - - if (src_fd != 0) - close(src_fd); - - errno = save_errno; - return -1; - } - - buffer = (char *) pg_malloc(COPY_BUF_SIZE); - - /* perform the data copy, i.e. read from source, write to destination */ - while (true) - { - ssize_t nbytes = read(src_fd, buffer, COPY_BUF_SIZE); - - if (nbytes < 0) - { - save_errno = errno; - ret = -1; - break; - } - - if (nbytes == 0) - break; - - errno = 0; - - if (write(dest_fd, buffer, nbytes) != nbytes) - { - /* if write didn't set errno, assume problem is no disk space */ - if (errno == 0) - errno = ENOSPC; - save_errno = errno; - ret = -1; - break; - } - } - - pg_free(buffer); - - if (src_fd != 0) - close(src_fd); - - if (dest_fd != 0) - close(dest_fd); - - if (save_errno != 0) - errno = save_errno; - - return ret; -} -#endif - - -void -check_hard_link(void) -{ - char existing_file[MAXPGPATH]; - char new_link_file[MAXPGPATH]; - - snprintf(existing_file, sizeof(existing_file), "%s/PG_VERSION", old_cluster.pgdata); - snprintf(new_link_file, sizeof(new_link_file), "%s/PG_VERSION.linktest", new_cluster.pgdata); - unlink(new_link_file); /* might fail */ - - if (pg_link_file(existing_file, new_link_file) == -1) - { - pg_fatal("Could not create hard link between old and new data directories: %s\n" - "In link mode the old and new data directories must be on the same file system volume.\n", - getErrorText(errno)); - } - unlink(new_link_file); -} - -#ifdef WIN32 -static int -win32_pghardlink(const char *src, const char *dst) -{ - /* - * CreateHardLinkA returns zero for failure - * http://msdn.microsoft.com/en-us/library/aa363860(VS.85).aspx - */ - if (CreateHardLinkA(dst, src, NULL) == 0) - return -1; - else - return 0; -} -#endif - - -/* fopen() file with no group/other permissions */ -FILE * -fopen_priv(const char *path, const char *mode) -{ - mode_t old_umask = umask(S_IRWXG | S_IRWXO); - FILE *fp; - - fp = fopen(path, mode); - umask(old_umask); - - return fp; -} diff --git a/contrib/pg_upgrade/function.c b/contrib/pg_upgrade/function.c deleted file mode 100644 index d8009d195d8..00000000000 --- a/contrib/pg_upgrade/function.c +++ /dev/null @@ -1,240 +0,0 @@ -/* - * function.c - * - * server-side function support - * - * Copyright (c) 2010-2015, PostgreSQL Global Development Group - * contrib/pg_upgrade/function.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include "access/transam.h" - - -/* - * get_loadable_libraries() - * - * Fetch the names of all old libraries containing C-language functions. - * We will later check that they all exist in the new installation. - */ -void -get_loadable_libraries(void) -{ - PGresult **ress; - int totaltups; - int dbnum; - bool found_public_plpython_handler = false; - - ress = (PGresult **) pg_malloc(old_cluster.dbarr.ndbs * sizeof(PGresult *)); - totaltups = 0; - - /* Fetch all library names, removing duplicates within each DB */ - for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) - { - DbInfo *active_db = &old_cluster.dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(&old_cluster, active_db->db_name); - - /* - * Fetch all libraries referenced in this DB. We can't exclude the - * "pg_catalog" schema because, while such functions are not - * explicitly dumped by pg_dump, they do reference implicit objects - * that pg_dump does dump, e.g. CREATE LANGUAGE plperl.
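 *
 * Illustrative result rows from this query (library names are examples
 * only, not from this patch):
 *
 *     probin
 *     ------------------
 *     $libdir/plperl
 *     $libdir/pg_trgm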
- */ - ress[dbnum] = executeQueryOrDie(conn, - "SELECT DISTINCT probin " - "FROM pg_catalog.pg_proc " - "WHERE prolang = 13 /* C */ AND " - "probin IS NOT NULL AND " - "oid >= %u;", - FirstNormalObjectId); - totaltups += PQntuples(ress[dbnum]); - - /* - * Systems that install plpython before 8.1 have - * plpython_call_handler() defined in the "public" schema, causing - * pg_dump to dump it. However that function still references - * "plpython" (no "2"), so it throws an error on restore. This code - * checks for the problem function, reports affected databases to the - * user and explains how to remove them. 8.1 git commit: - * e0dedd0559f005d60c69c9772163e69c204bac69 - * http://archives.postgresql.org/pgsql-hackers/2012-03/msg01101.php - * http://archives.postgresql.org/pgsql-bugs/2012-05/msg00206.php - */ - if (GET_MAJOR_VERSION(old_cluster.major_version) < 901) - { - PGresult *res; - - res = executeQueryOrDie(conn, - "SELECT 1 " - "FROM pg_catalog.pg_proc JOIN pg_namespace " - " ON pronamespace = pg_namespace.oid " - "WHERE proname = 'plpython_call_handler' AND " - "nspname = 'public' AND " - "prolang = 13 /* C */ AND " - "probin = '$libdir/plpython' AND " - "pg_proc.oid >= %u;", - FirstNormalObjectId); - if (PQntuples(res) > 0) - { - if (!found_public_plpython_handler) - { - pg_log(PG_WARNING, - "\nThe old cluster has a \"plpython_call_handler\" function defined\n" - "in the \"public\" schema which is a duplicate of the one defined\n" - "in the \"pg_catalog\" schema. You can confirm this by executing\n" - "in psql:\n" - "\n" - " \\df *.plpython_call_handler\n" - "\n" - "The \"public\" schema version of this function was created by a\n" - "pre-8.1 install of plpython, and must be removed for pg_upgrade\n" - "to complete because it references a now-obsolete \"plpython\"\n" - "shared object file. You can remove the \"public\" schema version\n" - "of this function by running the following command:\n" - "\n" - " DROP FUNCTION public.plpython_call_handler()\n" - "\n" - "in each affected database:\n" - "\n"); - } - pg_log(PG_WARNING, " %s\n", active_db->db_name); - found_public_plpython_handler = true; - } - PQclear(res); - } - - PQfinish(conn); - } - - if (found_public_plpython_handler) - pg_fatal("Remove the problem functions from the old cluster to continue.\n"); - - /* Allocate what's certainly enough space */ - os_info.libraries = (char **) pg_malloc(totaltups * sizeof(char *)); - - /* - * Now remove duplicates across DBs. This is pretty inefficient code, but - * there probably aren't enough entries to matter. - */ - totaltups = 0; - - for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) - { - PGresult *res = ress[dbnum]; - int ntups; - int rowno; - - ntups = PQntuples(res); - for (rowno = 0; rowno < ntups; rowno++) - { - char *lib = PQgetvalue(res, rowno, 0); - bool dup = false; - int n; - - for (n = 0; n < totaltups; n++) - { - if (strcmp(lib, os_info.libraries[n]) == 0) - { - dup = true; - break; - } - } - if (!dup) - os_info.libraries[totaltups++] = pg_strdup(lib); - } - - PQclear(res); - } - - os_info.num_libraries = totaltups; - - pg_free(ress); -} - - -/* - * check_loadable_libraries() - * - * Check that the new cluster contains all required libraries. - * We do this by actually trying to LOAD each one, thereby testing - * compatibility as well as presence. 
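 *
 * A sketch of the probe issued per library (the name is illustrative);
 * a failure is recorded in loadable_libraries.txt rather than aborting
 * on the first miss:
 *
 *     LOAD '$libdir/plperl';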
- */ -void -check_loadable_libraries(void) -{ - PGconn *conn = connectToServer(&new_cluster, "template1"); - int libnum; - FILE *script = NULL; - bool found = false; - char output_path[MAXPGPATH]; - - prep_status("Checking for presence of required libraries"); - - snprintf(output_path, sizeof(output_path), "loadable_libraries.txt"); - - for (libnum = 0; libnum < os_info.num_libraries; libnum++) - { - char *lib = os_info.libraries[libnum]; - int llen = strlen(lib); - char cmd[7 + 2 * MAXPGPATH + 1]; - PGresult *res; - - /* - * In Postgres 9.0, Python 3 support was added, and to do that, a - * plpython2u language was created with library name plpython2.so as a - * symbolic link to plpython.so. In Postgres 9.1, only the - * plpython2.so library was created, with both plpythonu and plpython2u - * pointing to it. For this reason, any reference to library name - * "plpython" in an old PG <= 9.1 cluster must look for "plpython2" in - * the new cluster. - * - * For this case, we could check pg_pltemplate, but that only works - * for languages, and does not help with function shared objects, so - * we just do a general fix. - */ - if (GET_MAJOR_VERSION(old_cluster.major_version) < 901 && - strcmp(lib, "$libdir/plpython") == 0) - { - lib = "$libdir/plpython2"; - llen = strlen(lib); - } - - strcpy(cmd, "LOAD '"); - PQescapeStringConn(conn, cmd + strlen(cmd), lib, llen, NULL); - strcat(cmd, "'"); - - res = PQexec(conn, cmd); - - if (PQresultStatus(res) != PGRES_COMMAND_OK) - { - found = true; - - if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("Could not open file \"%s\": %s\n", - output_path, getErrorText(errno)); - fprintf(script, "Could not load library \"%s\"\n%s\n", - lib, - PQerrorMessage(conn)); - } - - PQclear(res); - } - - PQfinish(conn); - - if (found) - { - fclose(script); - pg_log(PG_REPORT, "fatal\n"); - pg_fatal("Your installation references loadable libraries that are missing from the\n" - "new installation. You can add these libraries to the new installation,\n" - "or remove the functions using them from the old installation. A list of\n" - "problem libraries is in the file:\n" - " %s\n\n", output_path); - } - else - check_ok(); -} diff --git a/contrib/pg_upgrade/info.c b/contrib/pg_upgrade/info.c deleted file mode 100644 index 12549342707..00000000000 --- a/contrib/pg_upgrade/info.c +++ /dev/null @@ -1,535 +0,0 @@ -/* - * info.c - * - * information support functions - * - * Copyright (c) 2010-2015, PostgreSQL Global Development Group - * contrib/pg_upgrade/info.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include "access/transam.h" - - -static void create_rel_filename_map(const char *old_data, const char *new_data, - const DbInfo *old_db, const DbInfo *new_db, - const RelInfo *old_rel, const RelInfo *new_rel, - FileNameMap *map); -static void free_db_and_rel_infos(DbInfoArr *db_arr); -static void get_db_infos(ClusterInfo *cluster); -static void get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo); -static void free_rel_infos(RelInfoArr *rel_arr); -static void print_db_infos(DbInfoArr *dbinfo); -static void print_rel_infos(RelInfoArr *rel_arr); - - -/* - * gen_db_file_maps() - * - * generates database mappings for "old_db" and "new_db". Returns a malloc'ed - * array of mappings. nmaps is a return parameter which refers to the number - * of mappings.
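 *
 * Typical call shape (a sketch; the variable names are illustrative):
 *
 *     int          nmaps;
 *     FileNameMap *maps = gen_db_file_maps(old_db, new_db, &nmaps,
 *                                          old_pgdata, new_pgdata);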
- */ -FileNameMap * -gen_db_file_maps(DbInfo *old_db, DbInfo *new_db, - int *nmaps, const char *old_pgdata, const char *new_pgdata) -{ - FileNameMap *maps; - int old_relnum, new_relnum; - int num_maps = 0; - - maps = (FileNameMap *) pg_malloc(sizeof(FileNameMap) * - old_db->rel_arr.nrels); - - /* - * The old database shouldn't have more relations than the new one. - * We force the new cluster to have a TOAST table if the old table - * had one. - */ - if (old_db->rel_arr.nrels > new_db->rel_arr.nrels) - pg_fatal("old and new databases \"%s\" have a mismatched number of relations\n", - old_db->db_name); - - /* Drive the loop using new_relnum, which might be higher. */ - for (old_relnum = new_relnum = 0; new_relnum < new_db->rel_arr.nrels; - new_relnum++) - { - RelInfo *old_rel; - RelInfo *new_rel = &new_db->rel_arr.rels[new_relnum]; - - /* - * It is possible that the new cluster has a TOAST table for a table - * that didn't need one in the old cluster, e.g. 9.0 to 9.1 changed the - * NUMERIC length computation. Therefore, if we have a TOAST table - * in the new cluster that doesn't match, skip over it and continue - * processing. It is possible this TOAST table used an OID that was - * reserved in the old cluster, but we have no way of testing that, - * and we would have already gotten an error at the new cluster schema - * creation stage. Fortunately, since we only restore the OID counter - * after schema restore, and restore in OID order via pg_dump, a - * conflict would only happen if the new TOAST table had a very low - * OID. However, TOAST tables created long after initial table - * creation can have any OID, particularly after OID wraparound. - */ - if (old_relnum == old_db->rel_arr.nrels) - { - if (strcmp(new_rel->nspname, "pg_toast") == 0) - continue; - else - pg_fatal("Extra non-TOAST relation found in database \"%s\": new OID %d\n", - old_db->db_name, new_rel->reloid); - } - - old_rel = &old_db->rel_arr.rels[old_relnum]; - - if (old_rel->reloid != new_rel->reloid) - { - if (strcmp(new_rel->nspname, "pg_toast") == 0) - continue; - else - pg_fatal("Mismatch of relation OID in database \"%s\": old OID %d, new OID %d\n", - old_db->db_name, old_rel->reloid, new_rel->reloid); - } - - /* - * TOAST table names initially match the heap pg_class oid. In - * pre-8.4, TOAST table names change during CLUSTER; in pre-9.0, TOAST - * table names change during ALTER TABLE ALTER COLUMN SET TYPE. In >= - * 9.0, TOAST relation names always use heap table oids, hence we - * cannot check relation names when upgrading from pre-9.0. Clusters - * upgraded to 9.0 will get matching TOAST names. If index names don't - * match primary key constraint names, this will fail because pg_dump - * dumps constraint names and pg_upgrade checks index names. - */ - if (strcmp(old_rel->nspname, new_rel->nspname) != 0 || - ((GET_MAJOR_VERSION(old_cluster.major_version) >= 900 || - strcmp(old_rel->nspname, "pg_toast") != 0) && - strcmp(old_rel->relname, new_rel->relname) != 0)) - pg_fatal("Mismatch of relation names in database \"%s\": " - "old name \"%s.%s\", new name \"%s.%s\"\n", - old_db->db_name, old_rel->nspname, old_rel->relname, - new_rel->nspname, new_rel->relname); - - create_rel_filename_map(old_pgdata, new_pgdata, old_db, new_db, - old_rel, new_rel, maps + num_maps); - num_maps++; - old_relnum++; - } - - /* Did we fail to exhaust the old array? 
*/ - if (old_relnum != old_db->rel_arr.nrels) - pg_fatal("old and new databases \"%s\" have a mismatched number of relations\n", - old_db->db_name); - - *nmaps = num_maps; - return maps; -} - - -/* - * create_rel_filename_map() - * - * fills a file node map structure and returns it in "map". - */ -static void -create_rel_filename_map(const char *old_data, const char *new_data, - const DbInfo *old_db, const DbInfo *new_db, - const RelInfo *old_rel, const RelInfo *new_rel, - FileNameMap *map) -{ - if (strlen(old_rel->tablespace) == 0) - { - /* - * relation belongs to the default tablespace, hence relfiles should - * exist in the data directories. - */ - map->old_tablespace = old_data; - map->new_tablespace = new_data; - map->old_tablespace_suffix = "/base"; - map->new_tablespace_suffix = "/base"; - } - else - { - /* relation belongs to a tablespace, so use the tablespace location */ - map->old_tablespace = old_rel->tablespace; - map->new_tablespace = new_rel->tablespace; - map->old_tablespace_suffix = old_cluster.tablespace_suffix; - map->new_tablespace_suffix = new_cluster.tablespace_suffix; - } - - map->old_db_oid = old_db->db_oid; - map->new_db_oid = new_db->db_oid; - - /* - * old_relfilenode might differ from pg_class.oid (and hence - * new_relfilenode) because of CLUSTER, REINDEX, or VACUUM FULL. - */ - map->old_relfilenode = old_rel->relfilenode; - - /* new_relfilenode will match old and new pg_class.oid */ - map->new_relfilenode = new_rel->relfilenode; - - /* used only for logging and error reporting, old/new are identical */ - map->nspname = old_rel->nspname; - map->relname = old_rel->relname; -} - - -void -print_maps(FileNameMap *maps, int n_maps, const char *db_name) -{ - if (log_opts.verbose) - { - int mapnum; - - pg_log(PG_VERBOSE, "mappings for database \"%s\":\n", db_name); - - for (mapnum = 0; mapnum < n_maps; mapnum++) - pg_log(PG_VERBOSE, "%s.%s: %u to %u\n", - maps[mapnum].nspname, maps[mapnum].relname, - maps[mapnum].old_relfilenode, - maps[mapnum].new_relfilenode); - - pg_log(PG_VERBOSE, "\n\n"); - } -} - - -/* - * get_db_and_rel_infos() - * - * higher level routine to generate dbinfos for the database running - * on the given "port". Assumes that server is already running. - */ -void -get_db_and_rel_infos(ClusterInfo *cluster) -{ - int dbnum; - - if (cluster->dbarr.dbs != NULL) - free_db_and_rel_infos(&cluster->dbarr); - - get_db_infos(cluster); - - for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) - get_rel_infos(cluster, &cluster->dbarr.dbs[dbnum]); - - pg_log(PG_VERBOSE, "\n%s databases:\n", CLUSTER_NAME(cluster)); - if (log_opts.verbose) - print_db_infos(&cluster->dbarr); -} - - -/* - * get_db_infos() - * - * Scans pg_database system catalog and populates all user - * databases. - */ -static void -get_db_infos(ClusterInfo *cluster) -{ - PGconn *conn = connectToServer(cluster, "template1"); - PGresult *res; - int ntups; - int tupnum; - DbInfo *dbinfos; - int i_datname, - i_oid, - i_encoding, - i_datcollate, - i_datctype, - i_spclocation; - char query[QUERY_ALLOC]; - - snprintf(query, sizeof(query), - "SELECT d.oid, d.datname, d.encoding, d.datcollate, d.datctype, " - "%s AS spclocation " - "FROM pg_catalog.pg_database d " - " LEFT OUTER JOIN pg_catalog.pg_tablespace t " - " ON d.dattablespace = t.oid " - "WHERE d.datallowconn = true " - /* we don't preserve pg_database.oid so we sort by name */ - "ORDER BY 2", - /* 9.2 removed the spclocation column */ - (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
- "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid)"); - - res = executeQueryOrDie(conn, "%s", query); - - i_oid = PQfnumber(res, "oid"); - i_datname = PQfnumber(res, "datname"); - i_encoding = PQfnumber(res, "encoding"); - i_datcollate = PQfnumber(res, "datcollate"); - i_datctype = PQfnumber(res, "datctype"); - i_spclocation = PQfnumber(res, "spclocation"); - - ntups = PQntuples(res); - dbinfos = (DbInfo *) pg_malloc(sizeof(DbInfo) * ntups); - - for (tupnum = 0; tupnum < ntups; tupnum++) - { - dbinfos[tupnum].db_oid = atooid(PQgetvalue(res, tupnum, i_oid)); - dbinfos[tupnum].db_name = pg_strdup(PQgetvalue(res, tupnum, i_datname)); - dbinfos[tupnum].db_encoding = atoi(PQgetvalue(res, tupnum, i_encoding)); - dbinfos[tupnum].db_collate = pg_strdup(PQgetvalue(res, tupnum, i_datcollate)); - dbinfos[tupnum].db_ctype = pg_strdup(PQgetvalue(res, tupnum, i_datctype)); - snprintf(dbinfos[tupnum].db_tablespace, sizeof(dbinfos[tupnum].db_tablespace), "%s", - PQgetvalue(res, tupnum, i_spclocation)); - } - PQclear(res); - - PQfinish(conn); - - cluster->dbarr.dbs = dbinfos; - cluster->dbarr.ndbs = ntups; -} - - -/* - * get_rel_infos() - * - * gets the relinfos for all the user tables of the database referred - * by "db". - * - * NOTE: we assume that relations/entities with oids greater than - * FirstNormalObjectId belongs to the user - */ -static void -get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo) -{ - PGconn *conn = connectToServer(cluster, - dbinfo->db_name); - PGresult *res; - RelInfo *relinfos; - int ntups; - int relnum; - int num_rels = 0; - char *nspname = NULL; - char *relname = NULL; - char *tablespace = NULL; - int i_spclocation, - i_nspname, - i_relname, - i_oid, - i_relfilenode, - i_reltablespace; - char query[QUERY_ALLOC]; - char *last_namespace = NULL, - *last_tablespace = NULL; - - /* - * pg_largeobject contains user data that does not appear in pg_dump - * --schema-only output, so we have to copy that system table heap and - * index. We could grab the pg_largeobject oids from template1, but it is - * easy to treat it as a normal table. Order by oid so we can join old/new - * structures efficiently. - */ - - snprintf(query, sizeof(query), - /* get regular heap */ - "WITH regular_heap (reloid) AS ( " - " SELECT c.oid " - " FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n " - " ON c.relnamespace = n.oid " - " LEFT OUTER JOIN pg_catalog.pg_index i " - " ON c.oid = i.indexrelid " - " WHERE relkind IN ('r', 'm', 'i', 'S') AND " - /* - * pg_dump only dumps valid indexes; testing indisready is necessary in - * 9.2, and harmless in earlier/later versions. - */ - " i.indisvalid IS DISTINCT FROM false AND " - " i.indisready IS DISTINCT FROM false AND " - /* exclude possible orphaned temp tables */ - " ((n.nspname !~ '^pg_temp_' AND " - " n.nspname !~ '^pg_toast_temp_' AND " - /* skip pg_toast because toast index have relkind == 'i', not 't' */ - " n.nspname NOT IN ('pg_catalog', 'information_schema', " - " 'binary_upgrade', 'pg_toast') AND " - " c.oid >= %u) OR " - " (n.nspname = 'pg_catalog' AND " - " relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) ))), " - /* - * We have to gather the TOAST tables in later steps because we - * can't schema-qualify TOAST tables. 
- */ - /* get TOAST heap */ - " toast_heap (reloid) AS ( " - " SELECT reltoastrelid " - " FROM regular_heap JOIN pg_catalog.pg_class c " - " ON regular_heap.reloid = c.oid " - " AND c.reltoastrelid != %u), " - /* get indexes on regular and TOAST heap */ - " all_index (reloid) AS ( " - " SELECT indexrelid " - " FROM pg_index " - " WHERE indisvalid " - " AND indrelid IN (SELECT reltoastrelid " - " FROM (SELECT reloid FROM regular_heap " - " UNION ALL " - " SELECT reloid FROM toast_heap) all_heap " - " JOIN pg_catalog.pg_class c " - " ON all_heap.reloid = c.oid " - " AND c.reltoastrelid != %u)) " - /* get all rels */ - "SELECT c.oid, n.nspname, c.relname, " - " c.relfilenode, c.reltablespace, %s " - "FROM (SELECT reloid FROM regular_heap " - " UNION ALL " - " SELECT reloid FROM toast_heap " - " UNION ALL " - " SELECT reloid FROM all_index) all_rels " - " JOIN pg_catalog.pg_class c " - " ON all_rels.reloid = c.oid " - " JOIN pg_catalog.pg_namespace n " - " ON c.relnamespace = n.oid " - " LEFT OUTER JOIN pg_catalog.pg_tablespace t " - " ON c.reltablespace = t.oid " - /* we preserve pg_class.oid so we sort by it to match old/new */ - "ORDER BY 1;", - FirstNormalObjectId, - /* does pg_largeobject_metadata need to be migrated? */ - (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) ? - "" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'", - InvalidOid, InvalidOid, - /* 9.2 removed the spclocation column */ - (GET_MAJOR_VERSION(cluster->major_version) <= 901) ? - "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation"); - - res = executeQueryOrDie(conn, "%s", query); - - ntups = PQntuples(res); - - relinfos = (RelInfo *) pg_malloc(sizeof(RelInfo) * ntups); - - i_oid = PQfnumber(res, "oid"); - i_nspname = PQfnumber(res, "nspname"); - i_relname = PQfnumber(res, "relname"); - i_relfilenode = PQfnumber(res, "relfilenode"); - i_reltablespace = PQfnumber(res, "reltablespace"); - i_spclocation = PQfnumber(res, "spclocation"); - - for (relnum = 0; relnum < ntups; relnum++) - { - RelInfo *curr = &relinfos[num_rels++]; - - curr->reloid = atooid(PQgetvalue(res, relnum, i_oid)); - - nspname = PQgetvalue(res, relnum, i_nspname); - curr->nsp_alloc = false; - - /* - * Many of the namespace and tablespace strings are identical, so we - * try to reuse the allocated string pointers where possible to reduce - * memory consumption. - */ - /* Can we reuse the previous string allocation? */ - if (last_namespace && strcmp(nspname, last_namespace) == 0) - curr->nspname = last_namespace; - else - { - last_namespace = curr->nspname = pg_strdup(nspname); - curr->nsp_alloc = true; - } - - relname = PQgetvalue(res, relnum, i_relname); - curr->relname = pg_strdup(relname); - - curr->relfilenode = atooid(PQgetvalue(res, relnum, i_relfilenode)); - curr->tblsp_alloc = false; - - /* Is the tablespace oid non-zero? */ - if (atooid(PQgetvalue(res, relnum, i_reltablespace)) != 0) - { - /* - * The tablespace location might be "", meaning the cluster - * default location, i.e. pg_default or pg_global. - */ - tablespace = PQgetvalue(res, relnum, i_spclocation); - - /* Can we reuse the previous string allocation? */ - if (last_tablespace && strcmp(tablespace, last_tablespace) == 0) - curr->tablespace = last_tablespace; - else - { - last_tablespace = curr->tablespace = pg_strdup(tablespace); - curr->tblsp_alloc = true; - } - } - else - /* A zero reltablespace oid indicates the database tablespace. 
*/ - curr->tablespace = dbinfo->db_tablespace; - } - PQclear(res); - - PQfinish(conn); - - dbinfo->rel_arr.rels = relinfos; - dbinfo->rel_arr.nrels = num_rels; -} - - -static void -free_db_and_rel_infos(DbInfoArr *db_arr) -{ - int dbnum; - - for (dbnum = 0; dbnum < db_arr->ndbs; dbnum++) - { - free_rel_infos(&db_arr->dbs[dbnum].rel_arr); - pg_free(db_arr->dbs[dbnum].db_name); - } - pg_free(db_arr->dbs); - db_arr->dbs = NULL; - db_arr->ndbs = 0; -} - - -static void -free_rel_infos(RelInfoArr *rel_arr) -{ - int relnum; - - for (relnum = 0; relnum < rel_arr->nrels; relnum++) - { - if (rel_arr->rels[relnum].nsp_alloc) - pg_free(rel_arr->rels[relnum].nspname); - pg_free(rel_arr->rels[relnum].relname); - if (rel_arr->rels[relnum].tblsp_alloc) - pg_free(rel_arr->rels[relnum].tablespace); - } - pg_free(rel_arr->rels); - rel_arr->nrels = 0; -} - - -static void -print_db_infos(DbInfoArr *db_arr) -{ - int dbnum; - - for (dbnum = 0; dbnum < db_arr->ndbs; dbnum++) - { - pg_log(PG_VERBOSE, "Database: %s\n", db_arr->dbs[dbnum].db_name); - print_rel_infos(&db_arr->dbs[dbnum].rel_arr); - pg_log(PG_VERBOSE, "\n\n"); - } -} - - -static void -print_rel_infos(RelInfoArr *rel_arr) -{ - int relnum; - - for (relnum = 0; relnum < rel_arr->nrels; relnum++) - pg_log(PG_VERBOSE, "relname: %s.%s: reloid: %u reltblspace: %s\n", - rel_arr->rels[relnum].nspname, - rel_arr->rels[relnum].relname, - rel_arr->rels[relnum].reloid, - rel_arr->rels[relnum].tablespace); -} diff --git a/contrib/pg_upgrade/option.c b/contrib/pg_upgrade/option.c deleted file mode 100644 index 742d133e391..00000000000 --- a/contrib/pg_upgrade/option.c +++ /dev/null @@ -1,518 +0,0 @@ -/* - * opt.c - * - * options functions - * - * Copyright (c) 2010-2015, PostgreSQL Global Development Group - * contrib/pg_upgrade/option.c - */ - -#include "postgres_fe.h" - -#include "miscadmin.h" -#include "getopt_long.h" - -#include "pg_upgrade.h" - -#include -#include -#ifdef WIN32 -#include -#endif - - -static void usage(void); -static void check_required_directory(char **dirpath, char **configpath, - char *envVarName, char *cmdLineOption, char *description); -#define FIX_DEFAULT_READ_ONLY "-c default_transaction_read_only=false" - - -UserOpts user_opts; - - -/* - * parseCommandLine() - * - * Parses the command line (argc, argv[]) and loads structures - */ -void -parseCommandLine(int argc, char *argv[]) -{ - static struct option long_options[] = { - {"old-datadir", required_argument, NULL, 'd'}, - {"new-datadir", required_argument, NULL, 'D'}, - {"old-bindir", required_argument, NULL, 'b'}, - {"new-bindir", required_argument, NULL, 'B'}, - {"old-options", required_argument, NULL, 'o'}, - {"new-options", required_argument, NULL, 'O'}, - {"old-port", required_argument, NULL, 'p'}, - {"new-port", required_argument, NULL, 'P'}, - - {"username", required_argument, NULL, 'U'}, - {"check", no_argument, NULL, 'c'}, - {"link", no_argument, NULL, 'k'}, - {"retain", no_argument, NULL, 'r'}, - {"jobs", required_argument, NULL, 'j'}, - {"verbose", no_argument, NULL, 'v'}, - {NULL, 0, NULL, 0} - }; - int option; /* Command line option */ - int optindex = 0; /* used by getopt_long */ - int os_user_effective_id; - FILE *fp; - char **filename; - time_t run_time = time(NULL); - - user_opts.transfer_mode = TRANSFER_MODE_COPY; - - os_info.progname = get_progname(argv[0]); - - /* Process libpq env. variables; load values here for usage() output */ - old_cluster.port = getenv("PGPORTOLD") ? atoi(getenv("PGPORTOLD")) : DEF_PGUPORT; - new_cluster.port = getenv("PGPORTNEW") ? 
atoi(getenv("PGPORTNEW")) : DEF_PGUPORT; - - os_user_effective_id = get_user_info(&os_info.user); - /* we override just the database user name; we got the OS id above */ - if (getenv("PGUSER")) - { - pg_free(os_info.user); - /* must save value, getenv()'s pointer is not stable */ - os_info.user = pg_strdup(getenv("PGUSER")); - } - - if (argc > 1) - { - if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) - { - usage(); - exit(0); - } - if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) - { - puts("pg_upgrade (PostgreSQL) " PG_VERSION); - exit(0); - } - } - - /* Allow help and version to be run as root, so do the test here. */ - if (os_user_effective_id == 0) - pg_fatal("%s: cannot be run as root\n", os_info.progname); - - if ((log_opts.internal = fopen_priv(INTERNAL_LOG_FILE, "a")) == NULL) - pg_fatal("cannot write to log file %s\n", INTERNAL_LOG_FILE); - - while ((option = getopt_long(argc, argv, "d:D:b:B:cj:ko:O:p:P:rU:v", - long_options, &optindex)) != -1) - { - switch (option) - { - case 'b': - old_cluster.bindir = pg_strdup(optarg); - break; - - case 'B': - new_cluster.bindir = pg_strdup(optarg); - break; - - case 'c': - user_opts.check = true; - break; - - case 'd': - old_cluster.pgdata = pg_strdup(optarg); - old_cluster.pgconfig = pg_strdup(optarg); - break; - - case 'D': - new_cluster.pgdata = pg_strdup(optarg); - new_cluster.pgconfig = pg_strdup(optarg); - break; - - case 'j': - user_opts.jobs = atoi(optarg); - break; - - case 'k': - user_opts.transfer_mode = TRANSFER_MODE_LINK; - break; - - case 'o': - /* append option? */ - if (!old_cluster.pgopts) - old_cluster.pgopts = pg_strdup(optarg); - else - { - char *old_pgopts = old_cluster.pgopts; - - old_cluster.pgopts = psprintf("%s %s", old_pgopts, optarg); - free(old_pgopts); - } - break; - - case 'O': - /* append option? */ - if (!new_cluster.pgopts) - new_cluster.pgopts = pg_strdup(optarg); - else - { - char *new_pgopts = new_cluster.pgopts; - - new_cluster.pgopts = psprintf("%s %s", new_pgopts, optarg); - free(new_pgopts); - } - break; - - /* - * Someday, the port number option could be removed and passed - * using -o/-O, but that requires postmaster -C to be - * supported on all old/new versions (added in PG 9.2). - */ - case 'p': - if ((old_cluster.port = atoi(optarg)) <= 0) - { - pg_fatal("invalid old port number\n"); - exit(1); - } - break; - - case 'P': - if ((new_cluster.port = atoi(optarg)) <= 0) - { - pg_fatal("invalid new port number\n"); - exit(1); - } - break; - - case 'r': - log_opts.retain = true; - break; - - case 'U': - pg_free(os_info.user); - os_info.user = pg_strdup(optarg); - os_info.user_specified = true; - - /* - * Push the user name into the environment so pre-9.1 - * pg_ctl/libpq uses it. - */ - pg_putenv("PGUSER", os_info.user); - break; - - case 'v': - pg_log(PG_REPORT, "Running in verbose mode\n"); - log_opts.verbose = true; - break; - - default: - pg_fatal("Try \"%s --help\" for more information.\n", - os_info.progname); - break; - } - } - - /* label start of upgrade in logfiles */ - for (filename = output_files; *filename != NULL; filename++) - { - if ((fp = fopen_priv(*filename, "a")) == NULL) - pg_fatal("cannot write to log file %s\n", *filename); - - /* Start with newline because we might be appending to a file. 
*/ - fprintf(fp, "\n" - "-----------------------------------------------------------------\n" - " pg_upgrade run on %s" - "-----------------------------------------------------------------\n\n", - ctime(&run_time)); - fclose(fp); - } - - /* Turn off read-only mode; add prefix to PGOPTIONS? */ - if (getenv("PGOPTIONS")) - { - char *pgoptions = psprintf("%s %s", FIX_DEFAULT_READ_ONLY, - getenv("PGOPTIONS")); - - pg_putenv("PGOPTIONS", pgoptions); - pfree(pgoptions); - } - else - pg_putenv("PGOPTIONS", FIX_DEFAULT_READ_ONLY); - - /* Get values from env if not already set */ - check_required_directory(&old_cluster.bindir, NULL, "PGBINOLD", "-b", - "old cluster binaries reside"); - check_required_directory(&new_cluster.bindir, NULL, "PGBINNEW", "-B", - "new cluster binaries reside"); - check_required_directory(&old_cluster.pgdata, &old_cluster.pgconfig, - "PGDATAOLD", "-d", "old cluster data resides"); - check_required_directory(&new_cluster.pgdata, &new_cluster.pgconfig, - "PGDATANEW", "-D", "new cluster data resides"); - -#ifdef WIN32 - /* - * On Windows, initdb --sync-only will fail with a "Permission denied" - * error on file pg_upgrade_utility.log if pg_upgrade is run inside - * the new cluster directory, so we do a check here. - */ - { - char cwd[MAXPGPATH], new_cluster_pgdata[MAXPGPATH]; - - strlcpy(new_cluster_pgdata, new_cluster.pgdata, MAXPGPATH); - canonicalize_path(new_cluster_pgdata); - - if (!getcwd(cwd, MAXPGPATH)) - pg_fatal("cannot find current directory\n"); - canonicalize_path(cwd); - if (path_is_prefix_of_path(new_cluster_pgdata, cwd)) - pg_fatal("cannot run pg_upgrade from inside the new cluster data directory on Windows\n"); - } -#endif -} - - -static void -usage(void) -{ - printf(_("pg_upgrade upgrades a PostgreSQL cluster to a different major version.\n\ -\nUsage:\n\ - pg_upgrade [OPTION]...\n\ -\n\ -Options:\n\ - -b, --old-bindir=BINDIR old cluster executable directory\n\ - -B, --new-bindir=BINDIR new cluster executable directory\n\ - -c, --check check clusters only, don't change any data\n\ - -d, --old-datadir=DATADIR old cluster data directory\n\ - -D, --new-datadir=DATADIR new cluster data directory\n\ - -j, --jobs number of simultaneous processes or threads to use\n\ - -k, --link link instead of copying files to new cluster\n\ - -o, --old-options=OPTIONS old cluster options to pass to the server\n\ - -O, --new-options=OPTIONS new cluster options to pass to the server\n\ - -p, --old-port=PORT old cluster port number (default %d)\n\ - -P, --new-port=PORT new cluster port number (default %d)\n\ - -r, --retain retain SQL and log files after success\n\ - -U, --username=NAME cluster superuser (default \"%s\")\n\ - -v, --verbose enable verbose internal logging\n\ - -V, --version display version information, then exit\n\ - -?, --help show this help, then exit\n\ -\n\ -Before running pg_upgrade you must:\n\ - create a new database cluster (using the new version of initdb)\n\ - shutdown the postmaster servicing the old cluster\n\ - shutdown the postmaster servicing the new cluster\n\ -\n\ -When you run pg_upgrade, you must provide the following information:\n\ - the data directory for the old cluster (-d DATADIR)\n\ - the data directory for the new cluster (-D DATADIR)\n\ - the \"bin\" directory for the old version (-b BINDIR)\n\ - the \"bin\" directory for the new version (-B BINDIR)\n\ -\n\ -For example:\n\ - pg_upgrade -d oldCluster/data -D newCluster/data -b oldCluster/bin -B newCluster/bin\n\ -or\n"), old_cluster.port, new_cluster.port, os_info.user); -#ifndef WIN32 - 
printf(_("\ - $ export PGDATAOLD=oldCluster/data\n\ - $ export PGDATANEW=newCluster/data\n\ - $ export PGBINOLD=oldCluster/bin\n\ - $ export PGBINNEW=newCluster/bin\n\ - $ pg_upgrade\n")); -#else - printf(_("\ - C:\\> set PGDATAOLD=oldCluster/data\n\ - C:\\> set PGDATANEW=newCluster/data\n\ - C:\\> set PGBINOLD=oldCluster/bin\n\ - C:\\> set PGBINNEW=newCluster/bin\n\ - C:\\> pg_upgrade\n")); -#endif - printf(_("\nReport bugs to .\n")); -} - - -/* - * check_required_directory() - * - * Checks a directory option. - * dirpath - the directory name supplied on the command line - * configpath - optional configuration directory - * envVarName - the name of an environment variable to get if dirpath is NULL - * cmdLineOption - the command line option corresponds to this directory (-o, -O, -n, -N) - * description - a description of this directory option - * - * We use the last two arguments to construct a meaningful error message if the - * user hasn't provided the required directory name. - */ -static void -check_required_directory(char **dirpath, char **configpath, - char *envVarName, char *cmdLineOption, - char *description) -{ - if (*dirpath == NULL || strlen(*dirpath) == 0) - { - const char *envVar; - - if ((envVar = getenv(envVarName)) && strlen(envVar)) - { - *dirpath = pg_strdup(envVar); - if (configpath) - *configpath = pg_strdup(envVar); - } - else - pg_fatal("You must identify the directory where the %s.\n" - "Please use the %s command-line option or the %s environment variable.\n", - description, cmdLineOption, envVarName); - } - - /* - * Trim off any trailing path separators because we construct paths by - * appending to this path. - */ -#ifndef WIN32 - if ((*dirpath)[strlen(*dirpath) - 1] == '/') -#else - if ((*dirpath)[strlen(*dirpath) - 1] == '/' || - (*dirpath)[strlen(*dirpath) - 1] == '\\') -#endif - (*dirpath)[strlen(*dirpath) - 1] = 0; -} - -/* - * adjust_data_dir - * - * If a configuration-only directory was specified, find the real data dir - * by quering the running server. This has limited checking because we - * can't check for a running server because we can't find postmaster.pid. - */ -void -adjust_data_dir(ClusterInfo *cluster) -{ - char filename[MAXPGPATH]; - char cmd[MAXPGPATH], - cmd_output[MAX_STRING]; - FILE *fp, - *output; - - /* If there is no postgresql.conf, it can't be a config-only dir */ - snprintf(filename, sizeof(filename), "%s/postgresql.conf", cluster->pgconfig); - if ((fp = fopen(filename, "r")) == NULL) - return; - fclose(fp); - - /* If PG_VERSION exists, it can't be a config-only dir */ - snprintf(filename, sizeof(filename), "%s/PG_VERSION", cluster->pgconfig); - if ((fp = fopen(filename, "r")) != NULL) - { - fclose(fp); - return; - } - - /* Must be a configuration directory, so find the real data directory. */ - - prep_status("Finding the real data directory for the %s cluster", - CLUSTER_NAME(cluster)); - - /* - * We don't have a data directory yet, so we can't check the PG version, - * so this might fail --- only works for PG 9.2+. If this fails, - * pg_upgrade will fail anyway because the data files will not be found. 
- */ - snprintf(cmd, sizeof(cmd), "\"%s/postgres\" -D \"%s\" -C data_directory", - cluster->bindir, cluster->pgconfig); - - if ((output = popen(cmd, "r")) == NULL || - fgets(cmd_output, sizeof(cmd_output), output) == NULL) - pg_fatal("Could not get data directory using %s: %s\n", - cmd, getErrorText(errno)); - - pclose(output); - - /* Remove trailing newline */ - if (strchr(cmd_output, '\n') != NULL) - *strchr(cmd_output, '\n') = '\0'; - - cluster->pgdata = pg_strdup(cmd_output); - - check_ok(); -} - - -/* - * get_sock_dir - * - * Identify the socket directory to use for this cluster. If we're doing - * a live check (old cluster only), we need to find out where the postmaster - * is listening. Otherwise, we're going to put the socket into the current - * directory. - */ -void -get_sock_dir(ClusterInfo *cluster, bool live_check) -{ -#ifdef HAVE_UNIX_SOCKETS - - /* - * sockdir and port were added to postmaster.pid in PG 9.1. Pre-9.1 cannot - * process pg_ctl -w for sockets in non-default locations. - */ - if (GET_MAJOR_VERSION(cluster->major_version) >= 901) - { - if (!live_check) - { - /* Use the current directory for the socket */ - cluster->sockdir = pg_malloc(MAXPGPATH); - if (!getcwd(cluster->sockdir, MAXPGPATH)) - pg_fatal("cannot find current directory\n"); - } - else - { - /* - * If we are doing a live check, we will use the old cluster's - * Unix domain socket directory so we can connect to the live - * server. - */ - unsigned short orig_port = cluster->port; - char filename[MAXPGPATH], - line[MAXPGPATH]; - FILE *fp; - int lineno; - - snprintf(filename, sizeof(filename), "%s/postmaster.pid", - cluster->pgdata); - if ((fp = fopen(filename, "r")) == NULL) - pg_fatal("Cannot open file %s: %m\n", filename); - - for (lineno = 1; - lineno <= Max(LOCK_FILE_LINE_PORT, LOCK_FILE_LINE_SOCKET_DIR); - lineno++) - { - if (fgets(line, sizeof(line), fp) == NULL) - pg_fatal("Cannot read line %d from %s: %m\n", lineno, filename); - - /* potentially overwrite user-supplied value */ - if (lineno == LOCK_FILE_LINE_PORT) - sscanf(line, "%hu", &old_cluster.port); - if (lineno == LOCK_FILE_LINE_SOCKET_DIR) - { - cluster->sockdir = pg_strdup(line); - /* strip off newline */ - if (strchr(cluster->sockdir, '\n') != NULL) - *strchr(cluster->sockdir, '\n') = '\0'; - } - } - fclose(fp); - - /* warn of port number correction */ - if (orig_port != DEF_PGUPORT && old_cluster.port != orig_port) - pg_log(PG_WARNING, "User-supplied old port number %hu corrected to %hu\n", - orig_port, cluster->port); - } - } - else - - /* - * Can't get the sockdir, and pg_ctl -w can't use a non-default - * socket directory, so use the default - */ - cluster->sockdir = NULL; -#else /* !HAVE_UNIX_SOCKETS */ - cluster->sockdir = NULL; -#endif -} diff --git a/contrib/pg_upgrade/page.c b/contrib/pg_upgrade/page.c deleted file mode 100644 index 1cfc10f8a2a..00000000000 --- a/contrib/pg_upgrade/page.c +++ /dev/null @@ -1,164 +0,0 @@ -/* - * page.c - * - * per-page conversion operations - * - * Copyright (c) 2010-2015, PostgreSQL Global Development Group - * contrib/pg_upgrade/page.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include "storage/bufpage.h" - - -#ifdef PAGE_CONVERSION - - -static void getPageVersion( - uint16 *version, const char *pathName); -static pageCnvCtx *loadConverterPlugin( - uint16 newPageVersion, uint16 oldPageVersion); - - -/* - * setupPageConverter() - * - * This function determines the PageLayoutVersion of the old cluster and - * the PageLayoutVersion of the new cluster.
If the versions differ, this - * function loads a converter plugin and returns a pointer to a pageCnvCtx - * object that knows how to convert pages from the old format - * to the new format. If the versions are identical, this function just - * returns a NULL pageCnvCtx pointer to indicate that page-by-page conversion - * is not required. - */ -pageCnvCtx * -setupPageConverter(void) -{ - uint16 oldPageVersion; - uint16 newPageVersion; - pageCnvCtx *converter; - const char *msg; - char dstName[MAXPGPATH]; - char srcName[MAXPGPATH]; - - snprintf(dstName, sizeof(dstName), "%s/global/%u", new_cluster.pgdata, - new_cluster.pg_database_oid); - snprintf(srcName, sizeof(srcName), "%s/global/%u", old_cluster.pgdata, - old_cluster.pg_database_oid); - - getPageVersion(&oldPageVersion, srcName); - getPageVersion(&newPageVersion, dstName); - - /* - * If the old cluster and new cluster use the same page layouts, then we - * don't need a page converter. - */ - if (newPageVersion != oldPageVersion) - { - /* - * The clusters use differing page layouts, see if we can find a - * plugin that knows how to convert from the old page layout to the - * new page layout. - */ - - if ((converter = loadConverterPlugin(newPageVersion, oldPageVersion)) == NULL) - pg_fatal("could not find plugin to convert from old page layout to new page layout\n"); - - return converter; - } - else - return NULL; -} - - -/* - * getPageVersion() - * - * Retrieves the PageLayoutVersion for the given relation and stores it - * at *version. If the relation file cannot be opened or its page header - * cannot be read, this function reports the error and exits via - * pg_fatal(). - */ -static void -getPageVersion(uint16 *version, const char *pathName) -{ - int relfd; - PageHeaderData page; - ssize_t bytesRead; - - if ((relfd = open(pathName, O_RDONLY, 0)) < 0) - pg_fatal("could not open relation %s\n", pathName); - - if ((bytesRead = read(relfd, &page, sizeof(page))) != sizeof(page)) - pg_fatal("could not read page header of %s\n", pathName); - - *version = PageGetPageLayoutVersion(&page); - - close(relfd); - - return; -} - - -/* - * loadConverterPlugin() - * - * This function loads a page-converter plugin library and grabs a - * pointer to each of the (interesting) functions provided by that - * plugin. The name of the plugin library is derived from the given - * newPageVersion and oldPageVersion. If a plugin is found, this - * function returns a pointer to a pageCnvCtx object (which will contain - * a collection of plugin function pointers). If the required plugin - * is not found, this function returns NULL. - */ -static pageCnvCtx * -loadConverterPlugin(uint16 newPageVersion, uint16 oldPageVersion) {- char pluginName[MAXPGPATH]; - void *plugin; - - /* - * Try to find a plugin that can convert pages of oldPageVersion into - * pages of newPageVersion. For example, if oldPageVersion is 3 and - * newPageVersion is 4, we search for a plugin named: - * plugins/convertLayout_3_to_4.dll - */ - - /* - * FIXME: we are searching for plugins relative to the current directory, - * we should really search relative to our own executable instead.
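The FIXME above suggests the fix: derive the plugin directory from the executable's own location. A hypothetical sketch of that change, using the frontend helpers this patch already uses elsewhere (find_my_exec, last_dir_separator); argv0 would have to be saved at startup, and the names here are illustrative:

    char        exec_path[MAXPGPATH];
    char        pluginName[MAXPGPATH];

    /* resolve our own executable, then replace the program name with
     * the plugins subdirectory (saved_argv0 captured during startup) */
    if (find_my_exec(saved_argv0, exec_path) < 0)
        pg_fatal("could not find own program executable\n");
    *last_dir_separator(exec_path) = '\0';
    snprintf(pluginName, sizeof(pluginName),
             "%s/plugins/convertLayout_%d_to_%d%s",
             exec_path, oldPageVersion, newPageVersion, DLSUFFIX);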
- */ - snprintf(pluginName, sizeof(pluginName), "./plugins/convertLayout_%d_to_%d%s", - oldPageVersion, newPageVersion, DLSUFFIX); - - if ((plugin = pg_dlopen(pluginName)) == NULL) - return NULL; - else - { - pageCnvCtx *result = (pageCnvCtx *) pg_malloc(sizeof(*result)); - - result->oldPageVersion = oldPageVersion; - result->newPageVersion = newPageVersion; - - result->startup = (pluginStartup) pg_dlsym(plugin, "init"); - result->convertFile = (pluginConvertFile) pg_dlsym(plugin, "convertFile"); - result->convertPage = (pluginConvertPage) pg_dlsym(plugin, "convertPage"); - result->shutdown = (pluginShutdown) pg_dlsym(plugin, "fini"); - result->pluginData = NULL; - - /* - * If the plugin has exported an initializer, go ahead and invoke it. - */ - if (result->startup) - result->startup(MIGRATOR_API_VERSION, &result->pluginVersion, - newPageVersion, oldPageVersion, &result->pluginData); - - return result; - } -} - -#endif diff --git a/contrib/pg_upgrade/parallel.c b/contrib/pg_upgrade/parallel.c deleted file mode 100644 index 6da996559a4..00000000000 --- a/contrib/pg_upgrade/parallel.c +++ /dev/null @@ -1,357 +0,0 @@ -/* - * parallel.c - * - * multi-process support - * - * Copyright (c) 2010-2015, PostgreSQL Global Development Group - * contrib/pg_upgrade/parallel.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include -#include -#include -#include - -#ifdef WIN32 -#include -#endif - -static int parallel_jobs; - -#ifdef WIN32 -/* - * Array holding all active threads. There can't be any gaps/zeros so - * it can be passed to WaitForMultipleObjects(); we keep the handles in - * their own array, separate from the argument structs, for that reason. - */ -HANDLE *thread_handles; - -typedef struct -{ - char *log_file; - char *opt_log_file; - char *cmd; -} exec_thread_arg; - -typedef struct -{ - DbInfoArr *old_db_arr; - DbInfoArr *new_db_arr; - char *old_pgdata; - char *new_pgdata; - char *old_tablespace; -} transfer_thread_arg; - -exec_thread_arg **exec_thread_args; -transfer_thread_arg **transfer_thread_args; - -/* track current thread_args struct so reap_child() can be used for all cases */ -void **cur_thread_args; - -DWORD win32_exec_prog(exec_thread_arg *args); -DWORD win32_transfer_all_new_dbs(transfer_thread_arg *args); -#endif - -/* - * parallel_exec_prog - * - * This has the same API as exec_prog, except it does parallel execution, - * and therefore must throw errors and doesn't return an error status. - */ -void -parallel_exec_prog(const char *log_file, const char *opt_log_file, - const char *fmt,...) -{ - va_list args; - char cmd[MAX_STRING]; - -#ifndef WIN32 - pid_t child; -#else - HANDLE child; - exec_thread_arg *new_arg; -#endif - - va_start(args, fmt); - vsnprintf(cmd, sizeof(cmd), fmt, args); - va_end(args); - - if (user_opts.jobs <= 1) - /* throw_error must be true to allow jobs */ - exec_prog(log_file, opt_log_file, true, "%s", cmd); - else - { - /* parallel */ -#ifdef WIN32 - if (thread_handles == NULL) - thread_handles = pg_malloc(user_opts.jobs * sizeof(HANDLE)); - - if (exec_thread_args == NULL) - { - int i; - - exec_thread_args = pg_malloc(user_opts.jobs * sizeof(exec_thread_arg *)); - - /* - * For safety and performance, we keep the args allocated during - * the entire life of the process, and we don't free the args in a - * thread different from the one that allocated it.
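In isolation, the slot-reuse pattern that comment describes looks roughly like this (a sketch with illustrative names; the real code inlines it):

    /* One argument struct per job slot, allocated once on first use and
     * never freed while the program runs; callers receive the slot for
     * the job they are about to start. */
    static exec_thread_arg **slots;

    static exec_thread_arg *
    get_job_slot(int new_job_index, int max_jobs)
    {
        int         i;

        if (slots == NULL)
        {
            slots = pg_malloc(max_jobs * sizeof(exec_thread_arg *));
            for (i = 0; i < max_jobs; i++)
                slots[i] = pg_malloc0(sizeof(exec_thread_arg));
        }
        /* unused slots are kept at the tail of the array */
        return slots[new_job_index];
    }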
- */ - for (i = 0; i < user_opts.jobs; i++) - exec_thread_args[i] = pg_malloc0(sizeof(exec_thread_arg)); - } - - cur_thread_args = (void **) exec_thread_args; -#endif - /* harvest any dead children */ - while (reap_child(false) == true) - ; - - /* must we wait for a dead child? */ - if (parallel_jobs >= user_opts.jobs) - reap_child(true); - - /* set this before we start the job */ - parallel_jobs++; - - /* Ensure stdio state is quiesced before forking */ - fflush(NULL); - -#ifndef WIN32 - child = fork(); - if (child == 0) - /* use _exit to skip atexit() functions */ - _exit(!exec_prog(log_file, opt_log_file, true, "%s", cmd)); - else if (child < 0) - /* fork failed */ - pg_fatal("could not create worker process: %s\n", strerror(errno)); -#else - /* empty array elements are always at the end */ - new_arg = exec_thread_args[parallel_jobs - 1]; - - /* Can only pass one pointer into the function, so use a struct */ - if (new_arg->log_file) - pg_free(new_arg->log_file); - new_arg->log_file = pg_strdup(log_file); - if (new_arg->opt_log_file) - pg_free(new_arg->opt_log_file); - new_arg->opt_log_file = opt_log_file ? pg_strdup(opt_log_file) : NULL; - if (new_arg->cmd) - pg_free(new_arg->cmd); - new_arg->cmd = pg_strdup(cmd); - - child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_exec_prog, - new_arg, 0, NULL); - if (child == 0) - pg_fatal("could not create worker thread: %s\n", strerror(errno)); - - thread_handles[parallel_jobs - 1] = child; -#endif - } - - return; -} - - -#ifdef WIN32 -DWORD -win32_exec_prog(exec_thread_arg *args) -{ - int ret; - - ret = !exec_prog(args->log_file, args->opt_log_file, true, "%s", args->cmd); - - /* terminates thread */ - return ret; -} -#endif - - -/* - * parallel_transfer_all_new_dbs - * - * This has the same API as transfer_all_new_dbs, except it does parallel - * execution by transferring multiple tablespaces in parallel - */ -void -parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, - char *old_pgdata, char *new_pgdata, - char *old_tablespace) -{ -#ifndef WIN32 - pid_t child; -#else - HANDLE child; - transfer_thread_arg *new_arg; -#endif - - if (user_opts.jobs <= 1) - /* throw_error must be true to allow jobs */ - transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, NULL); - else - { - /* parallel */ -#ifdef WIN32 - if (thread_handles == NULL) - thread_handles = pg_malloc(user_opts.jobs * sizeof(HANDLE)); - - if (transfer_thread_args == NULL) - { - int i; - - transfer_thread_args = pg_malloc(user_opts.jobs * sizeof(transfer_thread_arg *)); - - /* - * For safety and performance, we keep the args allocated during - * the entire life of the process, and we don't free the args in a - * thread different from the one that allocated it. - */ - for (i = 0; i < user_opts.jobs; i++) - transfer_thread_args[i] = pg_malloc0(sizeof(transfer_thread_arg)); - } - - cur_thread_args = (void **) transfer_thread_args; -#endif - /* harvest any dead children */ - while (reap_child(false) == true) - ; - - /* must we wait for a dead child?
*/ - if (parallel_jobs >= user_opts.jobs) - reap_child(true); - - /* set this before we start the job */ - parallel_jobs++; - - /* Ensure stdio state is quiesced before forking */ - fflush(NULL); - -#ifndef WIN32 - child = fork(); - if (child == 0) - { - transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, - old_tablespace); - /* if we take another exit path, it will be non-zero */ - /* use _exit to skip atexit() functions */ - _exit(0); - } - else if (child < 0) - /* fork failed */ - pg_fatal("could not create worker process: %s\n", strerror(errno)); -#else - /* empty array elements are always at the end */ - new_arg = transfer_thread_args[parallel_jobs - 1]; - - /* Can only pass one pointer into the function, so use a struct */ - new_arg->old_db_arr = old_db_arr; - new_arg->new_db_arr = new_db_arr; - if (new_arg->old_pgdata) - pg_free(new_arg->old_pgdata); - new_arg->old_pgdata = pg_strdup(old_pgdata); - if (new_arg->new_pgdata) - pg_free(new_arg->new_pgdata); - new_arg->new_pgdata = pg_strdup(new_pgdata); - if (new_arg->old_tablespace) - pg_free(new_arg->old_tablespace); - new_arg->old_tablespace = old_tablespace ? pg_strdup(old_tablespace) : NULL; - - child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_transfer_all_new_dbs, - new_arg, 0, NULL); - if (child == 0) - pg_fatal("could not create worker thread: %s\n", strerror(errno)); - - thread_handles[parallel_jobs - 1] = child; -#endif - } - - return; -} - - -#ifdef WIN32 -DWORD -win32_transfer_all_new_dbs(transfer_thread_arg *args) -{ - transfer_all_new_dbs(args->old_db_arr, args->new_db_arr, args->old_pgdata, - args->new_pgdata, args->old_tablespace); - - /* terminates thread */ - return 0; -} -#endif - - -/* - * collect status from a completed worker child - */ -bool -reap_child(bool wait_for_child) -{ -#ifndef WIN32 - int work_status; - int ret; -#else - int thread_num; - DWORD res; -#endif - - if (user_opts.jobs <= 1 || parallel_jobs == 0) - return false; - -#ifndef WIN32 - ret = waitpid(-1, &work_status, wait_for_child ? 0 : WNOHANG); - - /* no children or, for WNOHANG, no dead children */ - if (ret <= 0 || !WIFEXITED(work_status)) - return false; - - if (WEXITSTATUS(work_status) != 0) - pg_fatal("child worker exited abnormally: %s\n", strerror(errno)); -#else - /* wait for one to finish */ - thread_num = WaitForMultipleObjects(parallel_jobs, thread_handles, - false, wait_for_child ? INFINITE : 0); - - if (thread_num == WAIT_TIMEOUT || thread_num == WAIT_FAILED) - return false; - - /* compute thread index in thread_handles */ - thread_num -= WAIT_OBJECT_0; - - /* get the result */ - GetExitCodeThread(thread_handles[thread_num], &res); - if (res != 0) - pg_fatal("child worker exited abnormally: %s\n", strerror(errno)); - - /* dispose of handle to stop leaks */ - CloseHandle(thread_handles[thread_num]); - - /* Move last slot into dead child's position */ - if (thread_num != parallel_jobs - 1) - { - void *tmp_args; - - thread_handles[thread_num] = thread_handles[parallel_jobs - 1]; - - /* - * Move last active thread arg struct into the now-dead slot, and the - * now-dead slot to the end for reuse by the next thread. Though the - * thread struct is in use by another thread, we can safely swap the - * struct pointers within the array.
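Isolated from the surrounding bookkeeping, that swap is the classic O(1) remove-by-swap compaction (a sketch with illustrative names; HANDLE as in the Windows path above):

    /* Remove entry `dead` from n live entries, keeping live handles
     * contiguous at the front and parking the dead arg struct at the
     * tail for reuse by the next job. */
    static void
    compact_slots(HANDLE *handles, void **args, int dead, int n)
    {
        void       *tmp = args[dead];

        handles[dead] = handles[n - 1];
        args[dead] = args[n - 1];
        args[n - 1] = tmp;
    }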
- */ - tmp_args = cur_thread_args[thread_num]; - cur_thread_args[thread_num] = cur_thread_args[parallel_jobs - 1]; - cur_thread_args[parallel_jobs - 1] = tmp_args; - } -#endif - - /* do this after job has been removed */ - parallel_jobs--; - - return true; -} diff --git a/contrib/pg_upgrade/pg_upgrade.c b/contrib/pg_upgrade/pg_upgrade.c deleted file mode 100644 index 78bd29fb957..00000000000 --- a/contrib/pg_upgrade/pg_upgrade.c +++ /dev/null @@ -1,616 +0,0 @@ -/* - * pg_upgrade.c - * - * main source file - * - * Copyright (c) 2010-2015, PostgreSQL Global Development Group - * contrib/pg_upgrade/pg_upgrade.c - */ - -/* - * To simplify the upgrade process, we force certain system values to be - * identical between old and new clusters: - * - * We control all assignments of pg_class.oid (and relfilenode) so toast - * oids are the same between old and new clusters. This is important - * because toast oids are stored as toast pointers in user tables. - * - * While pg_class.oid and pg_class.relfilenode are initially the same - * in a cluster, they can diverge due to CLUSTER, REINDEX, or VACUUM - * FULL. In the new cluster, pg_class.oid and pg_class.relfilenode will - * be the same and will match the old pg_class.oid value. Because of - * this, old/new pg_class.relfilenode values will not match if CLUSTER, - * REINDEX, or VACUUM FULL have been performed in the old cluster. - * - * We control all assignments of pg_type.oid because these oids are stored - * in user composite type values. - * - * We control all assignments of pg_enum.oid because these oids are stored - * in user tables as enum values. - * - * We control all assignments of pg_authid.oid because these oids are stored - * in pg_largeobject_metadata. - */ - - - -#include "postgres_fe.h" - -#include "pg_upgrade.h" -#include "common/restricted_token.h" - -#ifdef HAVE_LANGINFO_H -#include <langinfo.h> -#endif - -static void prepare_new_cluster(void); -static void prepare_new_databases(void); -static void create_new_objects(void); -static void copy_clog_xlog_xid(void); -static void set_frozenxids(bool minmxid_only); -static void setup(char *argv0, bool *live_check); -static void cleanup(void); - -ClusterInfo old_cluster, - new_cluster; -OSInfo os_info; - -char *output_files[] = { - SERVER_LOG_FILE, -#ifdef WIN32 - /* unique file for pg_ctl start */ - SERVER_START_LOG_FILE, -#endif - UTILITY_LOG_FILE, - INTERNAL_LOG_FILE, - NULL -}; - - -int -main(int argc, char **argv) -{ - char *analyze_script_file_name = NULL; - char *deletion_script_file_name = NULL; - bool live_check = false; - - parseCommandLine(argc, argv); - - get_restricted_token(os_info.progname); - - adjust_data_dir(&old_cluster); - adjust_data_dir(&new_cluster); - - setup(argv[0], &live_check); - - output_check_banner(live_check); - - check_cluster_versions(); - - get_sock_dir(&old_cluster, live_check); - get_sock_dir(&new_cluster, false); - - check_cluster_compatibility(live_check); - - check_and_dump_old_cluster(live_check); - - - /* -- NEW -- */ - start_postmaster(&new_cluster, true); - - check_new_cluster(); - report_clusters_compatible(); - - pg_log(PG_REPORT, "\nPerforming Upgrade\n"); - pg_log(PG_REPORT, "------------------\n"); - - prepare_new_cluster(); - - stop_postmaster(false); - - /* - * Destructive Changes to New Cluster - */ - - copy_clog_xlog_xid(); - - /* New cluster now using xids of the old system */ - - /* -- NEW -- */ - start_postmaster(&new_cluster, true); - - prepare_new_databases(); - - create_new_objects(); - - stop_postmaster(false); - - /* - * Most failures happen in
create_new_objects(), which has completed at - * this point. We do this here because it is just before linking, which - * will link the old and new cluster data files, preventing the old - * cluster from being safely started once the new cluster is started. - */ - if (user_opts.transfer_mode == TRANSFER_MODE_LINK) - disable_old_cluster(); - - transfer_all_new_tablespaces(&old_cluster.dbarr, &new_cluster.dbarr, - old_cluster.pgdata, new_cluster.pgdata); - - /* - * Assuming OIDs are only used in system tables, there is no need to - * restore the OID counter because we have not transferred any OIDs from - * the old system, but we do it anyway just in case. We do it late here - * because there is no need to have the schema load use new oids. - */ - prep_status("Setting next OID for new cluster"); - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/pg_resetxlog\" -o %u \"%s\"", - new_cluster.bindir, old_cluster.controldata.chkpnt_nxtoid, - new_cluster.pgdata); - check_ok(); - - prep_status("Sync data directory to disk"); - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/initdb\" --sync-only \"%s\"", new_cluster.bindir, - new_cluster.pgdata); - check_ok(); - - create_script_for_cluster_analyze(&analyze_script_file_name); - create_script_for_old_cluster_deletion(&deletion_script_file_name); - - issue_warnings(); - - pg_log(PG_REPORT, "\nUpgrade Complete\n"); - pg_log(PG_REPORT, "----------------\n"); - - output_completion_banner(analyze_script_file_name, - deletion_script_file_name); - - pg_free(analyze_script_file_name); - pg_free(deletion_script_file_name); - - cleanup(); - - return 0; -} - - -static void -setup(char *argv0, bool *live_check) -{ - char exec_path[MAXPGPATH]; /* full path to my executable */ - - /* - * Make sure the user has a clean environment; otherwise, we may confuse - * libpq when we connect to one (or both) of the servers. - */ - check_pghost_envvar(); - - verify_directories(); - - /* no postmasters should be running, except for a live check */ - if (pid_lock_file_exists(old_cluster.pgdata)) - { - /* - * If we have a postmaster.pid file, try to start the server. If it - * starts, the pid file was stale, so stop the server. If it doesn't - * start, assume the server is running. If the pid file is left over - * from a server crash, this also allows any committed transactions - * stored in the WAL to be replayed so they are not lost, because WAL - * files are not transferred from old to new servers.
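In compact form, the decision flow just described (the helper name handle_live_server is hypothetical; the real checks follow below):

    if (pid_lock_file_exists(cluster->pgdata))
    {
        if (start_postmaster(cluster, false))
            stop_postmaster(false);      /* pid file was stale; WAL replayed */
        else
            handle_live_server(cluster); /* a running postmaster owns it */
    }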
- */ - if (start_postmaster(&old_cluster, false)) - stop_postmaster(false); - else - { - if (!user_opts.check) - pg_fatal("There seems to be a postmaster servicing the old cluster.\n" - "Please shutdown that postmaster and try again.\n"); - else - *live_check = true; - } - } - - /* same goes for the new postmaster */ - if (pid_lock_file_exists(new_cluster.pgdata)) - { - if (start_postmaster(&new_cluster, false)) - stop_postmaster(false); - else - pg_fatal("There seems to be a postmaster servicing the new cluster.\n" - "Please shutdown that postmaster and try again.\n"); - } - - /* get path to pg_upgrade executable */ - if (find_my_exec(argv0, exec_path) < 0) - pg_fatal("Could not get path name to pg_upgrade: %s\n", getErrorText(errno)); - - /* Trim off program name and keep just path */ - *last_dir_separator(exec_path) = '\0'; - canonicalize_path(exec_path); - os_info.exec_path = pg_strdup(exec_path); -} - - -static void -prepare_new_cluster(void) -{ - /* - * It would make more sense to freeze after loading the schema, but that - * would cause us to lose the frozenids restored by the load. We use - * --analyze so autovacuum doesn't update statistics later. - */ - prep_status("Analyzing all rows in the new cluster"); - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/vacuumdb\" %s --all --analyze %s", - new_cluster.bindir, cluster_conn_opts(&new_cluster), - log_opts.verbose ? "--verbose" : ""); - check_ok(); - - /* - * We do freeze after analyze so pg_statistic is also frozen. template0 is - * not frozen here, but data rows were frozen by initdb, and we set its - * datfrozenxid, relfrozenxids, and relminmxid later to match the new xid - * counter. - */ - prep_status("Freezing all rows on the new cluster"); - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/vacuumdb\" %s --all --freeze %s", - new_cluster.bindir, cluster_conn_opts(&new_cluster), - log_opts.verbose ? "--verbose" : ""); - check_ok(); - - get_pg_database_relfilenode(&new_cluster); -} - - -static void -prepare_new_databases(void) -{ - /* - * We set autovacuum_freeze_max_age to its maximum value so autovacuum - * does not launch here and delete clog files before the frozen xids are - * set. - */ - - set_frozenxids(false); - - prep_status("Restoring global objects in the new cluster"); - - /* - * We have to create the databases first so we can install support - * functions in all the other databases. Ideally we could create the - * support functions in template1 but pg_dumpall creates databases using - * the template0 template. - */ - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"", - new_cluster.bindir, cluster_conn_opts(&new_cluster), - GLOBALS_DUMP_FILE); - check_ok(); - - /* we load this to get a current list of databases */ - get_db_and_rel_infos(&new_cluster); -} - - -static void -create_new_objects(void) -{ - int dbnum; - - prep_status("Restoring database schemas in the new cluster\n"); - - for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) - { - char sql_file_name[MAXPGPATH], - log_file_name[MAXPGPATH]; - DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum]; - - pg_log(PG_STATUS, "%s", old_db->db_name); - snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid); - snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid); - - /* - * pg_dump only produces its output at the end, so there is little - * parallelism if using the pipe.
- */ - parallel_exec_prog(log_file_name, - NULL, - "\"%s/pg_restore\" %s --exit-on-error --verbose --dbname \"%s\" \"%s\"", - new_cluster.bindir, - cluster_conn_opts(&new_cluster), - old_db->db_name, - sql_file_name); - } - - /* reap all children */ - while (reap_child(true) == true) - ; - - end_progress_output(); - check_ok(); - - /* - * We don't have minmxids for databases or relations in pre-9.3 - * clusters, so set those after we have restored the schemas. - */ - if (GET_MAJOR_VERSION(old_cluster.major_version) < 903) - set_frozenxids(true); - - optionally_create_toast_tables(); - - /* regenerate now that we have objects in the databases */ - get_db_and_rel_infos(&new_cluster); -} - -/* - * Delete the given subdirectory contents from the new cluster - */ -static void -remove_new_subdir(char *subdir, bool rmtopdir) -{ - char new_path[MAXPGPATH]; - - prep_status("Deleting files from new %s", subdir); - - snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, subdir); - if (!rmtree(new_path, rmtopdir)) - pg_fatal("could not delete directory \"%s\"\n", new_path); - - check_ok(); -} - -/* - * Copy the given subdirectory's files from the old cluster to the new cluster - */ -static void -copy_subdir_files(char *subdir) -{ - char old_path[MAXPGPATH]; - char new_path[MAXPGPATH]; - - remove_new_subdir(subdir, true); - - snprintf(old_path, sizeof(old_path), "%s/%s", old_cluster.pgdata, subdir); - snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, subdir); - - prep_status("Copying old %s to new server", subdir); - - exec_prog(UTILITY_LOG_FILE, NULL, true, -#ifndef WIN32 - "cp -Rf \"%s\" \"%s\"", -#else - /* flags: everything, no confirm, quiet, overwrite read-only */ - "xcopy /e /y /q /r \"%s\" \"%s\\\"", -#endif - old_path, new_path); - - check_ok(); -} - -static void -copy_clog_xlog_xid(void) -{ - /* copy old commit logs to new data dir */ - copy_subdir_files("pg_clog"); - - /* set the next transaction id and epoch of the new cluster */ - prep_status("Setting next transaction ID and epoch for new cluster"); - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/pg_resetxlog\" -f -x %u \"%s\"", - new_cluster.bindir, old_cluster.controldata.chkpnt_nxtxid, - new_cluster.pgdata); - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/pg_resetxlog\" -f -e %u \"%s\"", - new_cluster.bindir, old_cluster.controldata.chkpnt_nxtepoch, - new_cluster.pgdata); - /* must reset commit timestamp limits also */ - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/pg_resetxlog\" -f -c %u,%u \"%s\"", - new_cluster.bindir, - old_cluster.controldata.chkpnt_nxtxid, - old_cluster.controldata.chkpnt_nxtxid, - new_cluster.pgdata); - check_ok(); - - /* - * If the old server is before the MULTIXACT_FORMATCHANGE_CAT_VER change - * (see pg_upgrade.h) and the new server is after, then we don't copy - * pg_multixact files, but we need to reset pg_control so that the new - * server doesn't attempt to read multis older than the cutoff value. - */ - if (old_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER && - new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER) - { - copy_subdir_files("pg_multixact/offsets"); - copy_subdir_files("pg_multixact/members"); - - prep_status("Setting next multixact ID and offset for new cluster"); - - /* - * we preserve all files and contents, so we must preserve both "next" - * counters here and the oldest multi present on the system.
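As a concrete illustration with hypothetical control-file values: if the old cluster reported next multixact offset 10000, next multixact ID 5000, and oldest multixact 100, the reset below amounts to running pg_resetxlog -O 10000 -m 5000,100 against the new data directory.

    /* Hypothetical example values, for illustration only */
    uint32      chkpnt_nxtmxoff = 10000;    /* next multixact offset */
    uint32      chkpnt_nxtmulti = 5000;     /* next multixact ID */
    uint32      chkpnt_oldstMulti = 100;    /* oldest multixact on disk */
    /* => pg_resetxlog -O 10000 -m 5000,100 <new_pgdata> */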
- */ - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/pg_resetxlog\" -O %u -m %u,%u \"%s\"", - new_cluster.bindir, - old_cluster.controldata.chkpnt_nxtmxoff, - old_cluster.controldata.chkpnt_nxtmulti, - old_cluster.controldata.chkpnt_oldstMulti, - new_cluster.pgdata); - check_ok(); - } - else if (new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER) - { - /* - * Remove offsets/0000 file created by initdb that no longer matches - * the new multi-xid value. "members" starts at zero so no need to - * remove it. - */ - remove_new_subdir("pg_multixact/offsets", false); - - prep_status("Setting oldest multixact ID on new cluster"); - - /* - * We don't preserve files in this case, but it's important that the - * oldest multi is set to the latest value used by the old system, so - * that multixact.c returns the empty set for multis that might be - * present on disk. We set next multi to the value following that; it - * might end up wrapped around (i.e. 0) if the old cluster had - * next=MaxMultiXactId, but multixact.c can cope with that just fine. - */ - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/pg_resetxlog\" -m %u,%u \"%s\"", - new_cluster.bindir, - old_cluster.controldata.chkpnt_nxtmulti + 1, - old_cluster.controldata.chkpnt_nxtmulti, - new_cluster.pgdata); - check_ok(); - } - - /* now reset the wal archives in the new cluster */ - prep_status("Resetting WAL archives"); - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/pg_resetxlog\" -l %s \"%s\"", new_cluster.bindir, - old_cluster.controldata.nextxlogfile, - new_cluster.pgdata); - check_ok(); -} - - -/* - * set_frozenxids() - * - * We have frozen all xids, so set datfrozenxid, relfrozenxid, and - * relminmxid to be the old cluster's xid counter, which we just set - * in the new cluster. User-table frozenxid and minmxid values will - * be set by pg_dump --binary-upgrade, but objects not set by pg_dump - * must have proper frozen counters. - */ -static -void -set_frozenxids(bool minmxid_only) -{ - int dbnum; - PGconn *conn, - *conn_template1; - PGresult *dbres; - int ntups; - int i_datname; - int i_datallowconn; - - if (!minmxid_only) - prep_status("Setting frozenxid and minmxid counters in new cluster"); - else - prep_status("Setting minmxid counter in new cluster"); - - conn_template1 = connectToServer(&new_cluster, "template1"); - - if (!minmxid_only) - /* set pg_database.datfrozenxid */ - PQclear(executeQueryOrDie(conn_template1, - "UPDATE pg_catalog.pg_database " - "SET datfrozenxid = '%u'", - old_cluster.controldata.chkpnt_nxtxid)); - - /* set pg_database.datminmxid */ - PQclear(executeQueryOrDie(conn_template1, - "UPDATE pg_catalog.pg_database " - "SET datminmxid = '%u'", - old_cluster.controldata.chkpnt_nxtmulti)); - - /* get database names */ - dbres = executeQueryOrDie(conn_template1, - "SELECT datname, datallowconn " - "FROM pg_catalog.pg_database"); - - i_datname = PQfnumber(dbres, "datname"); - i_datallowconn = PQfnumber(dbres, "datallowconn"); - - ntups = PQntuples(dbres); - for (dbnum = 0; dbnum < ntups; dbnum++) - { - char *datname = PQgetvalue(dbres, dbnum, i_datname); - char *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn); - - /* - * We must update databases where datallowconn = false, e.g. - * template0, because autovacuum increments their datfrozenxids, - * relfrozenxids, and relminmxid even if autovacuum is turned off, - * and even though all the data rows are already frozen. To enable - * this, we temporarily change datallowconn.
- */ - if (strcmp(datallowconn, "f") == 0) - PQclear(executeQueryOrDie(conn_template1, - "ALTER DATABASE %s ALLOW_CONNECTIONS = true", - quote_identifier(datname))); - - conn = connectToServer(&new_cluster, datname); - - if (!minmxid_only) - /* set pg_class.relfrozenxid */ - PQclear(executeQueryOrDie(conn, - "UPDATE pg_catalog.pg_class " - "SET relfrozenxid = '%u' " - /* only heap, materialized view, and TOAST are vacuumed */ - "WHERE relkind IN ('r', 'm', 't')", - old_cluster.controldata.chkpnt_nxtxid)); - - /* set pg_class.relminmxid */ - PQclear(executeQueryOrDie(conn, - "UPDATE pg_catalog.pg_class " - "SET relminmxid = '%u' " - /* only heap, materialized view, and TOAST are vacuumed */ - "WHERE relkind IN ('r', 'm', 't')", - old_cluster.controldata.chkpnt_nxtmulti)); - PQfinish(conn); - - /* Reset datallowconn flag */ - if (strcmp(datallowconn, "f") == 0) - PQclear(executeQueryOrDie(conn_template1, - "ALTER DATABASE %s ALLOW_CONNECTIONS = false", - quote_identifier(datname))); - } - - PQclear(dbres); - - PQfinish(conn_template1); - - check_ok(); -} - - -static void -cleanup(void) -{ - fclose(log_opts.internal); - - /* Remove dump and log files? */ - if (!log_opts.retain) - { - int dbnum; - char **filename; - - for (filename = output_files; *filename != NULL; filename++) - unlink(*filename); - - /* remove dump files */ - unlink(GLOBALS_DUMP_FILE); - - if (old_cluster.dbarr.dbs) - for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) - { - char sql_file_name[MAXPGPATH], - log_file_name[MAXPGPATH]; - DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum]; - - snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid); - unlink(sql_file_name); - - snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid); - unlink(log_file_name); - } - } -} diff --git a/contrib/pg_upgrade/pg_upgrade.h b/contrib/pg_upgrade/pg_upgrade.h deleted file mode 100644 index ace3465f989..00000000000 --- a/contrib/pg_upgrade/pg_upgrade.h +++ /dev/null @@ -1,481 +0,0 @@ -/* - * pg_upgrade.h - * - * Copyright (c) 2010-2015, PostgreSQL Global Development Group - * contrib/pg_upgrade/pg_upgrade.h - */ - -#include -#include -#include -#include - -#include "libpq-fe.h" - -/* Use port in the private/dynamic port number range */ -#define DEF_PGUPORT 50432 - -/* Allocate for null byte */ -#define USER_NAME_SIZE 128 - -#define MAX_STRING 1024 -#define LINE_ALLOC 4096 -#define QUERY_ALLOC 8192 - -#define MIGRATOR_API_VERSION 1 - -#define MESSAGE_WIDTH 60 - -#define GET_MAJOR_VERSION(v) ((v) / 100) - -/* contains both global db information and CREATE DATABASE commands */ -#define GLOBALS_DUMP_FILE "pg_upgrade_dump_globals.sql" -#define DB_DUMP_FILE_MASK "pg_upgrade_dump_%u.custom" - -#define DB_DUMP_LOG_FILE_MASK "pg_upgrade_dump_%u.log" -#define SERVER_LOG_FILE "pg_upgrade_server.log" -#define UTILITY_LOG_FILE "pg_upgrade_utility.log" -#define INTERNAL_LOG_FILE "pg_upgrade_internal.log" - -extern char *output_files[]; - -/* - * WIN32 files do not accept writes from multiple processes - * - * On Win32, we can't send both pg_upgrade output and command output to the - * same file because we get the error: "The process cannot access the file - * because it is being used by another process." so send the pg_ctl - * command-line output to a new file, rather than into the server log file. - * Ideally we could use UTILITY_LOG_FILE for this, but some Windows platforms - * keep the pg_ctl output file open by the running postmaster, even after - * pg_ctl exits. 
- * - * We could use the Windows pgwin32_open() flags to allow shared file - * writes but it is unclear how all other tools would use those flags, so - * we just avoid it and log a little differently on Windows; we adjust - * the error message appropriately. - */ -#ifndef WIN32 -#define SERVER_START_LOG_FILE SERVER_LOG_FILE -#define SERVER_STOP_LOG_FILE SERVER_LOG_FILE -#else -#define SERVER_START_LOG_FILE "pg_upgrade_server_start.log" -/* - * "pg_ctl start" keeps SERVER_START_LOG_FILE and SERVER_LOG_FILE open - * while the server is running, so we use UTILITY_LOG_FILE for "pg_ctl - * stop". - */ -#define SERVER_STOP_LOG_FILE UTILITY_LOG_FILE -#endif - - -#ifndef WIN32 -#define pg_copy_file copy_file -#define pg_mv_file rename -#define pg_link_file link -#define PATH_SEPARATOR '/' -#define RM_CMD "rm -f" -#define RMDIR_CMD "rm -rf" -#define SCRIPT_PREFIX "./" -#define SCRIPT_EXT "sh" -#define ECHO_QUOTE "'" -#define ECHO_BLANK "" -#else -#define pg_copy_file CopyFile -#define pg_mv_file pgrename -#define pg_link_file win32_pghardlink -#define PATH_SEPARATOR '\\' -#define RM_CMD "DEL /q" -#define RMDIR_CMD "RMDIR /s/q" -#define SCRIPT_PREFIX "" -#define SCRIPT_EXT "bat" -#define EXE_EXT ".exe" -#define ECHO_QUOTE "" -#define ECHO_BLANK "." -#endif - -#define CLUSTER_NAME(cluster) ((cluster) == &old_cluster ? "old" : \ - (cluster) == &new_cluster ? "new" : "none") - -#define atooid(x) ((Oid) strtoul((x), NULL, 10)) - -/* OID system catalog preservation added during PG 9.0 development */ -#define TABLE_SPACE_SUBDIRS_CAT_VER 201001111 -/* postmaster/postgres -b (binary_upgrade) flag added during PG 9.1 development */ -#define BINARY_UPGRADE_SERVER_FLAG_CAT_VER 201104251 -/* - * Visibility map changed with this 9.2 commit, - * 8f9fe6edce358f7904e0db119416b4d1080a83aa; pick later catalog version. - */ -#define VISIBILITY_MAP_CRASHSAFE_CAT_VER 201107031 - -/* - * pg_multixact format changed in 9.3 commit 0ac5ad5134f2769ccbaefec73844f85, - * ("Improve concurrency of foreign key locking") which also updated catalog - * version to this value. pg_upgrade behavior depends on whether old and new - * server versions are both newer than this, or only the new one is. - */ -#define MULTIXACT_FORMATCHANGE_CAT_VER 201301231 - -/* - * large object chunk size added to pg_controldata, - * commit 5f93c37805e7485488480916b4585e098d3cc883 - */ -#define LARGE_OBJECT_SIZE_PG_CONTROL_VER 942 - -/* - * change in JSONB format during 9.4 beta - */ -#define JSONB_FORMAT_CHANGE_CAT_VER 201409291 - -/* - * Each relation is represented by a relinfo structure. - */ -typedef struct -{ - /* Can't use NAMEDATALEN; not guaranteed to fit on client */ - char *nspname; /* namespace name */ - char *relname; /* relation name */ - Oid reloid; /* relation oid */ - Oid relfilenode; /* relation relfile node */ - /* relation tablespace path, or "" for the cluster default */ - char *tablespace; - bool nsp_alloc; - bool tblsp_alloc; -} RelInfo; - -typedef struct -{ - RelInfo *rels; - int nrels; -} RelInfoArr; - -/* - * The following structure represents a relation mapping. - */ -typedef struct -{ - const char *old_tablespace; - const char *new_tablespace; - const char *old_tablespace_suffix; - const char *new_tablespace_suffix; - Oid old_db_oid; - Oid new_db_oid; - - /* - * old/new relfilenodes might differ for pg_largeobject(_metadata) indexes - * due to VACUUM FULL or REINDEX. Other relfilenodes are preserved.
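To make the mapping concrete: the transfer code later in this patch combines these fields with the "%s%s/%u/%u" pattern to build actual file paths. A sketch (the example values in the comment are illustrative):

    char        old_file[MAXPGPATH];

    /* e.g. "/mnt/ts" + "/PG_9.4_201409291" + "/16384" + "/16385" */
    snprintf(old_file, sizeof(old_file), "%s%s/%u/%u",
             map->old_tablespace,           /* "" for the cluster default */
             map->old_tablespace_suffix,
             map->old_db_oid,
             map->old_relfilenode);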
- */ - Oid old_relfilenode; - Oid new_relfilenode; - /* the rest are used only for logging and error reporting */ - char *nspname; /* namespaces */ - char *relname; -} FileNameMap; - -/* - * Structure to store database information - */ -typedef struct -{ - Oid db_oid; /* oid of the database */ - char *db_name; /* database name */ - char db_tablespace[MAXPGPATH]; /* database default tablespace - * path */ - char *db_collate; - char *db_ctype; - int db_encoding; - RelInfoArr rel_arr; /* array of all user relinfos */ -} DbInfo; - -typedef struct -{ - DbInfo *dbs; /* array of db infos */ - int ndbs; /* number of db infos */ -} DbInfoArr; - -/* - * The following structure is used to hold pg_control information. - * Rather than using the backend's control structure we use our own - * structure to avoid pg_control version issues between releases. - */ -typedef struct -{ - uint32 ctrl_ver; - uint32 cat_ver; - char nextxlogfile[25]; - uint32 chkpnt_tli; - uint32 chkpnt_nxtxid; - uint32 chkpnt_nxtepoch; - uint32 chkpnt_nxtoid; - uint32 chkpnt_nxtmulti; - uint32 chkpnt_nxtmxoff; - uint32 chkpnt_oldstMulti; - uint32 align; - uint32 blocksz; - uint32 largesz; - uint32 walsz; - uint32 walseg; - uint32 ident; - uint32 index; - uint32 toast; - uint32 large_object; - bool date_is_int; - bool float8_pass_by_value; - bool data_checksum_version; -} ControlData; - -/* - * Enumeration to denote link modes - */ -typedef enum -{ - TRANSFER_MODE_COPY, - TRANSFER_MODE_LINK -} transferMode; - -/* - * Enumeration to denote pg_log modes - */ -typedef enum -{ - PG_VERBOSE, - PG_STATUS, - PG_REPORT, - PG_WARNING, - PG_FATAL -} eLogType; - - -typedef long pgpid_t; - - -/* - * cluster - * - * information about each cluster - */ -typedef struct -{ - ControlData controldata; /* pg_control information */ - DbInfoArr dbarr; /* dbinfos array */ - char *pgdata; /* pathname for cluster's $PGDATA directory */ - char *pgconfig; /* pathname for cluster's config file - * directory */ - char *bindir; /* pathname for cluster's executable directory */ - char *pgopts; /* options to pass to the server, like pg_ctl - * -o */ - char *sockdir; /* directory for Unix Domain socket, if any */ - unsigned short port; /* port number where postmaster is waiting */ - uint32 major_version; /* PG_VERSION of cluster */ - char major_version_str[64]; /* string PG_VERSION of cluster */ - uint32 bin_version; /* version returned from pg_ctl */ - Oid pg_database_oid; /* OID of pg_database relation */ - const char *tablespace_suffix; /* directory specification */ -} ClusterInfo; - - -/* - * LogOpts -*/ -typedef struct -{ - FILE *internal; /* internal log FILE */ - bool verbose; /* TRUE -> be verbose in messages */ - bool retain; /* retain log files on success */ -} LogOpts; - - -/* - * UserOpts -*/ -typedef struct -{ - bool check; /* TRUE -> ask user for permission to make - * changes */ - transferMode transfer_mode; /* copy files or link them? 
*/ - int jobs; -} UserOpts; - - -/* - * OSInfo - */ -typedef struct -{ - const char *progname; /* complete pathname for this program */ - char *exec_path; /* full path to my executable */ - char *user; /* username for clusters */ - bool user_specified; /* user specified on command-line */ - char **old_tablespaces; /* tablespaces */ - int num_old_tablespaces; - char **libraries; /* loadable libraries */ - int num_libraries; - ClusterInfo *running_cluster; -} OSInfo; - - -/* - * Global variables - */ -extern LogOpts log_opts; -extern UserOpts user_opts; -extern ClusterInfo old_cluster, - new_cluster; -extern OSInfo os_info; - - -/* check.c */ - -void output_check_banner(bool live_check); -void check_and_dump_old_cluster(bool live_check); -void check_new_cluster(void); -void report_clusters_compatible(void); -void issue_warnings(void); -void output_completion_banner(char *analyze_script_file_name, - char *deletion_script_file_name); -void check_cluster_versions(void); -void check_cluster_compatibility(bool live_check); -void create_script_for_old_cluster_deletion(char **deletion_script_file_name); -void create_script_for_cluster_analyze(char **analyze_script_file_name); - - -/* controldata.c */ - -void get_control_data(ClusterInfo *cluster, bool live_check); -void check_control_data(ControlData *oldctrl, ControlData *newctrl); -void disable_old_cluster(void); - - -/* dump.c */ - -void generate_old_dump(void); -void optionally_create_toast_tables(void); - - -/* exec.c */ - -#define EXEC_PSQL_ARGS "--echo-queries --set ON_ERROR_STOP=on --no-psqlrc --dbname=template1" - -bool exec_prog(const char *log_file, const char *opt_log_file, - bool throw_error, const char *fmt,...) pg_attribute_printf(4, 5); -void verify_directories(void); -bool pid_lock_file_exists(const char *datadir); - - -/* file.c */ - -#ifdef PAGE_CONVERSION -typedef const char *(*pluginStartup) (uint16 migratorVersion, - uint16 *pluginVersion, uint16 newPageVersion, - uint16 oldPageVersion, void **pluginData); -typedef const char *(*pluginConvertFile) (void *pluginData, - const char *dstName, const char *srcName); -typedef const char *(*pluginConvertPage) (void *pluginData, - const char *dstPage, const char *srcPage); -typedef const char *(*pluginShutdown) (void *pluginData); - -typedef struct -{ - uint16 oldPageVersion; /* Page layout version of the old cluster */ - uint16 newPageVersion; /* Page layout version of the new cluster */ - uint16 pluginVersion; /* API version of converter plugin */ - void *pluginData; /* Plugin data (set by plugin) */ - pluginStartup startup; /* Pointer to plugin's startup function */ - pluginConvertFile convertFile; /* Pointer to plugin's file converter - * function */ - pluginConvertPage convertPage; /* Pointer to plugin's page converter - * function */ - pluginShutdown shutdown; /* Pointer to plugin's shutdown function */ -} pageCnvCtx; - -const pageCnvCtx *setupPageConverter(void); -#else -/* dummy */ -typedef void *pageCnvCtx; -#endif - -const char *copyAndUpdateFile(pageCnvCtx *pageConverter, const char *src, - const char *dst, bool force); -const char *linkAndUpdateFile(pageCnvCtx *pageConverter, const char *src, - const char *dst); - -void check_hard_link(void); -FILE *fopen_priv(const char *path, const char *mode); - -/* function.c */ - -void get_loadable_libraries(void); -void check_loadable_libraries(void); - -/* info.c */ - -FileNameMap *gen_db_file_maps(DbInfo *old_db, - DbInfo *new_db, int *nmaps, const char *old_pgdata, - const char *new_pgdata); -void get_db_and_rel_infos(ClusterInfo 
*cluster); -void print_maps(FileNameMap *maps, int n, - const char *db_name); - -/* option.c */ - -void parseCommandLine(int argc, char *argv[]); -void adjust_data_dir(ClusterInfo *cluster); -void get_sock_dir(ClusterInfo *cluster, bool live_check); - -/* relfilenode.c */ - -void get_pg_database_relfilenode(ClusterInfo *cluster); -void transfer_all_new_tablespaces(DbInfoArr *old_db_arr, - DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata); -void transfer_all_new_dbs(DbInfoArr *old_db_arr, - DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata, - char *old_tablespace); - -/* tablespace.c */ - -void init_tablespaces(void); - - -/* server.c */ - -PGconn *connectToServer(ClusterInfo *cluster, const char *db_name); -PGresult *executeQueryOrDie(PGconn *conn, const char *fmt,...) pg_attribute_printf(2, 3); - -char *cluster_conn_opts(ClusterInfo *cluster); - -bool start_postmaster(ClusterInfo *cluster, bool throw_error); -void stop_postmaster(bool fast); -uint32 get_major_server_version(ClusterInfo *cluster); -void check_pghost_envvar(void); - - -/* util.c */ - -char *quote_identifier(const char *s); -int get_user_info(char **user_name_p); -void check_ok(void); -void report_status(eLogType type, const char *fmt,...) pg_attribute_printf(2, 3); -void pg_log(eLogType type, const char *fmt,...) pg_attribute_printf(2, 3); -void pg_fatal(const char *fmt,...) pg_attribute_printf(1, 2) pg_attribute_noreturn(); -void end_progress_output(void); -void prep_status(const char *fmt,...) pg_attribute_printf(1, 2); -const char *getErrorText(int errNum); -unsigned int str2uint(const char *str); -void pg_putenv(const char *var, const char *val); - - -/* version.c */ - -void new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster, - bool check_mode); -void old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster); - -/* parallel.c */ -void parallel_exec_prog(const char *log_file, const char *opt_log_file, - const char *fmt,...) pg_attribute_printf(3, 4); -void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, - char *old_pgdata, char *new_pgdata, - char *old_tablespace); -bool reap_child(bool wait_for_child); diff --git a/contrib/pg_upgrade/relfilenode.c b/contrib/pg_upgrade/relfilenode.c deleted file mode 100644 index 423802bd239..00000000000 --- a/contrib/pg_upgrade/relfilenode.c +++ /dev/null @@ -1,294 +0,0 @@ -/* - * relfilenode.c - * - * relfilenode functions - * - * Copyright (c) 2010-2015, PostgreSQL Global Development Group - * contrib/pg_upgrade/relfilenode.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include "catalog/pg_class.h" -#include "access/transam.h" - - -static void transfer_single_new_db(pageCnvCtx *pageConverter, - FileNameMap *maps, int size, char *old_tablespace); -static void transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map, - const char *suffix); - - -/* - * transfer_all_new_tablespaces() - * - * Responsible for upgrading all databases. Invokes routines to generate mappings and then - * physically link the databases. - */ -void -transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, - char *old_pgdata, char *new_pgdata) -{ - pg_log(PG_REPORT, "%s user relation files\n", - user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying"); - - /* - * Transferring files by tablespace is tricky because a single database can - * use multiple tablespaces. For non-parallel mode, we just pass a NULL - * tablespace path, which matches all tablespaces.
In parallel mode, we - * pass the default tablespace and all user-created tablespaces and let - * those operations happen in parallel. - */ - if (user_opts.jobs <= 1) - parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, - new_pgdata, NULL); - else - { - int tblnum; - - /* transfer default tablespace */ - parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, - new_pgdata, old_pgdata); - - for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) - parallel_transfer_all_new_dbs(old_db_arr, - new_db_arr, - old_pgdata, - new_pgdata, - os_info.old_tablespaces[tblnum]); - /* reap all children */ - while (reap_child(true) == true) - ; - } - - end_progress_output(); - check_ok(); - - return; -} - - -/* - * transfer_all_new_dbs() - * - * Responsible for upgrading all databases. Invokes routines to generate mappings and then - * physically link the databases. - */ -void -transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, - char *old_pgdata, char *new_pgdata, char *old_tablespace) -{ - int old_dbnum, - new_dbnum; - - /* Scan the old cluster databases and transfer their files */ - for (old_dbnum = new_dbnum = 0; - old_dbnum < old_db_arr->ndbs; - old_dbnum++, new_dbnum++) - { - DbInfo *old_db = &old_db_arr->dbs[old_dbnum], - *new_db = NULL; - FileNameMap *mappings; - int n_maps; - pageCnvCtx *pageConverter = NULL; - - /* - * Advance past any databases that exist in the new cluster but not in - * the old, e.g. "postgres". (The user might have removed the - * 'postgres' database from the old cluster.) - */ - for (; new_dbnum < new_db_arr->ndbs; new_dbnum++) - { - new_db = &new_db_arr->dbs[new_dbnum]; - if (strcmp(old_db->db_name, new_db->db_name) == 0) - break; - } - - if (new_dbnum >= new_db_arr->ndbs) - pg_fatal("old database \"%s\" not found in the new cluster\n", - old_db->db_name); - - mappings = gen_db_file_maps(old_db, new_db, &n_maps, old_pgdata, - new_pgdata); - if (n_maps) - { - print_maps(mappings, n_maps, new_db->db_name); - -#ifdef PAGE_CONVERSION - pageConverter = setupPageConverter(); -#endif - transfer_single_new_db(pageConverter, mappings, n_maps, - old_tablespace); - } - /* We allocate something even for n_maps == 0 */ - pg_free(mappings); - } - - return; -} - - -/* - * get_pg_database_relfilenode() - * - * Retrieves the relfilenode of the pg_database system catalog; we need it - * later in the upgrade process. - */ -void -get_pg_database_relfilenode(ClusterInfo *cluster) -{ - PGconn *conn = connectToServer(cluster, "template1"); - PGresult *res; - int i_relfile; - - res = executeQueryOrDie(conn, - "SELECT c.relname, c.relfilenode " - "FROM pg_catalog.pg_class c, " - " pg_catalog.pg_namespace n " - "WHERE c.relnamespace = n.oid AND " - " n.nspname = 'pg_catalog' AND " - " c.relname = 'pg_database' " - "ORDER BY c.relname"); - - i_relfile = PQfnumber(res, "relfilenode"); - cluster->pg_database_oid = atooid(PQgetvalue(res, 0, i_relfile)); - - PQclear(res); - PQfinish(conn); -} - - -/* - * transfer_single_new_db() - * - * Create links for the mappings stored in the "maps" array. - */ -static void -transfer_single_new_db(pageCnvCtx *pageConverter, - FileNameMap *maps, int size, char *old_tablespace) -{ - int mapnum; - bool vm_crashsafe_match = true; - - /* - * Do the old and new cluster disagree on the crash-safety of the vm - * files? If so, do not copy them.
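The check that follows is an instance of a recurring idiom in pg_upgrade: gate an optional file class on whether each cluster's catalog version falls before or after a format change. As a generic sketch (illustrative name):

    /* A fork is transferable unless the old cluster predates the format
     * change and the new cluster is at or past it. */
    static bool
    fork_is_compatible(uint32 old_cat_ver, uint32 new_cat_ver,
                       uint32 format_change_ver)
    {
        return !(old_cat_ver < format_change_ver &&
                 new_cat_ver >= format_change_ver);
    }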
- */ - if (old_cluster.controldata.cat_ver < VISIBILITY_MAP_CRASHSAFE_CAT_VER && - new_cluster.controldata.cat_ver >= VISIBILITY_MAP_CRASHSAFE_CAT_VER) - vm_crashsafe_match = false; - - for (mapnum = 0; mapnum < size; mapnum++) - { - if (old_tablespace == NULL || - strcmp(maps[mapnum].old_tablespace, old_tablespace) == 0) - { - /* transfer primary file */ - transfer_relfile(pageConverter, &maps[mapnum], ""); - - /* fsm/vm files added in PG 8.4 */ - if (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) - { - /* - * Copy/link any fsm and vm files, if they exist - */ - transfer_relfile(pageConverter, &maps[mapnum], "_fsm"); - if (vm_crashsafe_match) - transfer_relfile(pageConverter, &maps[mapnum], "_vm"); - } - } - } -} - - -/* - * transfer_relfile() - * - * Copy or link file from old cluster to new one. - */ -static void -transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map, - const char *type_suffix) -{ - const char *msg; - char old_file[MAXPGPATH]; - char new_file[MAXPGPATH]; - int fd; - int segno; - char extent_suffix[65]; - - /* - * Now copy/link any related segments as well. Remember, PG breaks large - * files into 1GB segments, the first segment has no extension, subsequent - * segments are named relfilenode.1, relfilenode.2, relfilenode.3. - */ - for (segno = 0;; segno++) - { - if (segno == 0) - extent_suffix[0] = '\0'; - else - snprintf(extent_suffix, sizeof(extent_suffix), ".%d", segno); - - snprintf(old_file, sizeof(old_file), "%s%s/%u/%u%s%s", - map->old_tablespace, - map->old_tablespace_suffix, - map->old_db_oid, - map->old_relfilenode, - type_suffix, - extent_suffix); - snprintf(new_file, sizeof(new_file), "%s%s/%u/%u%s%s", - map->new_tablespace, - map->new_tablespace_suffix, - map->new_db_oid, - map->new_relfilenode, - type_suffix, - extent_suffix); - - /* Is it an extent, fsm, or vm file? */ - if (type_suffix[0] != '\0' || segno != 0) - { - /* Did file open fail? */ - if ((fd = open(old_file, O_RDONLY, 0)) == -1) - { - /* File does not exist? That's OK, just return */ - if (errno == ENOENT) - return; - else - pg_fatal("error while checking for file existence \"%s.%s\" (\"%s\" to \"%s\"): %s\n", - map->nspname, map->relname, old_file, new_file, - getErrorText(errno)); - } - close(fd); - } - - unlink(new_file); - - /* Copying files might take some time, so give feedback.
*/ - pg_log(PG_STATUS, "%s", old_file); - - if ((user_opts.transfer_mode == TRANSFER_MODE_LINK) && (pageConverter != NULL)) - pg_fatal("This upgrade requires page-by-page conversion, " - "you must use copy mode instead of link mode.\n"); - - if (user_opts.transfer_mode == TRANSFER_MODE_COPY) - { - pg_log(PG_VERBOSE, "copying \"%s\" to \"%s\"\n", old_file, new_file); - - if ((msg = copyAndUpdateFile(pageConverter, old_file, new_file, true)) != NULL) - pg_fatal("error while copying relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n", - map->nspname, map->relname, old_file, new_file, msg); - } - else - { - pg_log(PG_VERBOSE, "linking \"%s\" to \"%s\"\n", old_file, new_file); - - if ((msg = linkAndUpdateFile(pageConverter, old_file, new_file)) != NULL) - pg_fatal("error while creating link for relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n", - map->nspname, map->relname, old_file, new_file, msg); - } - } - - return; -} diff --git a/contrib/pg_upgrade/server.c b/contrib/pg_upgrade/server.c deleted file mode 100644 index c5f66f09632..00000000000 --- a/contrib/pg_upgrade/server.c +++ /dev/null @@ -1,350 +0,0 @@ -/* - * server.c - * - * database server functions - * - * Copyright (c) 2010-2015, PostgreSQL Global Development Group - * contrib/pg_upgrade/server.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - - -static PGconn *get_db_conn(ClusterInfo *cluster, const char *db_name); - - -/* - * connectToServer() - * - * Connects to the desired database on the designated server. - * If the connection attempt fails, this function logs an error - * message and calls exit() to kill the program. - */ -PGconn * -connectToServer(ClusterInfo *cluster, const char *db_name) -{ - PGconn *conn = get_db_conn(cluster, db_name); - - if (conn == NULL || PQstatus(conn) != CONNECTION_OK) - { - pg_log(PG_REPORT, "connection to database failed: %s\n", - PQerrorMessage(conn)); - - if (conn) - PQfinish(conn); - - printf("Failure, exiting\n"); - exit(1); - } - - return conn; -} - - -/* - * get_db_conn() - * - * get database connection, using named database + standard params for cluster - */ -static PGconn * -get_db_conn(ClusterInfo *cluster, const char *db_name) -{ - char conn_opts[2 * NAMEDATALEN + MAXPGPATH + 100]; - - if (cluster->sockdir) - snprintf(conn_opts, sizeof(conn_opts), - "dbname = '%s' user = '%s' host = '%s' port = %d", - db_name, os_info.user, cluster->sockdir, cluster->port); - else - snprintf(conn_opts, sizeof(conn_opts), - "dbname = '%s' user = '%s' port = %d", - db_name, os_info.user, cluster->port); - - return PQconnectdb(conn_opts); -} - - -/* - * cluster_conn_opts() - * - * Return standard command-line options for connecting to this cluster when - * using psql, pg_dump, etc. Ideally this would match what get_db_conn() - * sets, but the utilities we need aren't very consistent about the treatment - * of database name options, so we leave that out. - * - * Note result is in static storage, so use it right away. - */ -char * -cluster_conn_opts(ClusterInfo *cluster) -{ - static char conn_opts[MAXPGPATH + NAMEDATALEN + 100]; - - if (cluster->sockdir) - snprintf(conn_opts, sizeof(conn_opts), - "--host \"%s\" --port %d --username \"%s\"", - cluster->sockdir, cluster->port, os_info.user); - else - snprintf(conn_opts, sizeof(conn_opts), - "--port %d --username \"%s\"", - cluster->port, os_info.user); - - return conn_opts; -} - - -/* - * executeQueryOrDie() - * - * Formats a query string from the given arguments and executes the - * resulting query. 
If the query fails, this function logs an error - * message and calls exit() to kill the program. - */ -PGresult * -executeQueryOrDie(PGconn *conn, const char *fmt,...) -{ - static char query[QUERY_ALLOC]; - va_list args; - PGresult *result; - ExecStatusType status; - - va_start(args, fmt); - vsnprintf(query, sizeof(query), fmt, args); - va_end(args); - - pg_log(PG_VERBOSE, "executing: %s\n", query); - result = PQexec(conn, query); - status = PQresultStatus(result); - - if ((status != PGRES_TUPLES_OK) && (status != PGRES_COMMAND_OK)) - { - pg_log(PG_REPORT, "SQL command failed\n%s\n%s\n", query, - PQerrorMessage(conn)); - PQclear(result); - PQfinish(conn); - printf("Failure, exiting\n"); - exit(1); - } - else - return result; -} - - -/* - * get_major_server_version() - * - * gets the version (in unsigned int form) for the given datadir. Assumes - * that datadir is an absolute path to a valid pgdata directory. The version - * is retrieved by reading the PG_VERSION file. - */ -uint32 -get_major_server_version(ClusterInfo *cluster) -{ - FILE *version_fd; - char ver_filename[MAXPGPATH]; - int integer_version = 0; - int fractional_version = 0; - - snprintf(ver_filename, sizeof(ver_filename), "%s/PG_VERSION", - cluster->pgdata); - if ((version_fd = fopen(ver_filename, "r")) == NULL) - pg_fatal("could not open version file: %s\n", ver_filename); - - if (fscanf(version_fd, "%63s", cluster->major_version_str) == 0 || - sscanf(cluster->major_version_str, "%d.%d", &integer_version, - &fractional_version) != 2) - pg_fatal("could not get version from %s\n", cluster->pgdata); - - fclose(version_fd); - - return (100 * integer_version + fractional_version) * 100; -} - - -static void -stop_postmaster_atexit(void) -{ - stop_postmaster(true); -} - - -bool -start_postmaster(ClusterInfo *cluster, bool throw_error) -{ - char cmd[MAXPGPATH * 4 + 1000]; - PGconn *conn; - bool exit_hook_registered = false; - bool pg_ctl_return = false; - char socket_string[MAXPGPATH + 200]; - - if (!exit_hook_registered) - { - atexit(stop_postmaster_atexit); - exit_hook_registered = true; - } - - socket_string[0] = '\0'; - -#ifdef HAVE_UNIX_SOCKETS - /* prevent TCP/IP connections, restrict socket access */ - strcat(socket_string, - " -c listen_addresses='' -c unix_socket_permissions=0700"); - - /* Have a sockdir? Tell the postmaster. */ - if (cluster->sockdir) - snprintf(socket_string + strlen(socket_string), - sizeof(socket_string) - strlen(socket_string), - " -c %s='%s'", - (GET_MAJOR_VERSION(cluster->major_version) < 903) ? - "unix_socket_directory" : "unix_socket_directories", - cluster->sockdir); -#endif - - /* - * Since PG 9.1, we have used -b to disable autovacuum. For earlier - * releases, setting autovacuum=off disables cleanup vacuum and analyze, - * but freeze vacuums can still happen, so we set autovacuum_freeze_max_age - * to its maximum. (autovacuum_multixact_freeze_max_age was introduced - * after 9.1, so there is no need to set that.) We assume all datfrozenxid - * and relfrozenxid values are less than a gap of 2000000000 from the current - * xid counter, so autovacuum will not touch them. - * - * Turn off durability requirements to improve object creation speed, and - * we only modify the new cluster, so only use it there. If there is a - * crash, the new cluster has to be recreated anyway. fsync=off is a big - * win on ext4. 
- */ - snprintf(cmd, sizeof(cmd), - "\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" -o \"-p %d%s%s %s%s\" start", - cluster->bindir, SERVER_LOG_FILE, cluster->pgconfig, cluster->port, - (cluster->controldata.cat_ver >= - BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? " -b" : - " -c autovacuum=off -c autovacuum_freeze_max_age=2000000000", - (cluster == &new_cluster) ? - " -c synchronous_commit=off -c fsync=off -c full_page_writes=off" : "", - cluster->pgopts ? cluster->pgopts : "", socket_string); - - /* - * Don't throw an error right away, let connecting throw the error because - * it might supply a reason for the failure. - */ - pg_ctl_return = exec_prog(SERVER_START_LOG_FILE, - /* pass both file names if they differ */ - (strcmp(SERVER_LOG_FILE, - SERVER_START_LOG_FILE) != 0) ? - SERVER_LOG_FILE : NULL, - false, - "%s", cmd); - - /* Did it fail and we are just testing if the server could be started? */ - if (!pg_ctl_return && !throw_error) - return false; - - /* - * We set this here to make sure atexit() shuts down the server, but only - * if we started the server successfully. We do it before checking for - * connectivity in case the server started but there is a connectivity - * failure. If pg_ctl did not return success, we will exit below. - * - * Pre-9.1 servers do not have PQping(), so we could be leaving the server - * running if authentication was misconfigured, so someday we might went - * to be more aggressive about doing server shutdowns even if pg_ctl - * fails, but now (2013-08-14) it seems prudent to be cautious. We don't - * want to shutdown a server that might have been accidentally started - * during the upgrade. - */ - if (pg_ctl_return) - os_info.running_cluster = cluster; - - /* - * pg_ctl -w might have failed because the server couldn't be started, or - * there might have been a connection problem in _checking_ if the server - * has started. Therefore, even if pg_ctl failed, we continue and test - * for connectivity in case we get a connection reason for the failure. - */ - if ((conn = get_db_conn(cluster, "template1")) == NULL || - PQstatus(conn) != CONNECTION_OK) - { - pg_log(PG_REPORT, "\nconnection to database failed: %s\n", - PQerrorMessage(conn)); - if (conn) - PQfinish(conn); - pg_fatal("could not connect to %s postmaster started with the command:\n" - "%s\n", - CLUSTER_NAME(cluster), cmd); - } - PQfinish(conn); - - /* - * If pg_ctl failed, and the connection didn't fail, and throw_error is - * enabled, fail now. This could happen if the server was already - * running. - */ - if (!pg_ctl_return) - pg_fatal("pg_ctl failed to start the %s server, or connection failed\n", - CLUSTER_NAME(cluster)); - - return true; -} - - -void -stop_postmaster(bool fast) -{ - ClusterInfo *cluster; - - if (os_info.running_cluster == &old_cluster) - cluster = &old_cluster; - else if (os_info.running_cluster == &new_cluster) - cluster = &new_cluster; - else - return; /* no cluster running */ - - exec_prog(SERVER_STOP_LOG_FILE, NULL, !fast, - "\"%s/pg_ctl\" -w -D \"%s\" -o \"%s\" %s stop", - cluster->bindir, cluster->pgconfig, - cluster->pgopts ? cluster->pgopts : "", - fast ? 
"-m fast" : ""); - - os_info.running_cluster = NULL; -} - - -/* - * check_pghost_envvar() - * - * Tests that PGHOST does not point to a non-local server - */ -void -check_pghost_envvar(void) -{ - PQconninfoOption *option; - PQconninfoOption *start; - - /* Get valid libpq env vars from the PQconndefaults function */ - - start = PQconndefaults(); - - if (!start) - pg_fatal("out of memory\n"); - - for (option = start; option->keyword != NULL; option++) - { - if (option->envvar && (strcmp(option->envvar, "PGHOST") == 0 || - strcmp(option->envvar, "PGHOSTADDR") == 0)) - { - const char *value = getenv(option->envvar); - - if (value && strlen(value) > 0 && - /* check for 'local' host values */ - (strcmp(value, "localhost") != 0 && strcmp(value, "127.0.0.1") != 0 && - strcmp(value, "::1") != 0 && value[0] != '/')) - pg_fatal("libpq environment variable %s has a non-local server value: %s\n", - option->envvar, value); - } - } - - /* Free the memory that libpq allocated on our behalf */ - PQconninfoFree(start); -} diff --git a/contrib/pg_upgrade/tablespace.c b/contrib/pg_upgrade/tablespace.c deleted file mode 100644 index eecdf4b2983..00000000000 --- a/contrib/pg_upgrade/tablespace.c +++ /dev/null @@ -1,124 +0,0 @@ -/* - * tablespace.c - * - * tablespace functions - * - * Copyright (c) 2010-2015, PostgreSQL Global Development Group - * contrib/pg_upgrade/tablespace.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include - -static void get_tablespace_paths(void); -static void set_tablespace_directory_suffix(ClusterInfo *cluster); - - -void -init_tablespaces(void) -{ - get_tablespace_paths(); - - set_tablespace_directory_suffix(&old_cluster); - set_tablespace_directory_suffix(&new_cluster); - - if (os_info.num_old_tablespaces > 0 && - strcmp(old_cluster.tablespace_suffix, new_cluster.tablespace_suffix) == 0) - pg_fatal("Cannot upgrade to/from the same system catalog version when\n" - "using tablespaces.\n"); -} - - -/* - * get_tablespace_paths() - * - * Scans pg_tablespace and returns a malloc'ed array of all tablespace - * paths. Its the caller's responsibility to free the array. - */ -static void -get_tablespace_paths(void) -{ - PGconn *conn = connectToServer(&old_cluster, "template1"); - PGresult *res; - int tblnum; - int i_spclocation; - char query[QUERY_ALLOC]; - - snprintf(query, sizeof(query), - "SELECT %s " - "FROM pg_catalog.pg_tablespace " - "WHERE spcname != 'pg_default' AND " - " spcname != 'pg_global'", - /* 9.2 removed the spclocation column */ - (GET_MAJOR_VERSION(old_cluster.major_version) <= 901) ? - "spclocation" : "pg_catalog.pg_tablespace_location(oid) AS spclocation"); - - res = executeQueryOrDie(conn, "%s", query); - - if ((os_info.num_old_tablespaces = PQntuples(res)) != 0) - os_info.old_tablespaces = (char **) pg_malloc( - os_info.num_old_tablespaces * sizeof(char *)); - else - os_info.old_tablespaces = NULL; - - i_spclocation = PQfnumber(res, "spclocation"); - - for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) - { - struct stat statBuf; - - os_info.old_tablespaces[tblnum] = pg_strdup( - PQgetvalue(res, tblnum, i_spclocation)); - - /* - * Check that the tablespace path exists and is a directory. - * Effectively, this is checking only for tables/indexes in - * non-existent tablespace directories. Databases located in - * non-existent tablespaces already throw a backend error. 
- * Non-existent tablespace directories can occur when a data directory - * that contains user tablespaces is moved as part of pg_upgrade - * preparation and the symbolic links are not updated. - */ - if (stat(os_info.old_tablespaces[tblnum], &statBuf) != 0) - { - if (errno == ENOENT) - report_status(PG_FATAL, - "tablespace directory \"%s\" does not exist\n", - os_info.old_tablespaces[tblnum]); - else - report_status(PG_FATAL, - "cannot stat() tablespace directory \"%s\": %s\n", - os_info.old_tablespaces[tblnum], getErrorText(errno)); - } - if (!S_ISDIR(statBuf.st_mode)) - report_status(PG_FATAL, - "tablespace path \"%s\" is not a directory\n", - os_info.old_tablespaces[tblnum]); - } - - PQclear(res); - - PQfinish(conn); - - return; -} - - -static void -set_tablespace_directory_suffix(ClusterInfo *cluster) -{ - if (GET_MAJOR_VERSION(cluster->major_version) <= 804) - cluster->tablespace_suffix = pg_strdup(""); - else - { - /* This cluster has a version-specific subdirectory */ - - /* The leading slash is needed to start a new directory. */ - cluster->tablespace_suffix = psprintf("/PG_%s_%d", - cluster->major_version_str, - cluster->controldata.cat_ver); - } -} diff --git a/contrib/pg_upgrade/test.sh b/contrib/pg_upgrade/test.sh deleted file mode 100644 index 2e9f97688c6..00000000000 --- a/contrib/pg_upgrade/test.sh +++ /dev/null @@ -1,224 +0,0 @@ -#!/bin/sh - -# contrib/pg_upgrade/test.sh -# -# Test driver for pg_upgrade. Initializes a new database cluster, -# runs the regression tests (to put in some data), runs pg_dumpall, -# runs pg_upgrade, runs pg_dumpall again, compares the dumps. -# -# Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group -# Portions Copyright (c) 1994, Regents of the University of California - -set -e - -: ${MAKE=make} - -# Guard against parallel make issues (see comments in pg_regress.c) -unset MAKEFLAGS -unset MAKELEVEL - -# Run a given "initdb" binary and overlay the regression testing -# authentication configuration. -standard_initdb() { - "$1" -N - ../../src/test/regress/pg_regress --config-auth "$PGDATA" -} - -# Establish how the server will listen for connections -testhost=`uname -s` - -case $testhost in - MINGW*) - LISTEN_ADDRESSES="localhost" - PGHOST=localhost - ;; - *) - LISTEN_ADDRESSES="" - # Select a socket directory. The algorithm is from the "configure" - # script; the outcome mimics pg_regress.c:make_temp_sockdir(). - PGHOST=$PG_REGRESS_SOCK_DIR - if [ "x$PGHOST" = x ]; then - { - dir=`(umask 077 && - mktemp -d /tmp/pg_upgrade_check-XXXXXX) 2>/dev/null` && - [ -d "$dir" ] - } || - { - dir=/tmp/pg_upgrade_check-$$-$RANDOM - (umask 077 && mkdir "$dir") - } || - { - echo "could not create socket temporary directory in \"/tmp\"" - exit 1 - } - - PGHOST=$dir - trap 'rm -rf "$PGHOST"' 0 - trap 'exit 3' 1 2 13 15 - fi - ;; -esac - -POSTMASTER_OPTS="-F -c listen_addresses=$LISTEN_ADDRESSES -k \"$PGHOST\"" -export PGHOST - -temp_root=$PWD/tmp_check - -if [ "$1" = '--install' ]; then - temp_install=$temp_root/install - bindir=$temp_install/$bindir - libdir=$temp_install/$libdir - - "$MAKE" -s -C ../.. install DESTDIR="$temp_install" - "$MAKE" -s -C . 
install DESTDIR="$temp_install" - - # platform-specific magic to find the shared libraries; see pg_regress.c - LD_LIBRARY_PATH=$libdir:$LD_LIBRARY_PATH - export LD_LIBRARY_PATH - DYLD_LIBRARY_PATH=$libdir:$DYLD_LIBRARY_PATH - export DYLD_LIBRARY_PATH - LIBPATH=$libdir:$LIBPATH - export LIBPATH - PATH=$libdir:$PATH - - # We need to make it use psql from our temporary installation, - # because otherwise the installcheck run below would try to - # use psql from the proper installation directory, which might - # be outdated or missing. But don't override anything else that's - # already in EXTRA_REGRESS_OPTS. - EXTRA_REGRESS_OPTS="$EXTRA_REGRESS_OPTS --psqldir='$bindir'" - export EXTRA_REGRESS_OPTS -fi - -: ${oldbindir=$bindir} - -: ${oldsrc=../..} -oldsrc=`cd "$oldsrc" && pwd` -newsrc=`cd ../.. && pwd` - -PATH=$bindir:$PATH -export PATH - -BASE_PGDATA=$temp_root/data -PGDATA="$BASE_PGDATA.old" -export PGDATA -rm -rf "$BASE_PGDATA" "$PGDATA" - -logdir=$PWD/log -rm -rf "$logdir" -mkdir "$logdir" - -# Clear out any environment vars that might cause libpq to connect to -# the wrong postmaster (cf pg_regress.c) -# -# Some shells, such as NetBSD's, return non-zero from unset if the variable -# is already unset. Since we are operating under 'set -e', this causes the -# script to fail. To guard against this, set them all to an empty string first. -PGDATABASE=""; unset PGDATABASE -PGUSER=""; unset PGUSER -PGSERVICE=""; unset PGSERVICE -PGSSLMODE=""; unset PGSSLMODE -PGREQUIRESSL=""; unset PGREQUIRESSL -PGCONNECT_TIMEOUT=""; unset PGCONNECT_TIMEOUT -PGHOSTADDR=""; unset PGHOSTADDR - -# Select a non-conflicting port number, similarly to pg_regress.c -PG_VERSION_NUM=`grep '#define PG_VERSION_NUM' "$newsrc"/src/include/pg_config.h | awk '{print $3}'` -PGPORT=`expr $PG_VERSION_NUM % 16384 + 49152` -export PGPORT - -i=0 -while psql -X postgres /dev/null -do - i=`expr $i + 1` - if [ $i -eq 16 ] - then - echo port $PGPORT apparently in use - exit 1 - fi - PGPORT=`expr $PGPORT + 1` - export PGPORT -done - -# buildfarm may try to override port via EXTRA_REGRESS_OPTS ... -EXTRA_REGRESS_OPTS="$EXTRA_REGRESS_OPTS --port=$PGPORT" -export EXTRA_REGRESS_OPTS - -# enable echo so the user can see what is being executed -set -x - -standard_initdb "$oldbindir"/initdb -"$oldbindir"/pg_ctl start -l "$logdir/postmaster1.log" -o "$POSTMASTER_OPTS" -w -if "$MAKE" -C "$oldsrc" installcheck; then - pg_dumpall -f "$temp_root"/dump1.sql || pg_dumpall1_status=$? - if [ "$newsrc" != "$oldsrc" ]; then - oldpgversion=`psql -A -t -d regression -c "SHOW server_version_num"` - fix_sql="" - case $oldpgversion in - 804??) - fix_sql="UPDATE pg_proc SET probin = replace(probin::text, '$oldsrc', '$newsrc')::bytea WHERE probin LIKE '$oldsrc%'; DROP FUNCTION public.myfunc(integer);" - ;; - 900??) - fix_sql="SET bytea_output TO escape; UPDATE pg_proc SET probin = replace(probin::text, '$oldsrc', '$newsrc')::bytea WHERE probin LIKE '$oldsrc%';" - ;; - 901??) - fix_sql="UPDATE pg_proc SET probin = replace(probin, '$oldsrc', '$newsrc') WHERE probin LIKE '$oldsrc%';" - ;; - esac - psql -d regression -c "$fix_sql;" || psql_fix_sql_status=$? - - mv "$temp_root"/dump1.sql "$temp_root"/dump1.sql.orig - sed "s;$oldsrc;$newsrc;g" "$temp_root"/dump1.sql.orig >"$temp_root"/dump1.sql - fi -else - make_installcheck_status=$? 
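The port selection above keeps concurrent test runs of different PostgreSQL versions from colliding: PG_VERSION_NUM is folded into the dynamic/private port range (49152-65535), the same scheme pg_regress.c uses. A standalone C restatement of that arithmetic, where 90500 is an assumed example value for PG_VERSION_NUM, not a value taken from this patch:

#include <stdio.h>

int
main(void)
{
    int     pg_version_num = 90500;     /* assumed example, e.g. a 9.5 build */

    /* 16384 distinct offsets, shifted into the dynamic/private port range */
    int     port = pg_version_num % 16384 + 49152;

    printf("candidate port: %d\n", port);   /* prints 57732 for this input */
    return 0;
}

Because only 16384 offsets exist, collisions with unrelated services remain possible, which is why the script above still probes each candidate with psql and retries up to 16 consecutive ports before giving up.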
-fi -"$oldbindir"/pg_ctl -m fast stop -if [ -n "$make_installcheck_status" ]; then - exit 1 -fi -if [ -n "$psql_fix_sql_status" ]; then - exit 1 -fi -if [ -n "$pg_dumpall1_status" ]; then - echo "pg_dumpall of pre-upgrade database cluster failed" - exit 1 -fi - -PGDATA=$BASE_PGDATA - -standard_initdb 'initdb' - -pg_upgrade $PG_UPGRADE_OPTS -d "${PGDATA}.old" -D "${PGDATA}" -b "$oldbindir" -B "$bindir" -p "$PGPORT" -P "$PGPORT" - -pg_ctl start -l "$logdir/postmaster2.log" -o "$POSTMASTER_OPTS" -w - -case $testhost in - MINGW*) cmd /c analyze_new_cluster.bat ;; - *) sh ./analyze_new_cluster.sh ;; -esac - -pg_dumpall -f "$temp_root"/dump2.sql || pg_dumpall2_status=$? -pg_ctl -m fast stop - -# no need to echo commands anymore -set +x -echo - -if [ -n "$pg_dumpall2_status" ]; then - echo "pg_dumpall of post-upgrade database cluster failed" - exit 1 -fi - -case $testhost in - MINGW*) cmd /c delete_old_cluster.bat ;; - *) sh ./delete_old_cluster.sh ;; -esac - -if diff -q "$temp_root"/dump1.sql "$temp_root"/dump2.sql; then - echo PASSED - exit 0 -else - echo "dumps were not identical" - exit 1 -fi diff --git a/contrib/pg_upgrade/util.c b/contrib/pg_upgrade/util.c deleted file mode 100644 index 6184ceef933..00000000000 --- a/contrib/pg_upgrade/util.c +++ /dev/null @@ -1,298 +0,0 @@ -/* - * util.c - * - * utility functions - * - * Copyright (c) 2010-2015, PostgreSQL Global Development Group - * contrib/pg_upgrade/util.c - */ - -#include "postgres_fe.h" - -#include "common/username.h" -#include "pg_upgrade.h" - -#include - - -LogOpts log_opts; - -static void pg_log_v(eLogType type, const char *fmt, va_list ap) pg_attribute_printf(2, 0); - - -/* - * report_status() - * - * Displays the result of an operation (ok, failed, error message,...) - */ -void -report_status(eLogType type, const char *fmt,...) -{ - va_list args; - char message[MAX_STRING]; - - va_start(args, fmt); - vsnprintf(message, sizeof(message), fmt, args); - va_end(args); - - pg_log(type, "%s\n", message); -} - - -/* force blank output for progress display */ -void -end_progress_output(void) -{ - /* - * In case nothing printed; pass a space so gcc doesn't complain about - * empty format string. - */ - prep_status(" "); -} - - -/* - * prep_status - * - * Displays a message that describes an operation we are about to begin. - * We pad the message out to MESSAGE_WIDTH characters so that all of the "ok" and - * "failed" indicators line up nicely. - * - * A typical sequence would look like this: - * prep_status("about to flarb the next %d files", fileCount ); - * - * if(( message = flarbFiles(fileCount)) == NULL) - * report_status(PG_REPORT, "ok" ); - * else - * pg_log(PG_FATAL, "failed - %s\n", message ); - */ -void -prep_status(const char *fmt,...) 
-{ - va_list args; - char message[MAX_STRING]; - - va_start(args, fmt); - vsnprintf(message, sizeof(message), fmt, args); - va_end(args); - - if (strlen(message) > 0 && message[strlen(message) - 1] == '\n') - pg_log(PG_REPORT, "%s", message); - else - /* trim strings that don't end in a newline */ - pg_log(PG_REPORT, "%-*s", MESSAGE_WIDTH, message); -} - - -static void -pg_log_v(eLogType type, const char *fmt, va_list ap) -{ - char message[QUERY_ALLOC]; - - vsnprintf(message, sizeof(message), fmt, ap); - - /* PG_VERBOSE and PG_STATUS are only output in verbose mode */ - /* fopen() on log_opts.internal might have failed, so check it */ - if (((type != PG_VERBOSE && type != PG_STATUS) || log_opts.verbose) && - log_opts.internal != NULL) - { - if (type == PG_STATUS) - /* status messages need two leading spaces and a newline */ - fprintf(log_opts.internal, " %s\n", message); - else - fprintf(log_opts.internal, "%s", message); - fflush(log_opts.internal); - } - - switch (type) - { - case PG_VERBOSE: - if (log_opts.verbose) - printf("%s", _(message)); - break; - - case PG_STATUS: - /* for output to a display, do leading truncation and append \r */ - if (isatty(fileno(stdout))) - /* -2 because we use a 2-space indent */ - printf(" %s%-*.*s\r", - /* prefix with "..." if we do leading truncation */ - strlen(message) <= MESSAGE_WIDTH - 2 ? "" : "...", - MESSAGE_WIDTH - 2, MESSAGE_WIDTH - 2, - /* optional leading truncation */ - strlen(message) <= MESSAGE_WIDTH - 2 ? message : - message + strlen(message) - MESSAGE_WIDTH + 3 + 2); - else - printf(" %s\n", _(message)); - break; - - case PG_REPORT: - case PG_WARNING: - printf("%s", _(message)); - break; - - case PG_FATAL: - printf("\n%s", _(message)); - printf("Failure, exiting\n"); - exit(1); - break; - - default: - break; - } - fflush(stdout); -} - - -void -pg_log(eLogType type, const char *fmt,...) -{ - va_list args; - - va_start(args, fmt); - pg_log_v(type, fmt, args); - va_end(args); -} - - -void -pg_fatal(const char *fmt,...) -{ - va_list args; - - va_start(args, fmt); - pg_log_v(PG_FATAL, fmt, args); - va_end(args); - printf("Failure, exiting\n"); - exit(1); -} - - -void -check_ok(void) -{ - /* all seems well */ - report_status(PG_REPORT, "ok"); - fflush(stdout); -} - - -/* - * quote_identifier() - * Properly double-quote a SQL identifier. - * - * The result should be pg_free'd, but most callers don't bother because - * memory leakage is not a big deal in this program. - */ -char * -quote_identifier(const char *s) -{ - char *result = pg_malloc(strlen(s) * 2 + 3); - char *r = result; - - *r++ = '"'; - while (*s) - { - if (*s == '"') - *r++ = *s; - *r++ = *s; - s++; - } - *r++ = '"'; - *r++ = '\0'; - - return result; -} - - -/* - * get_user_info() - */ -int -get_user_info(char **user_name_p) -{ - int user_id; - const char *user_name; - char *errstr; - -#ifndef WIN32 - user_id = geteuid(); -#else - user_id = 1; -#endif - - user_name = get_user_name(&errstr); - if (!user_name) - pg_fatal("%s\n", errstr); - - /* make a copy */ - *user_name_p = pg_strdup(user_name); - - return user_id; -} - - -/* - * getErrorText() - * - * Returns the text of the error message for the given error number - * - * This feature is factored into a separate function because it is - * system-dependent. 
- */ -const char * -getErrorText(int errNum) -{ -#ifdef WIN32 - _dosmaperr(GetLastError()); -#endif - return pg_strdup(strerror(errNum)); -} - - -/* - * str2uint() - * - * convert string to oid - */ -unsigned int -str2uint(const char *str) -{ - return strtoul(str, NULL, 10); -} - - -/* - * pg_putenv() - * - * This is like putenv(), but takes two arguments. - * It also does unsetenv() if val is NULL. - */ -void -pg_putenv(const char *var, const char *val) -{ - if (val) - { -#ifndef WIN32 - char *envstr; - - envstr = psprintf("%s=%s", var, val); - putenv(envstr); - - /* - * Do not free envstr because it becomes part of the environment on - * some operating systems. See port/unsetenv.c::unsetenv. - */ -#else - SetEnvironmentVariableA(var, val); -#endif - } - else - { -#ifndef WIN32 - unsetenv(var); -#else - SetEnvironmentVariableA(var, ""); -#endif - } -} diff --git a/contrib/pg_upgrade/version.c b/contrib/pg_upgrade/version.c deleted file mode 100644 index 4ae9511d045..00000000000 --- a/contrib/pg_upgrade/version.c +++ /dev/null @@ -1,178 +0,0 @@ -/* - * version.c - * - * Postgres-version-specific routines - * - * Copyright (c) 2010-2015, PostgreSQL Global Development Group - * contrib/pg_upgrade/version.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - - - -/* - * new_9_0_populate_pg_largeobject_metadata() - * new >= 9.0, old <= 8.4 - * 9.0 has a new pg_largeobject permission table - */ -void -new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster, bool check_mode) -{ - int dbnum; - FILE *script = NULL; - bool found = false; - char output_path[MAXPGPATH]; - - prep_status("Checking for large objects"); - - snprintf(output_path, sizeof(output_path), "pg_largeobject.sql"); - - for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) - { - PGresult *res; - int i_count; - DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(cluster, active_db->db_name); - - /* find if there are any large objects */ - res = executeQueryOrDie(conn, - "SELECT count(*) " - "FROM pg_catalog.pg_largeobject "); - - i_count = PQfnumber(res, "count"); - if (atoi(PQgetvalue(res, 0, i_count)) != 0) - { - found = true; - if (!check_mode) - { - if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno)); - fprintf(script, "\\connect %s\n", - quote_identifier(active_db->db_name)); - fprintf(script, - "SELECT pg_catalog.lo_create(t.loid)\n" - "FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) AS t;\n"); - } - } - - PQclear(res); - PQfinish(conn); - } - - if (script) - fclose(script); - - if (found) - { - report_status(PG_WARNING, "warning"); - if (check_mode) - pg_log(PG_WARNING, "\n" - "Your installation contains large objects. The new database has an\n" - "additional large object permission table. After upgrading, you will be\n" - "given a command to populate the pg_largeobject permission table with\n" - "default permissions.\n\n"); - else - pg_log(PG_WARNING, "\n" - "Your installation contains large objects. The new database has an\n" - "additional large object permission table, so default permissions must be\n" - "defined for all large objects. 
The file\n" - " %s\n" - "when executed by psql by the database superuser will set the default\n" - "permissions.\n\n", - output_path); - } - else - check_ok(); -} - - -/* - * old_9_3_check_for_line_data_type_usage() - * 9.3 -> 9.4 - * Fully implement the 'line' data type in 9.4, which previously returned - * "not enabled" by default and was only functionally enabled with a - * compile-time switch; 9.4 "line" has different binary and text - * representation formats; checks tables and indexes. - */ -void -old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster) -{ - int dbnum; - FILE *script = NULL; - bool found = false; - char output_path[MAXPGPATH]; - - prep_status("Checking for invalid \"line\" user columns"); - - snprintf(output_path, sizeof(output_path), "tables_using_line.txt"); - - for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) - { - PGresult *res; - bool db_used = false; - int ntups; - int rowno; - int i_nspname, - i_relname, - i_attname; - DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(cluster, active_db->db_name); - - res = executeQueryOrDie(conn, - "SELECT n.nspname, c.relname, a.attname " - "FROM pg_catalog.pg_class c, " - " pg_catalog.pg_namespace n, " - " pg_catalog.pg_attribute a " - "WHERE c.oid = a.attrelid AND " - " NOT a.attisdropped AND " - " a.atttypid = 'pg_catalog.line'::pg_catalog.regtype AND " - " c.relnamespace = n.oid AND " - /* exclude possible orphaned temp tables */ - " n.nspname !~ '^pg_temp_' AND " - " n.nspname !~ '^pg_toast_temp_' AND " - " n.nspname NOT IN ('pg_catalog', 'information_schema')"); - - ntups = PQntuples(res); - i_nspname = PQfnumber(res, "nspname"); - i_relname = PQfnumber(res, "relname"); - i_attname = PQfnumber(res, "attname"); - for (rowno = 0; rowno < ntups; rowno++) - { - found = true; - if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno)); - if (!db_used) - { - fprintf(script, "Database: %s\n", active_db->db_name); - db_used = true; - } - fprintf(script, " %s.%s.%s\n", - PQgetvalue(res, rowno, i_nspname), - PQgetvalue(res, rowno, i_relname), - PQgetvalue(res, rowno, i_attname)); - } - - PQclear(res); - - PQfinish(conn); - } - - if (script) - fclose(script); - - if (found) - { - pg_log(PG_REPORT, "fatal\n"); - pg_fatal("Your installation contains the \"line\" data type in user tables. This\n" - "data type changed its internal and input/output format between your old\n" - "and new clusters so this cluster cannot currently be upgraded. You can\n" - "remove the problem tables and restart the upgrade. A list of the problem\n" - "columns is in the file:\n" - " %s\n\n", output_path); - } - else - check_ok(); -} diff --git a/doc/src/sgml/contrib.sgml b/doc/src/sgml/contrib.sgml index 57730955bfa..adc21843db2 100644 --- a/doc/src/sgml/contrib.sgml +++ b/doc/src/sgml/contrib.sgml @@ -204,7 +204,6 @@ pages. 
&pgstandby; &pgtestfsync; &pgtesttiming; - &pgupgrade; &pgxlogdump; diff --git a/doc/src/sgml/filelist.sgml b/doc/src/sgml/filelist.sgml index ab935a6664f..2d7514c3ea1 100644 --- a/doc/src/sgml/filelist.sgml +++ b/doc/src/sgml/filelist.sgml @@ -136,7 +136,6 @@ - diff --git a/doc/src/sgml/pgupgrade.sgml b/doc/src/sgml/pgupgrade.sgml deleted file mode 100644 index 45bceff9bea..00000000000 --- a/doc/src/sgml/pgupgrade.sgml +++ /dev/null @@ -1,723 +0,0 @@ - - - - - pg_upgrade - - - - pg_upgrade - 1 - Application - - - - pg_upgrade - upgrade a PostgreSQL server instance - - - - - pg_upgrade - - oldbindir - - newbindir - - olddatadir - - newdatadir - option - - - - - Description - - - pg_upgrade (formerly called pg_migrator) allows data - stored in PostgreSQL data files to be upgraded to a later PostgreSQL - major version without the data dump/reload typically required for - major version upgrades, e.g. from 8.4.7 to the current major release - of PostgreSQL. It is not required for minor version upgrades, e.g. from - 9.0.1 to 9.0.4. - - - - Major PostgreSQL releases regularly add new features that often - change the layout of the system tables, but the internal data storage - format rarely changes. pg_upgrade uses this fact - to perform rapid upgrades by creating new system tables and simply - reusing the old user data files. If a future major release ever - changes the data storage format in a way that makes the old data - format unreadable, pg_upgrade will not be usable - for such upgrades. (The community will attempt to avoid such - situations.) - - - - pg_upgrade does its best to - make sure the old and new clusters are binary-compatible, e.g. by - checking for compatible compile-time settings, including 32/64-bit - binaries. It is important that - any external modules are also binary compatible, though this cannot - be checked by pg_upgrade. - - - - pg_upgrade supports upgrades from 8.4.X and later to the current - major release of PostgreSQL, including snapshot and alpha releases. 
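The binary-compatibility checking described above reduces to comparing fields that pg_upgrade parses out of pg_controldata output for both clusters. A heavily abbreviated C sketch of that gate; the struct below is an illustrative stand-in for the much larger ControlData in pg_upgrade.h, and the field set shown is not exhaustive:

#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    unsigned int align;         /* maximum data alignment */
    unsigned int blocksz;       /* disk block size */
    unsigned int namesz;        /* NAMEDATALEN */
} ControlSketch;

static void
check_compat(const ControlSketch *oldctrl, const ControlSketch *newctrl)
{
    if (oldctrl->align != newctrl->align)
    {
        fprintf(stderr, "old and new data alignments do not match\n");
        exit(1);
    }
    if (oldctrl->blocksz != newctrl->blocksz)
    {
        fprintf(stderr, "old and new block sizes do not match\n");
        exit(1);
    }
    if (oldctrl->namesz != newctrl->namesz)
    {
        fprintf(stderr, "old and new NAMEDATALEN values do not match\n");
        exit(1);
    }
}

int
main(void)
{
    ControlSketch old_cd = {8, 8192, 64};
    ControlSketch new_cd = {8, 8192, 64};

    check_compat(&old_cd, &new_cd);
    printf("clusters look binary-compatible\n");
    return 0;
}

A mismatch in any such field means the clusters were built with incompatible compile-time settings, which is exactly the situation the documentation warns cannot be upgraded in place.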
- - - - - Options - - - pg_upgrade accepts the following command-line arguments: - - - - - bindir - bindir - the old PostgreSQL executable directory; - environment variable PGBINOLD - - - - bindir - bindir - the new PostgreSQL executable directory; - environment variable PGBINNEW - - - - - - check clusters only, don't change any data - - - - datadir - datadir - the old cluster data directory; environment - variable PGDATAOLD - - - - datadir - datadir - the new cluster data directory; environment - variable PGDATANEW - - - - - - number of simultaneous processes or threads to use - - - - - - - use hard links instead of copying files to the new - cluster (use junction points on Windows) - - - - options - options - options to be passed directly to the - old postgres command; multiple - option invocations are appended - - - - options - options - options to be passed directly to the - new postgres command; multiple - option invocations are appended - - - - port - port - the old cluster port number; environment - variable PGPORTOLD - - - - port - port - the new cluster port number; environment - variable PGPORTNEW - - - - - - retain SQL and log files even after successful completion - - - - - username - username - cluster's install user name; environment - variable PGUSER - - - - - - enable verbose internal logging - - - - - - display version information, then exit - - - - - - show help, then exit - - - - - - - - - Usage - - - These are the steps to perform an upgrade - with pg_upgrade: - - - - - Optionally move the old cluster - - - If you are using a version-specific installation directory, e.g. - /opt/PostgreSQL/9.1, you do not need to move the old cluster. The - graphical installers all use version-specific installation directories. - - - - If your installation directory is not version-specific, e.g. - /usr/local/pgsql, it is necessary to move the current PostgreSQL install - directory so it does not interfere with the new PostgreSQL installation. - Once the current PostgreSQL server is shut down, it is safe to rename the - PostgreSQL installation directory; assuming the old directory is - /usr/local/pgsql, you can do: - - -mv /usr/local/pgsql /usr/local/pgsql.old - - to rename the directory. - - - - - For source installs, build the new version - - - Build the new PostgreSQL source with configure flags that are compatible - with the old cluster. pg_upgrade will check pg_controldata to make - sure all settings are compatible before starting the upgrade. - - - - - Install the new PostgreSQL binaries - - - Install the new server's binaries and support files. - - - - For source installs, if you wish to install the new server in a custom - location, use the prefix variable: - - -make prefix=/usr/local/pgsql.new install - - - - - Install pg_upgrade - - - Install the pg_upgrade binary in the new PostgreSQL - installation. - - - - - Initialize the new PostgreSQL cluster - - - Initialize the new cluster using initdb. - Again, use compatible initdb - flags that match the old cluster. Many - prebuilt installers do this step automatically. There is no need to - start the new cluster. - - - - - Install custom shared object files - - - Install any custom shared object files (or DLLs) used by the old cluster - into the new cluster, e.g. pgcrypto.so, - whether they are from contrib - or some other source. Do not install the schema definitions, e.g. - pgcrypto.sql, because these will be upgraded from the old cluster. 
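Each directory and port option listed above has an environment-variable fallback (PGBINOLD, PGBINNEW, PGDATAOLD, PGDATANEW, PGPORTOLD, PGPORTNEW). A minimal sketch of that precedence; resolve_option() is an illustrative helper written for this note, not a function from this patch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
resolve_option(const char *cli_value, const char *envvar)
{
    /* explicit command-line value wins, then the environment variable */
    const char *val = cli_value ? cli_value : getenv(envvar);

    if (val == NULL || val[0] == '\0')
    {
        fprintf(stderr, "no value given and %s is not set\n", envvar);
        exit(1);
    }
    return strdup(val);
}

int
main(int argc, char **argv)
{
    char       *old_bindir = resolve_option(argc > 1 ? argv[1] : NULL,
                                            "PGBINOLD");

    printf("old bindir: %s\n", old_bindir);
    free(old_bindir);
    return 0;
}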
- - - - - Adjust authentication - - - pg_upgrade will connect to the old and new servers several - times, so you might want to set authentication to peer - in pg_hba.conf or use a ~/.pgpass file - (see ). - - - - - Stop both servers - - - Make sure both database servers are stopped using, on Unix, e.g.: - - -pg_ctl -D /opt/PostgreSQL/8.4 stop -pg_ctl -D /opt/PostgreSQL/9.0 stop - - - or on Windows, using the proper service names: - - -NET STOP postgresql-8.4 -NET STOP postgresql-9.0 - - - - - Streaming replication and log-shipping standby servers can remain running until - a later step. - - - - - Run <application>pg_upgrade</> - - - Always run the pg_upgrade binary of the new server, not the old one. - pg_upgrade requires the specification of the old and new cluster's - data and executable (bin) directories. You can also specify - user and port values, and whether you want the data linked instead of - copied (the default). - - - - If you use link mode, the upgrade will be much faster (no file - copying) and use less disk space, but you will not be able to access - your old cluster - once you start the new cluster after the upgrade. Link mode also - requires that the old and new cluster data directories be in the - same file system. (Tablespaces and pg_xlog can be on - different file systems.) See pg_upgrade --help for a full - list of options. - - - - The - - - For Windows users, you must be logged into an administrative account, and - then start a shell as the postgres user and set the proper path: - - -RUNAS /USER:postgres "CMD.EXE" -SET PATH=%PATH%;C:\Program Files\PostgreSQL\9.0\bin; - - - and then run pg_upgrade with quoted directories, e.g.: - - -pg_upgrade.exe - --old-datadir "C:/Program Files/PostgreSQL/8.4/data" - --new-datadir "C:/Program Files/PostgreSQL/9.0/data" - --old-bindir "C:/Program Files/PostgreSQL/8.4/bin" - --new-bindir "C:/Program Files/PostgreSQL/9.0/bin" - - - Once started, pg_upgrade will verify the two clusters are compatible - and then do the upgrade. You can use pg_upgrade --check - to perform only the checks, even if the old server is still - running. pg_upgrade --check will also outline any - manual adjustments you will need to make after the upgrade. If you - are going to be using link mode, you should use the to enable link-mode-specific checks. - pg_upgrade requires write permission in the current directory. - - - - Obviously, no one should be accessing the clusters during the - upgrade. pg_upgrade defaults to running servers - on port 50432 to avoid unintended client connections. - You can use the same port number for both clusters when doing an - upgrade because the old and new clusters will not be running at the - same time. However, when checking an old running server, the old - and new port numbers must be different. - - - - If an error occurs while restoring the database schema, pg_upgrade will - exit and you will have to revert to the old cluster as outlined in - below. To try pg_upgrade again, you will need to modify the old - cluster so the pg_upgrade schema restore succeeds. If the problem is a - contrib module, you might need to uninstall the contrib module from - the old cluster and install it in the new cluster after the upgrade, - assuming the module is not being used to store user data. 
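The several connections mentioned above are ordinary libpq connections built from keyword/value strings of the shape get_db_conn() produces earlier in this patch, aimed at the private default port. A self-contained sketch; the socket directory value is a placeholder, and the program builds with the libpq flags shown in the comment:

/* build: cc sketch.c -I`pg_config --includedir` -L`pg_config --libdir` -lpq */
#include <stdio.h>
#include <stdlib.h>
#include <libpq-fe.h>

int
main(void)
{
    /* placeholder socket directory; 50432 is pg_upgrade's default port */
    const char *conninfo =
        "dbname = 'template1' user = 'postgres' "
        "host = '/tmp/pg_upgrade_check' port = 50432";
    PGconn     *conn = PQconnectdb(conninfo);

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection to database failed: %s",
                PQerrorMessage(conn));
        PQfinish(conn);
        exit(1);
    }
    printf("connected to %s\n", PQdb(conn));
    PQfinish(conn);
    return 0;
}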
- - - - - Upgrade Streaming Replication and Log-Shipping standby - servers - - - If you have Streaming Replication () or Log-Shipping () standby servers, follow these steps to - upgrade them (before starting any servers): - - - - - - Install the new PostgreSQL binaries on standby servers - - - Make sure the new binaries and support files are installed on all - standby servers. - - - - - Make sure the new standby data directories do <emphasis>not</> - exist - - - Make sure the new standby data directories do not - exist or are empty. If initdb was run, delete - the standby server data directories. - - - - - Install custom shared object files - - - Install the same custom shared object files on the new standbys - that you installed in the new master cluster. - - - - - Stop standby servers - - - If the standby servers are still running, stop them now using the - above instructions. - - - - - Verify standby servers - - - To prevent old standby servers from being modified, run - pg_controldata against the primary and standby - clusters and verify that the Latest checkpoint location - values match in all clusters. (This requires the standbys to be - shut down after the primary.) - - - - - Save configuration files - - - Save any configuration files from the standbys you need to keep, - e.g. postgresql.conf, recovery.conf, - as these will be overwritten or removed in the next step. - - - - - Start and stop the new master cluster - - - In the new master cluster, change wal_level to - hot_standby in the postgresql.conf file - and then start and stop the cluster. - - - - - Run <application>rsync</> - - - From a directory that is above the old and new database cluster - directories, run this for each slave: - - - rsync --archive --delete --hard-links --size-only old_pgdata new_pgdata remote_dir - - - where - - - If you have tablespaces, you will need to run a similar - rsync command for each tablespace directory. If you - have relocated pg_xlog outside the data directories, - rsync must be run on those directories too. - - - - - Configure streaming replication and log-shipping standby - servers - - - Configure the servers for log shipping. (You do not need to run - pg_start_backup() and pg_stop_backup() - or take a file system backup as the slaves are still synchronized - with the master.) - - - - - - - - - Restore <filename>pg_hba.conf</> - - - If you modified pg_hba.conf, restore its original settings. - It might also be necessary to adjust other configuration files in the new - cluster to match the old cluster, e.g. postgresql.conf. - - - - - Start the new server - - - The new server can now be safely started, and then any - rsync'ed standby servers. - - - - - Post-Upgrade processing - - - If any post-upgrade processing is required, pg_upgrade will issue - warnings as it completes. It will also generate script files that must - be run by the administrator. The script files will connect to each - database that needs post-upgrade processing. Each script should be - run using: - - -psql --username postgres --file script.sql postgres - - - The scripts can be run in any order and can be deleted once they have - been run. - - - - - In general it is unsafe to access tables referenced in rebuild scripts - until the rebuild scripts have run to completion; doing so could yield - incorrect results or poor performance. Tables not referenced in rebuild - scripts can be accessed immediately. 
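The rsync recipe above works because a link-mode upgrade leaves each relation file reachable under both the old and the new cluster paths as a single inode, so --hard-links --size-only ships file names rather than file contents. A sketch that verifies the shared inode with stat(2); both paths are placeholders for an old and a new relation file:

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

int
main(void)
{
    struct stat a,
                b;

    if (stat("data.old/base/16384/16385", &a) != 0 ||
        stat("data/base/16384/16385", &b) != 0)
    {
        perror("stat");
        return 1;
    }
    /* same device and inode: rsync --hard-links sends a link, not data */
    if (a.st_dev == b.st_dev && a.st_ino == b.st_ino)
        printf("same inode: only the link is transferred\n");
    else
        printf("different files: a full copy would be transferred\n");
    return 0;
}

If the upgrade was run in copy mode instead, the inodes differ and rsync would have to transfer the data in full.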
- - - - - - Statistics - - - Because optimizer statistics are not transferred by pg_upgrade, you will - be instructed to run a command to regenerate that information at the end - of the upgrade. You might need to set connection parameters to - match your new cluster. - - - - - Delete old cluster - - - Once you are satisfied with the upgrade, you can delete the old - cluster's data directories by running the script mentioned when - pg_upgrade completes. (Automatic deletion is not - possible if you have user-defined tablespaces inside the old data - directory.) You can also delete the old installation directories - (e.g. bin, share). - - - - - Reverting to old cluster - - - If, after running pg_upgrade, you wish to revert to the old cluster, - there are several options: - - - - - If you ran pg_upgrade - with - - - - - If you ran pg_upgrade - with - - - - - If you ran pg_upgrade without - - - - - - - - - - - Notes - - - pg_upgrade does not support upgrading of databases - containing these reg* OID-referencing system data types: - regproc, regprocedure, regoper, - regoperator, regconfig, and - regdictionary. (regtype can be upgraded.) - - - - All failure, rebuild, and reindex cases will be reported by - pg_upgrade if they affect your installation; - post-upgrade scripts to rebuild tables and indexes will be - generated automatically. If you are trying to automate the upgrade - of many clusters, you should find that clusters with identical database - schemas require the same post-upgrade steps for all cluster upgrades; - this is because the post-upgrade steps are based on the database - schemas, and not user data. - - - - For deployment testing, create a schema-only copy of the old cluster, - insert dummy data, and upgrade that. - - - - If you are upgrading a pre-PostgreSQL 9.2 cluster - that uses a configuration-file-only directory, you must pass the - real data directory location to pg_upgrade, and - pass the configuration directory location to the server, e.g. - -d /real-data-directory -o '-D /configuration-directory'. - - - - If using a pre-9.1 old server that is using a non-default Unix-domain - socket directory or a default that differs from the default of the - new cluster, set PGHOST to point to the old server's socket - location. (This is not relevant on Windows.) - - - - If you want to use link mode and you do not want your old cluster - to be modified when the new cluster is started, make a copy of the - old cluster and upgrade that in link mode. To make a valid copy - of the old cluster, use rsync to create a dirty - copy of the old cluster while the server is running, then shut down - the old server and run rsync --checksum again to update the - copy with any changes to make it consistent. ( - - - - - See Also - - - - - - - - - diff --git a/doc/src/sgml/ref/allfiles.sgml b/doc/src/sgml/ref/allfiles.sgml index 9ae6aecb1a1..211a3c42bd5 100644 --- a/doc/src/sgml/ref/allfiles.sgml +++ b/doc/src/sgml/ref/allfiles.sgml @@ -193,6 +193,7 @@ Complete list of usable sgml source files in this directory. 
+ diff --git a/doc/src/sgml/ref/pgupgrade.sgml b/doc/src/sgml/ref/pgupgrade.sgml new file mode 100644 index 00000000000..ce5e3082b5d --- /dev/null +++ b/doc/src/sgml/ref/pgupgrade.sgml @@ -0,0 +1,715 @@ + + + + + pg_upgrade + + + + pg_upgrade + 1 + Application + + + + pg_upgrade + upgrade a PostgreSQL server instance + + + + + pg_upgrade + + oldbindir + + newbindir + + olddatadir + + newdatadir + option + + + + + Description + + + pg_upgrade (formerly called pg_migrator) allows data + stored in PostgreSQL data files to be upgraded to a later PostgreSQL + major version without the data dump/reload typically required for + major version upgrades, e.g. from 8.4.7 to the current major release + of PostgreSQL. It is not required for minor version upgrades, e.g. from + 9.0.1 to 9.0.4. + + + + Major PostgreSQL releases regularly add new features that often + change the layout of the system tables, but the internal data storage + format rarely changes. pg_upgrade uses this fact + to perform rapid upgrades by creating new system tables and simply + reusing the old user data files. If a future major release ever + changes the data storage format in a way that makes the old data + format unreadable, pg_upgrade will not be usable + for such upgrades. (The community will attempt to avoid such + situations.) + + + + pg_upgrade does its best to + make sure the old and new clusters are binary-compatible, e.g. by + checking for compatible compile-time settings, including 32/64-bit + binaries. It is important that + any external modules are also binary compatible, though this cannot + be checked by pg_upgrade. + + + + pg_upgrade supports upgrades from 8.4.X and later to the current + major release of PostgreSQL, including snapshot and alpha releases. + + + + + Options + + + pg_upgrade accepts the following command-line arguments: + + + + + bindir + bindir + the old PostgreSQL executable directory; + environment variable PGBINOLD + + + + bindir + bindir + the new PostgreSQL executable directory; + environment variable PGBINNEW + + + + + + check clusters only, don't change any data + + + + datadir + datadir + the old cluster data directory; environment + variable PGDATAOLD + + + + datadir + datadir + the new cluster data directory; environment + variable PGDATANEW + + + + + + number of simultaneous processes or threads to use + + + + + + + use hard links instead of copying files to the new + cluster (use junction points on Windows) + + + + options + options + options to be passed directly to the + old postgres command; multiple + option invocations are appended + + + + options + options + options to be passed directly to the + new postgres command; multiple + option invocations are appended + + + + port + port + the old cluster port number; environment + variable PGPORTOLD + + + + port + port + the new cluster port number; environment + variable PGPORTNEW + + + + + + retain SQL and log files even after successful completion + + + + + username + username + cluster's install user name; environment + variable PGUSER + + + + + + enable verbose internal logging + + + + + + display version information, then exit + + + + + + show help, then exit + + + + + + + + + Usage + + + These are the steps to perform an upgrade + with pg_upgrade: + + + + + Optionally move the old cluster + + + If you are using a version-specific installation directory, e.g. + /opt/PostgreSQL/9.1, you do not need to move the old cluster. The + graphical installers all use version-specific installation directories. 
+ + + + If your installation directory is not version-specific, e.g. + /usr/local/pgsql, it is necessary to move the current PostgreSQL install + directory so it does not interfere with the new PostgreSQL installation. + Once the current PostgreSQL server is shut down, it is safe to rename the + PostgreSQL installation directory; assuming the old directory is + /usr/local/pgsql, you can do: + + +mv /usr/local/pgsql /usr/local/pgsql.old + + to rename the directory. + + + + + For source installs, build the new version + + + Build the new PostgreSQL source with configure flags that are compatible + with the old cluster. pg_upgrade will check pg_controldata to make + sure all settings are compatible before starting the upgrade. + + + + + Install the new PostgreSQL binaries + + + Install the new server's binaries and support + files. pg_upgrade is included in a default installation. + + + + For source installs, if you wish to install the new server in a custom + location, use the prefix variable: + + +make prefix=/usr/local/pgsql.new install + + + + + Initialize the new PostgreSQL cluster + + + Initialize the new cluster using initdb. + Again, use compatible initdb + flags that match the old cluster. Many + prebuilt installers do this step automatically. There is no need to + start the new cluster. + + + + + Install custom shared object files + + + Install any custom shared object files (or DLLs) used by the old cluster + into the new cluster, e.g. pgcrypto.so, + whether they are from contrib + or some other source. Do not install the schema definitions, e.g. + pgcrypto.sql, because these will be upgraded from the old cluster. + + + + + Adjust authentication + + + pg_upgrade will connect to the old and new servers several + times, so you might want to set authentication to peer + in pg_hba.conf or use a ~/.pgpass file + (see ). + + + + + Stop both servers + + + Make sure both database servers are stopped using, on Unix, e.g.: + + +pg_ctl -D /opt/PostgreSQL/8.4 stop +pg_ctl -D /opt/PostgreSQL/9.0 stop + + + or on Windows, using the proper service names: + + +NET STOP postgresql-8.4 +NET STOP postgresql-9.0 + + + + + Streaming replication and log-shipping standby servers can remain running until + a later step. + + + + + Run <application>pg_upgrade</> + + + Always run the pg_upgrade binary of the new server, not the old one. + pg_upgrade requires the specification of the old and new cluster's + data and executable (bin) directories. You can also specify + user and port values, and whether you want the data linked instead of + copied (the default). + + + + If you use link mode, the upgrade will be much faster (no file + copying) and use less disk space, but you will not be able to access + your old cluster + once you start the new cluster after the upgrade. Link mode also + requires that the old and new cluster data directories be in the + same file system. (Tablespaces and pg_xlog can be on + different file systems.) See pg_upgrade --help for a full + list of options. 
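The same-file-system requirement for link mode above is plain link(2) semantics: a hard link cannot cross a mount point and fails with EXDEV. A sketch, with placeholder paths standing in for an old and a new relation file; pg_upgrade runs an equivalent probe up front when link mode is requested:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
    if (link("data.old/base/1/1259", "data/base/1/1259") == 0)
        printf("hard link created; no data was copied\n");
    else if (errno == EXDEV)
        printf("different file systems: link mode is impossible, use copy mode\n");
    else
        printf("link failed: %s\n", strerror(errno));
    return 0;
}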
+ + + + The + + + For Windows users, you must be logged into an administrative account, and + then start a shell as the postgres user and set the proper path: + + +RUNAS /USER:postgres "CMD.EXE" +SET PATH=%PATH%;C:\Program Files\PostgreSQL\9.0\bin; + + + and then run pg_upgrade with quoted directories, e.g.: + + +pg_upgrade.exe + --old-datadir "C:/Program Files/PostgreSQL/8.4/data" + --new-datadir "C:/Program Files/PostgreSQL/9.0/data" + --old-bindir "C:/Program Files/PostgreSQL/8.4/bin" + --new-bindir "C:/Program Files/PostgreSQL/9.0/bin" + + + Once started, pg_upgrade will verify the two clusters are compatible + and then do the upgrade. You can use pg_upgrade --check + to perform only the checks, even if the old server is still + running. pg_upgrade --check will also outline any + manual adjustments you will need to make after the upgrade. If you + are going to be using link mode, you should use the to enable link-mode-specific checks. + pg_upgrade requires write permission in the current directory. + + + + Obviously, no one should be accessing the clusters during the + upgrade. pg_upgrade defaults to running servers + on port 50432 to avoid unintended client connections. + You can use the same port number for both clusters when doing an + upgrade because the old and new clusters will not be running at the + same time. However, when checking an old running server, the old + and new port numbers must be different. + + + + If an error occurs while restoring the database schema, pg_upgrade will + exit and you will have to revert to the old cluster as outlined in + below. To try pg_upgrade again, you will need to modify the old + cluster so the pg_upgrade schema restore succeeds. If the problem is a + contrib module, you might need to uninstall the contrib module from + the old cluster and install it in the new cluster after the upgrade, + assuming the module is not being used to store user data. + + + + + Upgrade Streaming Replication and Log-Shipping standby + servers + + + If you have Streaming Replication () or Log-Shipping () standby servers, follow these steps to + upgrade them (before starting any servers): + + + + + + Install the new PostgreSQL binaries on standby servers + + + Make sure the new binaries and support files are installed on all + standby servers. + + + + + Make sure the new standby data directories do <emphasis>not</> + exist + + + Make sure the new standby data directories do not + exist or are empty. If initdb was run, delete + the standby server data directories. + + + + + Install custom shared object files + + + Install the same custom shared object files on the new standbys + that you installed in the new master cluster. + + + + + Stop standby servers + + + If the standby servers are still running, stop them now using the + above instructions. + + + + + Verify standby servers + + + To prevent old standby servers from being modified, run + pg_controldata against the primary and standby + clusters and verify that the Latest checkpoint location + values match in all clusters. (This requires the standbys to be + shut down after the primary.) + + + + + Save configuration files + + + Save any configuration files from the standbys you need to keep, + e.g. postgresql.conf, recovery.conf, + as these will be overwritten or removed in the next step. + + + + + Start and stop the new master cluster + + + In the new master cluster, change wal_level to + hot_standby in the postgresql.conf file + and then start and stop the cluster. 
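The "Verify standby servers" step above can be scripted by scraping the Latest checkpoint location line out of pg_controldata output on each cluster and comparing the values. A sketch; the data directory argument is a placeholder:

#include <stdio.h>
#include <string.h>

int
main(void)
{
    FILE       *p = popen("pg_controldata /opt/PostgreSQL/9.0/data", "r");
    char        line[512];

    if (p == NULL)
    {
        perror("popen");
        return 1;
    }
    while (fgets(line, sizeof(line), p) != NULL)
    {
        /* this line must match across the primary and all standbys */
        if (strncmp(line, "Latest checkpoint location:", 27) == 0)
            fputs(line, stdout);
    }
    pclose(p);
    return 0;
}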
+
+
+
+
+    Run <application>rsync</>
+
+
+    From a directory that is above the old and new database cluster
+    directories, run this for each slave:
+
+
+    rsync --archive --delete --hard-links --size-only old_pgdata new_pgdata remote_dir
+
+
+    where old_pgdata and new_pgdata are relative to the current directory
+    on the primary, and remote_dir is above the old and new cluster
+    directories on the standby.
+
+
+    If you have tablespaces, you will need to run a similar
+    rsync command for each tablespace directory.  If you
+    have relocated pg_xlog outside the data directories,
+    rsync must be run on those directories too.
+
+
+
+
+    Configure streaming replication and log-shipping standby
+    servers
+
+
+    Configure the servers for log shipping.  (You do not need to run
+    pg_start_backup() and pg_stop_backup()
+    or take a file system backup as the slaves are still synchronized
+    with the master.)
+
+
+
+
+
+
+
+
+    Restore <filename>pg_hba.conf</>
+
+
+    If you modified pg_hba.conf, restore its original settings.
+    It might also be necessary to adjust other configuration files in the new
+    cluster to match the old cluster, e.g. postgresql.conf.
+
+
+
+
+    Start the new server
+
+
+    The new server can now be safely started, and then any
+    rsync'ed standby servers.
+
+
+
+
+    Post-Upgrade processing
+
+
+    If any post-upgrade processing is required, pg_upgrade will issue
+    warnings as it completes.  It will also generate script files that must
+    be run by the administrator.  The script files will connect to each
+    database that needs post-upgrade processing.  Each script should be
+    run using:
+
+
+psql --username postgres --file script.sql postgres
+
+
+    The scripts can be run in any order and can be deleted once they have
+    been run.
+
+
+
+    In general it is unsafe to access tables referenced in rebuild scripts
+    until the rebuild scripts have run to completion; doing so could yield
+    incorrect results or poor performance.  Tables not referenced in rebuild
+    scripts can be accessed immediately.
+
+
+
+
+    Statistics
+
+
+    Because optimizer statistics are not transferred by pg_upgrade, you will
+    be instructed to run a command to regenerate that information at the end
+    of the upgrade.  You might need to set connection parameters to
+    match your new cluster.
+
+
+
+
+    Delete old cluster
+
+
+    Once you are satisfied with the upgrade, you can delete the old
+    cluster's data directories by running the script mentioned when
+    pg_upgrade completes.  (Automatic deletion is not
+    possible if you have user-defined tablespaces inside the old data
+    directory.)  You can also delete the old installation directories
+    (e.g. bin, share).
+
+
+
+
+    Reverting to old cluster
+
+
+    If, after running pg_upgrade, you wish to revert to the old cluster,
+    there are several options:
+
+
+
+
+    If you ran pg_upgrade
+    with --check, no modifications were made to the old cluster and
+    you can re-use it anytime.
+
+
+
+
+    If you ran pg_upgrade
+    with --link, the data files might be shared between the old and
+    new cluster; if you started the new server, it has written to those
+    shared files and it is unsafe to use the old cluster.
+
+
+
+
+    If you ran pg_upgrade without --link or did not start the new
+    server, the old cluster was not modified except that, if linking
+    started, a .old suffix was appended to $PGDATA/global/pg_control.
+    To reuse the old cluster, remove the .old suffix from
+    $PGDATA/global/pg_control; you can then restart the old cluster.
+
+
+
+
+
+
+
+
+
+
+  Notes
+
+
+   pg_upgrade does not support upgrading of databases
+   containing these reg* OID-referencing system data types:
+   regproc, regprocedure, regoper,
+   regoperator, regconfig, and
+   regdictionary.  (regtype can be upgraded.)
+
+
+
+   All failure, rebuild, and reindex cases will be reported by
+   pg_upgrade if they affect your installation;
+   post-upgrade scripts to rebuild tables and indexes will be
+   generated automatically.  If you are trying to automate the upgrade
+   of many clusters, you should find that clusters with identical database
+   schemas require the same post-upgrade steps for all cluster upgrades;
+   this is because the post-upgrade steps are based on the database
+   schemas, and not user data.
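+
+   As a rough pre-flight check for the reg* restriction above, you can
+   look for affected user columns yourself; this is a condensed form of
+   the query pg_upgrade runs internally (see check.c later in this
+   patch), with "mydb" standing in for each of your databases:
+
+
+psql --username postgres -d mydb -c "
+SELECT n.nspname, c.relname, a.attname
+FROM   pg_catalog.pg_class c,
+       pg_catalog.pg_namespace n,
+       pg_catalog.pg_attribute a
+WHERE  c.oid = a.attrelid AND NOT a.attisdropped AND
+       a.atttypid IN ('pg_catalog.regproc'::pg_catalog.regtype,
+                      'pg_catalog.regprocedure'::pg_catalog.regtype,
+                      'pg_catalog.regoper'::pg_catalog.regtype,
+                      'pg_catalog.regoperator'::pg_catalog.regtype,
+                      'pg_catalog.regconfig'::pg_catalog.regtype,
+                      'pg_catalog.regdictionary'::pg_catalog.regtype) AND
+       c.relnamespace = n.oid AND
+       n.nspname NOT IN ('pg_catalog', 'information_schema')"
+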
+
+
+
+   For deployment testing, create a schema-only copy of the old cluster,
+   insert dummy data, and upgrade that.
+
+
+
+   If you are upgrading a pre-PostgreSQL 9.2 cluster
+   that uses a configuration-file-only directory, you must pass the
+   real data directory location to pg_upgrade, and
+   pass the configuration directory location to the server, e.g.
+   -d /real-data-directory -o '-D /configuration-directory'.
+
+
+
+   If you are using a pre-9.1 old server that uses a non-default Unix-domain
+   socket directory or a default that differs from the default of the
+   new cluster, set PGHOST to point to the old server's socket
+   location.  (This is not relevant on Windows.)
+
+
+
+   If you want to use link mode and you do not want your old cluster
+   to be modified when the new cluster is started, make a copy of the
+   old cluster and upgrade that in link mode.  To make a valid copy
+   of the old cluster, use rsync to create a dirty
+   copy of the old cluster while the server is running, then shut down
+   the old server and run rsync --checksum again to update the
+   copy with any changes to make it consistent.  (--checksum is necessary
+   because rsync only has file modification-time granularity of one
+   second.)
+
+
+
+
+  See Also
+
+
+
+
+
+
+
+
+
diff --git a/doc/src/sgml/reference.sgml b/doc/src/sgml/reference.sgml
index c1765ef1c5e..fb18d94ea09 100644
--- a/doc/src/sgml/reference.sgml
+++ b/doc/src/sgml/reference.sgml
@@ -263,6 +263,7 @@
    &pgCtl;
    &pgResetxlog;
    &pgRewind;
+   &pgupgrade;
    &postgres;
    &postmaster;
 
diff --git a/src/bin/Makefile b/src/bin/Makefile
index bb77142cab8..cc78798fba7 100644
--- a/src/bin/Makefile
+++ b/src/bin/Makefile
@@ -23,6 +23,7 @@ SUBDIRS = \
 	pg_dump \
 	pg_resetxlog \
 	pg_rewind \
+	pg_upgrade \
 	pgbench \
 	psql \
 	scripts
diff --git a/src/bin/pg_upgrade/.gitignore b/src/bin/pg_upgrade/.gitignore
new file mode 100644
index 00000000000..d24ec60184f
--- /dev/null
+++ b/src/bin/pg_upgrade/.gitignore
@@ -0,0 +1,8 @@
+/pg_upgrade
+# Generated by test suite
+/analyze_new_cluster.sh
+/delete_old_cluster.sh
+/analyze_new_cluster.bat
+/delete_old_cluster.bat
+/log/
+/tmp_check/
diff --git a/src/bin/pg_upgrade/IMPLEMENTATION b/src/bin/pg_upgrade/IMPLEMENTATION
new file mode 100644
index 00000000000..9b5ff7295c1
--- /dev/null
+++ b/src/bin/pg_upgrade/IMPLEMENTATION
@@ -0,0 +1,98 @@
+------------------------------------------------------------------------------
+PG_UPGRADE: IN-PLACE UPGRADES FOR POSTGRESQL
+------------------------------------------------------------------------------
+
+Upgrading a PostgreSQL database from one major release to another can be
+an expensive process.  For minor upgrades, you can simply install new
+executables and forget about upgrading existing data.  But for major
+upgrades, you have to export all of your data using pg_dump, install the
+new release, run initdb to create a new cluster, and then import your
+old data.  If you have a lot of data, that can take a considerable amount
+of time.  If you have too much data, you may have to buy more storage
+since you need enough room to hold the original data plus the exported
+data.  pg_upgrade can reduce the amount of time and disk space required
+for many upgrades.
+
+The URL http://momjian.us/main/writings/pgsql/pg_upgrade.pdf contains a
+presentation about pg_upgrade internals that mirrors the text
+description below.
+
+------------------------------------------------------------------------------
+WHAT IT DOES
+------------------------------------------------------------------------------
+
+pg_upgrade is a tool that performs an in-place upgrade of existing
+data.  Some upgrades change the on-disk representation of data;
+pg_upgrade cannot help in those upgrades.  However, many upgrades do
+not change the on-disk representation of a user-defined table.  In those
+cases, pg_upgrade can move existing user-defined tables from the old
+database cluster into the new cluster.
+
+There are two factors that determine whether an in-place upgrade is
+practical.
+
+First, every table in a cluster shares the same on-disk representation
+of the table headers and trailers and the on-disk representation of
+tuple headers.  If this changes between the old version of PostgreSQL
+and the new version, pg_upgrade cannot move existing tables to the new
+cluster; you will have to pg_dump the old data and then import that
+data into the new cluster.
+
+Second, all data types should have the same binary representation
+between the two major PostgreSQL versions.
+
+------------------------------------------------------------------------------
+HOW IT WORKS
+------------------------------------------------------------------------------
+
+To use pg_upgrade during an upgrade, start by installing a fresh
+cluster using the newest version in a new directory.  When you've
+finished installation, the new cluster will contain the new executables
+and the usual template0, template1, and postgres, but no user-defined
+tables.  At this point, you can shut down the old and new postmasters and
+invoke pg_upgrade.
+
+When pg_upgrade starts, it ensures that all required executables are
+present and contain the expected version numbers.  The verification
+process also checks the old and new $PGDATA directories to ensure that
+the expected files and subdirectories are in place.  If the verification
+process succeeds, pg_upgrade starts the old postmaster and runs
+pg_dumpall --schema-only to capture the metadata contained in the old
+cluster.  The script produced by pg_dumpall will be used in a later step
+to recreate all user-defined objects in the new cluster.
+
+Note that the script produced by pg_dumpall will only recreate
+user-defined objects, not system-defined objects.  The new cluster will
+contain the system-defined objects created by the latest version of
+PostgreSQL.
+
+Once pg_upgrade has extracted the metadata from the old cluster, it
+performs a number of bookkeeping tasks required to 'sync up' the new
+cluster with the existing data.
+
+First, pg_upgrade copies the commit status information and 'next
+transaction ID' from the old cluster to the new cluster.  This step
+ensures that the proper tuples are visible from the new cluster.
+Remember, pg_upgrade does not export/import the content of user-defined
+tables so the transaction IDs in the new cluster must match the
+transaction IDs in the old data.  pg_upgrade also copies the starting
+address for write-ahead logs from the old cluster to the new cluster.
+
+Now pg_upgrade begins reconstructing the metadata obtained from the old
+cluster using the first part of the pg_dumpall output.
+
+Next, pg_upgrade executes the remainder of the script produced earlier
+by pg_dumpall --- this script effectively recreates the complete
+user-defined metadata of the old cluster in the new cluster.  It
+preserves the relfilenode numbers so TOAST and other references
+to relfilenodes in user data are preserved.  (See binary-upgrade usage
+in pg_dump).
+
+Finally, pg_upgrade links or copies each user-defined table and its
+supporting indexes and toast tables from the old cluster to the new
+cluster.
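+
+In file-system terms, that final transfer step is roughly the
+following, repeated for every relation file (the OIDs here are made up
+for illustration; relfilenodes are preserved, while database directory
+OIDs generally differ between the clusters):
+
+    # copy mode
+    cp $OLD_PGDATA/base/16384/16385 $NEW_PGDATA/base/16402/16385
+    # link mode
+    ln $OLD_PGDATA/base/16384/16385 $NEW_PGDATA/base/16402/16385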
+
+An important feature of the pg_upgrade design is that it leaves the
+original cluster intact --- if a problem occurs during the upgrade, you
+can still run the previous version, after renaming the tablespaces back
+to the original names.
diff --git a/src/bin/pg_upgrade/Makefile b/src/bin/pg_upgrade/Makefile
new file mode 100644
index 00000000000..4eb20d6b547
--- /dev/null
+++ b/src/bin/pg_upgrade/Makefile
@@ -0,0 +1,42 @@
+# src/bin/pg_upgrade/Makefile
+
+PGFILEDESC = "pg_upgrade - an in-place binary upgrade utility"
+PGAPPICON = win32
+
+subdir = src/bin/pg_upgrade
+top_builddir = ../../..
+include $(top_builddir)/src/Makefile.global
+
+OBJS = check.o controldata.o dump.o exec.o file.o function.o info.o \
+       option.o page.o parallel.o pg_upgrade.o relfilenode.o server.o \
+       tablespace.o util.o version.o $(WIN32RES)
+
+override CPPFLAGS := -DFRONTEND -DDLSUFFIX=\"$(DLSUFFIX)\" -I$(srcdir) -I$(libpq_srcdir) $(CPPFLAGS)
+
+
+all: pg_upgrade
+
+pg_upgrade: $(OBJS) | submake-libpq submake-libpgport
+	$(CC) $(CFLAGS) $^ $(libpq_pgport) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o $@$(X)
+
+install: all installdirs
+	$(INSTALL_PROGRAM) pg_upgrade$(X) '$(DESTDIR)$(bindir)/pg_upgrade$(X)'
+
+installdirs:
+	$(MKDIR_P) '$(DESTDIR)$(bindir)'
+
+uninstall:
+	rm -f '$(DESTDIR)$(bindir)/pg_upgrade$(X)'
+
+clean distclean maintainer-clean:
+	rm -f pg_upgrade$(X) $(OBJS)
+	rm -rf analyze_new_cluster.sh delete_old_cluster.sh log/ tmp_check/ \
+	       pg_upgrade_dump_globals.sql \
+	       pg_upgrade_dump_*.custom pg_upgrade_*.log
+
+check: test.sh all
+	MAKE=$(MAKE) bindir=$(bindir) libdir=$(libdir) EXTRA_REGRESS_OPTS="$(EXTRA_REGRESS_OPTS)" $(SHELL) $< --install
+
+# disabled because it upsets the build farm
+#installcheck: test.sh
+#	MAKE=$(MAKE) bindir=$(bindir) libdir=$(libdir) $(SHELL) $<
diff --git a/src/bin/pg_upgrade/TESTING b/src/bin/pg_upgrade/TESTING
new file mode 100644
index 00000000000..4ecfc5798e0
--- /dev/null
+++ b/src/bin/pg_upgrade/TESTING
@@ -0,0 +1,81 @@
+The most effective way to test pg_upgrade, aside from testing on user
+data, is by upgrading the PostgreSQL regression database.
+
+This testing process first requires the creation of a valid regression
+database dump.  Such files contain most database features and are
+specific to each major version of Postgres.
+
+Here are the steps needed to create a regression database dump file:
+
+1)  Create and populate the regression database in the old cluster
+    This database can be created by running 'make installcheck' from
+    src/test/regress.
+
+2)  Use pg_dump to dump out the regression database.  Use the new
+    cluster's pg_dump on the old database to minimize whitespace
+    differences in the diff.  (A sketch of steps 1 and 2 follows this
+    list.)
+
+3)  Adjust the regression database dump file
+
+    a)  Perform the load/dump twice
+        This fixes problems with the ordering of COPY columns for
+        inherited tables.
+
+    b)  Change CREATE FUNCTION shared object paths to use '$libdir'
+        The old and new cluster will have different shared object paths.
+
+    c)  Fix any wrapping format differences
+        Commands like CREATE TRIGGER and ALTER TABLE sometimes have
+        differences.
+
+    d)  For pre-9.0, change CREATE OR REPLACE LANGUAGE to CREATE LANGUAGE
+
+    e)  For pre-9.0, remove 'regex_flavor'
+
+    f)  For pre-9.0, adjust extra_float_digits
+        Postgres 9.0 pg_dump uses extra_float_digits=-2 for pre-9.0
+        databases, and extra_float_digits=-3 for >= 9.0 databases.
+        It is necessary to modify 9.0 pg_dump to always use -3, and
+        modify the pre-9.0 old server to accept extra_float_digits=-3.
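+
+As a sketch, steps 1 and 2 might look like this (binary locations and
+the dump file name are placeholders):
+
+    # 1) create and populate the "regression" database in the old cluster
+    make -C src/test/regress installcheck
+
+    # 2) dump it using the *new* cluster's pg_dump against the old server
+    /usr/local/pgsql.new/bin/pg_dump regression > old_regression.sql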
+ +Once the dump is created, it can be repeatedly loaded into the old +database, upgraded, and dumped out of the new database, and then +compared to the original version. To test the dump file, perform these +steps: + +1) Create the old and new clusters in different directories. + +2) Copy the regression shared object files into the appropriate /lib + directory for old and new clusters. + +3) Create the regression database in the old server. + +4) Load the dump file created above into the regression database; + check for errors while loading. + +5) Upgrade the old database to the new major version, as outlined in + the pg_upgrade manual section. + +6) Use pg_dump to dump out the regression database in the new cluster. + +7) Diff the regression database dump file with the regression dump + file loaded into the old server. + +The shell script test.sh in this directory performs more or less this +procedure. You can invoke it by running + + make check + +or by running + + make installcheck + +if "make install" (or "make install-world") were done beforehand. +When invoked without arguments, it will run an upgrade from the +version in this source tree to a new instance of the same version. To +test an upgrade from a different version, invoke it like this: + + make installcheck oldbindir=...otherversion/bin oldsrc=...somewhere/postgresql + +In this case, you will have to manually eyeball the resulting dump +diff for version-specific differences, as explained above. diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c new file mode 100644 index 00000000000..647bf349f4d --- /dev/null +++ b/src/bin/pg_upgrade/check.c @@ -0,0 +1,1016 @@ +/* + * check.c + * + * server checks and output routines + * + * Copyright (c) 2010-2015, PostgreSQL Global Development Group + * src/bin/pg_upgrade/check.c + */ + +#include "postgres_fe.h" + +#include "catalog/pg_authid.h" +#include "mb/pg_wchar.h" +#include "pg_upgrade.h" + + +static void check_new_cluster_is_empty(void); +static void check_databases_are_compatible(void); +static void check_locale_and_encoding(DbInfo *olddb, DbInfo *newdb); +static bool equivalent_locale(int category, const char *loca, const char *locb); +static void check_is_install_user(ClusterInfo *cluster); +static void check_for_prepared_transactions(ClusterInfo *cluster); +static void check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster); +static void check_for_reg_data_type_usage(ClusterInfo *cluster); +static void check_for_jsonb_9_4_usage(ClusterInfo *cluster); +static void get_bin_version(ClusterInfo *cluster); +static char *get_canonical_locale_name(int category, const char *locale); + + +/* + * fix_path_separator + * For non-Windows, just return the argument. + * For Windows convert any forward slash to a backslash + * such as is suitable for arguments to builtin commands + * like RMDIR and DEL. 
+ */ +static char * +fix_path_separator(char *path) +{ +#ifdef WIN32 + + char *result; + char *c; + + result = pg_strdup(path); + + for (c = result; *c != '\0'; c++) + if (*c == '/') + *c = '\\'; + + return result; +#else + + return path; +#endif +} + +void +output_check_banner(bool live_check) +{ + if (user_opts.check && live_check) + { + pg_log(PG_REPORT, "Performing Consistency Checks on Old Live Server\n"); + pg_log(PG_REPORT, "------------------------------------------------\n"); + } + else + { + pg_log(PG_REPORT, "Performing Consistency Checks\n"); + pg_log(PG_REPORT, "-----------------------------\n"); + } +} + + +void +check_and_dump_old_cluster(bool live_check) +{ + /* -- OLD -- */ + + if (!live_check) + start_postmaster(&old_cluster, true); + + get_pg_database_relfilenode(&old_cluster); + + /* Extract a list of databases and tables from the old cluster */ + get_db_and_rel_infos(&old_cluster); + + init_tablespaces(); + + get_loadable_libraries(); + + + /* + * Check for various failure cases + */ + check_is_install_user(&old_cluster); + check_for_prepared_transactions(&old_cluster); + check_for_reg_data_type_usage(&old_cluster); + check_for_isn_and_int8_passing_mismatch(&old_cluster); + if (GET_MAJOR_VERSION(old_cluster.major_version) == 904 && + old_cluster.controldata.cat_ver < JSONB_FORMAT_CHANGE_CAT_VER) + check_for_jsonb_9_4_usage(&old_cluster); + + /* Pre-PG 9.4 had a different 'line' data type internal format */ + if (GET_MAJOR_VERSION(old_cluster.major_version) <= 903) + old_9_3_check_for_line_data_type_usage(&old_cluster); + + /* Pre-PG 9.0 had no large object permissions */ + if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) + new_9_0_populate_pg_largeobject_metadata(&old_cluster, true); + + /* + * While not a check option, we do this now because this is the only time + * the old server is running. + */ + if (!user_opts.check) + generate_old_dump(); + + if (!live_check) + stop_postmaster(false); +} + + +void +check_new_cluster(void) +{ + get_db_and_rel_infos(&new_cluster); + + check_new_cluster_is_empty(); + check_databases_are_compatible(); + + check_loadable_libraries(); + + if (user_opts.transfer_mode == TRANSFER_MODE_LINK) + check_hard_link(); + + check_is_install_user(&new_cluster); + + check_for_prepared_transactions(&new_cluster); +} + + +void +report_clusters_compatible(void) +{ + if (user_opts.check) + { + pg_log(PG_REPORT, "\n*Clusters are compatible*\n"); + /* stops new cluster */ + stop_postmaster(false); + exit(0); + } + + pg_log(PG_REPORT, "\n" + "If pg_upgrade fails after this point, you must re-initdb the\n" + "new cluster before continuing.\n"); +} + + +void +issue_warnings(void) +{ + /* Create dummy large object permissions for old < PG 9.0? */ + if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) + { + start_postmaster(&new_cluster, true); + new_9_0_populate_pg_largeobject_metadata(&new_cluster, false); + stop_postmaster(false); + } +} + + +void +output_completion_banner(char *analyze_script_file_name, + char *deletion_script_file_name) +{ + /* Did we copy the free space files? 
*/ + if (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) + pg_log(PG_REPORT, + "Optimizer statistics are not transferred by pg_upgrade so,\n" + "once you start the new server, consider running:\n" + " %s\n\n", analyze_script_file_name); + else + pg_log(PG_REPORT, + "Optimizer statistics and free space information are not transferred\n" + "by pg_upgrade so, once you start the new server, consider running:\n" + " %s\n\n", analyze_script_file_name); + + + if (deletion_script_file_name) + pg_log(PG_REPORT, + "Running this script will delete the old cluster's data files:\n" + " %s\n", + deletion_script_file_name); + else + pg_log(PG_REPORT, + "Could not create a script to delete the old cluster's data\n" + "files because user-defined tablespaces exist in the old cluster\n" + "directory. The old cluster's contents must be deleted manually.\n"); +} + + +void +check_cluster_versions(void) +{ + prep_status("Checking cluster versions"); + + /* get old and new cluster versions */ + old_cluster.major_version = get_major_server_version(&old_cluster); + new_cluster.major_version = get_major_server_version(&new_cluster); + + /* + * We allow upgrades from/to the same major version for alpha/beta + * upgrades + */ + + if (GET_MAJOR_VERSION(old_cluster.major_version) < 804) + pg_fatal("This utility can only upgrade from PostgreSQL version 8.4 and later.\n"); + + /* Only current PG version is supported as a target */ + if (GET_MAJOR_VERSION(new_cluster.major_version) != GET_MAJOR_VERSION(PG_VERSION_NUM)) + pg_fatal("This utility can only upgrade to PostgreSQL version %s.\n", + PG_MAJORVERSION); + + /* + * We can't allow downgrading because we use the target pg_dump, and + * pg_dump cannot operate on newer database versions, only current and + * older versions. + */ + if (old_cluster.major_version > new_cluster.major_version) + pg_fatal("This utility cannot be used to downgrade to older major PostgreSQL versions.\n"); + + /* get old and new binary versions */ + get_bin_version(&old_cluster); + get_bin_version(&new_cluster); + + /* Ensure binaries match the designated data directories */ + if (GET_MAJOR_VERSION(old_cluster.major_version) != + GET_MAJOR_VERSION(old_cluster.bin_version)) + pg_fatal("Old cluster data and binary directories are from different major versions.\n"); + if (GET_MAJOR_VERSION(new_cluster.major_version) != + GET_MAJOR_VERSION(new_cluster.bin_version)) + pg_fatal("New cluster data and binary directories are from different major versions.\n"); + + check_ok(); +} + + +void +check_cluster_compatibility(bool live_check) +{ + /* get/check pg_control data of servers */ + get_control_data(&old_cluster, live_check); + get_control_data(&new_cluster, false); + check_control_data(&old_cluster.controldata, &new_cluster.controldata); + + /* Is it 9.0 but without tablespace directories? 
*/ + if (GET_MAJOR_VERSION(new_cluster.major_version) == 900 && + new_cluster.controldata.cat_ver < TABLE_SPACE_SUBDIRS_CAT_VER) + pg_fatal("This utility can only upgrade to PostgreSQL version 9.0 after 2010-01-11\n" + "because of backend API changes made during development.\n"); + + /* We read the real port number for PG >= 9.1 */ + if (live_check && GET_MAJOR_VERSION(old_cluster.major_version) < 901 && + old_cluster.port == DEF_PGUPORT) + pg_fatal("When checking a pre-PG 9.1 live old server, " + "you must specify the old server's port number.\n"); + + if (live_check && old_cluster.port == new_cluster.port) + pg_fatal("When checking a live server, " + "the old and new port numbers must be different.\n"); +} + + +/* + * check_locale_and_encoding() + * + * Check that locale and encoding of a database in the old and new clusters + * are compatible. + */ +static void +check_locale_and_encoding(DbInfo *olddb, DbInfo *newdb) +{ + if (olddb->db_encoding != newdb->db_encoding) + pg_fatal("encodings for database \"%s\" do not match: old \"%s\", new \"%s\"\n", + olddb->db_name, + pg_encoding_to_char(olddb->db_encoding), + pg_encoding_to_char(newdb->db_encoding)); + if (!equivalent_locale(LC_COLLATE, olddb->db_collate, newdb->db_collate)) + pg_fatal("lc_collate values for database \"%s\" do not match: old \"%s\", new \"%s\"\n", + olddb->db_name, olddb->db_collate, newdb->db_collate); + if (!equivalent_locale(LC_CTYPE, olddb->db_ctype, newdb->db_ctype)) + pg_fatal("lc_ctype values for database \"%s\" do not match: old \"%s\", new \"%s\"\n", + olddb->db_name, olddb->db_ctype, newdb->db_ctype); +} + +/* + * equivalent_locale() + * + * Best effort locale-name comparison. Return false if we are not 100% sure + * the locales are equivalent. + * + * Note: The encoding parts of the names are ignored. This function is + * currently used to compare locale names stored in pg_database, and + * pg_database contains a separate encoding field. That's compared directly + * in check_locale_and_encoding(). + */ +static bool +equivalent_locale(int category, const char *loca, const char *locb) +{ + const char *chara; + const char *charb; + char *canona; + char *canonb; + int lena; + int lenb; + + /* + * If the names are equal, the locales are equivalent. Checking this + * first avoids calling setlocale() in the common case that the names + * are equal. That's a good thing, if setlocale() is buggy, for example. + */ + if (pg_strcasecmp(loca, locb) == 0) + return true; + + /* + * Not identical. Canonicalize both names, remove the encoding parts, + * and try again. + */ + canona = get_canonical_locale_name(category, loca); + chara = strrchr(canona, '.'); + lena = chara ? (chara - canona) : strlen(canona); + + canonb = get_canonical_locale_name(category, locb); + charb = strrchr(canonb, '.'); + lenb = charb ? 
(charb - canonb) : strlen(canonb); + + if (lena == lenb && pg_strncasecmp(canona, canonb, lena) == 0) + return true; + + return false; +} + + +static void +check_new_cluster_is_empty(void) +{ + int dbnum; + + for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++) + { + int relnum; + RelInfoArr *rel_arr = &new_cluster.dbarr.dbs[dbnum].rel_arr; + + for (relnum = 0; relnum < rel_arr->nrels; + relnum++) + { + /* pg_largeobject and its index should be skipped */ + if (strcmp(rel_arr->rels[relnum].nspname, "pg_catalog") != 0) + pg_fatal("New cluster database \"%s\" is not empty\n", + new_cluster.dbarr.dbs[dbnum].db_name); + } + } +} + +/* + * Check that every database that already exists in the new cluster is + * compatible with the corresponding database in the old one. + */ +static void +check_databases_are_compatible(void) +{ + int newdbnum; + int olddbnum; + DbInfo *newdbinfo; + DbInfo *olddbinfo; + + for (newdbnum = 0; newdbnum < new_cluster.dbarr.ndbs; newdbnum++) + { + newdbinfo = &new_cluster.dbarr.dbs[newdbnum]; + + /* Find the corresponding database in the old cluster */ + for (olddbnum = 0; olddbnum < old_cluster.dbarr.ndbs; olddbnum++) + { + olddbinfo = &old_cluster.dbarr.dbs[olddbnum]; + if (strcmp(newdbinfo->db_name, olddbinfo->db_name) == 0) + { + check_locale_and_encoding(olddbinfo, newdbinfo); + break; + } + } + } +} + + +/* + * create_script_for_cluster_analyze() + * + * This incrementally generates better optimizer statistics + */ +void +create_script_for_cluster_analyze(char **analyze_script_file_name) +{ + FILE *script = NULL; + char *user_specification = ""; + + prep_status("Creating script to analyze new cluster"); + + if (os_info.user_specified) + user_specification = psprintf("-U \"%s\" ", os_info.user); + + *analyze_script_file_name = psprintf("%sanalyze_new_cluster.%s", + SCRIPT_PREFIX, SCRIPT_EXT); + + if ((script = fopen_priv(*analyze_script_file_name, "w")) == NULL) + pg_fatal("Could not open file \"%s\": %s\n", + *analyze_script_file_name, getErrorText(errno)); + +#ifndef WIN32 + /* add shebang header */ + fprintf(script, "#!/bin/sh\n\n"); +#else + /* suppress command echoing */ + fprintf(script, "@echo off\n"); +#endif + + fprintf(script, "echo %sThis script will generate minimal optimizer statistics rapidly%s\n", + ECHO_QUOTE, ECHO_QUOTE); + fprintf(script, "echo %sso your system is usable, and then gather statistics twice more%s\n", + ECHO_QUOTE, ECHO_QUOTE); + fprintf(script, "echo %swith increasing accuracy. When it is done, your system will%s\n", + ECHO_QUOTE, ECHO_QUOTE); + fprintf(script, "echo %shave the default level of optimizer statistics.%s\n", + ECHO_QUOTE, ECHO_QUOTE); + fprintf(script, "echo%s\n\n", ECHO_BLANK); + + fprintf(script, "echo %sIf you have used ALTER TABLE to modify the statistics target for%s\n", + ECHO_QUOTE, ECHO_QUOTE); + fprintf(script, "echo %sany tables, you might want to remove them and restore them after%s\n", + ECHO_QUOTE, ECHO_QUOTE); + fprintf(script, "echo %srunning this script because they will delay fast statistics generation.%s\n", + ECHO_QUOTE, ECHO_QUOTE); + fprintf(script, "echo%s\n\n", ECHO_BLANK); + + fprintf(script, "echo %sIf you would like default statistics as quickly as possible, cancel%s\n", + ECHO_QUOTE, ECHO_QUOTE); + fprintf(script, "echo %sthis script and run:%s\n", + ECHO_QUOTE, ECHO_QUOTE); + fprintf(script, "echo %s \"%s/vacuumdb\" %s--all %s%s\n", ECHO_QUOTE, + new_cluster.bindir, user_specification, + /* Did we copy the free space files? */ + (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ? 
+ "--analyze-only" : "--analyze", ECHO_QUOTE); + fprintf(script, "echo%s\n\n", ECHO_BLANK); + + fprintf(script, "\"%s/vacuumdb\" %s--all --analyze-in-stages\n", + new_cluster.bindir, user_specification); + /* Did we copy the free space files? */ + if (GET_MAJOR_VERSION(old_cluster.major_version) < 804) + fprintf(script, "\"%s/vacuumdb\" %s--all\n", new_cluster.bindir, + user_specification); + + fprintf(script, "echo%s\n\n", ECHO_BLANK); + fprintf(script, "echo %sDone%s\n", + ECHO_QUOTE, ECHO_QUOTE); + + fclose(script); + +#ifndef WIN32 + if (chmod(*analyze_script_file_name, S_IRWXU) != 0) + pg_fatal("Could not add execute permission to file \"%s\": %s\n", + *analyze_script_file_name, getErrorText(errno)); +#endif + + if (os_info.user_specified) + pg_free(user_specification); + + check_ok(); +} + + +/* + * create_script_for_old_cluster_deletion() + * + * This is particularly useful for tablespace deletion. + */ +void +create_script_for_old_cluster_deletion(char **deletion_script_file_name) +{ + FILE *script = NULL; + int tblnum; + char old_cluster_pgdata[MAXPGPATH]; + + *deletion_script_file_name = psprintf("%sdelete_old_cluster.%s", + SCRIPT_PREFIX, SCRIPT_EXT); + + /* + * Some users (oddly) create tablespaces inside the cluster data + * directory. We can't create a proper old cluster delete script in that + * case. + */ + strlcpy(old_cluster_pgdata, old_cluster.pgdata, MAXPGPATH); + canonicalize_path(old_cluster_pgdata); + for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) + { + char old_tablespace_dir[MAXPGPATH]; + + strlcpy(old_tablespace_dir, os_info.old_tablespaces[tblnum], MAXPGPATH); + canonicalize_path(old_tablespace_dir); + if (path_is_prefix_of_path(old_cluster_pgdata, old_tablespace_dir)) + { + /* Unlink file in case it is left over from a previous run. */ + unlink(*deletion_script_file_name); + pg_free(*deletion_script_file_name); + *deletion_script_file_name = NULL; + return; + } + } + + prep_status("Creating script to delete old cluster"); + + if ((script = fopen_priv(*deletion_script_file_name, "w")) == NULL) + pg_fatal("Could not open file \"%s\": %s\n", + *deletion_script_file_name, getErrorText(errno)); + +#ifndef WIN32 + /* add shebang header */ + fprintf(script, "#!/bin/sh\n\n"); +#endif + + /* delete old cluster's default tablespace */ + fprintf(script, RMDIR_CMD " \"%s\"\n", fix_path_separator(old_cluster.pgdata)); + + /* delete old cluster's alternate tablespaces */ + for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) + { + /* + * Do the old cluster's per-database directories share a directory + * with a new version-specific tablespace? + */ + if (strlen(old_cluster.tablespace_suffix) == 0) + { + /* delete per-database directories */ + int dbnum; + + fprintf(script, "\n"); + /* remove PG_VERSION? */ + if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) + fprintf(script, RM_CMD " %s%cPG_VERSION\n", + fix_path_separator(os_info.old_tablespaces[tblnum]), + PATH_SEPARATOR); + + for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) + fprintf(script, RMDIR_CMD " \"%s%c%d\"\n", + fix_path_separator(os_info.old_tablespaces[tblnum]), + PATH_SEPARATOR, old_cluster.dbarr.dbs[dbnum].db_oid); + } + else + { + char *suffix_path = pg_strdup(old_cluster.tablespace_suffix); + + /* + * Simply delete the tablespace directory, which might be ".old" + * or a version-specific subdirectory. 
+ */ + fprintf(script, RMDIR_CMD " \"%s%s\"\n", + fix_path_separator(os_info.old_tablespaces[tblnum]), + fix_path_separator(suffix_path)); + pfree(suffix_path); + } + } + + fclose(script); + +#ifndef WIN32 + if (chmod(*deletion_script_file_name, S_IRWXU) != 0) + pg_fatal("Could not add execute permission to file \"%s\": %s\n", + *deletion_script_file_name, getErrorText(errno)); +#endif + + check_ok(); +} + + +/* + * check_is_install_user() + * + * Check we are the install user, and that the new cluster + * has no other users. + */ +static void +check_is_install_user(ClusterInfo *cluster) +{ + PGresult *res; + PGconn *conn = connectToServer(cluster, "template1"); + + prep_status("Checking database user is the install user"); + + /* Can't use pg_authid because only superusers can view it. */ + res = executeQueryOrDie(conn, + "SELECT rolsuper, oid " + "FROM pg_catalog.pg_roles " + "WHERE rolname = current_user"); + + /* + * We only allow the install user in the new cluster (see comment below) + * and we preserve pg_authid.oid, so this must be the install user in + * the old cluster too. + */ + if (PQntuples(res) != 1 || + atooid(PQgetvalue(res, 0, 1)) != BOOTSTRAP_SUPERUSERID) + pg_fatal("database user \"%s\" is not the install user\n", + os_info.user); + + PQclear(res); + + res = executeQueryOrDie(conn, + "SELECT COUNT(*) " + "FROM pg_catalog.pg_roles "); + + if (PQntuples(res) != 1) + pg_fatal("could not determine the number of users\n"); + + /* + * We only allow the install user in the new cluster because other defined + * users might match users defined in the old cluster and generate an + * error during pg_dump restore. + */ + if (cluster == &new_cluster && atooid(PQgetvalue(res, 0, 0)) != 1) + pg_fatal("Only the install user can be defined in the new cluster.\n"); + + PQclear(res); + + PQfinish(conn); + + check_ok(); +} + + +/* + * check_for_prepared_transactions() + * + * Make sure there are no prepared transactions because the storage format + * might have changed. + */ +static void +check_for_prepared_transactions(ClusterInfo *cluster) +{ + PGresult *res; + PGconn *conn = connectToServer(cluster, "template1"); + + prep_status("Checking for prepared transactions"); + + res = executeQueryOrDie(conn, + "SELECT * " + "FROM pg_catalog.pg_prepared_xacts"); + + if (PQntuples(res) != 0) + pg_fatal("The %s cluster contains prepared transactions\n", + CLUSTER_NAME(cluster)); + + PQclear(res); + + PQfinish(conn); + + check_ok(); +} + + +/* + * check_for_isn_and_int8_passing_mismatch() + * + * contrib/isn relies on data type int8, and in 8.4 int8 can now be passed + * by value. The schema dumps the CREATE TYPE PASSEDBYVALUE setting so + * it must match for the old and new servers. 
+ */ +static void +check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster) +{ + int dbnum; + FILE *script = NULL; + bool found = false; + char output_path[MAXPGPATH]; + + prep_status("Checking for contrib/isn with bigint-passing mismatch"); + + if (old_cluster.controldata.float8_pass_by_value == + new_cluster.controldata.float8_pass_by_value) + { + /* no mismatch */ + check_ok(); + return; + } + + snprintf(output_path, sizeof(output_path), + "contrib_isn_and_int8_pass_by_value.txt"); + + for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) + { + PGresult *res; + bool db_used = false; + int ntups; + int rowno; + int i_nspname, + i_proname; + DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; + PGconn *conn = connectToServer(cluster, active_db->db_name); + + /* Find any functions coming from contrib/isn */ + res = executeQueryOrDie(conn, + "SELECT n.nspname, p.proname " + "FROM pg_catalog.pg_proc p, " + " pg_catalog.pg_namespace n " + "WHERE p.pronamespace = n.oid AND " + " p.probin = '$libdir/isn'"); + + ntups = PQntuples(res); + i_nspname = PQfnumber(res, "nspname"); + i_proname = PQfnumber(res, "proname"); + for (rowno = 0; rowno < ntups; rowno++) + { + found = true; + if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) + pg_fatal("Could not open file \"%s\": %s\n", + output_path, getErrorText(errno)); + if (!db_used) + { + fprintf(script, "Database: %s\n", active_db->db_name); + db_used = true; + } + fprintf(script, " %s.%s\n", + PQgetvalue(res, rowno, i_nspname), + PQgetvalue(res, rowno, i_proname)); + } + + PQclear(res); + + PQfinish(conn); + } + + if (script) + fclose(script); + + if (found) + { + pg_log(PG_REPORT, "fatal\n"); + pg_fatal("Your installation contains \"contrib/isn\" functions which rely on the\n" + "bigint data type. Your old and new clusters pass bigint values\n" + "differently so this cluster cannot currently be upgraded. You can\n" + "manually upgrade databases that use \"contrib/isn\" facilities and remove\n" + "\"contrib/isn\" from the old cluster and restart the upgrade. A list of\n" + "the problem functions is in the file:\n" + " %s\n\n", output_path); + } + else + check_ok(); +} + + +/* + * check_for_reg_data_type_usage() + * pg_upgrade only preserves these system values: + * pg_class.oid + * pg_type.oid + * pg_enum.oid + * + * Many of the reg* data types reference system catalog info that is + * not preserved, and hence these data types cannot be used in user + * tables upgraded by pg_upgrade. + */ +static void +check_for_reg_data_type_usage(ClusterInfo *cluster) +{ + int dbnum; + FILE *script = NULL; + bool found = false; + char output_path[MAXPGPATH]; + + prep_status("Checking for reg* system OID user data types"); + + snprintf(output_path, sizeof(output_path), "tables_using_reg.txt"); + + for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) + { + PGresult *res; + bool db_used = false; + int ntups; + int rowno; + int i_nspname, + i_relname, + i_attname; + DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; + PGconn *conn = connectToServer(cluster, active_db->db_name); + + /* + * While several relkinds don't store any data, e.g. views, they can + * be used to define data types of other columns, so we check all + * relkinds. 
+ */ + res = executeQueryOrDie(conn, + "SELECT n.nspname, c.relname, a.attname " + "FROM pg_catalog.pg_class c, " + " pg_catalog.pg_namespace n, " + " pg_catalog.pg_attribute a " + "WHERE c.oid = a.attrelid AND " + " NOT a.attisdropped AND " + " a.atttypid IN ( " + " 'pg_catalog.regproc'::pg_catalog.regtype, " + " 'pg_catalog.regprocedure'::pg_catalog.regtype, " + " 'pg_catalog.regoper'::pg_catalog.regtype, " + " 'pg_catalog.regoperator'::pg_catalog.regtype, " + /* regclass.oid is preserved, so 'regclass' is OK */ + /* regtype.oid is preserved, so 'regtype' is OK */ + " 'pg_catalog.regconfig'::pg_catalog.regtype, " + " 'pg_catalog.regdictionary'::pg_catalog.regtype) AND " + " c.relnamespace = n.oid AND " + " n.nspname NOT IN ('pg_catalog', 'information_schema')"); + + ntups = PQntuples(res); + i_nspname = PQfnumber(res, "nspname"); + i_relname = PQfnumber(res, "relname"); + i_attname = PQfnumber(res, "attname"); + for (rowno = 0; rowno < ntups; rowno++) + { + found = true; + if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) + pg_fatal("Could not open file \"%s\": %s\n", + output_path, getErrorText(errno)); + if (!db_used) + { + fprintf(script, "Database: %s\n", active_db->db_name); + db_used = true; + } + fprintf(script, " %s.%s.%s\n", + PQgetvalue(res, rowno, i_nspname), + PQgetvalue(res, rowno, i_relname), + PQgetvalue(res, rowno, i_attname)); + } + + PQclear(res); + + PQfinish(conn); + } + + if (script) + fclose(script); + + if (found) + { + pg_log(PG_REPORT, "fatal\n"); + pg_fatal("Your installation contains one of the reg* data types in user tables.\n" + "These data types reference system OIDs that are not preserved by\n" + "pg_upgrade, so this cluster cannot currently be upgraded. You can\n" + "remove the problem tables and restart the upgrade. A list of the problem\n" + "columns is in the file:\n" + " %s\n\n", output_path); + } + else + check_ok(); +} + + +/* + * check_for_jsonb_9_4_usage() + * + * JSONB changed its storage format during 9.4 beta, so check for it. + */ +static void +check_for_jsonb_9_4_usage(ClusterInfo *cluster) +{ + int dbnum; + FILE *script = NULL; + bool found = false; + char output_path[MAXPGPATH]; + + prep_status("Checking for JSONB user data types"); + + snprintf(output_path, sizeof(output_path), "tables_using_jsonb.txt"); + + for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) + { + PGresult *res; + bool db_used = false; + int ntups; + int rowno; + int i_nspname, + i_relname, + i_attname; + DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; + PGconn *conn = connectToServer(cluster, active_db->db_name); + + /* + * While several relkinds don't store any data, e.g. views, they can + * be used to define data types of other columns, so we check all + * relkinds. 
+ */ + res = executeQueryOrDie(conn, + "SELECT n.nspname, c.relname, a.attname " + "FROM pg_catalog.pg_class c, " + " pg_catalog.pg_namespace n, " + " pg_catalog.pg_attribute a " + "WHERE c.oid = a.attrelid AND " + " NOT a.attisdropped AND " + " a.atttypid = 'pg_catalog.jsonb'::pg_catalog.regtype AND " + " c.relnamespace = n.oid AND " + /* exclude possible orphaned temp tables */ + " n.nspname !~ '^pg_temp_' AND " + " n.nspname NOT IN ('pg_catalog', 'information_schema')"); + + ntups = PQntuples(res); + i_nspname = PQfnumber(res, "nspname"); + i_relname = PQfnumber(res, "relname"); + i_attname = PQfnumber(res, "attname"); + for (rowno = 0; rowno < ntups; rowno++) + { + found = true; + if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) + pg_fatal("Could not open file \"%s\": %s\n", + output_path, getErrorText(errno)); + if (!db_used) + { + fprintf(script, "Database: %s\n", active_db->db_name); + db_used = true; + } + fprintf(script, " %s.%s.%s\n", + PQgetvalue(res, rowno, i_nspname), + PQgetvalue(res, rowno, i_relname), + PQgetvalue(res, rowno, i_attname)); + } + + PQclear(res); + + PQfinish(conn); + } + + if (script) + fclose(script); + + if (found) + { + pg_log(PG_REPORT, "fatal\n"); + pg_fatal("Your installation contains one of the JSONB data types in user tables.\n" + "The internal format of JSONB changed during 9.4 beta so this cluster cannot currently\n" + "be upgraded. You can remove the problem tables and restart the upgrade. A list\n" + "of the problem columns is in the file:\n" + " %s\n\n", output_path); + } + else + check_ok(); +} + + +static void +get_bin_version(ClusterInfo *cluster) +{ + char cmd[MAXPGPATH], + cmd_output[MAX_STRING]; + FILE *output; + int pre_dot, + post_dot; + + snprintf(cmd, sizeof(cmd), "\"%s/pg_ctl\" --version", cluster->bindir); + + if ((output = popen(cmd, "r")) == NULL || + fgets(cmd_output, sizeof(cmd_output), output) == NULL) + pg_fatal("Could not get pg_ctl version data using %s: %s\n", + cmd, getErrorText(errno)); + + pclose(output); + + /* Remove trailing newline */ + if (strchr(cmd_output, '\n') != NULL) + *strchr(cmd_output, '\n') = '\0'; + + if (sscanf(cmd_output, "%*s %*s %d.%d", &pre_dot, &post_dot) != 2) + pg_fatal("could not get version from %s\n", cmd); + + cluster->bin_version = (pre_dot * 100 + post_dot) * 100; +} + + +/* + * get_canonical_locale_name + * + * Send the locale name to the system, and hope we get back a canonical + * version. This should match the backend's check_locale() function. + */ +static char * +get_canonical_locale_name(int category, const char *locale) +{ + char *save; + char *res; + + /* get the current setting, so we can restore it. */ + save = setlocale(category, NULL); + if (!save) + pg_fatal("failed to get the current locale\n"); + + /* 'save' may be pointing at a modifiable scratch variable, so copy it. */ + save = pg_strdup(save); + + /* set the locale with setlocale, to see if it accepts it. */ + res = setlocale(category, locale); + + if (!res) + pg_fatal("failed to get system locale name for \"%s\"\n", locale); + + res = pg_strdup(res); + + /* restore old value. 
*/
+	if (!setlocale(category, save))
+		pg_fatal("failed to restore old locale \"%s\"\n", save);
+
+	pg_free(save);
+
+	return res;
+}
diff --git a/src/bin/pg_upgrade/controldata.c b/src/bin/pg_upgrade/controldata.c
new file mode 100644
index 00000000000..bf53db05515
--- /dev/null
+++ b/src/bin/pg_upgrade/controldata.c
@@ -0,0 +1,606 @@
+/*
+ *	controldata.c
+ *
+ *	controldata functions
+ *
+ *	Copyright (c) 2010-2015, PostgreSQL Global Development Group
+ *	src/bin/pg_upgrade/controldata.c
+ */
+
+#include "postgres_fe.h"
+
+#include "pg_upgrade.h"
+
+#include <ctype.h>
+
+/*
+ * get_control_data()
+ *
+ * gets pg_control information in "ctrl". Assumes that bindir and
+ * datadir are valid absolute paths to postgresql bin and pgdata
+ * directories respectively *and* pg_resetxlog is version compatible
+ * with datadir. The main purpose of this function is to get pg_control
+ * data in a version independent manner.
+ *
+ * The approach taken here is to invoke pg_resetxlog with -n option
+ * and then pipe its output. With little string parsing we get the
+ * pg_control data.  pg_resetxlog cannot be run while the server is running
+ * so we use pg_controldata;  pg_controldata doesn't provide all the fields
+ * we need to actually perform the upgrade, but it provides enough for
+ * check mode.  We do not implement pg_resetxlog -n because it is hard to
+ * return valid xid data for a running server.
+ */
+void
+get_control_data(ClusterInfo *cluster, bool live_check)
+{
+	char		cmd[MAXPGPATH];
+	char		bufin[MAX_STRING];
+	FILE	   *output;
+	char	   *p;
+	bool		got_xid = false;
+	bool		got_oid = false;
+	bool		got_nextxlogfile = false;
+	bool		got_multi = false;
+	bool		got_mxoff = false;
+	bool		got_oldestmulti = false;
+	bool		got_log_id = false;
+	bool		got_log_seg = false;
+	bool		got_tli = false;
+	bool		got_align = false;
+	bool		got_blocksz = false;
+	bool		got_largesz = false;
+	bool		got_walsz = false;
+	bool		got_walseg = false;
+	bool		got_ident = false;
+	bool		got_index = false;
+	bool		got_toast = false;
+	bool		got_large_object = false;
+	bool		got_date_is_int = false;
+	bool		got_float8_pass_by_value = false;
+	bool		got_data_checksum_version = false;
+	char	   *lc_collate = NULL;
+	char	   *lc_ctype = NULL;
+	char	   *lc_monetary = NULL;
+	char	   *lc_numeric = NULL;
+	char	   *lc_time = NULL;
+	char	   *lang = NULL;
+	char	   *language = NULL;
+	char	   *lc_all = NULL;
+	char	   *lc_messages = NULL;
+	uint32		logid = 0;
+	uint32		segno = 0;
+
+
+	/*
+	 * Because we test the pg_resetxlog output as strings, it has to be in
+	 * English.  Copied from pg_regress.c.
+ */ + if (getenv("LC_COLLATE")) + lc_collate = pg_strdup(getenv("LC_COLLATE")); + if (getenv("LC_CTYPE")) + lc_ctype = pg_strdup(getenv("LC_CTYPE")); + if (getenv("LC_MONETARY")) + lc_monetary = pg_strdup(getenv("LC_MONETARY")); + if (getenv("LC_NUMERIC")) + lc_numeric = pg_strdup(getenv("LC_NUMERIC")); + if (getenv("LC_TIME")) + lc_time = pg_strdup(getenv("LC_TIME")); + if (getenv("LANG")) + lang = pg_strdup(getenv("LANG")); + if (getenv("LANGUAGE")) + language = pg_strdup(getenv("LANGUAGE")); + if (getenv("LC_ALL")) + lc_all = pg_strdup(getenv("LC_ALL")); + if (getenv("LC_MESSAGES")) + lc_messages = pg_strdup(getenv("LC_MESSAGES")); + + pg_putenv("LC_COLLATE", NULL); + pg_putenv("LC_CTYPE", NULL); + pg_putenv("LC_MONETARY", NULL); + pg_putenv("LC_NUMERIC", NULL); + pg_putenv("LC_TIME", NULL); + pg_putenv("LANG", +#ifndef WIN32 + NULL); +#else + /* On Windows the default locale cannot be English, so force it */ + "en"); +#endif + pg_putenv("LANGUAGE", NULL); + pg_putenv("LC_ALL", NULL); + pg_putenv("LC_MESSAGES", "C"); + + snprintf(cmd, sizeof(cmd), "\"%s/%s \"%s\"", + cluster->bindir, + live_check ? "pg_controldata\"" : "pg_resetxlog\" -n", + cluster->pgdata); + fflush(stdout); + fflush(stderr); + + if ((output = popen(cmd, "r")) == NULL) + pg_fatal("Could not get control data using %s: %s\n", + cmd, getErrorText(errno)); + + /* Only in <= 9.2 */ + if (GET_MAJOR_VERSION(cluster->major_version) <= 902) + { + cluster->controldata.data_checksum_version = 0; + got_data_checksum_version = true; + } + + /* we have the result of cmd in "output". so parse it line by line now */ + while (fgets(bufin, sizeof(bufin), output)) + { + pg_log(PG_VERBOSE, "%s", bufin); + + if ((p = strstr(bufin, "pg_control version number:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: pg_resetxlog problem\n", __LINE__); + + p++; /* remove ':' char */ + cluster->controldata.ctrl_ver = str2uint(p); + } + else if ((p = strstr(bufin, "Catalog version number:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + cluster->controldata.cat_ver = str2uint(p); + } + else if ((p = strstr(bufin, "First log segment after reset:")) != NULL) + { + /* Skip the colon and any whitespace after it */ + p = strchr(p, ':'); + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + p = strpbrk(p, "01234567890ABCDEF"); + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + /* Make sure it looks like a valid WAL file name */ + if (strspn(p, "0123456789ABCDEF") != 24) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + strlcpy(cluster->controldata.nextxlogfile, p, 25); + got_nextxlogfile = true; + } + else if ((p = strstr(bufin, "First log file ID after reset:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + logid = str2uint(p); + got_log_id = true; + } + else if ((p = strstr(bufin, "First log file segment after reset:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + segno = str2uint(p); + got_log_seg = true; + } + else if ((p = strstr(bufin, "Latest checkpoint's TimeLineID:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + 
pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + cluster->controldata.chkpnt_tli = str2uint(p); + got_tli = true; + } + else if ((p = strstr(bufin, "Latest checkpoint's NextXID:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + cluster->controldata.chkpnt_nxtepoch = str2uint(p); + + p = strchr(p, '/'); + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove '/' char */ + cluster->controldata.chkpnt_nxtxid = str2uint(p); + got_xid = true; + } + else if ((p = strstr(bufin, "Latest checkpoint's NextOID:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + cluster->controldata.chkpnt_nxtoid = str2uint(p); + got_oid = true; + } + else if ((p = strstr(bufin, "Latest checkpoint's NextMultiXactId:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + cluster->controldata.chkpnt_nxtmulti = str2uint(p); + got_multi = true; + } + else if ((p = strstr(bufin, "Latest checkpoint's oldestMultiXid:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + cluster->controldata.chkpnt_oldstMulti = str2uint(p); + got_oldestmulti = true; + } + else if ((p = strstr(bufin, "Latest checkpoint's NextMultiOffset:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + cluster->controldata.chkpnt_nxtmxoff = str2uint(p); + got_mxoff = true; + } + else if ((p = strstr(bufin, "Maximum data alignment:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + cluster->controldata.align = str2uint(p); + got_align = true; + } + else if ((p = strstr(bufin, "Database block size:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + cluster->controldata.blocksz = str2uint(p); + got_blocksz = true; + } + else if ((p = strstr(bufin, "Blocks per segment of large relation:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + cluster->controldata.largesz = str2uint(p); + got_largesz = true; + } + else if ((p = strstr(bufin, "WAL block size:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + cluster->controldata.walsz = str2uint(p); + got_walsz = true; + } + else if ((p = strstr(bufin, "Bytes per WAL segment:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + cluster->controldata.walseg = str2uint(p); + got_walseg = true; + } + else if ((p = strstr(bufin, "Maximum length of identifiers:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval 
problem\n", __LINE__); + + p++; /* remove ':' char */ + cluster->controldata.ident = str2uint(p); + got_ident = true; + } + else if ((p = strstr(bufin, "Maximum columns in an index:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + cluster->controldata.index = str2uint(p); + got_index = true; + } + else if ((p = strstr(bufin, "Maximum size of a TOAST chunk:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + cluster->controldata.toast = str2uint(p); + got_toast = true; + } + else if ((p = strstr(bufin, "Size of a large-object chunk:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + cluster->controldata.large_object = str2uint(p); + got_large_object = true; + } + else if ((p = strstr(bufin, "Date/time type storage:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + cluster->controldata.date_is_int = strstr(p, "64-bit integers") != NULL; + got_date_is_int = true; + } + else if ((p = strstr(bufin, "Float8 argument passing:")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + /* used later for contrib check */ + cluster->controldata.float8_pass_by_value = strstr(p, "by value") != NULL; + got_float8_pass_by_value = true; + } + else if ((p = strstr(bufin, "checksum")) != NULL) + { + p = strchr(p, ':'); + + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); + + p++; /* remove ':' char */ + /* used later for contrib check */ + cluster->controldata.data_checksum_version = str2uint(p); + got_data_checksum_version = true; + } + } + + if (output) + pclose(output); + + /* + * Restore environment variables + */ + pg_putenv("LC_COLLATE", lc_collate); + pg_putenv("LC_CTYPE", lc_ctype); + pg_putenv("LC_MONETARY", lc_monetary); + pg_putenv("LC_NUMERIC", lc_numeric); + pg_putenv("LC_TIME", lc_time); + pg_putenv("LANG", lang); + pg_putenv("LANGUAGE", language); + pg_putenv("LC_ALL", lc_all); + pg_putenv("LC_MESSAGES", lc_messages); + + pg_free(lc_collate); + pg_free(lc_ctype); + pg_free(lc_monetary); + pg_free(lc_numeric); + pg_free(lc_time); + pg_free(lang); + pg_free(language); + pg_free(lc_all); + pg_free(lc_messages); + + /* + * Before 9.3, pg_resetxlog reported the xlogid and segno of the first log + * file after reset as separate lines. Starting with 9.3, it reports the + * WAL file name. If the old cluster is older than 9.3, we construct the + * WAL file name from the xlogid and segno. 
+ */ + if (GET_MAJOR_VERSION(cluster->major_version) <= 902) + { + if (got_log_id && got_log_seg) + { + snprintf(cluster->controldata.nextxlogfile, 25, "%08X%08X%08X", + tli, logid, segno); + got_nextxlogfile = true; + } + } + + /* verify that we got all the mandatory pg_control data */ + if (!got_xid || !got_oid || + !got_multi || !got_mxoff || + (!got_oldestmulti && + cluster->controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER) || + (!live_check && !got_nextxlogfile) || + !got_tli || + !got_align || !got_blocksz || !got_largesz || !got_walsz || + !got_walseg || !got_ident || !got_index || !got_toast || + (!got_large_object && + cluster->controldata.ctrl_ver >= LARGE_OBJECT_SIZE_PG_CONTROL_VER) || + !got_date_is_int || !got_float8_pass_by_value || !got_data_checksum_version) + { + pg_log(PG_REPORT, + "The %s cluster lacks some required control information:\n", + CLUSTER_NAME(cluster)); + + if (!got_xid) + pg_log(PG_REPORT, " checkpoint next XID\n"); + + if (!got_oid) + pg_log(PG_REPORT, " latest checkpoint next OID\n"); + + if (!got_multi) + pg_log(PG_REPORT, " latest checkpoint next MultiXactId\n"); + + if (!got_mxoff) + pg_log(PG_REPORT, " latest checkpoint next MultiXactOffset\n"); + + if (!got_oldestmulti && + cluster->controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER) + pg_log(PG_REPORT, " latest checkpoint oldest MultiXactId\n"); + + if (!live_check && !got_nextxlogfile) + pg_log(PG_REPORT, " first WAL segment after reset\n"); + + if (!got_tli) + pg_log(PG_REPORT, " latest checkpoint timeline ID\n"); + + if (!got_align) + pg_log(PG_REPORT, " maximum alignment\n"); + + if (!got_blocksz) + pg_log(PG_REPORT, " block size\n"); + + if (!got_largesz) + pg_log(PG_REPORT, " large relation segment size\n"); + + if (!got_walsz) + pg_log(PG_REPORT, " WAL block size\n"); + + if (!got_walseg) + pg_log(PG_REPORT, " WAL segment size\n"); + + if (!got_ident) + pg_log(PG_REPORT, " maximum identifier length\n"); + + if (!got_index) + pg_log(PG_REPORT, " maximum number of indexed columns\n"); + + if (!got_toast) + pg_log(PG_REPORT, " maximum TOAST chunk size\n"); + + if (!got_large_object && + cluster->controldata.ctrl_ver >= LARGE_OBJECT_SIZE_PG_CONTROL_VER) + pg_log(PG_REPORT, " large-object chunk size\n"); + + if (!got_date_is_int) + pg_log(PG_REPORT, " dates/times are integers?\n"); + + if (!got_float8_pass_by_value) + pg_log(PG_REPORT, " float8 argument passing method\n"); + + /* value added in Postgres 9.3 */ + if (!got_data_checksum_version) + pg_log(PG_REPORT, " data checksum version\n"); + + pg_fatal("Cannot continue without required control information, terminating\n"); + } +} + + +/* + * check_control_data() + * + * check to make sure the control data settings are compatible + */ +void +check_control_data(ControlData *oldctrl, + ControlData *newctrl) +{ + if (oldctrl->align == 0 || oldctrl->align != newctrl->align) + pg_fatal("old and new pg_controldata alignments are invalid or do not match\n" + "Likely one cluster is a 32-bit install, the other 64-bit\n"); + + if (oldctrl->blocksz == 0 || oldctrl->blocksz != newctrl->blocksz) + pg_fatal("old and new pg_controldata block sizes are invalid or do not match\n"); + + if (oldctrl->largesz == 0 || oldctrl->largesz != newctrl->largesz) + pg_fatal("old and new pg_controldata maximum relation segment sizes are invalid or do not match\n"); + + if (oldctrl->walsz == 0 || oldctrl->walsz != newctrl->walsz) + pg_fatal("old and new pg_controldata WAL block sizes are invalid or do not match\n"); + + if (oldctrl->walseg == 0 || oldctrl->walseg !=
newctrl->walseg) + pg_fatal("old and new pg_controldata WAL segment sizes are invalid or do not match\n"); + + if (oldctrl->ident == 0 || oldctrl->ident != newctrl->ident) + pg_fatal("old and new pg_controldata maximum identifier lengths are invalid or do not match\n"); + + if (oldctrl->index == 0 || oldctrl->index != newctrl->index) + pg_fatal("old and new pg_controldata maximum indexed columns are invalid or do not match\n"); + + if (oldctrl->toast == 0 || oldctrl->toast != newctrl->toast) + pg_fatal("old and new pg_controldata maximum TOAST chunk sizes are invalid or do not match\n"); + + /* large_object added in 9.5, so it might not exist in the old cluster */ + if (oldctrl->large_object != 0 && + oldctrl->large_object != newctrl->large_object) + pg_fatal("old and new pg_controldata large-object chunk sizes are invalid or do not match\n"); + + if (oldctrl->date_is_int != newctrl->date_is_int) + pg_fatal("old and new pg_controldata date/time storage types do not match\n"); + + /* + * We might eventually allow upgrades from checksum to no-checksum + * clusters. + */ + if (oldctrl->data_checksum_version == 0 && + newctrl->data_checksum_version != 0) + pg_fatal("old cluster does not use data checksums but the new one does\n"); + else if (oldctrl->data_checksum_version != 0 && + newctrl->data_checksum_version == 0) + pg_fatal("old cluster uses data checksums but the new one does not\n"); + else if (oldctrl->data_checksum_version != newctrl->data_checksum_version) + pg_fatal("old and new cluster pg_controldata checksum versions do not match\n"); +} + + +void +disable_old_cluster(void) +{ + char old_path[MAXPGPATH], + new_path[MAXPGPATH]; + + /* rename pg_control so old server cannot be accidentally started */ + prep_status("Adding \".old\" suffix to old global/pg_control"); + + snprintf(old_path, sizeof(old_path), "%s/global/pg_control", old_cluster.pgdata); + snprintf(new_path, sizeof(new_path), "%s/global/pg_control.old", old_cluster.pgdata); + if (pg_mv_file(old_path, new_path) != 0) + pg_fatal("Unable to rename %s to %s.\n", old_path, new_path); + check_ok(); + + pg_log(PG_REPORT, "\n" + "If you want to start the old cluster, you will need to remove\n" + "the \".old\" suffix from %s/global/pg_control.old.\n" + "Because \"link\" mode was used, the old cluster cannot be safely\n" + "started once the new cluster has been started.\n\n", old_cluster.pgdata); +} diff --git a/src/bin/pg_upgrade/dump.c b/src/bin/pg_upgrade/dump.c new file mode 100644 index 00000000000..2c20e847ac0 --- /dev/null +++ b/src/bin/pg_upgrade/dump.c @@ -0,0 +1,139 @@ +/* + * dump.c + * + * dump functions + * + * Copyright (c) 2010-2015, PostgreSQL Global Development Group + * src/bin/pg_upgrade/dump.c + */ + +#include "postgres_fe.h" + +#include "pg_upgrade.h" + +#include <sys/types.h> +#include "catalog/binary_upgrade.h" + + +void +generate_old_dump(void) +{ + int dbnum; + mode_t old_umask; + + prep_status("Creating dump of global objects"); + + /* run new pg_dumpall binary for globals */ + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/pg_dumpall\" %s --globals-only --quote-all-identifiers " + "--binary-upgrade %s -f %s", + new_cluster.bindir, cluster_conn_opts(&old_cluster), + log_opts.verbose ? "--verbose" : "", + GLOBALS_DUMP_FILE); + check_ok(); + + prep_status("Creating dump of database schemas\n"); + + /* + * Set umask for this function, all functions it calls, and all + * subprocesses/threads it creates. We can't use fopen_priv() as Windows + * uses threads and umask is process-global.
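 */

/*
 * A standalone sketch (not part of this patch) of the umask dance used
 * below: tighten the process-wide creation mask so new files are
 * owner-only, create the file, then restore the previous mask.  The file
 * name is hypothetical.
 */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

int
main(void)
{
	mode_t		old_mask = umask(S_IRWXG | S_IRWXO);	/* no group/other bits */
	FILE	   *fp = fopen("example_dump.sql", "w");	/* made-up file name */

	if (fp != NULL)
	{
		fprintf(fp, "-- private scratch file\n");
		fclose(fp);
	}
	umask(old_mask);			/* restore the original mask */
	return 0;
}

/*
 * generate_old_dump() applies the same pattern around the whole dump loop: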
+ */ + old_umask = umask(S_IRWXG | S_IRWXO); + + /* create per-db dump files */ + for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) + { + char sql_file_name[MAXPGPATH], + log_file_name[MAXPGPATH]; + DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum]; + + pg_log(PG_STATUS, "%s", old_db->db_name); + snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid); + snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid); + + parallel_exec_prog(log_file_name, NULL, + "\"%s/pg_dump\" %s --schema-only --quote-all-identifiers " + "--binary-upgrade --format=custom %s --file=\"%s\" \"%s\"", + new_cluster.bindir, cluster_conn_opts(&old_cluster), + log_opts.verbose ? "--verbose" : "", + sql_file_name, old_db->db_name); + } + + /* reap all children */ + while (reap_child(true) == true) + ; + + umask(old_umask); + + end_progress_output(); + check_ok(); +} + + +/* + * It is possible for there to be a mismatch in the need for TOAST tables + * between the old and new servers, e.g. some pre-9.1 tables didn't need + * TOAST tables but will need them in 9.1+. (There are also opposite cases, + * but these are handled by setting binary_upgrade_next_toast_pg_class_oid.) + * + * We can't allow the TOAST table to be created by pg_dump with a + * pg_dump-assigned oid because it might conflict with a later table that + * uses that oid, causing a "file exists" error for pg_class conflicts, and + * a "duplicate oid" error for pg_type conflicts. (TOAST tables need pg_type + * entries.) + * + * Therefore, a backend in binary-upgrade mode will not create a TOAST + * table unless an OID is passed in via pg_upgrade_support functions. + * This function is called after the restore and uses ALTER TABLE to + * auto-create any needed TOAST tables which will not conflict with + * restored oids.
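 */

/*
 * A libpq sketch (not part of this patch) of the catalog query used below
 * to find user tables and materialized views that currently lack a TOAST
 * table; the connection string is a placeholder.
 */
#include <stdio.h>
#include <stdlib.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");	/* hypothetical */
	PGresult   *res;
	int			i;

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "%s", PQerrorMessage(conn));
		exit(1);
	}
	res = PQexec(conn,
				 "SELECT n.nspname, c.relname "
				 "FROM pg_catalog.pg_class c, pg_catalog.pg_namespace n "
				 "WHERE c.relnamespace = n.oid AND "
				 "      n.nspname NOT IN ('pg_catalog', 'information_schema') AND "
				 "      c.relkind IN ('r', 'm') AND "
				 "      c.reltoastrelid = 0");
	for (i = 0; i < PQntuples(res); i++)
		printf("%s.%s\n", PQgetvalue(res, i, 0), PQgetvalue(res, i, 1));
	PQclear(res);
	PQfinish(conn);
	return 0;
}

/*
 * The function below runs this per database and follows up with the ALTER
 * TABLE trick described above: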
+ */ +void +optionally_create_toast_tables(void) +{ + int dbnum; + + prep_status("Creating newly-required TOAST tables"); + + for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++) + { + PGresult *res; + int ntups; + int rowno; + int i_nspname, + i_relname; + DbInfo *active_db = &new_cluster.dbarr.dbs[dbnum]; + PGconn *conn = connectToServer(&new_cluster, active_db->db_name); + + res = executeQueryOrDie(conn, + "SELECT n.nspname, c.relname " + "FROM pg_catalog.pg_class c, " + " pg_catalog.pg_namespace n " + "WHERE c.relnamespace = n.oid AND " + " n.nspname NOT IN ('pg_catalog', 'information_schema') AND " + "c.relkind IN ('r', 'm') AND " + "c.reltoastrelid = 0"); + + ntups = PQntuples(res); + i_nspname = PQfnumber(res, "nspname"); + i_relname = PQfnumber(res, "relname"); + for (rowno = 0; rowno < ntups; rowno++) + { + /* enable auto-oid-numbered TOAST creation if needed */ + PQclear(executeQueryOrDie(conn, "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%d'::pg_catalog.oid);", + OPTIONALLY_CREATE_TOAST_OID)); + + /* dummy command that also triggers check for required TOAST table */ + PQclear(executeQueryOrDie(conn, "ALTER TABLE %s.%s RESET (binary_upgrade_dummy_option);", + quote_identifier(PQgetvalue(res, rowno, i_nspname)), + quote_identifier(PQgetvalue(res, rowno, i_relname)))); + } + + PQclear(res); + + PQfinish(conn); + } + + check_ok(); +} diff --git a/src/bin/pg_upgrade/exec.c b/src/bin/pg_upgrade/exec.c new file mode 100644 index 00000000000..7d319126ed9 --- /dev/null +++ b/src/bin/pg_upgrade/exec.c @@ -0,0 +1,379 @@ +/* + * exec.c + * + * execution functions + * + * Copyright (c) 2010-2015, PostgreSQL Global Development Group + * src/bin/pg_upgrade/exec.c + */ + +#include "postgres_fe.h" + +#include "pg_upgrade.h" + +#include <fcntl.h> +#include <sys/types.h> + +static void check_data_dir(const char *pg_data); +static void check_bin_dir(ClusterInfo *cluster); +static void validate_exec(const char *dir, const char *cmdName); + +#ifdef WIN32 +static int win32_check_directory_write_permissions(void); +#endif + + +/* + * exec_prog() + * Execute an external program with stdout/stderr redirected, and report + * errors + * + * Formats a command from the given argument list, logs it to the log file, + * and attempts to execute that command. If the command executes + * successfully, exec_prog() returns true. + * + * If the command fails, an error message is saved to the specified log_file. + * If throw_error is true, this raises a PG_FATAL error and pg_upgrade + * terminates; otherwise it is just reported as PG_REPORT and exec_prog() + * returns false. + * + * On Windows, the first call to this function must be made from the + * primary thread. + */ +bool +exec_prog(const char *log_file, const char *opt_log_file, + bool throw_error, const char *fmt,...)
+{ + int result = 0; + int written; + +#define MAXCMDLEN (2 * MAXPGPATH) + char cmd[MAXCMDLEN]; + FILE *log; + va_list ap; + +#ifdef WIN32 + static DWORD mainThreadId = 0; + + /* We assume we are called from the primary thread first */ + if (mainThreadId == 0) + mainThreadId = GetCurrentThreadId(); +#endif + + written = 0; + va_start(ap, fmt); + written += vsnprintf(cmd + written, MAXCMDLEN - written, fmt, ap); + va_end(ap); + if (written >= MAXCMDLEN) + pg_fatal("command too long\n"); + written += snprintf(cmd + written, MAXCMDLEN - written, + " >> \"%s\" 2>&1", log_file); + if (written >= MAXCMDLEN) + pg_fatal("command too long\n"); + + pg_log(PG_VERBOSE, "%s\n", cmd); + +#ifdef WIN32 + + /* + * For some reason, Windows issues a file-in-use error if we write data to + * the log file from a non-primary thread just before we create a + * subprocess that also writes to the same log file. One fix is to sleep + * for 100ms. A cleaner fix is to write to the log file _after_ the + * subprocess has completed, so we do this only when writing from a + * non-primary thread. fflush(), running system() twice, and pre-creating + * the file do not seem to help. + */ + if (mainThreadId != GetCurrentThreadId()) + result = system(cmd); +#endif + + log = fopen(log_file, "a"); + +#ifdef WIN32 + { + /* + * "pg_ctl -w stop" might have reported that the server has stopped + * because the postmaster.pid file has been removed, but "pg_ctl -w + * start" might still be in the process of closing and might still be + * holding its stdout and -l log file descriptors open. Therefore, + * try to open the log file a few more times. + */ + int iter; + + for (iter = 0; iter < 4 && log == NULL; iter++) + { + pg_usleep(1000000); /* 1 sec */ + log = fopen(log_file, "a"); + } + } +#endif + + if (log == NULL) + pg_fatal("cannot write to log file %s\n", log_file); + +#ifdef WIN32 + /* Are we printing "command:" before its output? */ + if (mainThreadId == GetCurrentThreadId()) + fprintf(log, "\n\n"); +#endif + fprintf(log, "command: %s\n", cmd); +#ifdef WIN32 + /* Are we printing "command:" after its output? */ + if (mainThreadId != GetCurrentThreadId()) + fprintf(log, "\n\n"); +#endif + + /* + * On Windows, we must close the log file at this point so the file is not + * open while the command is running, or we get a share violation. + */ + fclose(log); + +#ifdef WIN32 + /* see comment above */ + if (mainThreadId == GetCurrentThreadId()) +#endif + result = system(cmd); + + if (result != 0) + { + /* we might be on a progress status line, so go to the next line */ + report_status(PG_REPORT, "\n*failure*"); + fflush(stdout); + + pg_log(PG_VERBOSE, "There were problems executing \"%s\"\n", cmd); + if (opt_log_file) + pg_log(throw_error ? PG_FATAL : PG_REPORT, + "Consult the last few lines of \"%s\" or \"%s\" for\n" + "the probable cause of the failure.\n", + log_file, opt_log_file); + else + pg_log(throw_error ? PG_FATAL : PG_REPORT, + "Consult the last few lines of \"%s\" for\n" + "the probable cause of the failure.\n", + log_file); + } + +#ifndef WIN32 + + /* + * We can't do this on Windows because it will keep the "pg_ctl start" + * output filename open until the server stops, so we do the \n\n above on + * that platform. We use a unique filename for "pg_ctl start" that is + * never reused while the server is running, so it works fine. We could + * log these commands to a third file, but that just adds complexity.
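 */

/*
 * A standalone sketch (not part of this patch) of exec_prog()'s core idea:
 * format a command from varargs, append stdout/stderr redirection into a
 * log file, and run it with system().  The command, log name, and CMDLEN
 * limit are made up.
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

#define CMDLEN 1024

static int
run_logged(const char *log_file, const char *fmt,...)
{
	char		cmd[CMDLEN];
	int			len;
	va_list		ap;

	va_start(ap, fmt);
	len = vsnprintf(cmd, sizeof(cmd), fmt, ap);
	va_end(ap);
	if (len < 0 || len >= CMDLEN)
		return -1;				/* command too long */
	len += snprintf(cmd + len, sizeof(cmd) - len, " >> \"%s\" 2>&1", log_file);
	if (len >= CMDLEN)
		return -1;
	return system(cmd);			/* the shell performs the redirection */
}

int
main(void)
{
	return run_logged("example.log", "echo %d", 42) == 0 ? 0 : 1;
}

/*
 * Back in exec_prog(), finish the log entry with a blank separator: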
+ */ + if ((log = fopen(log_file, "a")) == NULL) + pg_fatal("cannot write to log file %s\n", log_file); + fprintf(log, "\n\n"); + fclose(log); +#endif + + return result == 0; +} + + +/* + * pid_lock_file_exists() + * + * Checks whether the postmaster.pid file exists. + */ +bool +pid_lock_file_exists(const char *datadir) +{ + char path[MAXPGPATH]; + int fd; + + snprintf(path, sizeof(path), "%s/postmaster.pid", datadir); + + if ((fd = open(path, O_RDONLY, 0)) < 0) + { + /* ENOTDIR means we will throw a more useful error later */ + if (errno != ENOENT && errno != ENOTDIR) + pg_fatal("could not open file \"%s\" for reading: %s\n", + path, getErrorText(errno)); + + return false; + } + + close(fd); + return true; +} + + +/* + * verify_directories() + * + * does all the hectic work of verifying directories and executables + * of old and new server. + * + * NOTE: May update the values of all parameters + */ +void +verify_directories(void) +{ +#ifndef WIN32 + if (access(".", R_OK | W_OK | X_OK) != 0) +#else + if (win32_check_directory_write_permissions() != 0) +#endif + pg_fatal("You must have read and write access in the current directory.\n"); + + check_bin_dir(&old_cluster); + check_data_dir(old_cluster.pgdata); + check_bin_dir(&new_cluster); + check_data_dir(new_cluster.pgdata); +} + + +#ifdef WIN32 +/* + * win32_check_directory_write_permissions() + * + * access() on WIN32 can't check directory permissions, so we have to + * optionally create, then delete a file to check. + * http://msdn.microsoft.com/en-us/library/1w06ktdy%28v=vs.80%29.aspx + */ +static int +win32_check_directory_write_permissions(void) +{ + int fd; + + /* + * We open a file we would normally create anyway. We do this even in + * 'check' mode, which isn't ideal, but this is the best we can do. + */ + if ((fd = open(GLOBALS_DUMP_FILE, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR)) < 0) + return -1; + close(fd); + + return unlink(GLOBALS_DUMP_FILE); +} +#endif + + +/* + * check_data_dir() + * + * This function validates the given cluster directory - we search for a + * small set of subdirectories that we expect to find in a valid $PGDATA + * directory. If any of the subdirectories are missing (or secured against + * us) we display an error message and exit() + * + */ +static void +check_data_dir(const char *pg_data) +{ + char subDirName[MAXPGPATH]; + int subdirnum; + + /* start check with top-most directory */ + const char *requiredSubdirs[] = {"", "base", "global", "pg_clog", + "pg_multixact", "pg_subtrans", "pg_tblspc", "pg_twophase", + "pg_xlog"}; + + for (subdirnum = 0; + subdirnum < sizeof(requiredSubdirs) / sizeof(requiredSubdirs[0]); + ++subdirnum) + { + struct stat statBuf; + + snprintf(subDirName, sizeof(subDirName), "%s%s%s", pg_data, + /* Win32 can't stat() a directory with a trailing slash. */ + *requiredSubdirs[subdirnum] ? "/" : "", + requiredSubdirs[subdirnum]); + + if (stat(subDirName, &statBuf) != 0) + report_status(PG_FATAL, "check for \"%s\" failed: %s\n", + subDirName, getErrorText(errno)); + else if (!S_ISDIR(statBuf.st_mode)) + report_status(PG_FATAL, "%s is not a directory\n", + subDirName); + } +} + + +/* + * check_bin_dir() + * + * This function searches for the executables that we expect to find + * in the binaries directory. If we find that a required executable + * is missing (or secured against us), we display an error message and + * exit(). 
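 */

/*
 * A standalone sketch (not part of this patch) of the stat()-based checks
 * used by check_data_dir() and check_bin_dir(): require that each path
 * exists and is a directory.  The $PGDATA path here is made up.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>

static void
require_dir(const char *path)
{
	struct stat st;

	if (stat(path, &st) != 0)
	{
		fprintf(stderr, "check for \"%s\" failed: %s\n", path, strerror(errno));
		exit(1);
	}
	if (!S_ISDIR(st.st_mode))
	{
		fprintf(stderr, "%s is not a directory\n", path);
		exit(1);
	}
}

int
main(void)
{
	const char *subdirs[] = {"base", "global", "pg_tblspc"};
	int			i;

	for (i = 0; i < (int) (sizeof(subdirs) / sizeof(subdirs[0])); i++)
	{
		char		path[1024];

		/* "/tmp/pgdata" stands in for the real data directory */
		snprintf(path, sizeof(path), "/tmp/pgdata/%s", subdirs[i]);
		require_dir(path);
	}
	return 0;
}

/*
 * check_bin_dir() applies the same test to the bin directory and then
 * validates the individual executables: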
+ */ +static void +check_bin_dir(ClusterInfo *cluster) +{ + struct stat statBuf; + + /* check bindir */ + if (stat(cluster->bindir, &statBuf) != 0) + report_status(PG_FATAL, "check for \"%s\" failed: %s\n", + cluster->bindir, getErrorText(errno)); + else if (!S_ISDIR(statBuf.st_mode)) + report_status(PG_FATAL, "%s is not a directory\n", + cluster->bindir); + + validate_exec(cluster->bindir, "postgres"); + validate_exec(cluster->bindir, "pg_ctl"); + validate_exec(cluster->bindir, "pg_resetxlog"); + if (cluster == &new_cluster) + { + /* these are only needed in the new cluster */ + validate_exec(cluster->bindir, "psql"); + validate_exec(cluster->bindir, "pg_dump"); + validate_exec(cluster->bindir, "pg_dumpall"); + } +} + + +/* + * validate_exec() + * + * validate "path" as an executable file + */ +static void +validate_exec(const char *dir, const char *cmdName) +{ + char path[MAXPGPATH]; + struct stat buf; + + snprintf(path, sizeof(path), "%s/%s", dir, cmdName); + +#ifdef WIN32 + /* Windows requires a .exe suffix for stat() */ + if (strlen(path) <= strlen(EXE_EXT) || + pg_strcasecmp(path + strlen(path) - strlen(EXE_EXT), EXE_EXT) != 0) + strlcat(path, EXE_EXT, sizeof(path)); +#endif + + /* + * Ensure that the file exists and is a regular file. + */ + if (stat(path, &buf) < 0) + pg_fatal("check for \"%s\" failed: %s\n", + path, getErrorText(errno)); + else if (!S_ISREG(buf.st_mode)) + pg_fatal("check for \"%s\" failed: not an executable file\n", + path); + + /* + * Ensure that the file is both executable and readable (required for + * dynamic loading). + */ +#ifndef WIN32 + if (access(path, R_OK) != 0) +#else + if ((buf.st_mode & S_IRUSR) == 0) +#endif + pg_fatal("check for \"%s\" failed: cannot read file (permission denied)\n", + path); + +#ifndef WIN32 + if (access(path, X_OK) != 0) +#else + if ((buf.st_mode & S_IXUSR) == 0) +#endif + pg_fatal("check for \"%s\" failed: cannot execute (permission denied)\n", + path); +} diff --git a/src/bin/pg_upgrade/file.c b/src/bin/pg_upgrade/file.c new file mode 100644 index 00000000000..79d9390216e --- /dev/null +++ b/src/bin/pg_upgrade/file.c @@ -0,0 +1,250 @@ +/* + * file.c + * + * file system operations + * + * Copyright (c) 2010-2015, PostgreSQL Global Development Group + * src/bin/pg_upgrade/file.c + */ + +#include "postgres_fe.h" + +#include "pg_upgrade.h" + +#include <fcntl.h> + + + +#ifndef WIN32 +static int copy_file(const char *fromfile, const char *tofile, bool force); +#else +static int win32_pghardlink(const char *src, const char *dst); +#endif + + +/* + * copyAndUpdateFile() + * + * Copies a relation file from src to dst. If pageConverter is non-NULL, this function + * uses that pageConverter to do a page-by-page conversion. + */ +const char * +copyAndUpdateFile(pageCnvCtx *pageConverter, + const char *src, const char *dst, bool force) +{ + if (pageConverter == NULL) + { + if (pg_copy_file(src, dst, force) == -1) + return getErrorText(errno); + else + return NULL; + } + else + { + /* + * We have a pageConverter object - that implies that the + * PageLayoutVersion differs between the two clusters so we have to + * perform a page-by-page conversion. + * + * If the pageConverter can convert the entire file at once, invoke + * that plugin function, otherwise, read each page in the relation + * file and call the convertPage plugin function.
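 */

/*
 * A standalone sketch (not part of this patch) of the page-at-a-time copy
 * loop used below: read fixed-size blocks, optionally convert them, write
 * them out, and treat a short final read as a partial page.  BLOCK_SIZE
 * and the file names are stand-ins.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define BLOCK_SIZE 8192

static const char *
copy_pages(const char *src, const char *dst)
{
	char		buf[BLOCK_SIZE];
	ssize_t		nread;
	int			src_fd,
				dst_fd;

	if ((src_fd = open(src, O_RDONLY)) < 0)
		return "could not open source file";
	if ((dst_fd = open(dst, O_RDWR | O_CREAT | O_EXCL, 0600)) < 0)
	{
		close(src_fd);
		return "could not create destination file";
	}
	while ((nread = read(src_fd, buf, BLOCK_SIZE)) == BLOCK_SIZE)
	{
		/* a page-converter plugin would rewrite buf in place here */
		if (write(dst_fd, buf, BLOCK_SIZE) != BLOCK_SIZE)
		{
			close(src_fd);
			close(dst_fd);
			return "could not write new page to destination";
		}
	}
	close(src_fd);
	close(dst_fd);
	return nread == 0 ? NULL : "found partial page in source file";
}

int
main(void)
{
	const char *msg = copy_pages("relfile.src", "relfile.dst");	/* made up */

	if (msg)
		fprintf(stderr, "%s\n", msg);
	return msg ? 1 : 0;
}

/*
 * The patch's version adds the optional PAGE_CONVERSION hooks: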
+ */ + +#ifdef PAGE_CONVERSION + if (pageConverter->convertFile) + return pageConverter->convertFile(pageConverter->pluginData, + dst, src); + else +#endif + { + int src_fd; + int dstfd; + char buf[BLCKSZ]; + ssize_t bytesRead; + const char *msg = NULL; + + if ((src_fd = open(src, O_RDONLY, 0)) < 0) + return "could not open source file"; + + if ((dstfd = open(dst, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR)) < 0) + { + close(src_fd); + return "could not create destination file"; + } + + while ((bytesRead = read(src_fd, buf, BLCKSZ)) == BLCKSZ) + { +#ifdef PAGE_CONVERSION + if ((msg = pageConverter->convertPage(pageConverter->pluginData, buf, buf)) != NULL) + break; +#endif + if (write(dstfd, buf, BLCKSZ) != BLCKSZ) + { + msg = "could not write new page to destination"; + break; + } + } + + close(src_fd); + close(dstfd); + + if (msg) + return msg; + else if (bytesRead != 0) + return "found partial page in source file"; + else + return NULL; + } + } +} + + +/* + * linkAndUpdateFile() + * + * Creates a hard link between the given relation files. We use + * this function to perform a true in-place update. If the on-disk + * format of the new cluster is bit-for-bit compatible with the on-disk + * format of the old cluster, we can simply link each relation + * instead of copying the data from the old cluster to the new cluster. + */ +const char * +linkAndUpdateFile(pageCnvCtx *pageConverter, + const char *src, const char *dst) +{ + if (pageConverter != NULL) + return "Cannot in-place update this cluster, page-by-page conversion is required"; + + if (pg_link_file(src, dst) == -1) + return getErrorText(errno); + else + return NULL; +} + + +#ifndef WIN32 +static int +copy_file(const char *srcfile, const char *dstfile, bool force) +{ +#define COPY_BUF_SIZE (50 * BLCKSZ) + + int src_fd; + int dest_fd; + char *buffer; + int ret = 0; + int save_errno = 0; + + if ((srcfile == NULL) || (dstfile == NULL)) + { + errno = EINVAL; + return -1; + } + + if ((src_fd = open(srcfile, O_RDONLY, 0)) < 0) + return -1; + + if ((dest_fd = open(dstfile, O_RDWR | O_CREAT | (force ? 
0 : O_EXCL), S_IRUSR | S_IWUSR)) < 0) + { + save_errno = errno; + + if (src_fd != 0) + close(src_fd); + + errno = save_errno; + return -1; + } + + buffer = (char *) pg_malloc(COPY_BUF_SIZE); + + /* perform the data copy, i.e. read from source, write to destination */ + while (true) + { + ssize_t nbytes = read(src_fd, buffer, COPY_BUF_SIZE); + + if (nbytes < 0) + { + save_errno = errno; + ret = -1; + break; + } + + if (nbytes == 0) + break; + + errno = 0; + + if (write(dest_fd, buffer, nbytes) != nbytes) + { + /* if write didn't set errno, assume problem is no disk space */ + if (errno == 0) + errno = ENOSPC; + save_errno = errno; + ret = -1; + break; + } + } + + pg_free(buffer); + + if (src_fd != 0) + close(src_fd); + + if (dest_fd != 0) + close(dest_fd); + + if (save_errno != 0) + errno = save_errno; + + return ret; +} +#endif + + +void +check_hard_link(void) +{ + char existing_file[MAXPGPATH]; + char new_link_file[MAXPGPATH]; + + snprintf(existing_file, sizeof(existing_file), "%s/PG_VERSION", old_cluster.pgdata); + snprintf(new_link_file, sizeof(new_link_file), "%s/PG_VERSION.linktest", new_cluster.pgdata); + unlink(new_link_file); /* might fail */ + + if (pg_link_file(existing_file, new_link_file) == -1) + { + pg_fatal("Could not create hard link between old and new data directories: %s\n" + "In link mode the old and new data directories must be on the same file system volume.\n", + getErrorText(errno)); + } + unlink(new_link_file); +} + +#ifdef WIN32 +static int +win32_pghardlink(const char *src, const char *dst) +{ + /* + * CreateHardLinkA returns zero for failure + * http://msdn.microsoft.com/en-us/library/aa363860(VS.85).aspx + */ + if (CreateHardLinkA(dst, src, NULL) == 0) + return -1; + else + return 0; +} +#endif + + +/* fopen() file with no group/other permissions */ +FILE * +fopen_priv(const char *path, const char *mode) +{ + mode_t old_umask = umask(S_IRWXG | S_IRWXO); + FILE *fp; + + fp = fopen(path, mode); + umask(old_umask); + + return fp; +} diff --git a/src/bin/pg_upgrade/function.c b/src/bin/pg_upgrade/function.c new file mode 100644 index 00000000000..04492a5cee4 --- /dev/null +++ b/src/bin/pg_upgrade/function.c @@ -0,0 +1,240 @@ +/* + * function.c + * + * server-side function support + * + * Copyright (c) 2010-2015, PostgreSQL Global Development Group + * src/bin/pg_upgrade/function.c + */ + +#include "postgres_fe.h" + +#include "pg_upgrade.h" + +#include "access/transam.h" + + +/* + * get_loadable_libraries() + * + * Fetch the names of all old libraries containing C-language functions. + * We will later check that they all exist in the new installation. + */ +void +get_loadable_libraries(void) +{ + PGresult **ress; + int totaltups; + int dbnum; + bool found_public_plpython_handler = false; + + ress = (PGresult **) pg_malloc(old_cluster.dbarr.ndbs * sizeof(PGresult *)); + totaltups = 0; + + /* Fetch all library names, removing duplicates within each DB */ + for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) + { + DbInfo *active_db = &old_cluster.dbarr.dbs[dbnum]; + PGconn *conn = connectToServer(&old_cluster, active_db->db_name); + + /* + * Fetch all libraries referenced in this DB. We can't exclude the + * "pg_catalog" schema because, while such functions are not + * explicitly dumped by pg_dump, they do reference implicit objects + * that pg_dump does dump, e.g. CREATE LANGUAGE plperl.
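 */

/*
 * A standalone sketch (not part of this patch) of the cross-database
 * de-duplication performed later in this function: linearly scan each new
 * name against the names kept so far.  The input list is made up.
 */
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *fetched[] = {"$libdir/plperl", "$libdir/plperl",
	"$libdir/pg_trgm"};
	const char *kept[3];
	int			nkept = 0;
	int			i,
				n;

	for (i = 0; i < 3; i++)
	{
		int			dup = 0;

		for (n = 0; n < nkept; n++)
			if (strcmp(fetched[i], kept[n]) == 0)
				dup = 1;
		if (!dup)
			kept[nkept++] = fetched[i];
	}
	for (n = 0; n < nkept; n++)
		printf("%s\n", kept[n]);	/* plperl and pg_trgm, once each */
	return 0;
}

/*
 * First, though, fetch the distinct library names from each database: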
+ */ + ress[dbnum] = executeQueryOrDie(conn, + "SELECT DISTINCT probin " + "FROM pg_catalog.pg_proc " + "WHERE prolang = 13 /* C */ AND " + "probin IS NOT NULL AND " + "oid >= %u;", + FirstNormalObjectId); + totaltups += PQntuples(ress[dbnum]); + + /* + * Systems that install plpython before 8.1 have + * plpython_call_handler() defined in the "public" schema, causing + * pg_dump to dump it. However that function still references + * "plpython" (no "2"), so it throws an error on restore. This code + * checks for the problem function, reports affected databases to the + * user and explains how to remove them. 8.1 git commit: + * e0dedd0559f005d60c69c9772163e69c204bac69 + * http://archives.postgresql.org/pgsql-hackers/2012-03/msg01101.php + * http://archives.postgresql.org/pgsql-bugs/2012-05/msg00206.php + */ + if (GET_MAJOR_VERSION(old_cluster.major_version) < 901) + { + PGresult *res; + + res = executeQueryOrDie(conn, + "SELECT 1 " + "FROM pg_catalog.pg_proc JOIN pg_namespace " + " ON pronamespace = pg_namespace.oid " + "WHERE proname = 'plpython_call_handler' AND " + "nspname = 'public' AND " + "prolang = 13 /* C */ AND " + "probin = '$libdir/plpython' AND " + "pg_proc.oid >= %u;", + FirstNormalObjectId); + if (PQntuples(res) > 0) + { + if (!found_public_plpython_handler) + { + pg_log(PG_WARNING, + "\nThe old cluster has a \"plpython_call_handler\" function defined\n" + "in the \"public\" schema which is a duplicate of the one defined\n" + "in the \"pg_catalog\" schema. You can confirm this by executing\n" + "in psql:\n" + "\n" + " \\df *.plpython_call_handler\n" + "\n" + "The \"public\" schema version of this function was created by a\n" + "pre-8.1 install of plpython, and must be removed for pg_upgrade\n" + "to complete because it references a now-obsolete \"plpython\"\n" + "shared object file. You can remove the \"public\" schema version\n" + "of this function by running the following command:\n" + "\n" + " DROP FUNCTION public.plpython_call_handler()\n" + "\n" + "in each affected database:\n" + "\n"); + } + pg_log(PG_WARNING, " %s\n", active_db->db_name); + found_public_plpython_handler = true; + } + PQclear(res); + } + + PQfinish(conn); + } + + if (found_public_plpython_handler) + pg_fatal("Remove the problem functions from the old cluster to continue.\n"); + + /* Allocate what's certainly enough space */ + os_info.libraries = (char **) pg_malloc(totaltups * sizeof(char *)); + + /* + * Now remove duplicates across DBs. This is pretty inefficient code, but + * there probably aren't enough entries to matter. + */ + totaltups = 0; + + for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) + { + PGresult *res = ress[dbnum]; + int ntups; + int rowno; + + ntups = PQntuples(res); + for (rowno = 0; rowno < ntups; rowno++) + { + char *lib = PQgetvalue(res, rowno, 0); + bool dup = false; + int n; + + for (n = 0; n < totaltups; n++) + { + if (strcmp(lib, os_info.libraries[n]) == 0) + { + dup = true; + break; + } + } + if (!dup) + os_info.libraries[totaltups++] = pg_strdup(lib); + } + + PQclear(res); + } + + os_info.num_libraries = totaltups; + + pg_free(ress); +} + + +/* + * check_loadable_libraries() + * + * Check that the new cluster contains all required libraries. + * We do this by actually trying to LOAD each one, thereby testing + * compatibility as well as presence. 
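 */

/*
 * A libpq sketch (not part of this patch) of the LOAD probe described
 * above: escape the library name into a LOAD command and check whether the
 * server could load it.  The connection string and library name are
 * hypothetical.
 */
#include <stdio.h>
#include <string.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=template1");	/* hypothetical */
	const char *lib = "$libdir/pg_trgm";
	char		cmd[1024] = "LOAD '";
	PGresult   *res;
	int			ok;

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "%s", PQerrorMessage(conn));
		return 1;
	}
	PQescapeStringConn(conn, cmd + strlen(cmd), lib, strlen(lib), NULL);
	strcat(cmd, "'");
	res = PQexec(conn, cmd);
	ok = (PQresultStatus(res) == PGRES_COMMAND_OK);
	if (!ok)
		fprintf(stderr, "could not load \"%s\": %s", lib, PQerrorMessage(conn));
	PQclear(res);
	PQfinish(conn);
	return ok ? 0 : 1;
}

/*
 * The function below loops the same probe over every library name gathered
 * earlier: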
+ */ +void +check_loadable_libraries(void) +{ + PGconn *conn = connectToServer(&new_cluster, "template1"); + int libnum; + FILE *script = NULL; + bool found = false; + char output_path[MAXPGPATH]; + + prep_status("Checking for presence of required libraries"); + + snprintf(output_path, sizeof(output_path), "loadable_libraries.txt"); + + for (libnum = 0; libnum < os_info.num_libraries; libnum++) + { + char *lib = os_info.libraries[libnum]; + int llen = strlen(lib); + char cmd[7 + 2 * MAXPGPATH + 1]; + PGresult *res; + + /* + * In Postgres 9.0, Python 3 support was added, and to do that, a + * plpython2u language was created with library name plpython2.so as a + * symbolic link to plpython.so. In Postgres 9.1, only the + * plpython2.so library was created, with both plpythonu and plpython2u + * pointing to it. For this reason, any reference to library name + * "plpython" in an old PG <= 9.1 cluster must look for "plpython2" in + * the new cluster. + * + * For this case, we could check pg_pltemplate, but that only works + * for languages, and does not help with function shared objects, so + * we just do a general fix. + */ + if (GET_MAJOR_VERSION(old_cluster.major_version) < 901 && + strcmp(lib, "$libdir/plpython") == 0) + { + lib = "$libdir/plpython2"; + llen = strlen(lib); + } + + strcpy(cmd, "LOAD '"); + PQescapeStringConn(conn, cmd + strlen(cmd), lib, llen, NULL); + strcat(cmd, "'"); + + res = PQexec(conn, cmd); + + if (PQresultStatus(res) != PGRES_COMMAND_OK) + { + found = true; + + if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) + pg_fatal("Could not open file \"%s\": %s\n", + output_path, getErrorText(errno)); + fprintf(script, "Could not load library \"%s\"\n%s\n", + lib, + PQerrorMessage(conn)); + } + + PQclear(res); + } + + PQfinish(conn); + + if (found) + { + fclose(script); + pg_log(PG_REPORT, "fatal\n"); + pg_fatal("Your installation references loadable libraries that are missing from the\n" + "new installation. You can add these libraries to the new installation,\n" + "or remove the functions using them from the old installation. A list of\n" + "problem libraries is in the file:\n" + " %s\n\n", output_path); + } + else + check_ok(); +} diff --git a/src/bin/pg_upgrade/info.c b/src/bin/pg_upgrade/info.c new file mode 100644 index 00000000000..c0a56012090 --- /dev/null +++ b/src/bin/pg_upgrade/info.c @@ -0,0 +1,535 @@ +/* + * info.c + * + * information support functions + * + * Copyright (c) 2010-2015, PostgreSQL Global Development Group + * src/bin/pg_upgrade/info.c + */ + +#include "postgres_fe.h" + +#include "pg_upgrade.h" + +#include "access/transam.h" + + +static void create_rel_filename_map(const char *old_data, const char *new_data, + const DbInfo *old_db, const DbInfo *new_db, + const RelInfo *old_rel, const RelInfo *new_rel, + FileNameMap *map); +static void free_db_and_rel_infos(DbInfoArr *db_arr); +static void get_db_infos(ClusterInfo *cluster); +static void get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo); +static void free_rel_infos(RelInfoArr *rel_arr); +static void print_db_infos(DbInfoArr *dbinfo); +static void print_rel_infos(RelInfoArr *rel_arr); + + +/* + * gen_db_file_maps() + * + * generates database mappings for "old_db" and "new_db". Returns a malloc'ed + * array of mappings. nmaps is a return parameter which refers to the number + * of mappings.
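 */

/*
 * A standalone sketch (not part of this patch) of the matching strategy
 * used below: both relation arrays are sorted by OID, so walk the new
 * array while advancing through the old one, skipping new-only entries
 * (the freshly created TOAST tables).  The OIDs are made up.
 */
#include <stdio.h>

typedef struct
{
	unsigned int oid;
	int			is_new_only_toast;	/* stand-in for the pg_toast test */
} Rel;

int
main(void)
{
	Rel			old_rels[] = {{16384, 0}, {16390, 0}};
	Rel			new_rels[] = {{16384, 0}, {16387, 1}, {16390, 0}};
	int			old_i = 0,
				new_i;

	for (new_i = 0; new_i < 3; new_i++)
	{
		if (old_i < 2 && old_rels[old_i].oid == new_rels[new_i].oid)
		{
			printf("map %u -> %u\n", old_rels[old_i].oid, new_rels[new_i].oid);
			old_i++;
		}
		else if (new_rels[new_i].is_new_only_toast)
			continue;			/* the new cluster grew a TOAST table */
		else
			return 1;			/* a mismatch would be a fatal error */
	}
	return (old_i == 2) ? 0 : 1;	/* the old array must be exhausted too */
}

/*
 * gen_db_file_maps() implements this walk with full error reporting: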
+ */ +FileNameMap * +gen_db_file_maps(DbInfo *old_db, DbInfo *new_db, + int *nmaps, const char *old_pgdata, const char *new_pgdata) +{ + FileNameMap *maps; + int old_relnum, new_relnum; + int num_maps = 0; + + maps = (FileNameMap *) pg_malloc(sizeof(FileNameMap) * + old_db->rel_arr.nrels); + + /* + * The old database shouldn't have more relations than the new one. + * We force the new cluster to have a TOAST table if the old table + * had one. + */ + if (old_db->rel_arr.nrels > new_db->rel_arr.nrels) + pg_fatal("old and new databases \"%s\" have a mismatched number of relations\n", + old_db->db_name); + + /* Drive the loop using new_relnum, which might be higher. */ + for (old_relnum = new_relnum = 0; new_relnum < new_db->rel_arr.nrels; + new_relnum++) + { + RelInfo *old_rel; + RelInfo *new_rel = &new_db->rel_arr.rels[new_relnum]; + + /* + * It is possible that the new cluster has a TOAST table for a table + * that didn't need one in the old cluster, e.g. 9.0 to 9.1 changed the + * NUMERIC length computation. Therefore, if we have a TOAST table + * in the new cluster that doesn't match, skip over it and continue + * processing. It is possible this TOAST table used an OID that was + * reserved in the old cluster, but we have no way of testing that, + * and we would have already gotten an error at the new cluster schema + * creation stage. Fortunately, since we only restore the OID counter + * after schema restore, and restore in OID order via pg_dump, a + * conflict would only happen if the new TOAST table had a very low + * OID. However, TOAST tables created long after initial table + * creation can have any OID, particularly after OID wraparound. + */ + if (old_relnum == old_db->rel_arr.nrels) + { + if (strcmp(new_rel->nspname, "pg_toast") == 0) + continue; + else + pg_fatal("Extra non-TOAST relation found in database \"%s\": new OID %d\n", + old_db->db_name, new_rel->reloid); + } + + old_rel = &old_db->rel_arr.rels[old_relnum]; + + if (old_rel->reloid != new_rel->reloid) + { + if (strcmp(new_rel->nspname, "pg_toast") == 0) + continue; + else + pg_fatal("Mismatch of relation OID in database \"%s\": old OID %d, new OID %d\n", + old_db->db_name, old_rel->reloid, new_rel->reloid); + } + + /* + * TOAST table names initially match the heap pg_class oid. In + * pre-8.4, TOAST table names change during CLUSTER; in pre-9.0, TOAST + * table names change during ALTER TABLE ALTER COLUMN SET TYPE. In >= + * 9.0, TOAST relation names always use heap table oids, hence we + * cannot check relation names when upgrading from pre-9.0. Clusters + * upgraded to 9.0 will get matching TOAST names. If index names don't + * match primary key constraint names, this will fail because pg_dump + * dumps constraint names and pg_upgrade checks index names. + */ + if (strcmp(old_rel->nspname, new_rel->nspname) != 0 || + ((GET_MAJOR_VERSION(old_cluster.major_version) >= 900 || + strcmp(old_rel->nspname, "pg_toast") != 0) && + strcmp(old_rel->relname, new_rel->relname) != 0)) + pg_fatal("Mismatch of relation names in database \"%s\": " + "old name \"%s.%s\", new name \"%s.%s\"\n", + old_db->db_name, old_rel->nspname, old_rel->relname, + new_rel->nspname, new_rel->relname); + + create_rel_filename_map(old_pgdata, new_pgdata, old_db, new_db, + old_rel, new_rel, maps + num_maps); + num_maps++; + old_relnum++; + } + + /* Did we fail to exhaust the old array? 
*/ + if (old_relnum != old_db->rel_arr.nrels) + pg_fatal("old and new databases \"%s\" have a mismatched number of relations\n", + old_db->db_name); + + *nmaps = num_maps; + return maps; +} + + +/* + * create_rel_filename_map() + * + * fills a file node map structure and returns it in "map". + */ +static void +create_rel_filename_map(const char *old_data, const char *new_data, + const DbInfo *old_db, const DbInfo *new_db, + const RelInfo *old_rel, const RelInfo *new_rel, + FileNameMap *map) +{ + if (strlen(old_rel->tablespace) == 0) + { + /* + * relation belongs to the default tablespace, hence relfiles should + * exist in the data directories. + */ + map->old_tablespace = old_data; + map->new_tablespace = new_data; + map->old_tablespace_suffix = "/base"; + map->new_tablespace_suffix = "/base"; + } + else + { + /* relation belongs to a tablespace, so use the tablespace location */ + map->old_tablespace = old_rel->tablespace; + map->new_tablespace = new_rel->tablespace; + map->old_tablespace_suffix = old_cluster.tablespace_suffix; + map->new_tablespace_suffix = new_cluster.tablespace_suffix; + } + + map->old_db_oid = old_db->db_oid; + map->new_db_oid = new_db->db_oid; + + /* + * old_relfilenode might differ from pg_class.oid (and hence + * new_relfilenode) because of CLUSTER, REINDEX, or VACUUM FULL. + */ + map->old_relfilenode = old_rel->relfilenode; + + /* new_relfilenode will match old and new pg_class.oid */ + map->new_relfilenode = new_rel->relfilenode; + + /* used only for logging and error reporting, old/new are identical */ + map->nspname = old_rel->nspname; + map->relname = old_rel->relname; +} + + +void +print_maps(FileNameMap *maps, int n_maps, const char *db_name) +{ + if (log_opts.verbose) + { + int mapnum; + + pg_log(PG_VERBOSE, "mappings for database \"%s\":\n", db_name); + + for (mapnum = 0; mapnum < n_maps; mapnum++) + pg_log(PG_VERBOSE, "%s.%s: %u to %u\n", + maps[mapnum].nspname, maps[mapnum].relname, + maps[mapnum].old_relfilenode, + maps[mapnum].new_relfilenode); + + pg_log(PG_VERBOSE, "\n\n"); + } +} + + +/* + * get_db_and_rel_infos() + * + * higher level routine to generate dbinfos for the database running + * on the given "port". Assumes that server is already running. + */ +void +get_db_and_rel_infos(ClusterInfo *cluster) +{ + int dbnum; + + if (cluster->dbarr.dbs != NULL) + free_db_and_rel_infos(&cluster->dbarr); + + get_db_infos(cluster); + + for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) + get_rel_infos(cluster, &cluster->dbarr.dbs[dbnum]); + + pg_log(PG_VERBOSE, "\n%s databases:\n", CLUSTER_NAME(cluster)); + if (log_opts.verbose) + print_db_infos(&cluster->dbarr); +} + + +/* + * get_db_infos() + * + * Scans pg_database system catalog and populates all user + * databases. + */ +static void +get_db_infos(ClusterInfo *cluster) +{ + PGconn *conn = connectToServer(cluster, "template1"); + PGresult *res; + int ntups; + int tupnum; + DbInfo *dbinfos; + int i_datname, + i_oid, + i_encoding, + i_datcollate, + i_datctype, + i_spclocation; + char query[QUERY_ALLOC]; + + snprintf(query, sizeof(query), + "SELECT d.oid, d.datname, d.encoding, d.datcollate, d.datctype, " + "%s AS spclocation " + "FROM pg_catalog.pg_database d " + " LEFT OUTER JOIN pg_catalog.pg_tablespace t " + " ON d.dattablespace = t.oid " + "WHERE d.datallowconn = true " + /* we don't preserve pg_database.oid so we sort by name */ + "ORDER BY 2", + /* 9.2 removed the spclocation column */ + (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
+ "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid)"); + + res = executeQueryOrDie(conn, "%s", query); + + i_oid = PQfnumber(res, "oid"); + i_datname = PQfnumber(res, "datname"); + i_encoding = PQfnumber(res, "encoding"); + i_datcollate = PQfnumber(res, "datcollate"); + i_datctype = PQfnumber(res, "datctype"); + i_spclocation = PQfnumber(res, "spclocation"); + + ntups = PQntuples(res); + dbinfos = (DbInfo *) pg_malloc(sizeof(DbInfo) * ntups); + + for (tupnum = 0; tupnum < ntups; tupnum++) + { + dbinfos[tupnum].db_oid = atooid(PQgetvalue(res, tupnum, i_oid)); + dbinfos[tupnum].db_name = pg_strdup(PQgetvalue(res, tupnum, i_datname)); + dbinfos[tupnum].db_encoding = atoi(PQgetvalue(res, tupnum, i_encoding)); + dbinfos[tupnum].db_collate = pg_strdup(PQgetvalue(res, tupnum, i_datcollate)); + dbinfos[tupnum].db_ctype = pg_strdup(PQgetvalue(res, tupnum, i_datctype)); + snprintf(dbinfos[tupnum].db_tablespace, sizeof(dbinfos[tupnum].db_tablespace), "%s", + PQgetvalue(res, tupnum, i_spclocation)); + } + PQclear(res); + + PQfinish(conn); + + cluster->dbarr.dbs = dbinfos; + cluster->dbarr.ndbs = ntups; +} + + +/* + * get_rel_infos() + * + * gets the relinfos for all the user tables of the database referred + * by "db". + * + * NOTE: we assume that relations/entities with oids greater than + * FirstNormalObjectId belongs to the user + */ +static void +get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo) +{ + PGconn *conn = connectToServer(cluster, + dbinfo->db_name); + PGresult *res; + RelInfo *relinfos; + int ntups; + int relnum; + int num_rels = 0; + char *nspname = NULL; + char *relname = NULL; + char *tablespace = NULL; + int i_spclocation, + i_nspname, + i_relname, + i_oid, + i_relfilenode, + i_reltablespace; + char query[QUERY_ALLOC]; + char *last_namespace = NULL, + *last_tablespace = NULL; + + /* + * pg_largeobject contains user data that does not appear in pg_dump + * --schema-only output, so we have to copy that system table heap and + * index. We could grab the pg_largeobject oids from template1, but it is + * easy to treat it as a normal table. Order by oid so we can join old/new + * structures efficiently. + */ + + snprintf(query, sizeof(query), + /* get regular heap */ + "WITH regular_heap (reloid) AS ( " + " SELECT c.oid " + " FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n " + " ON c.relnamespace = n.oid " + " LEFT OUTER JOIN pg_catalog.pg_index i " + " ON c.oid = i.indexrelid " + " WHERE relkind IN ('r', 'm', 'i', 'S') AND " + /* + * pg_dump only dumps valid indexes; testing indisready is necessary in + * 9.2, and harmless in earlier/later versions. + */ + " i.indisvalid IS DISTINCT FROM false AND " + " i.indisready IS DISTINCT FROM false AND " + /* exclude possible orphaned temp tables */ + " ((n.nspname !~ '^pg_temp_' AND " + " n.nspname !~ '^pg_toast_temp_' AND " + /* skip pg_toast because toast index have relkind == 'i', not 't' */ + " n.nspname NOT IN ('pg_catalog', 'information_schema', " + " 'binary_upgrade', 'pg_toast') AND " + " c.oid >= %u) OR " + " (n.nspname = 'pg_catalog' AND " + " relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) ))), " + /* + * We have to gather the TOAST tables in later steps because we + * can't schema-qualify TOAST tables. 
+ */ + /* get TOAST heap */ + " toast_heap (reloid) AS ( " + " SELECT reltoastrelid " + " FROM regular_heap JOIN pg_catalog.pg_class c " + " ON regular_heap.reloid = c.oid " + " AND c.reltoastrelid != %u), " + /* get indexes on regular and TOAST heap */ + " all_index (reloid) AS ( " + " SELECT indexrelid " + " FROM pg_index " + " WHERE indisvalid " + " AND indrelid IN (SELECT reltoastrelid " + " FROM (SELECT reloid FROM regular_heap " + " UNION ALL " + " SELECT reloid FROM toast_heap) all_heap " + " JOIN pg_catalog.pg_class c " + " ON all_heap.reloid = c.oid " + " AND c.reltoastrelid != %u)) " + /* get all rels */ + "SELECT c.oid, n.nspname, c.relname, " + " c.relfilenode, c.reltablespace, %s " + "FROM (SELECT reloid FROM regular_heap " + " UNION ALL " + " SELECT reloid FROM toast_heap " + " UNION ALL " + " SELECT reloid FROM all_index) all_rels " + " JOIN pg_catalog.pg_class c " + " ON all_rels.reloid = c.oid " + " JOIN pg_catalog.pg_namespace n " + " ON c.relnamespace = n.oid " + " LEFT OUTER JOIN pg_catalog.pg_tablespace t " + " ON c.reltablespace = t.oid " + /* we preserve pg_class.oid so we sort by it to match old/new */ + "ORDER BY 1;", + FirstNormalObjectId, + /* does pg_largeobject_metadata need to be migrated? */ + (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) ? + "" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'", + InvalidOid, InvalidOid, + /* 9.2 removed the spclocation column */ + (GET_MAJOR_VERSION(cluster->major_version) <= 901) ? + "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation"); + + res = executeQueryOrDie(conn, "%s", query); + + ntups = PQntuples(res); + + relinfos = (RelInfo *) pg_malloc(sizeof(RelInfo) * ntups); + + i_oid = PQfnumber(res, "oid"); + i_nspname = PQfnumber(res, "nspname"); + i_relname = PQfnumber(res, "relname"); + i_relfilenode = PQfnumber(res, "relfilenode"); + i_reltablespace = PQfnumber(res, "reltablespace"); + i_spclocation = PQfnumber(res, "spclocation"); + + for (relnum = 0; relnum < ntups; relnum++) + { + RelInfo *curr = &relinfos[num_rels++]; + + curr->reloid = atooid(PQgetvalue(res, relnum, i_oid)); + + nspname = PQgetvalue(res, relnum, i_nspname); + curr->nsp_alloc = false; + + /* + * Many of the namespace and tablespace strings are identical, so we + * try to reuse the allocated string pointers where possible to reduce + * memory consumption. + */ + /* Can we reuse the previous string allocation? */ + if (last_namespace && strcmp(nspname, last_namespace) == 0) + curr->nspname = last_namespace; + else + { + last_namespace = curr->nspname = pg_strdup(nspname); + curr->nsp_alloc = true; + } + + relname = PQgetvalue(res, relnum, i_relname); + curr->relname = pg_strdup(relname); + + curr->relfilenode = atooid(PQgetvalue(res, relnum, i_relfilenode)); + curr->tblsp_alloc = false; + + /* Is the tablespace oid non-zero? */ + if (atooid(PQgetvalue(res, relnum, i_reltablespace)) != 0) + { + /* + * The tablespace location might be "", meaning the cluster + * default location, i.e. pg_default or pg_global. + */ + tablespace = PQgetvalue(res, relnum, i_spclocation); + + /* Can we reuse the previous string allocation? */ + if (last_tablespace && strcmp(tablespace, last_tablespace) == 0) + curr->tablespace = last_tablespace; + else + { + last_tablespace = curr->tablespace = pg_strdup(tablespace); + curr->tblsp_alloc = true; + } + } + else + /* A zero reltablespace oid indicates the database tablespace. 
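 */

/*
 * A standalone sketch (not part of this patch) of the string-reuse trick
 * applied above to namespace and tablespace names: consecutive rows very
 * often repeat the same value, so keep the last strdup()'d pointer and
 * hand it out again instead of allocating a fresh copy.
 */
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *rows[] = {"public", "public", "pg_toast", "pg_toast"};
	const char *ptrs[4];
	char	   *last = NULL;
	int			i,
				allocs = 0;

	for (i = 0; i < 4; i++)
	{
		if (last && strcmp(rows[i], last) == 0)
			ptrs[i] = last;		/* reuse the previous allocation */
		else
		{
			ptrs[i] = last = strdup(rows[i]);
			allocs++;
		}
	}
	printf("%d allocations for 4 rows; rows 0 and 1 share storage: %s\n",
		   allocs, (ptrs[0] == ptrs[1]) ? "yes" : "no");
	return 0;
}

/*
 * When the tablespace OID is zero, no per-row string is needed at all;
 * get_rel_infos() just points at the database default: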
*/ + curr->tablespace = dbinfo->db_tablespace; + } + PQclear(res); + + PQfinish(conn); + + dbinfo->rel_arr.rels = relinfos; + dbinfo->rel_arr.nrels = num_rels; +} + + +static void +free_db_and_rel_infos(DbInfoArr *db_arr) +{ + int dbnum; + + for (dbnum = 0; dbnum < db_arr->ndbs; dbnum++) + { + free_rel_infos(&db_arr->dbs[dbnum].rel_arr); + pg_free(db_arr->dbs[dbnum].db_name); + } + pg_free(db_arr->dbs); + db_arr->dbs = NULL; + db_arr->ndbs = 0; +} + + +static void +free_rel_infos(RelInfoArr *rel_arr) +{ + int relnum; + + for (relnum = 0; relnum < rel_arr->nrels; relnum++) + { + if (rel_arr->rels[relnum].nsp_alloc) + pg_free(rel_arr->rels[relnum].nspname); + pg_free(rel_arr->rels[relnum].relname); + if (rel_arr->rels[relnum].tblsp_alloc) + pg_free(rel_arr->rels[relnum].tablespace); + } + pg_free(rel_arr->rels); + rel_arr->nrels = 0; +} + + +static void +print_db_infos(DbInfoArr *db_arr) +{ + int dbnum; + + for (dbnum = 0; dbnum < db_arr->ndbs; dbnum++) + { + pg_log(PG_VERBOSE, "Database: %s\n", db_arr->dbs[dbnum].db_name); + print_rel_infos(&db_arr->dbs[dbnum].rel_arr); + pg_log(PG_VERBOSE, "\n\n"); + } +} + + +static void +print_rel_infos(RelInfoArr *rel_arr) +{ + int relnum; + + for (relnum = 0; relnum < rel_arr->nrels; relnum++) + pg_log(PG_VERBOSE, "relname: %s.%s: reloid: %u reltblspace: %s\n", + rel_arr->rels[relnum].nspname, + rel_arr->rels[relnum].relname, + rel_arr->rels[relnum].reloid, + rel_arr->rels[relnum].tablespace); +} diff --git a/src/bin/pg_upgrade/option.c b/src/bin/pg_upgrade/option.c new file mode 100644 index 00000000000..b8510561350 --- /dev/null +++ b/src/bin/pg_upgrade/option.c @@ -0,0 +1,518 @@ +/* + * option.c + * + * options functions + * + * Copyright (c) 2010-2015, PostgreSQL Global Development Group + * src/bin/pg_upgrade/option.c + */ + +#include "postgres_fe.h" + +#include "miscadmin.h" +#include "getopt_long.h" + +#include "pg_upgrade.h" + +#include <time.h> +#include <sys/types.h> +#ifdef WIN32 +#include <io.h> +#endif + + +static void usage(void); +static void check_required_directory(char **dirpath, char **configpath, + char *envVarName, char *cmdLineOption, char *description); +#define FIX_DEFAULT_READ_ONLY "-c default_transaction_read_only=false" + + +UserOpts user_opts; + + +/* + * parseCommandLine() + * + * Parses the command line (argc, argv[]) and loads structures + */ +void +parseCommandLine(int argc, char *argv[]) +{ + static struct option long_options[] = { + {"old-datadir", required_argument, NULL, 'd'}, + {"new-datadir", required_argument, NULL, 'D'}, + {"old-bindir", required_argument, NULL, 'b'}, + {"new-bindir", required_argument, NULL, 'B'}, + {"old-options", required_argument, NULL, 'o'}, + {"new-options", required_argument, NULL, 'O'}, + {"old-port", required_argument, NULL, 'p'}, + {"new-port", required_argument, NULL, 'P'}, + + {"username", required_argument, NULL, 'U'}, + {"check", no_argument, NULL, 'c'}, + {"link", no_argument, NULL, 'k'}, + {"retain", no_argument, NULL, 'r'}, + {"jobs", required_argument, NULL, 'j'}, + {"verbose", no_argument, NULL, 'v'}, + {NULL, 0, NULL, 0} + }; + int option; /* Command line option */ + int optindex = 0; /* used by getopt_long */ + int os_user_effective_id; + FILE *fp; + char **filename; + time_t run_time = time(NULL); + + user_opts.transfer_mode = TRANSFER_MODE_COPY; + + os_info.progname = get_progname(argv[0]); + + /* Process libpq env. variables; load values here for usage() output */ + old_cluster.port = getenv("PGPORTOLD") ? atoi(getenv("PGPORTOLD")) : DEF_PGUPORT; + new_cluster.port = getenv("PGPORTNEW") ?
atoi(getenv("PGPORTNEW")) : DEF_PGUPORT; + + os_user_effective_id = get_user_info(&os_info.user); + /* we override just the database user name; we got the OS id above */ + if (getenv("PGUSER")) + { + pg_free(os_info.user); + /* must save value, getenv()'s pointer is not stable */ + os_info.user = pg_strdup(getenv("PGUSER")); + } + + if (argc > 1) + { + if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) + { + usage(); + exit(0); + } + if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) + { + puts("pg_upgrade (PostgreSQL) " PG_VERSION); + exit(0); + } + } + + /* Allow help and version to be run as root, so do the test here. */ + if (os_user_effective_id == 0) + pg_fatal("%s: cannot be run as root\n", os_info.progname); + + if ((log_opts.internal = fopen_priv(INTERNAL_LOG_FILE, "a")) == NULL) + pg_fatal("cannot write to log file %s\n", INTERNAL_LOG_FILE); + + while ((option = getopt_long(argc, argv, "d:D:b:B:cj:ko:O:p:P:rU:v", + long_options, &optindex)) != -1) + { + switch (option) + { + case 'b': + old_cluster.bindir = pg_strdup(optarg); + break; + + case 'B': + new_cluster.bindir = pg_strdup(optarg); + break; + + case 'c': + user_opts.check = true; + break; + + case 'd': + old_cluster.pgdata = pg_strdup(optarg); + old_cluster.pgconfig = pg_strdup(optarg); + break; + + case 'D': + new_cluster.pgdata = pg_strdup(optarg); + new_cluster.pgconfig = pg_strdup(optarg); + break; + + case 'j': + user_opts.jobs = atoi(optarg); + break; + + case 'k': + user_opts.transfer_mode = TRANSFER_MODE_LINK; + break; + + case 'o': + /* append option? */ + if (!old_cluster.pgopts) + old_cluster.pgopts = pg_strdup(optarg); + else + { + char *old_pgopts = old_cluster.pgopts; + + old_cluster.pgopts = psprintf("%s %s", old_pgopts, optarg); + free(old_pgopts); + } + break; + + case 'O': + /* append option? */ + if (!new_cluster.pgopts) + new_cluster.pgopts = pg_strdup(optarg); + else + { + char *new_pgopts = new_cluster.pgopts; + + new_cluster.pgopts = psprintf("%s %s", new_pgopts, optarg); + free(new_pgopts); + } + break; + + /* + * Someday, the port number option could be removed and passed + * using -o/-O, but that requires postmaster -C to be + * supported on all old/new versions (added in PG 9.2). + */ + case 'p': + if ((old_cluster.port = atoi(optarg)) <= 0) + { + pg_fatal("invalid old port number\n"); + exit(1); + } + break; + + case 'P': + if ((new_cluster.port = atoi(optarg)) <= 0) + { + pg_fatal("invalid new port number\n"); + exit(1); + } + break; + + case 'r': + log_opts.retain = true; + break; + + case 'U': + pg_free(os_info.user); + os_info.user = pg_strdup(optarg); + os_info.user_specified = true; + + /* + * Push the user name into the environment so pre-9.1 + * pg_ctl/libpq uses it. + */ + pg_putenv("PGUSER", os_info.user); + break; + + case 'v': + pg_log(PG_REPORT, "Running in verbose mode\n"); + log_opts.verbose = true; + break; + + default: + pg_fatal("Try \"%s --help\" for more information.\n", + os_info.progname); + break; + } + } + + /* label start of upgrade in logfiles */ + for (filename = output_files; *filename != NULL; filename++) + { + if ((fp = fopen_priv(*filename, "a")) == NULL) + pg_fatal("cannot write to log file %s\n", *filename); + + /* Start with newline because we might be appending to a file. 
*/ + fprintf(fp, "\n" + "-----------------------------------------------------------------\n" + " pg_upgrade run on %s" + "-----------------------------------------------------------------\n\n", + ctime(&run_time)); + fclose(fp); + } + + /* Turn off read-only mode; add prefix to PGOPTIONS? */ + if (getenv("PGOPTIONS")) + { + char *pgoptions = psprintf("%s %s", FIX_DEFAULT_READ_ONLY, + getenv("PGOPTIONS")); + + pg_putenv("PGOPTIONS", pgoptions); + pfree(pgoptions); + } + else + pg_putenv("PGOPTIONS", FIX_DEFAULT_READ_ONLY); + + /* Get values from env if not already set */ + check_required_directory(&old_cluster.bindir, NULL, "PGBINOLD", "-b", + "old cluster binaries reside"); + check_required_directory(&new_cluster.bindir, NULL, "PGBINNEW", "-B", + "new cluster binaries reside"); + check_required_directory(&old_cluster.pgdata, &old_cluster.pgconfig, + "PGDATAOLD", "-d", "old cluster data resides"); + check_required_directory(&new_cluster.pgdata, &new_cluster.pgconfig, + "PGDATANEW", "-D", "new cluster data resides"); + +#ifdef WIN32 + /* + * On Windows, initdb --sync-only will fail with a "Permission denied" + * error on file pg_upgrade_utility.log if pg_upgrade is run inside + * the new cluster directory, so we do a check here. + */ + { + char cwd[MAXPGPATH], new_cluster_pgdata[MAXPGPATH]; + + strlcpy(new_cluster_pgdata, new_cluster.pgdata, MAXPGPATH); + canonicalize_path(new_cluster_pgdata); + + if (!getcwd(cwd, MAXPGPATH)) + pg_fatal("cannot find current directory\n"); + canonicalize_path(cwd); + if (path_is_prefix_of_path(new_cluster_pgdata, cwd)) + pg_fatal("cannot run pg_upgrade from inside the new cluster data directory on Windows\n"); + } +#endif +} + + +static void +usage(void) +{ + printf(_("pg_upgrade upgrades a PostgreSQL cluster to a different major version.\n\ +\nUsage:\n\ + pg_upgrade [OPTION]...\n\ +\n\ +Options:\n\ + -b, --old-bindir=BINDIR old cluster executable directory\n\ + -B, --new-bindir=BINDIR new cluster executable directory\n\ + -c, --check check clusters only, don't change any data\n\ + -d, --old-datadir=DATADIR old cluster data directory\n\ + -D, --new-datadir=DATADIR new cluster data directory\n\ + -j, --jobs number of simultaneous processes or threads to use\n\ + -k, --link link instead of copying files to new cluster\n\ + -o, --old-options=OPTIONS old cluster options to pass to the server\n\ + -O, --new-options=OPTIONS new cluster options to pass to the server\n\ + -p, --old-port=PORT old cluster port number (default %d)\n\ + -P, --new-port=PORT new cluster port number (default %d)\n\ + -r, --retain retain SQL and log files after success\n\ + -U, --username=NAME cluster superuser (default \"%s\")\n\ + -v, --verbose enable verbose internal logging\n\ + -V, --version display version information, then exit\n\ + -?, --help show this help, then exit\n\ +\n\ +Before running pg_upgrade you must:\n\ + create a new database cluster (using the new version of initdb)\n\ + shutdown the postmaster servicing the old cluster\n\ + shutdown the postmaster servicing the new cluster\n\ +\n\ +When you run pg_upgrade, you must provide the following information:\n\ + the data directory for the old cluster (-d DATADIR)\n\ + the data directory for the new cluster (-D DATADIR)\n\ + the \"bin\" directory for the old version (-b BINDIR)\n\ + the \"bin\" directory for the new version (-B BINDIR)\n\ +\n\ +For example:\n\ + pg_upgrade -d oldCluster/data -D newCluster/data -b oldCluster/bin -B newCluster/bin\n\ +or\n"), old_cluster.port, new_cluster.port, os_info.user); +#ifndef WIN32 + 
printf(_("\ + $ export PGDATAOLD=oldCluster/data\n\ + $ export PGDATANEW=newCluster/data\n\ + $ export PGBINOLD=oldCluster/bin\n\ + $ export PGBINNEW=newCluster/bin\n\ + $ pg_upgrade\n")); +#else + printf(_("\ + C:\\> set PGDATAOLD=oldCluster/data\n\ + C:\\> set PGDATANEW=newCluster/data\n\ + C:\\> set PGBINOLD=oldCluster/bin\n\ + C:\\> set PGBINNEW=newCluster/bin\n\ + C:\\> pg_upgrade\n")); +#endif + printf(_("\nReport bugs to .\n")); +} + + +/* + * check_required_directory() + * + * Checks a directory option. + * dirpath - the directory name supplied on the command line + * configpath - optional configuration directory + * envVarName - the name of an environment variable to get if dirpath is NULL + * cmdLineOption - the command line option corresponds to this directory (-o, -O, -n, -N) + * description - a description of this directory option + * + * We use the last two arguments to construct a meaningful error message if the + * user hasn't provided the required directory name. + */ +static void +check_required_directory(char **dirpath, char **configpath, + char *envVarName, char *cmdLineOption, + char *description) +{ + if (*dirpath == NULL || strlen(*dirpath) == 0) + { + const char *envVar; + + if ((envVar = getenv(envVarName)) && strlen(envVar)) + { + *dirpath = pg_strdup(envVar); + if (configpath) + *configpath = pg_strdup(envVar); + } + else + pg_fatal("You must identify the directory where the %s.\n" + "Please use the %s command-line option or the %s environment variable.\n", + description, cmdLineOption, envVarName); + } + + /* + * Trim off any trailing path separators because we construct paths by + * appending to this path. + */ +#ifndef WIN32 + if ((*dirpath)[strlen(*dirpath) - 1] == '/') +#else + if ((*dirpath)[strlen(*dirpath) - 1] == '/' || + (*dirpath)[strlen(*dirpath) - 1] == '\\') +#endif + (*dirpath)[strlen(*dirpath) - 1] = 0; +} + +/* + * adjust_data_dir + * + * If a configuration-only directory was specified, find the real data dir + * by quering the running server. This has limited checking because we + * can't check for a running server because we can't find postmaster.pid. + */ +void +adjust_data_dir(ClusterInfo *cluster) +{ + char filename[MAXPGPATH]; + char cmd[MAXPGPATH], + cmd_output[MAX_STRING]; + FILE *fp, + *output; + + /* If there is no postgresql.conf, it can't be a config-only dir */ + snprintf(filename, sizeof(filename), "%s/postgresql.conf", cluster->pgconfig); + if ((fp = fopen(filename, "r")) == NULL) + return; + fclose(fp); + + /* If PG_VERSION exists, it can't be a config-only dir */ + snprintf(filename, sizeof(filename), "%s/PG_VERSION", cluster->pgconfig); + if ((fp = fopen(filename, "r")) != NULL) + { + fclose(fp); + return; + } + + /* Must be a configuration directory, so find the real data directory. */ + + prep_status("Finding the real data directory for the %s cluster", + CLUSTER_NAME(cluster)); + + /* + * We don't have a data directory yet, so we can't check the PG version, + * so this might fail --- only works for PG 9.2+. If this fails, + * pg_upgrade will fail anyway because the data files will not be found. 
+ */ + snprintf(cmd, sizeof(cmd), "\"%s/postgres\" -D \"%s\" -C data_directory", + cluster->bindir, cluster->pgconfig); + + if ((output = popen(cmd, "r")) == NULL || + fgets(cmd_output, sizeof(cmd_output), output) == NULL) + pg_fatal("Could not get data directory using %s: %s\n", + cmd, getErrorText(errno)); + + pclose(output); + + /* Remove trailing newline */ + if (strchr(cmd_output, '\n') != NULL) + *strchr(cmd_output, '\n') = '\0'; + + cluster->pgdata = pg_strdup(cmd_output); + + check_ok(); +} + + +/* + * get_sock_dir + * + * Identify the socket directory to use for this cluster. If we're doing + * a live check (old cluster only), we need to find out where the postmaster + * is listening. Otherwise, we're going to put the socket into the current + * directory. + */ +void +get_sock_dir(ClusterInfo *cluster, bool live_check) +{ +#ifdef HAVE_UNIX_SOCKETS + + /* + * sockdir and port were added to postmaster.pid in PG 9.1. Pre-9.1 cannot + * process pg_ctl -w for sockets in non-default locations. + */ + if (GET_MAJOR_VERSION(cluster->major_version) >= 901) + { + if (!live_check) + { + /* Use the current directory for the socket */ + cluster->sockdir = pg_malloc(MAXPGPATH); + if (!getcwd(cluster->sockdir, MAXPGPATH)) + pg_fatal("cannot find current directory\n"); + } + else + { + /* + * If we are doing a live check, we will use the old cluster's + * Unix domain socket directory so we can connect to the live + * server. + */ + unsigned short orig_port = cluster->port; + char filename[MAXPGPATH], + line[MAXPGPATH]; + FILE *fp; + int lineno; + + snprintf(filename, sizeof(filename), "%s/postmaster.pid", + cluster->pgdata); + if ((fp = fopen(filename, "r")) == NULL) + pg_fatal("Cannot open file %s: %m\n", filename); + + for (lineno = 1; + lineno <= Max(LOCK_FILE_LINE_PORT, LOCK_FILE_LINE_SOCKET_DIR); + lineno++) + { + if (fgets(line, sizeof(line), fp) == NULL) + pg_fatal("Cannot read line %d from %s: %m\n", lineno, filename); + + /* potentially overwrite user-supplied value */ + if (lineno == LOCK_FILE_LINE_PORT) + sscanf(line, "%hu", &old_cluster.port); + if (lineno == LOCK_FILE_LINE_SOCKET_DIR) + { + cluster->sockdir = pg_strdup(line); + /* strip off newline */ + if (strchr(cluster->sockdir, '\n') != NULL) + *strchr(cluster->sockdir, '\n') = '\0'; + } + } + fclose(fp); + + /* warn of port number correction */ + if (orig_port != DEF_PGUPORT && old_cluster.port != orig_port) + pg_log(PG_WARNING, "User-supplied old port number %hu corrected to %hu\n", + orig_port, cluster->port); + } + } + else + + /* + * Can't get sockdir and pg_ctl -w can't use a non-default, use + * default + */ + cluster->sockdir = NULL; +#else /* !HAVE_UNIX_SOCKETS */ + cluster->sockdir = NULL; +#endif +} diff --git a/src/bin/pg_upgrade/page.c b/src/bin/pg_upgrade/page.c new file mode 100644 index 00000000000..3f4c697a108 --- /dev/null +++ b/src/bin/pg_upgrade/page.c @@ -0,0 +1,164 @@ +/* + * page.c + * + * per-page conversion operations + * + * Copyright (c) 2010-2015, PostgreSQL Global Development Group + * src/bin/pg_upgrade/page.c + */ + +#include "postgres_fe.h" + +#include "pg_upgrade.h" + +#include "storage/bufpage.h" + + +#ifdef PAGE_CONVERSION + + +static void getPageVersion( + uint16 *version, const char *pathName); +static pageCnvCtx *loadConverterPlugin( + uint16 newPageVersion, uint16 oldPageVersion); + + +/* + * setupPageConverter() + * + * This function determines the PageLayoutVersion of the old cluster and + * the PageLayoutVersion of the new cluster. 
If the versions differ, this
+ * function loads a converter plugin and returns a pointer to a pageCnvCtx
+ * object (in *result) that knows how to convert pages from the old format
+ * to the new format.  If the versions are identical, this function just
+ * returns a NULL pageCnvCtx pointer to indicate that page-by-page conversion
+ * is not required.
+ */
+pageCnvCtx *
+setupPageConverter(void)
+{
+    uint16      oldPageVersion;
+    uint16      newPageVersion;
+    pageCnvCtx *converter;
+    const char *msg;
+    char        dstName[MAXPGPATH];
+    char        srcName[MAXPGPATH];
+
+    snprintf(dstName, sizeof(dstName), "%s/global/%u", new_cluster.pgdata,
+             new_cluster.pg_database_oid);
+    snprintf(srcName, sizeof(srcName), "%s/global/%u", old_cluster.pgdata,
+             old_cluster.pg_database_oid);
+
+    getPageVersion(&oldPageVersion, srcName);
+    getPageVersion(&newPageVersion, dstName);
+
+    /*
+     * If the old cluster and new cluster use the same page layouts, then we
+     * don't need a page converter.
+     */
+    if (newPageVersion != oldPageVersion)
+    {
+        /*
+         * The clusters use differing page layouts, see if we can find a
+         * plugin that knows how to convert from the old page layout to the
+         * new page layout.
+         */
+
+        if ((converter = loadConverterPlugin(newPageVersion, oldPageVersion)) == NULL)
+            pg_fatal("could not find plugin to convert from old page layout to new page layout\n");
+
+        return converter;
+    }
+    else
+        return NULL;
+}
+
+
+/*
+ * getPageVersion()
+ *
+ * Retrieves the PageLayoutVersion for the given relation.
+ *
+ * On success, stores the PageLayoutVersion at *version; on failure it
+ * reports a fatal error and does not return.
+ */
+static void
+getPageVersion(uint16 *version, const char *pathName)
+{
+    int            relfd;
+    PageHeaderData page;
+    ssize_t        bytesRead;
+
+    if ((relfd = open(pathName, O_RDONLY, 0)) < 0)
+        pg_fatal("could not open relation %s\n", pathName);
+
+    if ((bytesRead = read(relfd, &page, sizeof(page))) != sizeof(page))
+        pg_fatal("could not read page header of %s\n", pathName);
+
+    *version = PageGetPageLayoutVersion(&page);
+
+    close(relfd);
+
+    return;
+}
+
+
+/*
+ * loadConverterPlugin()
+ *
+ * This function loads a page-converter plugin library and grabs a
+ * pointer to each of the (interesting) functions provided by that
+ * plugin.  The name of the plugin library is derived from the given
+ * newPageVersion and oldPageVersion.  If a plugin is found, this
+ * function returns a pointer to a pageCnvCtx object (which will contain
+ * a collection of plugin function pointers).  If the required plugin
+ * is not found, this function returns NULL.
+ */
+static pageCnvCtx *
+loadConverterPlugin(uint16 newPageVersion, uint16 oldPageVersion)
+{
+    char  pluginName[MAXPGPATH];
+    void *plugin;
+
+    /*
+     * Try to find a plugin that can convert pages of oldPageVersion into
+     * pages of newPageVersion.  For example, if oldPageVersion is 3 and
+     * newPageVersion is 4, we search for a plugin named:
+     * plugins/convertLayout_3_to_4.dll
+     */
+
+    /*
+     * FIXME: we are searching for plugins relative to the current directory,
+     * we should really search relative to our own executable instead.
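+     * A sketch of the executable-relative lookup this FIXME suggests,
+     * reusing os_info.exec_path (filled in by setup()); this is an
+     * illustration under that assumption, not code from this file:
+     *
+     *     snprintf(pluginName, sizeof(pluginName),
+     *              "%s/plugins/convertLayout_%d_to_%d%s",
+     *              os_info.exec_path, oldPageVersion, newPageVersion, DLSUFFIX);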
+     */
+    snprintf(pluginName, sizeof(pluginName), "./plugins/convertLayout_%d_to_%d%s",
+             oldPageVersion, newPageVersion, DLSUFFIX);
+
+    if ((plugin = pg_dlopen(pluginName)) == NULL)
+        return NULL;
+    else
+    {
+        pageCnvCtx *result = (pageCnvCtx *) pg_malloc(sizeof(*result));
+
+        result->oldPageVersion = oldPageVersion;
+        result->newPageVersion = newPageVersion;
+
+        result->startup = (pluginStartup) pg_dlsym(plugin, "init");
+        result->convertFile = (pluginConvertFile) pg_dlsym(plugin, "convertFile");
+        result->convertPage = (pluginConvertPage) pg_dlsym(plugin, "convertPage");
+        result->shutdown = (pluginShutdown) pg_dlsym(plugin, "fini");
+        result->pluginData = NULL;
+
+        /*
+         * If the plugin has exported an initializer, go ahead and invoke it.
+         */
+        if (result->startup)
+            result->startup(MIGRATOR_API_VERSION, &result->pluginVersion,
+                            newPageVersion, oldPageVersion, &result->pluginData);
+
+        return result;
+    }
+}
+
+#endif
diff --git a/src/bin/pg_upgrade/parallel.c b/src/bin/pg_upgrade/parallel.c new file mode 100644 index 00000000000..c6978b596b4 --- /dev/null +++ b/src/bin/pg_upgrade/parallel.c @@ -0,0 +1,357 @@
+/*
+ * parallel.c
+ *
+ * multi-process support
+ *
+ * Copyright (c) 2010-2015, PostgreSQL Global Development Group
+ * src/bin/pg_upgrade/parallel.c
+ */
+
+#include "postgres_fe.h"
+
+#include "pg_upgrade.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#ifdef WIN32
+#include <io.h>
+#endif
+
+static int parallel_jobs;
+
+#ifdef WIN32
+/*
+ * Array holding all active threads.  There can't be any gaps/zeros so
+ * it can be passed to WaitForMultipleObjects(); the thread arguments are
+ * kept in a separate array so thread_handles can be passed directly.
+ */
+HANDLE *thread_handles;
+
+typedef struct
+{
+    char *log_file;
+    char *opt_log_file;
+    char *cmd;
+} exec_thread_arg;
+
+typedef struct
+{
+    DbInfoArr *old_db_arr;
+    DbInfoArr *new_db_arr;
+    char      *old_pgdata;
+    char      *new_pgdata;
+    char      *old_tablespace;
+} transfer_thread_arg;
+
+exec_thread_arg **exec_thread_args;
+transfer_thread_arg **transfer_thread_args;
+
+/* track current thread_args struct so reap_child() can be used for all cases */
+void **cur_thread_args;
+
+DWORD win32_exec_prog(exec_thread_arg *args);
+DWORD win32_transfer_all_new_dbs(transfer_thread_arg *args);
+#endif
+
+/*
+ * parallel_exec_prog
+ *
+ * This has the same API as exec_prog, except it does parallel execution,
+ * and therefore must throw errors rather than returning an error status.
+ */
+void
+parallel_exec_prog(const char *log_file, const char *opt_log_file,
+                   const char *fmt,...)
+{
+    va_list args;
+    char    cmd[MAX_STRING];
+
+#ifndef WIN32
+    pid_t   child;
+#else
+    HANDLE  child;
+    exec_thread_arg *new_arg;
+#endif
+
+    va_start(args, fmt);
+    vsnprintf(cmd, sizeof(cmd), fmt, args);
+    va_end(args);
+
+    if (user_opts.jobs <= 1)
+        /* throw_error must be true to allow jobs */
+        exec_prog(log_file, opt_log_file, true, "%s", cmd);
+    else
+    {
+        /* parallel */
+#ifdef WIN32
+        if (thread_handles == NULL)
+            thread_handles = pg_malloc(user_opts.jobs * sizeof(HANDLE));
+
+        if (exec_thread_args == NULL)
+        {
+            int i;
+
+            exec_thread_args = pg_malloc(user_opts.jobs * sizeof(exec_thread_arg *));
+
+            /*
+             * For safety and performance, we keep the args allocated during
+             * the entire life of the process, and we don't free the args in a
+             * thread different from the one that allocated it.
+ */ + for (i = 0; i < user_opts.jobs; i++) + exec_thread_args[i] = pg_malloc0(sizeof(exec_thread_arg)); + } + + cur_thread_args = (void **) exec_thread_args; +#endif + /* harvest any dead children */ + while (reap_child(false) == true) + ; + + /* must we wait for a dead child? */ + if (parallel_jobs >= user_opts.jobs) + reap_child(true); + + /* set this before we start the job */ + parallel_jobs++; + + /* Ensure stdio state is quiesced before forking */ + fflush(NULL); + +#ifndef WIN32 + child = fork(); + if (child == 0) + /* use _exit to skip atexit() functions */ + _exit(!exec_prog(log_file, opt_log_file, true, "%s", cmd)); + else if (child < 0) + /* fork failed */ + pg_fatal("could not create worker process: %s\n", strerror(errno)); +#else + /* empty array element are always at the end */ + new_arg = exec_thread_args[parallel_jobs - 1]; + + /* Can only pass one pointer into the function, so use a struct */ + if (new_arg->log_file) + pg_free(new_arg->log_file); + new_arg->log_file = pg_strdup(log_file); + if (new_arg->opt_log_file) + pg_free(new_arg->opt_log_file); + new_arg->opt_log_file = opt_log_file ? pg_strdup(opt_log_file) : NULL; + if (new_arg->cmd) + pg_free(new_arg->cmd); + new_arg->cmd = pg_strdup(cmd); + + child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_exec_prog, + new_arg, 0, NULL); + if (child == 0) + pg_fatal("could not create worker thread: %s\n", strerror(errno)); + + thread_handles[parallel_jobs - 1] = child; +#endif + } + + return; +} + + +#ifdef WIN32 +DWORD +win32_exec_prog(exec_thread_arg *args) +{ + int ret; + + ret = !exec_prog(args->log_file, args->opt_log_file, true, "%s", args->cmd); + + /* terminates thread */ + return ret; +} +#endif + + +/* + * parallel_transfer_all_new_dbs + * + * This has the same API as transfer_all_new_dbs, except it does parallel execution + * by transfering multiple tablespaces in parallel + */ +void +parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, + char *old_pgdata, char *new_pgdata, + char *old_tablespace) +{ +#ifndef WIN32 + pid_t child; +#else + HANDLE child; + transfer_thread_arg *new_arg; +#endif + + if (user_opts.jobs <= 1) + /* throw_error must be true to allow jobs */ + transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, NULL); + else + { + /* parallel */ +#ifdef WIN32 + if (thread_handles == NULL) + thread_handles = pg_malloc(user_opts.jobs * sizeof(HANDLE)); + + if (transfer_thread_args == NULL) + { + int i; + + transfer_thread_args = pg_malloc(user_opts.jobs * sizeof(transfer_thread_arg *)); + + /* + * For safety and performance, we keep the args allocated during + * the entire life of the process, and we don't free the args in a + * thread different from the one that allocated it. + */ + for (i = 0; i < user_opts.jobs; i++) + transfer_thread_args[i] = pg_malloc0(sizeof(transfer_thread_arg)); + } + + cur_thread_args = (void **) transfer_thread_args; +#endif + /* harvest any dead children */ + while (reap_child(false) == true) + ; + + /* must we wait for a dead child? 
*/ + if (parallel_jobs >= user_opts.jobs) + reap_child(true); + + /* set this before we start the job */ + parallel_jobs++; + + /* Ensure stdio state is quiesced before forking */ + fflush(NULL); + +#ifndef WIN32 + child = fork(); + if (child == 0) + { + transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, + old_tablespace); + /* if we take another exit path, it will be non-zero */ + /* use _exit to skip atexit() functions */ + _exit(0); + } + else if (child < 0) + /* fork failed */ + pg_fatal("could not create worker process: %s\n", strerror(errno)); +#else + /* empty array element are always at the end */ + new_arg = transfer_thread_args[parallel_jobs - 1]; + + /* Can only pass one pointer into the function, so use a struct */ + new_arg->old_db_arr = old_db_arr; + new_arg->new_db_arr = new_db_arr; + if (new_arg->old_pgdata) + pg_free(new_arg->old_pgdata); + new_arg->old_pgdata = pg_strdup(old_pgdata); + if (new_arg->new_pgdata) + pg_free(new_arg->new_pgdata); + new_arg->new_pgdata = pg_strdup(new_pgdata); + if (new_arg->old_tablespace) + pg_free(new_arg->old_tablespace); + new_arg->old_tablespace = old_tablespace ? pg_strdup(old_tablespace) : NULL; + + child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_transfer_all_new_dbs, + new_arg, 0, NULL); + if (child == 0) + pg_fatal("could not create worker thread: %s\n", strerror(errno)); + + thread_handles[parallel_jobs - 1] = child; +#endif + } + + return; +} + + +#ifdef WIN32 +DWORD +win32_transfer_all_new_dbs(transfer_thread_arg *args) +{ + transfer_all_new_dbs(args->old_db_arr, args->new_db_arr, args->old_pgdata, + args->new_pgdata, args->old_tablespace); + + /* terminates thread */ + return 0; +} +#endif + + +/* + * collect status from a completed worker child + */ +bool +reap_child(bool wait_for_child) +{ +#ifndef WIN32 + int work_status; + int ret; +#else + int thread_num; + DWORD res; +#endif + + if (user_opts.jobs <= 1 || parallel_jobs == 0) + return false; + +#ifndef WIN32 + ret = waitpid(-1, &work_status, wait_for_child ? 0 : WNOHANG); + + /* no children or, for WNOHANG, no dead children */ + if (ret <= 0 || !WIFEXITED(work_status)) + return false; + + if (WEXITSTATUS(work_status) != 0) + pg_fatal("child worker exited abnormally: %s\n", strerror(errno)); +#else + /* wait for one to finish */ + thread_num = WaitForMultipleObjects(parallel_jobs, thread_handles, + false, wait_for_child ? INFINITE : 0); + + if (thread_num == WAIT_TIMEOUT || thread_num == WAIT_FAILED) + return false; + + /* compute thread index in active_threads */ + thread_num -= WAIT_OBJECT_0; + + /* get the result */ + GetExitCodeThread(thread_handles[thread_num], &res); + if (res != 0) + pg_fatal("child worker exited abnormally: %s\n", strerror(errno)); + + /* dispose of handle to stop leaks */ + CloseHandle(thread_handles[thread_num]); + + /* Move last slot into dead child's position */ + if (thread_num != parallel_jobs - 1) + { + void *tmp_args; + + thread_handles[thread_num] = thread_handles[parallel_jobs - 1]; + + /* + * Move last active thead arg struct into the now-dead slot, and the + * now-dead slot to the end for reuse by the next thread. Though the + * thread struct is in use by another thread, we can safely swap the + * struct pointers within the array. 
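+     * For illustration (hypothetical state): with three live jobs and the
+     * thread in slot 1 finished,
+     *
+     *     thread_handles:  [H0, H1*, H2]  becomes  [H0, H2, --]
+     *     cur_thread_args: [A0, A1,  A2]  becomes  [A0, A2, A1]
+     *
+     * so the first parallel_jobs - 1 handle slots stay gap-free for
+     * WaitForMultipleObjects(), and A1 is parked at the end for reuse.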
+         */
+        tmp_args = cur_thread_args[thread_num];
+        cur_thread_args[thread_num] = cur_thread_args[parallel_jobs - 1];
+        cur_thread_args[parallel_jobs - 1] = tmp_args;
+    }
+#endif
+
+    /* do this after job has been removed */
+    parallel_jobs--;
+
+    return true;
+}
diff --git a/src/bin/pg_upgrade/pg_upgrade.c b/src/bin/pg_upgrade/pg_upgrade.c new file mode 100644 index 00000000000..fbccc2e8304 --- /dev/null +++ b/src/bin/pg_upgrade/pg_upgrade.c @@ -0,0 +1,616 @@
+/*
+ * pg_upgrade.c
+ *
+ * main source file
+ *
+ * Copyright (c) 2010-2015, PostgreSQL Global Development Group
+ * src/bin/pg_upgrade/pg_upgrade.c
+ */
+
+/*
+ * To simplify the upgrade process, we force certain system values to be
+ * identical between old and new clusters:
+ *
+ * We control all assignments of pg_class.oid (and relfilenode) so toast
+ * oids are the same between old and new clusters.  This is important
+ * because toast oids are stored as toast pointers in user tables.
+ *
+ * While pg_class.oid and pg_class.relfilenode are initially the same
+ * in a cluster, they can diverge due to CLUSTER, REINDEX, or VACUUM
+ * FULL.  In the new cluster, pg_class.oid and pg_class.relfilenode will
+ * be the same and will match the old pg_class.oid value.  Because of
+ * this, old/new pg_class.relfilenode values will not match if CLUSTER,
+ * REINDEX, or VACUUM FULL have been performed in the old cluster.
+ *
+ * We control all assignments of pg_type.oid because these oids are stored
+ * in user composite type values.
+ *
+ * We control all assignments of pg_enum.oid because these oids are stored
+ * in user tables as enum values.
+ *
+ * We control all assignments of pg_authid.oid because these oids are stored
+ * in pg_largeobject_metadata.
+ */
+
+
+
+#include "postgres_fe.h"
+
+#include "pg_upgrade.h"
+#include "common/restricted_token.h"
+
+#ifdef HAVE_LANGINFO_H
+#include <langinfo.h>
+#endif
+
+static void prepare_new_cluster(void);
+static void prepare_new_databases(void);
+static void create_new_objects(void);
+static void copy_clog_xlog_xid(void);
+static void set_frozenxids(bool minmxid_only);
+static void setup(char *argv0, bool *live_check);
+static void cleanup(void);
+
+ClusterInfo old_cluster,
+            new_cluster;
+OSInfo      os_info;
+
+char *output_files[] = {
+    SERVER_LOG_FILE,
+#ifdef WIN32
+    /* unique file for pg_ctl start */
+    SERVER_START_LOG_FILE,
+#endif
+    UTILITY_LOG_FILE,
+    INTERNAL_LOG_FILE,
+    NULL
+};
+
+
+int
+main(int argc, char **argv)
+{
+    char *analyze_script_file_name = NULL;
+    char *deletion_script_file_name = NULL;
+    bool  live_check = false;
+
+    parseCommandLine(argc, argv);
+
+    get_restricted_token(os_info.progname);
+
+    adjust_data_dir(&old_cluster);
+    adjust_data_dir(&new_cluster);
+
+    setup(argv[0], &live_check);
+
+    output_check_banner(live_check);
+
+    check_cluster_versions();
+
+    get_sock_dir(&old_cluster, live_check);
+    get_sock_dir(&new_cluster, false);
+
+    check_cluster_compatibility(live_check);
+
+    check_and_dump_old_cluster(live_check);
+
+
+    /* -- NEW -- */
+    start_postmaster(&new_cluster, true);
+
+    check_new_cluster();
+    report_clusters_compatible();
+
+    pg_log(PG_REPORT, "\nPerforming Upgrade\n");
+    pg_log(PG_REPORT, "------------------\n");
+
+    prepare_new_cluster();
+
+    stop_postmaster(false);
+
+    /*
+     * Destructive Changes to New Cluster
+     */
+
+    copy_clog_xlog_xid();
+
+    /* The new cluster is now using the xids of the old system */
+
+    /* -- NEW -- */
+    start_postmaster(&new_cluster, true);
+
+    prepare_new_databases();
+
+    create_new_objects();
+
+    stop_postmaster(false);
+
+    /*
+     * Most failures happen in
create_new_objects(), which has completed at + * this point. We do this here because it is just before linking, which + * will link the old and new cluster data files, preventing the old + * cluster from being safely started once the new cluster is started. + */ + if (user_opts.transfer_mode == TRANSFER_MODE_LINK) + disable_old_cluster(); + + transfer_all_new_tablespaces(&old_cluster.dbarr, &new_cluster.dbarr, + old_cluster.pgdata, new_cluster.pgdata); + + /* + * Assuming OIDs are only used in system tables, there is no need to + * restore the OID counter because we have not transferred any OIDs from + * the old system, but we do it anyway just in case. We do it late here + * because there is no need to have the schema load use new oids. + */ + prep_status("Setting next OID for new cluster"); + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/pg_resetxlog\" -o %u \"%s\"", + new_cluster.bindir, old_cluster.controldata.chkpnt_nxtoid, + new_cluster.pgdata); + check_ok(); + + prep_status("Sync data directory to disk"); + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/initdb\" --sync-only \"%s\"", new_cluster.bindir, + new_cluster.pgdata); + check_ok(); + + create_script_for_cluster_analyze(&analyze_script_file_name); + create_script_for_old_cluster_deletion(&deletion_script_file_name); + + issue_warnings(); + + pg_log(PG_REPORT, "\nUpgrade Complete\n"); + pg_log(PG_REPORT, "----------------\n"); + + output_completion_banner(analyze_script_file_name, + deletion_script_file_name); + + pg_free(analyze_script_file_name); + pg_free(deletion_script_file_name); + + cleanup(); + + return 0; +} + + +static void +setup(char *argv0, bool *live_check) +{ + char exec_path[MAXPGPATH]; /* full path to my executable */ + + /* + * make sure the user has a clean environment, otherwise, we may confuse + * libpq when we connect to one (or both) of the servers. + */ + check_pghost_envvar(); + + verify_directories(); + + /* no postmasters should be running, except for a live check */ + if (pid_lock_file_exists(old_cluster.pgdata)) + { + /* + * If we have a postmaster.pid file, try to start the server. If it + * starts, the pid file was stale, so stop the server. If it doesn't + * start, assume the server is running. If the pid file is left over + * from a server crash, this also allows any committed transactions + * stored in the WAL to be replayed so they are not lost, because WAL + * files are not transfered from old to new servers. 
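+     * Roughly, this probe is equivalent to running (paths hypothetical):
+     *
+     *     pg_ctl -D "$PGDATAOLD" -w start && pg_ctl -D "$PGDATAOLD" -m fast stop
+     *
+     * If the start succeeds, the pid file was stale; if it fails, we assume
+     * a live postmaster and allow that only in --check mode.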
+ */ + if (start_postmaster(&old_cluster, false)) + stop_postmaster(false); + else + { + if (!user_opts.check) + pg_fatal("There seems to be a postmaster servicing the old cluster.\n" + "Please shutdown that postmaster and try again.\n"); + else + *live_check = true; + } + } + + /* same goes for the new postmaster */ + if (pid_lock_file_exists(new_cluster.pgdata)) + { + if (start_postmaster(&new_cluster, false)) + stop_postmaster(false); + else + pg_fatal("There seems to be a postmaster servicing the new cluster.\n" + "Please shutdown that postmaster and try again.\n"); + } + + /* get path to pg_upgrade executable */ + if (find_my_exec(argv0, exec_path) < 0) + pg_fatal("Could not get path name to pg_upgrade: %s\n", getErrorText(errno)); + + /* Trim off program name and keep just path */ + *last_dir_separator(exec_path) = '\0'; + canonicalize_path(exec_path); + os_info.exec_path = pg_strdup(exec_path); +} + + +static void +prepare_new_cluster(void) +{ + /* + * It would make more sense to freeze after loading the schema, but that + * would cause us to lose the frozenids restored by the load. We use + * --analyze so autovacuum doesn't update statistics later + */ + prep_status("Analyzing all rows in the new cluster"); + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/vacuumdb\" %s --all --analyze %s", + new_cluster.bindir, cluster_conn_opts(&new_cluster), + log_opts.verbose ? "--verbose" : ""); + check_ok(); + + /* + * We do freeze after analyze so pg_statistic is also frozen. template0 is + * not frozen here, but data rows were frozen by initdb, and we set its + * datfrozenxid, relfrozenxids, and relminmxid later to match the new xid + * counter later. + */ + prep_status("Freezing all rows on the new cluster"); + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/vacuumdb\" %s --all --freeze %s", + new_cluster.bindir, cluster_conn_opts(&new_cluster), + log_opts.verbose ? "--verbose" : ""); + check_ok(); + + get_pg_database_relfilenode(&new_cluster); +} + + +static void +prepare_new_databases(void) +{ + /* + * We set autovacuum_freeze_max_age to its maximum value so autovacuum + * does not launch here and delete clog files, before the frozen xids are + * set. + */ + + set_frozenxids(false); + + prep_status("Restoring global objects in the new cluster"); + + /* + * We have to create the databases first so we can install support + * functions in all the other databases. Ideally we could create the + * support functions in template1 but pg_dumpall creates database using + * the template0 template. + */ + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"", + new_cluster.bindir, cluster_conn_opts(&new_cluster), + GLOBALS_DUMP_FILE); + check_ok(); + + /* we load this to get a current list of databases */ + get_db_and_rel_infos(&new_cluster); +} + + +static void +create_new_objects(void) +{ + int dbnum; + + prep_status("Restoring database schemas in the new cluster\n"); + + for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) + { + char sql_file_name[MAXPGPATH], + log_file_name[MAXPGPATH]; + DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum]; + + pg_log(PG_STATUS, "%s", old_db->db_name); + snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid); + snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid); + + /* + * pg_dump only produces its output at the end, so there is little + * parallelism if using the pipe. 
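+         * For a database with OID 16384 (a hypothetical value), the command
+         * built below comes out roughly as:
+         *
+         *     "$PGBINNEW/pg_restore" --port 50432 --username postgres \
+         *         --exit-on-error --verbose --dbname mydb pg_upgrade_dump_16384.custom
+         *
+         * where the connection options come from cluster_conn_opts() and the
+         * file name from DB_DUMP_FILE_MASK; names here are illustrative only.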
+ */ + parallel_exec_prog(log_file_name, + NULL, + "\"%s/pg_restore\" %s --exit-on-error --verbose --dbname \"%s\" \"%s\"", + new_cluster.bindir, + cluster_conn_opts(&new_cluster), + old_db->db_name, + sql_file_name); + } + + /* reap all children */ + while (reap_child(true) == true) + ; + + end_progress_output(); + check_ok(); + + /* + * We don't have minmxids for databases or relations in pre-9.3 + * clusters, so set those after we have restores the schemas. + */ + if (GET_MAJOR_VERSION(old_cluster.major_version) < 903) + set_frozenxids(true); + + optionally_create_toast_tables(); + + /* regenerate now that we have objects in the databases */ + get_db_and_rel_infos(&new_cluster); +} + +/* + * Delete the given subdirectory contents from the new cluster + */ +static void +remove_new_subdir(char *subdir, bool rmtopdir) +{ + char new_path[MAXPGPATH]; + + prep_status("Deleting files from new %s", subdir); + + snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, subdir); + if (!rmtree(new_path, rmtopdir)) + pg_fatal("could not delete directory \"%s\"\n", new_path); + + check_ok(); +} + +/* + * Copy the files from the old cluster into it + */ +static void +copy_subdir_files(char *subdir) +{ + char old_path[MAXPGPATH]; + char new_path[MAXPGPATH]; + + remove_new_subdir(subdir, true); + + snprintf(old_path, sizeof(old_path), "%s/%s", old_cluster.pgdata, subdir); + snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, subdir); + + prep_status("Copying old %s to new server", subdir); + + exec_prog(UTILITY_LOG_FILE, NULL, true, +#ifndef WIN32 + "cp -Rf \"%s\" \"%s\"", +#else + /* flags: everything, no confirm, quiet, overwrite read-only */ + "xcopy /e /y /q /r \"%s\" \"%s\\\"", +#endif + old_path, new_path); + + check_ok(); +} + +static void +copy_clog_xlog_xid(void) +{ + /* copy old commit logs to new data dir */ + copy_subdir_files("pg_clog"); + + /* set the next transaction id and epoch of the new cluster */ + prep_status("Setting next transaction ID and epoch for new cluster"); + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/pg_resetxlog\" -f -x %u \"%s\"", + new_cluster.bindir, old_cluster.controldata.chkpnt_nxtxid, + new_cluster.pgdata); + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/pg_resetxlog\" -f -e %u \"%s\"", + new_cluster.bindir, old_cluster.controldata.chkpnt_nxtepoch, + new_cluster.pgdata); + /* must reset commit timestamp limits also */ + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/pg_resetxlog\" -f -c %u,%u \"%s\"", + new_cluster.bindir, + old_cluster.controldata.chkpnt_nxtxid, + old_cluster.controldata.chkpnt_nxtxid, + new_cluster.pgdata); + check_ok(); + + /* + * If the old server is before the MULTIXACT_FORMATCHANGE_CAT_VER change + * (see pg_upgrade.h) and the new server is after, then we don't copy + * pg_multixact files, but we need to reset pg_control so that the new + * server doesn't attempt to read multis older than the cutoff value. + */ + if (old_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER && + new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER) + { + copy_subdir_files("pg_multixact/offsets"); + copy_subdir_files("pg_multixact/members"); + + prep_status("Setting next multixact ID and offset for new cluster"); + + /* + * we preserve all files and contents, so we must preserve both "next" + * counters here and the oldest multi present on system. 
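+         * For example (hypothetical counter values): with next-offset 52,
+         * next-multi 4096, and oldest-multi 1 in the old cluster's
+         * pg_control, the call below becomes:
+         *
+         *     "$PGBINNEW/pg_resetxlog" -O 52 -m 4096,1 "$PGDATANEW"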
+ */ + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/pg_resetxlog\" -O %u -m %u,%u \"%s\"", + new_cluster.bindir, + old_cluster.controldata.chkpnt_nxtmxoff, + old_cluster.controldata.chkpnt_nxtmulti, + old_cluster.controldata.chkpnt_oldstMulti, + new_cluster.pgdata); + check_ok(); + } + else if (new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER) + { + /* + * Remove offsets/0000 file created by initdb that no longer matches + * the new multi-xid value. "members" starts at zero so no need to + * remove it. + */ + remove_new_subdir("pg_multixact/offsets", false); + + prep_status("Setting oldest multixact ID on new cluster"); + + /* + * We don't preserve files in this case, but it's important that the + * oldest multi is set to the latest value used by the old system, so + * that multixact.c returns the empty set for multis that might be + * present on disk. We set next multi to the value following that; it + * might end up wrapped around (i.e. 0) if the old cluster had + * next=MaxMultiXactId, but multixact.c can cope with that just fine. + */ + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/pg_resetxlog\" -m %u,%u \"%s\"", + new_cluster.bindir, + old_cluster.controldata.chkpnt_nxtmulti + 1, + old_cluster.controldata.chkpnt_nxtmulti, + new_cluster.pgdata); + check_ok(); + } + + /* now reset the wal archives in the new cluster */ + prep_status("Resetting WAL archives"); + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/pg_resetxlog\" -l %s \"%s\"", new_cluster.bindir, + old_cluster.controldata.nextxlogfile, + new_cluster.pgdata); + check_ok(); +} + + +/* + * set_frozenxids() + * + * We have frozen all xids, so set datfrozenxid, relfrozenxid, and + * relminmxid to be the old cluster's xid counter, which we just set + * in the new cluster. User-table frozenxid and minmxid values will + * be set by pg_dump --binary-upgrade, but objects not set by the pg_dump + * must have proper frozen counters. + */ +static +void +set_frozenxids(bool minmxid_only) +{ + int dbnum; + PGconn *conn, + *conn_template1; + PGresult *dbres; + int ntups; + int i_datname; + int i_datallowconn; + + if (!minmxid_only) + prep_status("Setting frozenxid and minmxid counters in new cluster"); + else + prep_status("Setting minmxid counter in new cluster"); + + conn_template1 = connectToServer(&new_cluster, "template1"); + + if (!minmxid_only) + /* set pg_database.datfrozenxid */ + PQclear(executeQueryOrDie(conn_template1, + "UPDATE pg_catalog.pg_database " + "SET datfrozenxid = '%u'", + old_cluster.controldata.chkpnt_nxtxid)); + + /* set pg_database.datminmxid */ + PQclear(executeQueryOrDie(conn_template1, + "UPDATE pg_catalog.pg_database " + "SET datminmxid = '%u'", + old_cluster.controldata.chkpnt_nxtmulti)); + + /* get database names */ + dbres = executeQueryOrDie(conn_template1, + "SELECT datname, datallowconn " + "FROM pg_catalog.pg_database"); + + i_datname = PQfnumber(dbres, "datname"); + i_datallowconn = PQfnumber(dbres, "datallowconn"); + + ntups = PQntuples(dbres); + for (dbnum = 0; dbnum < ntups; dbnum++) + { + char *datname = PQgetvalue(dbres, dbnum, i_datname); + char *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn); + + /* + * We must update databases where datallowconn = false, e.g. + * template0, because autovacuum increments their datfrozenxids, + * relfrozenxids, and relminmxid even if autovacuum is turned off, + * and even though all the data rows are already frozen To enable + * this, we temporarily change datallowconn. 
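+         * In effect, for such a database (template0 being the usual case)
+         * the loop below runs:
+         *
+         *     ALTER DATABASE template0 ALLOW_CONNECTIONS = true;
+         *     -- connect and UPDATE pg_class SET relfrozenxid/relminmxid ...
+         *     ALTER DATABASE template0 ALLOW_CONNECTIONS = false;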
+         */
+        if (strcmp(datallowconn, "f") == 0)
+            PQclear(executeQueryOrDie(conn_template1,
+                                      "ALTER DATABASE %s ALLOW_CONNECTIONS = true",
+                                      quote_identifier(datname)));
+
+        conn = connectToServer(&new_cluster, datname);
+
+        if (!minmxid_only)
+            /* set pg_class.relfrozenxid */
+            PQclear(executeQueryOrDie(conn,
+                                      "UPDATE pg_catalog.pg_class "
+                                      "SET relfrozenxid = '%u' "
+            /* only heap, materialized view, and TOAST are vacuumed */
+                                      "WHERE relkind IN ('r', 'm', 't')",
+                                      old_cluster.controldata.chkpnt_nxtxid));
+
+        /* set pg_class.relminmxid */
+        PQclear(executeQueryOrDie(conn,
+                                  "UPDATE pg_catalog.pg_class "
+                                  "SET relminmxid = '%u' "
+        /* only heap, materialized view, and TOAST are vacuumed */
+                                  "WHERE relkind IN ('r', 'm', 't')",
+                                  old_cluster.controldata.chkpnt_nxtmulti));
+        PQfinish(conn);
+
+        /* Reset datallowconn flag */
+        if (strcmp(datallowconn, "f") == 0)
+            PQclear(executeQueryOrDie(conn_template1,
+                                      "ALTER DATABASE %s ALLOW_CONNECTIONS = false",
+                                      quote_identifier(datname)));
+    }
+
+    PQclear(dbres);
+
+    PQfinish(conn_template1);
+
+    check_ok();
+}
+
+
+static void
+cleanup(void)
+{
+    fclose(log_opts.internal);
+
+    /* Remove dump and log files? */
+    if (!log_opts.retain)
+    {
+        int    dbnum;
+        char **filename;
+
+        for (filename = output_files; *filename != NULL; filename++)
+            unlink(*filename);
+
+        /* remove dump files */
+        unlink(GLOBALS_DUMP_FILE);
+
+        if (old_cluster.dbarr.dbs)
+            for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
+            {
+                char sql_file_name[MAXPGPATH],
+                     log_file_name[MAXPGPATH];
+                DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum];
+
+                snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
+                unlink(sql_file_name);
+
+                snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);
+                unlink(log_file_name);
+            }
+    }
+}
diff --git a/src/bin/pg_upgrade/pg_upgrade.h b/src/bin/pg_upgrade/pg_upgrade.h new file mode 100644 index 00000000000..4683c6f71c3 --- /dev/null +++ b/src/bin/pg_upgrade/pg_upgrade.h @@ -0,0 +1,481 @@
+/*
+ * pg_upgrade.h
+ *
+ * Copyright (c) 2010-2015, PostgreSQL Global Development Group
+ * src/bin/pg_upgrade/pg_upgrade.h
+ */
+
+#include <unistd.h>
+#include <assert.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+
+#include "libpq-fe.h"
+
+/* Use port in the private/dynamic port number range */
+#define DEF_PGUPORT 50432
+
+/* Allocate for null byte */
+#define USER_NAME_SIZE 128
+
+#define MAX_STRING 1024
+#define LINE_ALLOC 4096
+#define QUERY_ALLOC 8192
+
+#define MIGRATOR_API_VERSION 1
+
+#define MESSAGE_WIDTH 60
+
+#define GET_MAJOR_VERSION(v) ((v) / 100)
+
+/* contains both global db information and CREATE DATABASE commands */
+#define GLOBALS_DUMP_FILE "pg_upgrade_dump_globals.sql"
+#define DB_DUMP_FILE_MASK "pg_upgrade_dump_%u.custom"
+
+#define DB_DUMP_LOG_FILE_MASK "pg_upgrade_dump_%u.log"
+#define SERVER_LOG_FILE "pg_upgrade_server.log"
+#define UTILITY_LOG_FILE "pg_upgrade_utility.log"
+#define INTERNAL_LOG_FILE "pg_upgrade_internal.log"
+
+extern char *output_files[];
+
+/*
+ * WIN32 files do not accept writes from multiple processes
+ *
+ * On Win32, we can't send both pg_upgrade output and command output to the
+ * same file because we get the error: "The process cannot access the file
+ * because it is being used by another process." so send the pg_ctl
+ * command-line output to a new file, rather than into the server log file.
+ * Ideally we could use UTILITY_LOG_FILE for this, but some Windows platforms
+ * keep the pg_ctl output file open by the running postmaster, even after
+ * pg_ctl exits.
+ * + * We could use the Windows pgwin32_open() flags to allow shared file + * writes but is unclear how all other tools would use those flags, so + * we just avoid it and log a little differently on Windows; we adjust + * the error message appropriately. + */ +#ifndef WIN32 +#define SERVER_START_LOG_FILE SERVER_LOG_FILE +#define SERVER_STOP_LOG_FILE SERVER_LOG_FILE +#else +#define SERVER_START_LOG_FILE "pg_upgrade_server_start.log" +/* + * "pg_ctl start" keeps SERVER_START_LOG_FILE and SERVER_LOG_FILE open + * while the server is running, so we use UTILITY_LOG_FILE for "pg_ctl + * stop". + */ +#define SERVER_STOP_LOG_FILE UTILITY_LOG_FILE +#endif + + +#ifndef WIN32 +#define pg_copy_file copy_file +#define pg_mv_file rename +#define pg_link_file link +#define PATH_SEPARATOR '/' +#define RM_CMD "rm -f" +#define RMDIR_CMD "rm -rf" +#define SCRIPT_PREFIX "./" +#define SCRIPT_EXT "sh" +#define ECHO_QUOTE "'" +#define ECHO_BLANK "" +#else +#define pg_copy_file CopyFile +#define pg_mv_file pgrename +#define pg_link_file win32_pghardlink +#define PATH_SEPARATOR '\\' +#define RM_CMD "DEL /q" +#define RMDIR_CMD "RMDIR /s/q" +#define SCRIPT_PREFIX "" +#define SCRIPT_EXT "bat" +#define EXE_EXT ".exe" +#define ECHO_QUOTE "" +#define ECHO_BLANK "." +#endif + +#define CLUSTER_NAME(cluster) ((cluster) == &old_cluster ? "old" : \ + (cluster) == &new_cluster ? "new" : "none") + +#define atooid(x) ((Oid) strtoul((x), NULL, 10)) + +/* OID system catalog preservation added during PG 9.0 development */ +#define TABLE_SPACE_SUBDIRS_CAT_VER 201001111 +/* postmaster/postgres -b (binary_upgrade) flag added during PG 9.1 development */ +#define BINARY_UPGRADE_SERVER_FLAG_CAT_VER 201104251 +/* + * Visibility map changed with this 9.2 commit, + * 8f9fe6edce358f7904e0db119416b4d1080a83aa; pick later catalog version. + */ +#define VISIBILITY_MAP_CRASHSAFE_CAT_VER 201107031 + +/* + * pg_multixact format changed in 9.3 commit 0ac5ad5134f2769ccbaefec73844f85, + * ("Improve concurrency of foreign key locking") which also updated catalog + * version to this value. pg_upgrade behavior depends on whether old and new + * server versions are both newer than this, or only the new one is. + */ +#define MULTIXACT_FORMATCHANGE_CAT_VER 201301231 + +/* + * large object chunk size added to pg_controldata, + * commit 5f93c37805e7485488480916b4585e098d3cc883 + */ +#define LARGE_OBJECT_SIZE_PG_CONTROL_VER 942 + +/* + * change in JSONB format during 9.4 beta + */ +#define JSONB_FORMAT_CHANGE_CAT_VER 201409291 + +/* + * Each relation is represented by a relinfo structure. + */ +typedef struct +{ + /* Can't use NAMEDATALEN; not guaranteed to fit on client */ + char *nspname; /* namespace name */ + char *relname; /* relation name */ + Oid reloid; /* relation oid */ + Oid relfilenode; /* relation relfile node */ + /* relation tablespace path, or "" for the cluster default */ + char *tablespace; + bool nsp_alloc; + bool tblsp_alloc; +} RelInfo; + +typedef struct +{ + RelInfo *rels; + int nrels; +} RelInfoArr; + +/* + * The following structure represents a relation mapping. + */ +typedef struct +{ + const char *old_tablespace; + const char *new_tablespace; + const char *old_tablespace_suffix; + const char *new_tablespace_suffix; + Oid old_db_oid; + Oid new_db_oid; + + /* + * old/new relfilenodes might differ for pg_largeobject(_metadata) indexes + * due to VACUUM FULL or REINDEX. Other relfilenodes are preserved. 
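+ * The divergence is easy to reproduce by hand (illustrative session):
+ *
+ *     SELECT oid, relfilenode FROM pg_class WHERE relname = 'pg_largeobject';
+ *     VACUUM FULL pg_largeobject;
+ *     SELECT oid, relfilenode FROM pg_class WHERE relname = 'pg_largeobject';
+ *
+ * The second query reports the same oid but a new relfilenode.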
+ */ + Oid old_relfilenode; + Oid new_relfilenode; + /* the rest are used only for logging and error reporting */ + char *nspname; /* namespaces */ + char *relname; +} FileNameMap; + +/* + * Structure to store database information + */ +typedef struct +{ + Oid db_oid; /* oid of the database */ + char *db_name; /* database name */ + char db_tablespace[MAXPGPATH]; /* database default tablespace + * path */ + char *db_collate; + char *db_ctype; + int db_encoding; + RelInfoArr rel_arr; /* array of all user relinfos */ +} DbInfo; + +typedef struct +{ + DbInfo *dbs; /* array of db infos */ + int ndbs; /* number of db infos */ +} DbInfoArr; + +/* + * The following structure is used to hold pg_control information. + * Rather than using the backend's control structure we use our own + * structure to avoid pg_control version issues between releases. + */ +typedef struct +{ + uint32 ctrl_ver; + uint32 cat_ver; + char nextxlogfile[25]; + uint32 chkpnt_tli; + uint32 chkpnt_nxtxid; + uint32 chkpnt_nxtepoch; + uint32 chkpnt_nxtoid; + uint32 chkpnt_nxtmulti; + uint32 chkpnt_nxtmxoff; + uint32 chkpnt_oldstMulti; + uint32 align; + uint32 blocksz; + uint32 largesz; + uint32 walsz; + uint32 walseg; + uint32 ident; + uint32 index; + uint32 toast; + uint32 large_object; + bool date_is_int; + bool float8_pass_by_value; + bool data_checksum_version; +} ControlData; + +/* + * Enumeration to denote link modes + */ +typedef enum +{ + TRANSFER_MODE_COPY, + TRANSFER_MODE_LINK +} transferMode; + +/* + * Enumeration to denote pg_log modes + */ +typedef enum +{ + PG_VERBOSE, + PG_STATUS, + PG_REPORT, + PG_WARNING, + PG_FATAL +} eLogType; + + +typedef long pgpid_t; + + +/* + * cluster + * + * information about each cluster + */ +typedef struct +{ + ControlData controldata; /* pg_control information */ + DbInfoArr dbarr; /* dbinfos array */ + char *pgdata; /* pathname for cluster's $PGDATA directory */ + char *pgconfig; /* pathname for cluster's config file + * directory */ + char *bindir; /* pathname for cluster's executable directory */ + char *pgopts; /* options to pass to the server, like pg_ctl + * -o */ + char *sockdir; /* directory for Unix Domain socket, if any */ + unsigned short port; /* port number where postmaster is waiting */ + uint32 major_version; /* PG_VERSION of cluster */ + char major_version_str[64]; /* string PG_VERSION of cluster */ + uint32 bin_version; /* version returned from pg_ctl */ + Oid pg_database_oid; /* OID of pg_database relation */ + const char *tablespace_suffix; /* directory specification */ +} ClusterInfo; + + +/* + * LogOpts +*/ +typedef struct +{ + FILE *internal; /* internal log FILE */ + bool verbose; /* TRUE -> be verbose in messages */ + bool retain; /* retain log files on success */ +} LogOpts; + + +/* + * UserOpts +*/ +typedef struct +{ + bool check; /* TRUE -> ask user for permission to make + * changes */ + transferMode transfer_mode; /* copy files or link them? 
*/ + int jobs; +} UserOpts; + + +/* + * OSInfo + */ +typedef struct +{ + const char *progname; /* complete pathname for this program */ + char *exec_path; /* full path to my executable */ + char *user; /* username for clusters */ + bool user_specified; /* user specified on command-line */ + char **old_tablespaces; /* tablespaces */ + int num_old_tablespaces; + char **libraries; /* loadable libraries */ + int num_libraries; + ClusterInfo *running_cluster; +} OSInfo; + + +/* + * Global variables + */ +extern LogOpts log_opts; +extern UserOpts user_opts; +extern ClusterInfo old_cluster, + new_cluster; +extern OSInfo os_info; + + +/* check.c */ + +void output_check_banner(bool live_check); +void check_and_dump_old_cluster(bool live_check); +void check_new_cluster(void); +void report_clusters_compatible(void); +void issue_warnings(void); +void output_completion_banner(char *analyze_script_file_name, + char *deletion_script_file_name); +void check_cluster_versions(void); +void check_cluster_compatibility(bool live_check); +void create_script_for_old_cluster_deletion(char **deletion_script_file_name); +void create_script_for_cluster_analyze(char **analyze_script_file_name); + + +/* controldata.c */ + +void get_control_data(ClusterInfo *cluster, bool live_check); +void check_control_data(ControlData *oldctrl, ControlData *newctrl); +void disable_old_cluster(void); + + +/* dump.c */ + +void generate_old_dump(void); +void optionally_create_toast_tables(void); + + +/* exec.c */ + +#define EXEC_PSQL_ARGS "--echo-queries --set ON_ERROR_STOP=on --no-psqlrc --dbname=template1" + +bool exec_prog(const char *log_file, const char *opt_log_file, + bool throw_error, const char *fmt,...) pg_attribute_printf(4, 5); +void verify_directories(void); +bool pid_lock_file_exists(const char *datadir); + + +/* file.c */ + +#ifdef PAGE_CONVERSION +typedef const char *(*pluginStartup) (uint16 migratorVersion, + uint16 *pluginVersion, uint16 newPageVersion, + uint16 oldPageVersion, void **pluginData); +typedef const char *(*pluginConvertFile) (void *pluginData, + const char *dstName, const char *srcName); +typedef const char *(*pluginConvertPage) (void *pluginData, + const char *dstPage, const char *srcPage); +typedef const char *(*pluginShutdown) (void *pluginData); + +typedef struct +{ + uint16 oldPageVersion; /* Page layout version of the old cluster */ + uint16 newPageVersion; /* Page layout version of the new cluster */ + uint16 pluginVersion; /* API version of converter plugin */ + void *pluginData; /* Plugin data (set by plugin) */ + pluginStartup startup; /* Pointer to plugin's startup function */ + pluginConvertFile convertFile; /* Pointer to plugin's file converter + * function */ + pluginConvertPage convertPage; /* Pointer to plugin's page converter + * function */ + pluginShutdown shutdown; /* Pointer to plugin's shutdown function */ +} pageCnvCtx; + +const pageCnvCtx *setupPageConverter(void); +#else +/* dummy */ +typedef void *pageCnvCtx; +#endif + +const char *copyAndUpdateFile(pageCnvCtx *pageConverter, const char *src, + const char *dst, bool force); +const char *linkAndUpdateFile(pageCnvCtx *pageConverter, const char *src, + const char *dst); + +void check_hard_link(void); +FILE *fopen_priv(const char *path, const char *mode); + +/* function.c */ + +void get_loadable_libraries(void); +void check_loadable_libraries(void); + +/* info.c */ + +FileNameMap *gen_db_file_maps(DbInfo *old_db, + DbInfo *new_db, int *nmaps, const char *old_pgdata, + const char *new_pgdata); +void get_db_and_rel_infos(ClusterInfo 
*cluster);
+void print_maps(FileNameMap *maps, int n,
+                const char *db_name);
+
+/* option.c */
+
+void parseCommandLine(int argc, char *argv[]);
+void adjust_data_dir(ClusterInfo *cluster);
+void get_sock_dir(ClusterInfo *cluster, bool live_check);
+
+/* relfilenode.c */
+
+void get_pg_database_relfilenode(ClusterInfo *cluster);
+void transfer_all_new_tablespaces(DbInfoArr *old_db_arr,
+                                  DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata);
+void transfer_all_new_dbs(DbInfoArr *old_db_arr,
+                          DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata,
+                          char *old_tablespace);
+
+/* tablespace.c */
+
+void init_tablespaces(void);
+
+
+/* server.c */
+
+PGconn *connectToServer(ClusterInfo *cluster, const char *db_name);
+PGresult *executeQueryOrDie(PGconn *conn, const char *fmt,...) pg_attribute_printf(2, 3);
+
+char *cluster_conn_opts(ClusterInfo *cluster);
+
+bool start_postmaster(ClusterInfo *cluster, bool throw_error);
+void stop_postmaster(bool fast);
+uint32 get_major_server_version(ClusterInfo *cluster);
+void check_pghost_envvar(void);
+
+
+/* util.c */
+
+char *quote_identifier(const char *s);
+int get_user_info(char **user_name_p);
+void check_ok(void);
+void report_status(eLogType type, const char *fmt,...) pg_attribute_printf(2, 3);
+void pg_log(eLogType type, const char *fmt,...) pg_attribute_printf(2, 3);
+void pg_fatal(const char *fmt,...) pg_attribute_printf(1, 2) pg_attribute_noreturn();
+void end_progress_output(void);
+void prep_status(const char *fmt,...) pg_attribute_printf(1, 2);
+const char *getErrorText(int errNum);
+unsigned int str2uint(const char *str);
+void pg_putenv(const char *var, const char *val);
+
+
+/* version.c */
+
+void new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster,
+                                              bool check_mode);
+void old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster);
+
+/* parallel.c */
+void parallel_exec_prog(const char *log_file, const char *opt_log_file,
+                        const char *fmt,...) pg_attribute_printf(3, 4);
+void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
+                                   char *old_pgdata, char *new_pgdata,
+                                   char *old_tablespace);
+bool reap_child(bool wait_for_child);
diff --git a/src/bin/pg_upgrade/relfilenode.c b/src/bin/pg_upgrade/relfilenode.c new file mode 100644 index 00000000000..fe058807b68 --- /dev/null +++ b/src/bin/pg_upgrade/relfilenode.c @@ -0,0 +1,294 @@
+/*
+ * relfilenode.c
+ *
+ * relfilenode functions
+ *
+ * Copyright (c) 2010-2015, PostgreSQL Global Development Group
+ * src/bin/pg_upgrade/relfilenode.c
+ */
+
+#include "postgres_fe.h"
+
+#include "pg_upgrade.h"
+
+#include "catalog/pg_class.h"
+#include "access/transam.h"
+
+
+static void transfer_single_new_db(pageCnvCtx *pageConverter,
+                                   FileNameMap *maps, int size, char *old_tablespace);
+static void transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
+                             const char *suffix);
+
+
+/*
+ * transfer_all_new_tablespaces()
+ *
+ * Responsible for upgrading all databases.  Invokes routines to generate
+ * mappings and then physically link the databases.
+ */
+void
+transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
+                             char *old_pgdata, char *new_pgdata)
+{
+    pg_log(PG_REPORT, "%s user relation files\n",
+           user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");
+
+    /*
+     * Transferring files by tablespace is tricky because a single database
+     * can use multiple tablespaces.  For non-parallel mode, we just pass a
+     * NULL tablespace path, which matches all tablespaces.
In parallel mode, we
+     * pass the default tablespace and all user-created tablespaces and let
+     * those operations happen in parallel.
+     */
+    if (user_opts.jobs <= 1)
+        parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
+                                      new_pgdata, NULL);
+    else
+    {
+        int tblnum;
+
+        /* transfer default tablespace */
+        parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
+                                      new_pgdata, old_pgdata);
+
+        for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
+            parallel_transfer_all_new_dbs(old_db_arr,
+                                          new_db_arr,
+                                          old_pgdata,
+                                          new_pgdata,
+                                          os_info.old_tablespaces[tblnum]);
+        /* reap all children */
+        while (reap_child(true) == true)
+            ;
+    }
+
+    end_progress_output();
+    check_ok();
+
+    return;
+}
+
+
+/*
+ * transfer_all_new_dbs()
+ *
+ * Responsible for upgrading all databases.  Invokes routines to generate
+ * mappings and then physically link the databases.
+ */
+void
+transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
+                     char *old_pgdata, char *new_pgdata, char *old_tablespace)
+{
+    int old_dbnum,
+        new_dbnum;
+
+    /* Scan the old cluster databases and transfer their files */
+    for (old_dbnum = new_dbnum = 0;
+         old_dbnum < old_db_arr->ndbs;
+         old_dbnum++, new_dbnum++)
+    {
+        DbInfo *old_db = &old_db_arr->dbs[old_dbnum],
+               *new_db = NULL;
+        FileNameMap *mappings;
+        int          n_maps;
+        pageCnvCtx  *pageConverter = NULL;
+
+        /*
+         * Advance past any databases that exist in the new cluster but not in
+         * the old, e.g. "postgres".  (The user might have removed the
+         * 'postgres' database from the old cluster.)
+         */
+        for (; new_dbnum < new_db_arr->ndbs; new_dbnum++)
+        {
+            new_db = &new_db_arr->dbs[new_dbnum];
+            if (strcmp(old_db->db_name, new_db->db_name) == 0)
+                break;
+        }
+
+        if (new_dbnum >= new_db_arr->ndbs)
+            pg_fatal("old database \"%s\" not found in the new cluster\n",
+                     old_db->db_name);
+
+        mappings = gen_db_file_maps(old_db, new_db, &n_maps, old_pgdata,
+                                    new_pgdata);
+        if (n_maps)
+        {
+            print_maps(mappings, n_maps, new_db->db_name);
+
+#ifdef PAGE_CONVERSION
+            pageConverter = setupPageConverter();
+#endif
+            transfer_single_new_db(pageConverter, mappings, n_maps,
+                                   old_tablespace);
+        }
+        /* We allocate something even for n_maps == 0 */
+        pg_free(mappings);
+    }
+
+    return;
+}
+
+
+/*
+ * get_pg_database_relfilenode()
+ *
+ * Retrieves the relfilenode of the pg_database system catalog.  We need
+ * this relfilenode later in the upgrade process.
+ */
+void
+get_pg_database_relfilenode(ClusterInfo *cluster)
+{
+    PGconn   *conn = connectToServer(cluster, "template1");
+    PGresult *res;
+    int       i_relfile;
+
+    res = executeQueryOrDie(conn,
+                            "SELECT c.relname, c.relfilenode "
+                            "FROM pg_catalog.pg_class c, "
+                            "     pg_catalog.pg_namespace n "
+                            "WHERE c.relnamespace = n.oid AND "
+                            "      n.nspname = 'pg_catalog' AND "
+                            "      c.relname = 'pg_database' "
+                            "ORDER BY c.relname");
+
+    i_relfile = PQfnumber(res, "relfilenode");
+    cluster->pg_database_oid = atooid(PQgetvalue(res, 0, i_relfile));
+
+    PQclear(res);
+    PQfinish(conn);
+}
+
+
+/*
+ * transfer_single_new_db()
+ *
+ * Create links for mappings stored in "maps" array.
+ */
+static void
+transfer_single_new_db(pageCnvCtx *pageConverter,
+                       FileNameMap *maps, int size, char *old_tablespace)
+{
+    int  mapnum;
+    bool vm_crashsafe_match = true;
+
+    /*
+     * Do the old and new cluster disagree on the crash-safety of the vm
+     * files?  If so, do not copy them.
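+     * Concretely (versions hypothetical): upgrading a 9.0 cluster, whose
+     * cat_ver predates VISIBILITY_MAP_CRASHSAFE_CAT_VER, to 9.4 skips the
+     * _vm forks entirely; the new cluster simply rebuilds its visibility
+     * maps during later vacuums.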
+     */
+    if (old_cluster.controldata.cat_ver < VISIBILITY_MAP_CRASHSAFE_CAT_VER &&
+        new_cluster.controldata.cat_ver >= VISIBILITY_MAP_CRASHSAFE_CAT_VER)
+        vm_crashsafe_match = false;
+
+    for (mapnum = 0; mapnum < size; mapnum++)
+    {
+        if (old_tablespace == NULL ||
+            strcmp(maps[mapnum].old_tablespace, old_tablespace) == 0)
+        {
+            /* transfer primary file */
+            transfer_relfile(pageConverter, &maps[mapnum], "");
+
+            /* fsm/vm files added in PG 8.4 */
+            if (GET_MAJOR_VERSION(old_cluster.major_version) >= 804)
+            {
+                /*
+                 * Copy/link any fsm and vm files, if they exist
+                 */
+                transfer_relfile(pageConverter, &maps[mapnum], "_fsm");
+                if (vm_crashsafe_match)
+                    transfer_relfile(pageConverter, &maps[mapnum], "_vm");
+            }
+        }
+    }
+}
+
+
+/*
+ * transfer_relfile()
+ *
+ * Copy or link file from old cluster to new one.
+ */
+static void
+transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
+                 const char *type_suffix)
+{
+    const char *msg;
+    char        old_file[MAXPGPATH];
+    char        new_file[MAXPGPATH];
+    int         fd;
+    int         segno;
+    char        extent_suffix[65];
+
+    /*
+     * Now copy/link any related segments as well.  Remember, PG breaks large
+     * files into 1GB segments; the first segment has no extension, and
+     * subsequent segments are named relfilenode.1, relfilenode.2, etc.
+     */
+    for (segno = 0;; segno++)
+    {
+        if (segno == 0)
+            extent_suffix[0] = '\0';
+        else
+            snprintf(extent_suffix, sizeof(extent_suffix), ".%d", segno);
+
+        snprintf(old_file, sizeof(old_file), "%s%s/%u/%u%s%s",
+                 map->old_tablespace,
+                 map->old_tablespace_suffix,
+                 map->old_db_oid,
+                 map->old_relfilenode,
+                 type_suffix,
+                 extent_suffix);
+        snprintf(new_file, sizeof(new_file), "%s%s/%u/%u%s%s",
+                 map->new_tablespace,
+                 map->new_tablespace_suffix,
+                 map->new_db_oid,
+                 map->new_relfilenode,
+                 type_suffix,
+                 extent_suffix);
+
+        /* Is it an extent, fsm, or vm file? */
+        if (type_suffix[0] != '\0' || segno != 0)
+        {
+            /* Did file open fail? */
+            if ((fd = open(old_file, O_RDONLY, 0)) == -1)
+            {
+                /* File does not exist?  That's OK, just return */
+                if (errno == ENOENT)
+                    return;
+                else
+                    pg_fatal("error while checking for file existence \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
+                             map->nspname, map->relname, old_file, new_file,
+                             getErrorText(errno));
+            }
+            close(fd);
+        }
+
+        unlink(new_file);
+
+        /* Copying files might take some time, so give feedback.
+
+
+/*
+ * transfer_relfile()
+ *
+ * Copy or link file from old cluster to new one.
+ */
+static void
+transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
+				 const char *type_suffix)
+{
+	const char *msg;
+	char		old_file[MAXPGPATH];
+	char		new_file[MAXPGPATH];
+	int			fd;
+	int			segno;
+	char		extent_suffix[65];
+
+	/*
+	 * Now copy/link any related segments as well.  Remember, PG breaks large
+	 * files into 1GB segments: the first segment has no extension, and
+	 * subsequent segments are named relfilenode.1, relfilenode.2,
+	 * relfilenode.3, and so on.
+	 */
+	for (segno = 0;; segno++)
+	{
+		if (segno == 0)
+			extent_suffix[0] = '\0';
+		else
+			snprintf(extent_suffix, sizeof(extent_suffix), ".%d", segno);
+
+		snprintf(old_file, sizeof(old_file), "%s%s/%u/%u%s%s",
+				 map->old_tablespace,
+				 map->old_tablespace_suffix,
+				 map->old_db_oid,
+				 map->old_relfilenode,
+				 type_suffix,
+				 extent_suffix);
+		snprintf(new_file, sizeof(new_file), "%s%s/%u/%u%s%s",
+				 map->new_tablespace,
+				 map->new_tablespace_suffix,
+				 map->new_db_oid,
+				 map->new_relfilenode,
+				 type_suffix,
+				 extent_suffix);
+
+		/* Is it an extent, fsm, or vm file? */
+		if (type_suffix[0] != '\0' || segno != 0)
+		{
+			/* Did file open fail? */
+			if ((fd = open(old_file, O_RDONLY, 0)) == -1)
+			{
+				/* File does not exist?  That's OK, just return */
+				if (errno == ENOENT)
+					return;
+				else
+					pg_fatal("error while checking for file existence \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
+							 map->nspname, map->relname, old_file, new_file,
+							 getErrorText(errno));
+			}
+			close(fd);
+		}
+
+		unlink(new_file);
+
+		/* Copying files might take some time, so give feedback. */
+		pg_log(PG_STATUS, "%s", old_file);
+
+		if ((user_opts.transfer_mode == TRANSFER_MODE_LINK) && (pageConverter != NULL))
+			pg_fatal("This upgrade requires page-by-page conversion; "
+					 "you must use copy mode instead of link mode.\n");
+
+		if (user_opts.transfer_mode == TRANSFER_MODE_COPY)
+		{
+			pg_log(PG_VERBOSE, "copying \"%s\" to \"%s\"\n", old_file, new_file);
+
+			if ((msg = copyAndUpdateFile(pageConverter, old_file, new_file, true)) != NULL)
+				pg_fatal("error while copying relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
+						 map->nspname, map->relname, old_file, new_file, msg);
+		}
+		else
+		{
+			pg_log(PG_VERBOSE, "linking \"%s\" to \"%s\"\n", old_file, new_file);
+
+			if ((msg = linkAndUpdateFile(pageConverter, old_file, new_file)) != NULL)
+				pg_fatal("error while creating link for relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
+						 map->nspname, map->relname, old_file, new_file, msg);
+		}
+	}
+
+	return;
+}
diff --git a/src/bin/pg_upgrade/server.c b/src/bin/pg_upgrade/server.c
new file mode 100644
index 00000000000..8d8e7d70734
--- /dev/null
+++ b/src/bin/pg_upgrade/server.c
@@ -0,0 +1,350 @@
+/*
+ * server.c
+ *
+ * database server functions
+ *
+ * Copyright (c) 2010-2015, PostgreSQL Global Development Group
+ * src/bin/pg_upgrade/server.c
+ */
+
+#include "postgres_fe.h"
+
+#include "pg_upgrade.h"
+
+
+static PGconn *get_db_conn(ClusterInfo *cluster, const char *db_name);
+
+
+/*
+ * connectToServer()
+ *
+ * Connects to the desired database on the designated server.
+ * If the connection attempt fails, this function logs an error
+ * message and calls exit() to kill the program.
+ */
+PGconn *
+connectToServer(ClusterInfo *cluster, const char *db_name)
+{
+	PGconn	   *conn = get_db_conn(cluster, db_name);
+
+	if (conn == NULL || PQstatus(conn) != CONNECTION_OK)
+	{
+		pg_log(PG_REPORT, "connection to database failed: %s\n",
+			   PQerrorMessage(conn));
+
+		if (conn)
+			PQfinish(conn);
+
+		printf("Failure, exiting\n");
+		exit(1);
+	}
+
+	return conn;
+}
+
+
+/*
+ * get_db_conn()
+ *
+ * get database connection, using named database + standard params for cluster
+ */
+static PGconn *
+get_db_conn(ClusterInfo *cluster, const char *db_name)
+{
+	char		conn_opts[2 * NAMEDATALEN + MAXPGPATH + 100];
+
+	if (cluster->sockdir)
+		snprintf(conn_opts, sizeof(conn_opts),
+				 "dbname = '%s' user = '%s' host = '%s' port = %d",
+				 db_name, os_info.user, cluster->sockdir, cluster->port);
+	else
+		snprintf(conn_opts, sizeof(conn_opts),
+				 "dbname = '%s' user = '%s' port = %d",
+				 db_name, os_info.user, cluster->port);
+
+	return PQconnectdb(conn_opts);
+}
+
+
+/*
+ * cluster_conn_opts()
+ *
+ * Return standard command-line options for connecting to this cluster when
+ * using psql, pg_dump, etc.  Ideally this would match what get_db_conn()
+ * sets, but the utilities we need aren't very consistent about the treatment
+ * of database name options, so we leave that out.
+ *
+ * Note result is in static storage, so use it right away.
+ */
+char *
+cluster_conn_opts(ClusterInfo *cluster)
+{
+	static char conn_opts[MAXPGPATH + NAMEDATALEN + 100];
+
+	if (cluster->sockdir)
+		snprintf(conn_opts, sizeof(conn_opts),
+				 "--host \"%s\" --port %d --username \"%s\"",
+				 cluster->sockdir, cluster->port, os_info.user);
+	else
+		snprintf(conn_opts, sizeof(conn_opts),
+				 "--port %d --username \"%s\"",
+				 cluster->port, os_info.user);
+
+	return conn_opts;
+}
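
As a rough standalone sketch (hardcoded sample values, no libpq), this is the shape of the keyword/value string get_db_conn() hands to PQconnectdb(); the host parameter appears only when the cluster uses a private socket directory:

    #include <stdio.h>

    int
    main(void)
    {
        char        conn_opts[256];
        const char *sockdir = "/tmp/pg_upgrade.XXXX";   /* hypothetical; NULL if none */

        if (sockdir)
            snprintf(conn_opts, sizeof(conn_opts),
                     "dbname = '%s' user = '%s' host = '%s' port = %d",
                     "template1", "postgres", sockdir, 50432);
        else
            snprintf(conn_opts, sizeof(conn_opts),
                     "dbname = '%s' user = '%s' port = %d",
                     "template1", "postgres", 50432);

        printf("%s\n", conn_opts);
        return 0;
    }
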
+
+
+/*
+ * executeQueryOrDie()
+ *
+ * Formats a query string from the given arguments and executes the
+ * resulting query.  If the query fails, this function logs an error
+ * message and calls exit() to kill the program.
+ */
+PGresult *
+executeQueryOrDie(PGconn *conn, const char *fmt,...)
+{
+	static char query[QUERY_ALLOC];
+	va_list		args;
+	PGresult   *result;
+	ExecStatusType status;
+
+	va_start(args, fmt);
+	vsnprintf(query, sizeof(query), fmt, args);
+	va_end(args);
+
+	pg_log(PG_VERBOSE, "executing: %s\n", query);
+	result = PQexec(conn, query);
+	status = PQresultStatus(result);
+
+	if ((status != PGRES_TUPLES_OK) && (status != PGRES_COMMAND_OK))
+	{
+		pg_log(PG_REPORT, "SQL command failed\n%s\n%s\n", query,
+			   PQerrorMessage(conn));
+		PQclear(result);
+		PQfinish(conn);
+		printf("Failure, exiting\n");
+		exit(1);
+	}
+	else
+		return result;
+}
+
+
+/*
+ * get_major_server_version()
+ *
+ * gets the version (in unsigned int form) for the given datadir.  Assumes
+ * that datadir is an absolute path to a valid pgdata directory.  The version
+ * is retrieved by reading the PG_VERSION file.
+ */
+uint32
+get_major_server_version(ClusterInfo *cluster)
+{
+	FILE	   *version_fd;
+	char		ver_filename[MAXPGPATH];
+	int			integer_version = 0;
+	int			fractional_version = 0;
+
+	snprintf(ver_filename, sizeof(ver_filename), "%s/PG_VERSION",
+			 cluster->pgdata);
+	if ((version_fd = fopen(ver_filename, "r")) == NULL)
+		pg_fatal("could not open version file: %s\n", ver_filename);
+
+	if (fscanf(version_fd, "%63s", cluster->major_version_str) == 0 ||
+		sscanf(cluster->major_version_str, "%d.%d", &integer_version,
+			   &fractional_version) != 2)
+		pg_fatal("could not get version from %s\n", cluster->pgdata);
+
+	fclose(version_fd);
+
+	return (100 * integer_version + fractional_version) * 100;
+}
+
+
+static void
+stop_postmaster_atexit(void)
+{
+	stop_postmaster(true);
+}
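
The return value of get_major_server_version() packs the major.minor pair read from PG_VERSION into a single integer, so a 9.4 data directory yields 90400. A worked example of just that arithmetic:

    #include <stdio.h>

    /* same arithmetic as get_major_server_version(): "9.4" -> 90400 */
    static unsigned int
    version_to_number(int integer_version, int fractional_version)
    {
        return (100 * integer_version + fractional_version) * 100;
    }

    int
    main(void)
    {
        printf("%u\n", version_to_number(9, 4));    /* (100*9+4)*100 = 90400 */
        printf("%u\n", version_to_number(8, 4));    /* 80400 */
        return 0;
    }
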
+
+
+bool
+start_postmaster(ClusterInfo *cluster, bool throw_error)
+{
+	char		cmd[MAXPGPATH * 4 + 1000];
+	PGconn	   *conn;
+	bool		exit_hook_registered = false;
+	bool		pg_ctl_return = false;
+	char		socket_string[MAXPGPATH + 200];
+
+	if (!exit_hook_registered)
+	{
+		atexit(stop_postmaster_atexit);
+		exit_hook_registered = true;
+	}
+
+	socket_string[0] = '\0';
+
+#ifdef HAVE_UNIX_SOCKETS
+	/* prevent TCP/IP connections, restrict socket access */
+	strcat(socket_string,
+		   " -c listen_addresses='' -c unix_socket_permissions=0700");
+
+	/* Have a sockdir?  Tell the postmaster. */
+	if (cluster->sockdir)
+		snprintf(socket_string + strlen(socket_string),
+				 sizeof(socket_string) - strlen(socket_string),
+				 " -c %s='%s'",
+				 (GET_MAJOR_VERSION(cluster->major_version) < 903) ?
+				 "unix_socket_directory" : "unix_socket_directories",
+				 cluster->sockdir);
+#endif
+
+	/*
+	 * Since PG 9.1, we have used -b to disable autovacuum.  For earlier
+	 * releases, setting autovacuum=off disables cleanup vacuum and analyze,
+	 * but freeze vacuums can still happen, so we set
+	 * autovacuum_freeze_max_age to its maximum.
+	 * (autovacuum_multixact_freeze_max_age was introduced after 9.1, so
+	 * there is no need to set that.)  We assume all datfrozenxid and
+	 * relfrozenxid values are less than a gap of 2000000000 from the current
+	 * xid counter, so autovacuum will not touch them.
+	 *
+	 * Turn off durability requirements to improve object creation speed, and
+	 * we only modify the new cluster, so only use it there.  If there is a
+	 * crash, the new cluster has to be recreated anyway.  fsync=off is a big
+	 * win on ext4.
+	 */
+	snprintf(cmd, sizeof(cmd),
+			 "\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" -o \"-p %d%s%s %s%s\" start",
+			 cluster->bindir, SERVER_LOG_FILE, cluster->pgconfig, cluster->port,
+			 (cluster->controldata.cat_ver >=
+			  BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? " -b" :
+			 " -c autovacuum=off -c autovacuum_freeze_max_age=2000000000",
+			 (cluster == &new_cluster) ?
+			 " -c synchronous_commit=off -c fsync=off -c full_page_writes=off" : "",
+			 cluster->pgopts ? cluster->pgopts : "", socket_string);
+
+	/*
+	 * Don't throw an error right away, let connecting throw the error because
+	 * it might supply a reason for the failure.
+	 */
+	pg_ctl_return = exec_prog(SERVER_START_LOG_FILE,
+							  /* pass both file names if they differ */
+							  (strcmp(SERVER_LOG_FILE,
+									  SERVER_START_LOG_FILE) != 0) ?
+							  SERVER_LOG_FILE : NULL,
+							  false,
+							  "%s", cmd);
+
+	/* Did it fail and we are just testing if the server could be started? */
+	if (!pg_ctl_return && !throw_error)
+		return false;
+
+	/*
+	 * We set this here to make sure atexit() shuts down the server, but only
+	 * if we started the server successfully.  We do it before checking for
+	 * connectivity in case the server started but there is a connectivity
+	 * failure.  If pg_ctl did not return success, we will exit below.
+	 *
+	 * Pre-9.1 servers do not have PQping(), so we could be leaving the server
+	 * running if authentication was misconfigured, so someday we might want
+	 * to be more aggressive about doing server shutdowns even if pg_ctl
+	 * fails, but now (2013-08-14) it seems prudent to be cautious.  We don't
+	 * want to shut down a server that might have been accidentally started
+	 * during the upgrade.
+	 */
+	if (pg_ctl_return)
+		os_info.running_cluster = cluster;
+
+	/*
+	 * pg_ctl -w might have failed because the server couldn't be started, or
+	 * there might have been a connection problem in _checking_ if the server
+	 * has started.  Therefore, even if pg_ctl failed, we continue and test
+	 * for connectivity in case we get a connection reason for the failure.
+	 */
+	if ((conn = get_db_conn(cluster, "template1")) == NULL ||
+		PQstatus(conn) != CONNECTION_OK)
+	{
+		pg_log(PG_REPORT, "\nconnection to database failed: %s\n",
+			   PQerrorMessage(conn));
+		if (conn)
+			PQfinish(conn);
+		pg_fatal("could not connect to %s postmaster started with the command:\n"
+				 "%s\n",
+				 CLUSTER_NAME(cluster), cmd);
+	}
+	PQfinish(conn);
+
+	/*
+	 * If pg_ctl failed, and the connection didn't fail, and throw_error is
+	 * enabled, fail now.  This could happen if the server was already
+	 * running.
+	 */
+	if (!pg_ctl_return)
+		pg_fatal("pg_ctl failed to start the %s server, or connection failed\n",
+				 CLUSTER_NAME(cluster));
+
+	return true;
+}
+
+
+void
+stop_postmaster(bool fast)
+{
+	ClusterInfo *cluster;
+
+	if (os_info.running_cluster == &old_cluster)
+		cluster = &old_cluster;
+	else if (os_info.running_cluster == &new_cluster)
+		cluster = &new_cluster;
+	else
+		return;					/* no cluster running */
+
+	exec_prog(SERVER_STOP_LOG_FILE, NULL, !fast,
+			  "\"%s/pg_ctl\" -w -D \"%s\" -o \"%s\" %s stop",
+			  cluster->bindir, cluster->pgconfig,
+			  cluster->pgopts ? cluster->pgopts : "",
+			  fast ? "-m fast" : "");
+
+	os_info.running_cluster = NULL;
+}
+
+
+/*
+ * check_pghost_envvar()
+ *
+ * Tests that PGHOST does not point to a non-local server
+ */
+void
+check_pghost_envvar(void)
+{
+	PQconninfoOption *option;
+	PQconninfoOption *start;
+
+	/* Get valid libpq env vars from the PQconndefaults function */
+
+	start = PQconndefaults();
+
+	if (!start)
+		pg_fatal("out of memory\n");
+
+	for (option = start; option->keyword != NULL; option++)
+	{
+		if (option->envvar && (strcmp(option->envvar, "PGHOST") == 0 ||
+							   strcmp(option->envvar, "PGHOSTADDR") == 0))
+		{
+			const char *value = getenv(option->envvar);
+
+			if (value && strlen(value) > 0 &&
+			/* check for 'local' host values */
+				(strcmp(value, "localhost") != 0 && strcmp(value, "127.0.0.1") != 0 &&
+				 strcmp(value, "::1") != 0 && value[0] != '/'))
+				pg_fatal("libpq environment variable %s has a non-local server value: %s\n",
+						 option->envvar, value);
+		}
+	}
+
+	/* Free the memory that libpq allocated on our behalf */
+	PQconninfoFree(start);
+}
diff --git a/src/bin/pg_upgrade/tablespace.c b/src/bin/pg_upgrade/tablespace.c
new file mode 100644
index 00000000000..ce7097e71bf
--- /dev/null
+++ b/src/bin/pg_upgrade/tablespace.c
@@ -0,0 +1,124 @@
+/*
+ * tablespace.c
+ *
+ * tablespace functions
+ *
+ * Copyright (c) 2010-2015, PostgreSQL Global Development Group
+ * src/bin/pg_upgrade/tablespace.c
+ */
+
+#include "postgres_fe.h"
+
+#include "pg_upgrade.h"
+
+#include <sys/stat.h>
+
+static void get_tablespace_paths(void);
+static void set_tablespace_directory_suffix(ClusterInfo *cluster);
+
+
+void
+init_tablespaces(void)
+{
+	get_tablespace_paths();
+
+	set_tablespace_directory_suffix(&old_cluster);
+	set_tablespace_directory_suffix(&new_cluster);
+
+	if (os_info.num_old_tablespaces > 0 &&
+		strcmp(old_cluster.tablespace_suffix, new_cluster.tablespace_suffix) == 0)
+		pg_fatal("Cannot upgrade to/from the same system catalog version when\n"
+				 "using tablespaces.\n");
+}
+
+
+/*
+ * get_tablespace_paths()
+ *
+ * Scans pg_tablespace and returns a malloc'ed array of all tablespace
+ * paths.  It's the caller's responsibility to free the array.
+ */
+static void
+get_tablespace_paths(void)
+{
+	PGconn	   *conn = connectToServer(&old_cluster, "template1");
+	PGresult   *res;
+	int			tblnum;
+	int			i_spclocation;
+	char		query[QUERY_ALLOC];
+
+	snprintf(query, sizeof(query),
+			 "SELECT %s "
+			 "FROM pg_catalog.pg_tablespace "
+			 "WHERE spcname != 'pg_default' AND "
+			 "      spcname != 'pg_global'",
+	/* 9.2 removed the spclocation column */
+			 (GET_MAJOR_VERSION(old_cluster.major_version) <= 901) ?
+			 "spclocation" : "pg_catalog.pg_tablespace_location(oid) AS spclocation");
+
+	res = executeQueryOrDie(conn, "%s", query);
+
+	if ((os_info.num_old_tablespaces = PQntuples(res)) != 0)
+		os_info.old_tablespaces = (char **) pg_malloc(
+							os_info.num_old_tablespaces * sizeof(char *));
+	else
+		os_info.old_tablespaces = NULL;
+
+	i_spclocation = PQfnumber(res, "spclocation");
+
+	for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
+	{
+		struct stat statBuf;
+
+		os_info.old_tablespaces[tblnum] = pg_strdup(
+							PQgetvalue(res, tblnum, i_spclocation));
+
+		/*
+		 * Check that the tablespace path exists and is a directory.
+		 * Effectively, this is checking only for tables/indexes in
+		 * non-existent tablespace directories.  Databases located in
+		 * non-existent tablespaces already throw a backend error.
+		 * Non-existent tablespace directories can occur when a data directory
+		 * that contains user tablespaces is moved as part of pg_upgrade
+		 * preparation and the symbolic links are not updated.
+		 */
+		if (stat(os_info.old_tablespaces[tblnum], &statBuf) != 0)
+		{
+			if (errno == ENOENT)
+				report_status(PG_FATAL,
+							  "tablespace directory \"%s\" does not exist\n",
+							  os_info.old_tablespaces[tblnum]);
+			else
+				report_status(PG_FATAL,
+							  "cannot stat() tablespace directory \"%s\": %s\n",
+							  os_info.old_tablespaces[tblnum], getErrorText(errno));
+		}
+		if (!S_ISDIR(statBuf.st_mode))
+			report_status(PG_FATAL,
+						  "tablespace path \"%s\" is not a directory\n",
+						  os_info.old_tablespaces[tblnum]);
+	}
+
+	PQclear(res);
+
+	PQfinish(conn);
+
+	return;
+}
+
+
+static void
+set_tablespace_directory_suffix(ClusterInfo *cluster)
+{
+	if (GET_MAJOR_VERSION(cluster->major_version) <= 804)
+		cluster->tablespace_suffix = pg_strdup("");
+	else
+	{
+		/* This cluster has a version-specific subdirectory */
+
+		/* The leading slash is needed to start a new directory. */
+		cluster->tablespace_suffix = psprintf("/PG_%s_%d",
+											  cluster->major_version_str,
+											  cluster->controldata.cat_ver);
+	}
+}
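
set_tablespace_directory_suffix() above produces, for 8.5 and later, a per-version subdirectory suffix. A standalone sketch of the resulting string; the catalog version number here is illustrative, not necessarily a real one:

    #include <stdio.h>

    int
    main(void)
    {
        char         suffix[64];
        const char  *major_version_str = "9.4";     /* hypothetical cluster */
        unsigned int cat_ver = 201409291;           /* illustrative catversion */

        snprintf(suffix, sizeof(suffix), "/PG_%s_%u", major_version_str, cat_ver);
        printf("%s\n", suffix);     /* -> /PG_9.4_201409291 */
        return 0;
    }
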
diff --git a/src/bin/pg_upgrade/test.sh b/src/bin/pg_upgrade/test.sh
new file mode 100644
index 00000000000..0903f30b119
--- /dev/null
+++ b/src/bin/pg_upgrade/test.sh
@@ -0,0 +1,224 @@
+#!/bin/sh
+
+# src/bin/pg_upgrade/test.sh
+#
+# Test driver for pg_upgrade.  Initializes a new database cluster,
+# runs the regression tests (to put in some data), runs pg_dumpall,
+# runs pg_upgrade, runs pg_dumpall again, compares the dumps.
+#
+# Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+# Portions Copyright (c) 1994, Regents of the University of California
+
+set -e
+
+: ${MAKE=make}
+
+# Guard against parallel make issues (see comments in pg_regress.c)
+unset MAKEFLAGS
+unset MAKELEVEL
+
+# Run a given "initdb" binary and overlay the regression testing
+# authentication configuration.
+standard_initdb() {
+	"$1" -N
+	../../test/regress/pg_regress --config-auth "$PGDATA"
+}
+
+# Establish how the server will listen for connections
+testhost=`uname -s`
+
+case $testhost in
+	MINGW*)
+		LISTEN_ADDRESSES="localhost"
+		PGHOST=localhost
+		;;
+	*)
+		LISTEN_ADDRESSES=""
+		# Select a socket directory.  The algorithm is from the "configure"
+		# script; the outcome mimics pg_regress.c:make_temp_sockdir().
+		PGHOST=$PG_REGRESS_SOCK_DIR
+		if [ "x$PGHOST" = x ]; then
+			{
+				dir=`(umask 077 &&
+					  mktemp -d /tmp/pg_upgrade_check-XXXXXX) 2>/dev/null` &&
+				[ -d "$dir" ]
+			} ||
+			{
+				dir=/tmp/pg_upgrade_check-$$-$RANDOM
+				(umask 077 && mkdir "$dir")
+			} ||
+			{
+				echo "could not create socket temporary directory in \"/tmp\""
+				exit 1
+			}
+
+			PGHOST=$dir
+			trap 'rm -rf "$PGHOST"' 0
+			trap 'exit 3' 1 2 13 15
+		fi
+		;;
+esac
+
+POSTMASTER_OPTS="-F -c listen_addresses=$LISTEN_ADDRESSES -k \"$PGHOST\""
+export PGHOST
+
+temp_root=$PWD/tmp_check
+
+if [ "$1" = '--install' ]; then
+	temp_install=$temp_root/install
+	bindir=$temp_install/$bindir
+	libdir=$temp_install/$libdir
+
+	"$MAKE" -s -C ../.. install DESTDIR="$temp_install"
+	"$MAKE" -s -C . install DESTDIR="$temp_install"
+
+	# platform-specific magic to find the shared libraries; see pg_regress.c
+	LD_LIBRARY_PATH=$libdir:$LD_LIBRARY_PATH
+	export LD_LIBRARY_PATH
+	DYLD_LIBRARY_PATH=$libdir:$DYLD_LIBRARY_PATH
+	export DYLD_LIBRARY_PATH
+	LIBPATH=$libdir:$LIBPATH
+	export LIBPATH
+	PATH=$libdir:$PATH
+
+	# We need to make it use psql from our temporary installation,
+	# because otherwise the installcheck run below would try to
+	# use psql from the proper installation directory, which might
+	# be outdated or missing. But don't override anything else that's
+	# already in EXTRA_REGRESS_OPTS.
+	EXTRA_REGRESS_OPTS="$EXTRA_REGRESS_OPTS --psqldir='$bindir'"
+	export EXTRA_REGRESS_OPTS
+fi
+
+: ${oldbindir=$bindir}
+
+: ${oldsrc=../../..}
+oldsrc=`cd "$oldsrc" && pwd`
+newsrc=`cd ../../.. && pwd`
+
+PATH=$bindir:$PATH
+export PATH
+
+BASE_PGDATA=$temp_root/data
+PGDATA="$BASE_PGDATA.old"
+export PGDATA
+rm -rf "$BASE_PGDATA" "$PGDATA"
+
+logdir=$PWD/log
+rm -rf "$logdir"
+mkdir "$logdir"
+
+# Clear out any environment vars that might cause libpq to connect to
+# the wrong postmaster (cf pg_regress.c)
+#
+# Some shells, such as NetBSD's, return non-zero from unset if the variable
+# is already unset.  Since we are operating under 'set -e', this causes the
+# script to fail.  To guard against this, set them all to an empty string first.
+PGDATABASE="";        unset PGDATABASE
+PGUSER="";            unset PGUSER
+PGSERVICE="";         unset PGSERVICE
+PGSSLMODE="";         unset PGSSLMODE
+PGREQUIRESSL="";      unset PGREQUIRESSL
+PGCONNECT_TIMEOUT=""; unset PGCONNECT_TIMEOUT
+PGHOSTADDR="";        unset PGHOSTADDR
+
+# Select a non-conflicting port number, similarly to pg_regress.c
+PG_VERSION_NUM=`grep '#define PG_VERSION_NUM' "$newsrc"/src/include/pg_config.h | awk '{print $3}'`
+PGPORT=`expr $PG_VERSION_NUM % 16384 + 49152`
+export PGPORT
+
+i=0
+while psql -X postgres </dev/null 2>/dev/null
+do
+	i=`expr $i + 1`
+	if [ $i -eq 16 ]
+	then
+		echo port $PGPORT apparently in use
+		exit 1
+	fi
+	PGPORT=`expr $PGPORT + 1`
+	export PGPORT
+done
+
+# buildfarm may try to override port via EXTRA_REGRESS_OPTS ...
+EXTRA_REGRESS_OPTS="$EXTRA_REGRESS_OPTS --port=$PGPORT"
+export EXTRA_REGRESS_OPTS
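
The port selection above keeps the starting port inside the dynamic/private range (49152 through 65535) while varying it per server version, similarly to pg_regress.c. The same arithmetic worked through in C:

    #include <stdio.h>

    int
    main(void)
    {
        int pg_version_num = 90500;     /* e.g. a hypothetical 9.5 build */
        int port = pg_version_num % 16384 + 49152;

        /* 90500 % 16384 = 8580; 8580 + 49152 = 57732 */
        printf("PGPORT=%d\n", port);
        return 0;
    }
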
+
+# enable echo so the user can see what is being executed
+set -x
+
+standard_initdb "$oldbindir"/initdb
+"$oldbindir"/pg_ctl start -l "$logdir/postmaster1.log" -o "$POSTMASTER_OPTS" -w
+if "$MAKE" -C "$oldsrc" installcheck; then
+	pg_dumpall -f "$temp_root"/dump1.sql || pg_dumpall1_status=$?
+	if [ "$newsrc" != "$oldsrc" ]; then
+		oldpgversion=`psql -A -t -d regression -c "SHOW server_version_num"`
+		fix_sql=""
+		case $oldpgversion in
+			804??)
+				fix_sql="UPDATE pg_proc SET probin = replace(probin::text, '$oldsrc', '$newsrc')::bytea WHERE probin LIKE '$oldsrc%'; DROP FUNCTION public.myfunc(integer);"
+				;;
+			900??)
+				fix_sql="SET bytea_output TO escape; UPDATE pg_proc SET probin = replace(probin::text, '$oldsrc', '$newsrc')::bytea WHERE probin LIKE '$oldsrc%';"
+				;;
+			901??)
+				fix_sql="UPDATE pg_proc SET probin = replace(probin, '$oldsrc', '$newsrc') WHERE probin LIKE '$oldsrc%';"
+				;;
+		esac
+		psql -d regression -c "$fix_sql;" || psql_fix_sql_status=$?
+
+		mv "$temp_root"/dump1.sql "$temp_root"/dump1.sql.orig
+		sed "s;$oldsrc;$newsrc;g" "$temp_root"/dump1.sql.orig >"$temp_root"/dump1.sql
+	fi
+else
+	make_installcheck_status=$?
+fi
+"$oldbindir"/pg_ctl -m fast stop
+if [ -n "$make_installcheck_status" ]; then
+	exit 1
+fi
+if [ -n "$psql_fix_sql_status" ]; then
+	exit 1
+fi
+if [ -n "$pg_dumpall1_status" ]; then
+	echo "pg_dumpall of pre-upgrade database cluster failed"
+	exit 1
+fi
+
+PGDATA=$BASE_PGDATA
+
+standard_initdb 'initdb'
+
+pg_upgrade $PG_UPGRADE_OPTS -d "${PGDATA}.old" -D "${PGDATA}" -b "$oldbindir" -B "$bindir" -p "$PGPORT" -P "$PGPORT"
+
+pg_ctl start -l "$logdir/postmaster2.log" -o "$POSTMASTER_OPTS" -w
+
+case $testhost in
+	MINGW*) cmd /c analyze_new_cluster.bat ;;
+	*)      sh ./analyze_new_cluster.sh ;;
+esac
+
+pg_dumpall -f "$temp_root"/dump2.sql || pg_dumpall2_status=$?
+pg_ctl -m fast stop
+
+# no need to echo commands anymore
+set +x
+echo
+
+if [ -n "$pg_dumpall2_status" ]; then
+	echo "pg_dumpall of post-upgrade database cluster failed"
+	exit 1
+fi
+
+case $testhost in
+	MINGW*) cmd /c delete_old_cluster.bat ;;
+	*)      sh ./delete_old_cluster.sh ;;
+esac
+
+if diff -q "$temp_root"/dump1.sql "$temp_root"/dump2.sql; then
+	echo PASSED
+	exit 0
+else
+	echo "dumps were not identical"
+	exit 1
+fi
diff --git a/src/bin/pg_upgrade/util.c b/src/bin/pg_upgrade/util.c
new file mode 100644
index 00000000000..7f328f06444
--- /dev/null
+++ b/src/bin/pg_upgrade/util.c
@@ -0,0 +1,298 @@
+/*
+ * util.c
+ *
+ * utility functions
+ *
+ * Copyright (c) 2010-2015, PostgreSQL Global Development Group
+ * src/bin/pg_upgrade/util.c
+ */
+
+#include "postgres_fe.h"
+
+#include "common/username.h"
+#include "pg_upgrade.h"
+
+#include <signal.h>
+
+
+LogOpts log_opts;
+
+static void pg_log_v(eLogType type, const char *fmt, va_list ap) pg_attribute_printf(2, 0);
+
+
+/*
+ * report_status()
+ *
+ * Displays the result of an operation (ok, failed, error message,...)
+ */
+void
+report_status(eLogType type, const char *fmt,...)
+{
+	va_list		args;
+	char		message[MAX_STRING];
+
+	va_start(args, fmt);
+	vsnprintf(message, sizeof(message), fmt, args);
+	va_end(args);
+
+	pg_log(type, "%s\n", message);
+}
+
+
+/* force blank output for progress display */
+void
+end_progress_output(void)
+{
+	/*
+	 * In case nothing printed; pass a space so gcc doesn't complain about
+	 * empty format string.
+	 */
+	prep_status(" ");
+}
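
These helpers cooperate to print an aligned status column, where each operation's description is padded so the "ok"/"failed" reports line up. A standalone sketch of the padding idea; the width constant here is illustrative, the real MESSAGE_WIDTH is defined in pg_upgrade.h:

    #include <stdio.h>

    #define MESSAGE_WIDTH 60    /* illustrative; see pg_upgrade.h */

    int
    main(void)
    {
        /* left-justify the description, then append the result */
        printf("%-*s", MESSAGE_WIDTH, "Checking cluster versions");
        printf("%s\n", "ok");
        printf("%-*s", MESSAGE_WIDTH, "Checking database connection settings");
        printf("%s\n", "ok");
        return 0;
    }
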
+
+
+/*
+ * prep_status
+ *
+ * Displays a message that describes an operation we are about to begin.
+ * We pad the message out to MESSAGE_WIDTH characters so that all of the
+ * "ok" and "failed" indicators line up nicely.
+ *
+ * A typical sequence would look like this:
+ *		prep_status("about to flarb the next %d files", fileCount);
+ *
+ *		if ((message = flarbFiles(fileCount)) == NULL)
+ *			report_status(PG_REPORT, "ok");
+ *		else
+ *			pg_log(PG_FATAL, "failed - %s\n", message);
+ */
+void
+prep_status(const char *fmt,...)
+{
+	va_list		args;
+	char		message[MAX_STRING];
+
+	va_start(args, fmt);
+	vsnprintf(message, sizeof(message), fmt, args);
+	va_end(args);
+
+	if (strlen(message) > 0 && message[strlen(message) - 1] == '\n')
+		pg_log(PG_REPORT, "%s", message);
+	else
+		/* trim strings that don't end in a newline */
+		pg_log(PG_REPORT, "%-*s", MESSAGE_WIDTH, message);
+}
+
+
+static void
+pg_log_v(eLogType type, const char *fmt, va_list ap)
+{
+	char		message[QUERY_ALLOC];
+
+	vsnprintf(message, sizeof(message), fmt, ap);
+
+	/* PG_VERBOSE and PG_STATUS are only output in verbose mode */
+	/* fopen() on log_opts.internal might have failed, so check it */
+	if (((type != PG_VERBOSE && type != PG_STATUS) || log_opts.verbose) &&
+		log_opts.internal != NULL)
+	{
+		if (type == PG_STATUS)
+			/* status messages need two leading spaces and a newline */
+			fprintf(log_opts.internal, "  %s\n", message);
+		else
+			fprintf(log_opts.internal, "%s", message);
+		fflush(log_opts.internal);
+	}
+
+	switch (type)
+	{
+		case PG_VERBOSE:
+			if (log_opts.verbose)
+				printf("%s", _(message));
+			break;
+
+		case PG_STATUS:
+			/* for output to a display, do leading truncation and append \r */
+			if (isatty(fileno(stdout)))
+				/* -2 because we use a 2-space indent */
+				printf("  %s%-*.*s\r",
+					   /* prefix with "..." if we do leading truncation */
+					   strlen(message) <= MESSAGE_WIDTH - 2 ? "" : "...",
+					   MESSAGE_WIDTH - 2, MESSAGE_WIDTH - 2,
+					   /* optional leading truncation */
+					   strlen(message) <= MESSAGE_WIDTH - 2 ? message :
+					   message + strlen(message) - MESSAGE_WIDTH + 3 + 2);
+			else
+				printf("  %s\n", _(message));
+			break;
+
+		case PG_REPORT:
+		case PG_WARNING:
+			printf("%s", _(message));
+			break;
+
+		case PG_FATAL:
+			printf("\n%s", _(message));
+			printf("Failure, exiting\n");
+			exit(1);
+			break;
+
+		default:
+			break;
+	}
+	fflush(stdout);
+}
+
+
+void
+pg_log(eLogType type, const char *fmt,...)
+{
+	va_list		args;
+
+	va_start(args, fmt);
+	pg_log_v(type, fmt, args);
+	va_end(args);
+}
+
+
+void
+pg_fatal(const char *fmt,...)
+{
+	va_list		args;
+
+	va_start(args, fmt);
+	pg_log_v(PG_FATAL, fmt, args);
+	va_end(args);
+	printf("Failure, exiting\n");
+	exit(1);
+}
+
+
+void
+check_ok(void)
+{
+	/* all seems well */
+	report_status(PG_REPORT, "ok");
+	fflush(stdout);
+}
+
+
+/*
+ * quote_identifier()
+ *		Properly double-quote a SQL identifier.
+ *
+ * The result should be pg_free'd, but most callers don't bother because
+ * memory leakage is not a big deal in this program.
+ */
+char *
+quote_identifier(const char *s)
+{
+	char	   *result = pg_malloc(strlen(s) * 2 + 3);
+	char	   *r = result;
+
+	*r++ = '"';
+	while (*s)
+	{
+		if (*s == '"')
+			*r++ = *s;
+		*r++ = *s;
+		s++;
+	}
+	*r++ = '"';
+	*r++ = '\0';
+
+	return result;
+}
+
+
+/*
+ * get_user_info()
+ */
+int
+get_user_info(char **user_name_p)
+{
+	int			user_id;
+	const char *user_name;
+	char	   *errstr;
+
+#ifndef WIN32
+	user_id = geteuid();
+#else
+	user_id = 1;
+#endif
+
+	user_name = get_user_name(&errstr);
+	if (!user_name)
+		pg_fatal("%s\n", errstr);
+
+	/* make a copy */
+	*user_name_p = pg_strdup(user_name);
+
+	return user_id;
+}
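
quote_identifier() above doubles any embedded double quote and wraps the whole name. A standalone copy of that doubling logic with a worked example (plain malloc stands in for pg_malloc):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *
    quote_ident(const char *s)
    {
        char   *result = malloc(strlen(s) * 2 + 3);
        char   *r = result;

        *r++ = '"';
        while (*s)
        {
            if (*s == '"')
                *r++ = *s;      /* double any embedded quote */
            *r++ = *s;
            s++;
        }
        *r++ = '"';
        *r = '\0';
        return result;
    }

    int
    main(void)
    {
        char   *q = quote_ident("my \"weird\" db");

        printf("%s\n", q);      /* -> "my ""weird"" db" */
        free(q);
        return 0;
    }
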
+
+
+/*
+ * getErrorText()
+ *
+ * Returns the text of the error message for the given error number
+ *
+ * This feature is factored into a separate function because it is
+ * system-dependent.
+ */
+const char *
+getErrorText(int errNum)
+{
+#ifdef WIN32
+	_dosmaperr(GetLastError());
+#endif
+	return pg_strdup(strerror(errNum));
+}
+
+
+/*
+ * str2uint()
+ *
+ * convert string to unsigned int
+ */
+unsigned int
+str2uint(const char *str)
+{
+	return strtoul(str, NULL, 10);
+}
+
+
+/*
+ * pg_putenv()
+ *
+ * This is like putenv(), but takes two arguments.
+ * It also does unsetenv() if val is NULL.
+ */
+void
+pg_putenv(const char *var, const char *val)
+{
+	if (val)
+	{
+#ifndef WIN32
+		char	   *envstr;
+
+		envstr = psprintf("%s=%s", var, val);
+		putenv(envstr);
+
+		/*
+		 * Do not free envstr because it becomes part of the environment on
+		 * some operating systems.  See port/unsetenv.c::unsetenv.
+		 */
+#else
+		SetEnvironmentVariableA(var, val);
+#endif
+	}
+	else
+	{
+#ifndef WIN32
+		unsetenv(var);
+#else
+		SetEnvironmentVariableA(var, "");
+#endif
+	}
+}
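
A standalone sketch of the pg_putenv() contract on POSIX systems, assuming only standard library calls: a non-NULL value sets the variable, NULL removes it, and the "var=value" string handed to putenv() is deliberately never freed because it becomes part of the environment:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void
    put_env(const char *var, const char *val)
    {
        if (val)
        {
            char   *envstr = malloc(strlen(var) + strlen(val) + 2);

            sprintf(envstr, "%s=%s", var, val);
            putenv(envstr);     /* do not free envstr */
        }
        else
            unsetenv(var);
    }

    int
    main(void)
    {
        put_env("PGDATABASE", "template1");
        printf("PGDATABASE=%s\n", getenv("PGDATABASE"));
        put_env("PGDATABASE", NULL);
        printf("PGDATABASE=%s\n",
               getenv("PGDATABASE") ? getenv("PGDATABASE") : "(unset)");
        return 0;
    }
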
diff --git a/src/bin/pg_upgrade/version.c b/src/bin/pg_upgrade/version.c
new file mode 100644
index 00000000000..e3e7387c92d
--- /dev/null
+++ b/src/bin/pg_upgrade/version.c
@@ -0,0 +1,178 @@
+/*
+ * version.c
+ *
+ * Postgres-version-specific routines
+ *
+ * Copyright (c) 2010-2015, PostgreSQL Global Development Group
+ * src/bin/pg_upgrade/version.c
+ */
+
+#include "postgres_fe.h"
+
+#include "pg_upgrade.h"
+
+
+
+/*
+ * new_9_0_populate_pg_largeobject_metadata()
+ *	new >= 9.0, old <= 8.4
+ *	9.0 has a new pg_largeobject permission table
+ */
+void
+new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster, bool check_mode)
+{
+	int			dbnum;
+	FILE	   *script = NULL;
+	bool		found = false;
+	char		output_path[MAXPGPATH];
+
+	prep_status("Checking for large objects");
+
+	snprintf(output_path, sizeof(output_path), "pg_largeobject.sql");
+
+	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
+	{
+		PGresult   *res;
+		int			i_count;
+		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
+		PGconn	   *conn = connectToServer(cluster, active_db->db_name);
+
+		/* find if there are any large objects */
+		res = executeQueryOrDie(conn,
+								"SELECT count(*) "
+								"FROM pg_catalog.pg_largeobject ");
+
+		i_count = PQfnumber(res, "count");
+		if (atoi(PQgetvalue(res, 0, i_count)) != 0)
+		{
+			found = true;
+			if (!check_mode)
+			{
+				if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
+					pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno));
+				fprintf(script, "\\connect %s\n",
+						quote_identifier(active_db->db_name));
+				fprintf(script,
+						"SELECT pg_catalog.lo_create(t.loid)\n"
+						"FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) AS t;\n");
+			}
+		}
+
+		PQclear(res);
+		PQfinish(conn);
+	}
+
+	if (script)
+		fclose(script);
+
+	if (found)
+	{
+		report_status(PG_WARNING, "warning");
+		if (check_mode)
+			pg_log(PG_WARNING, "\n"
+				   "Your installation contains large objects.  The new database has an\n"
+				   "additional large object permission table.  After upgrading, you will be\n"
+				   "given a command to populate the pg_largeobject permission table with\n"
+				   "default permissions.\n\n");
+		else
+			pg_log(PG_WARNING, "\n"
+				   "Your installation contains large objects.  The new database has an\n"
+				   "additional large object permission table, so default permissions must be\n"
+				   "defined for all large objects.  The file\n"
+				   "    %s\n"
+				   "when executed by psql as the database superuser will set the default\n"
+				   "permissions.\n\n",
+				   output_path);
+	}
+	else
+		check_ok();
+}
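
Pieced together from the fprintf() calls above, the generated pg_largeobject.sql would contain the following for a hypothetical database named mydb (quote_identifier() always adds the double quotes):

    \connect "mydb"
    SELECT pg_catalog.lo_create(t.loid)
    FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) AS t;
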
+
+
+/*
+ * old_9_3_check_for_line_data_type_usage()
+ *	9.3 -> 9.4
+ *	Fully implement the 'line' data type in 9.4, which previously returned
+ *	"not enabled" by default and was only functionally enabled with a
+ *	compile-time switch; 9.4 "line" has different binary and text
+ *	representation formats; checks tables and indexes.
+ */
+void
+old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster)
+{
+	int			dbnum;
+	FILE	   *script = NULL;
+	bool		found = false;
+	char		output_path[MAXPGPATH];
+
+	prep_status("Checking for invalid \"line\" user columns");
+
+	snprintf(output_path, sizeof(output_path), "tables_using_line.txt");
+
+	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
+	{
+		PGresult   *res;
+		bool		db_used = false;
+		int			ntups;
+		int			rowno;
+		int			i_nspname,
+					i_relname,
+					i_attname;
+		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
+		PGconn	   *conn = connectToServer(cluster, active_db->db_name);
+
+		res = executeQueryOrDie(conn,
+								"SELECT n.nspname, c.relname, a.attname "
+								"FROM pg_catalog.pg_class c, "
+								"     pg_catalog.pg_namespace n, "
+								"     pg_catalog.pg_attribute a "
+								"WHERE c.oid = a.attrelid AND "
+								"      NOT a.attisdropped AND "
+								"      a.atttypid = 'pg_catalog.line'::pg_catalog.regtype AND "
+								"      c.relnamespace = n.oid AND "
+		/* exclude possible orphaned temp tables */
+								"      n.nspname !~ '^pg_temp_' AND "
+								"      n.nspname !~ '^pg_toast_temp_' AND "
+								"      n.nspname NOT IN ('pg_catalog', 'information_schema')");
+
+		ntups = PQntuples(res);
+		i_nspname = PQfnumber(res, "nspname");
+		i_relname = PQfnumber(res, "relname");
+		i_attname = PQfnumber(res, "attname");
+		for (rowno = 0; rowno < ntups; rowno++)
+		{
+			found = true;
+			if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
+				pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno));
+			if (!db_used)
+			{
+				fprintf(script, "Database: %s\n", active_db->db_name);
+				db_used = true;
+			}
+			fprintf(script, "  %s.%s.%s\n",
+					PQgetvalue(res, rowno, i_nspname),
+					PQgetvalue(res, rowno, i_relname),
+					PQgetvalue(res, rowno, i_attname));
+		}
+
+		PQclear(res);
+
+		PQfinish(conn);
+	}
+
+	if (script)
+		fclose(script);
+
+	if (found)
+	{
+		pg_log(PG_REPORT, "fatal\n");
+		pg_fatal("Your installation contains the \"line\" data type in user tables.  This\n"
+				 "data type changed its internal and input/output format between your old\n"
+				 "and new clusters so this cluster cannot currently be upgraded.  You can\n"
+				 "remove the problem tables and restart the upgrade.  A list of the problem\n"
+				 "columns is in the file:\n"
+				 "    %s\n\n", output_path);
+	}
+	else
+		check_ok();
+}
diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm
index 39281db9011..e4dbebf0604 100644
--- a/src/tools/msvc/Mkvcbuild.pm
+++ b/src/tools/msvc/Mkvcbuild.pm
@@ -31,18 +31,18 @@ my $libpq;
 # Set of variables for contrib modules
 my $contrib_defines = { 'refint' => 'REFINT_VERBOSE' };
 my @contrib_uselibpq =
-  ('dblink', 'oid2name', 'pg_upgrade', 'postgres_fdw', 'vacuumlo');
+  ('dblink', 'oid2name', 'postgres_fdw', 'vacuumlo');
 my @contrib_uselibpgport = (
 	'oid2name', 'pg_standby',
 	'pg_test_fsync', 'pg_test_timing',
-	'pg_upgrade', 'pg_xlogdump',
+	'pg_xlogdump',
 	'vacuumlo');
 my @contrib_uselibpgcommon = (
 	'oid2name', 'pg_standby',
 	'pg_test_fsync', 'pg_test_timing',
-	'pg_upgrade', 'pg_xlogdump',
+	'pg_xlogdump',
 	'vacuumlo');
 my $contrib_extralibs = undef;
 my $contrib_extraincludes =
@@ -54,9 +54,9 @@ my @contrib_excludes = ('pgcrypto', 'intagg', 'sepgsql');
 # Set of variables for frontend modules
 my $frontend_defines = { 'initdb' => 'FRONTEND' };
-my @frontend_uselibpq = ('pg_ctl', 'pgbench', 'psql');
-my @frontend_uselibpgport = ( 'pg_archivecleanup', 'pgbench' );
-my @frontend_uselibpgcommon = ( 'pg_archivecleanup', 'pgbench' );
+my @frontend_uselibpq = ('pg_ctl', 'pg_upgrade', 'pgbench', 'psql');
+my @frontend_uselibpgport = ( 'pg_archivecleanup', 'pg_upgrade', 'pgbench' );
+my @frontend_uselibpgcommon = ( 'pg_archivecleanup', 'pg_upgrade', 'pgbench' );
 my $frontend_extralibs = { 'initdb' => ['ws2_32.lib'],
 	'pg_restore' => ['ws2_32.lib'],
diff --git a/src/tools/msvc/vcregress.pl b/src/tools/msvc/vcregress.pl
index bd3dd2ca1e1..4812a0361f6 100644
--- a/src/tools/msvc/vcregress.pl
+++ b/src/tools/msvc/vcregress.pl
@@ -269,7 +269,7 @@ sub upgradecheck
 	$ENV{PGHOST} = 'localhost';
 	$ENV{PGPORT} ||= 50432;
-	my $tmp_root = "$topdir/contrib/pg_upgrade/tmp_check";
+	my $tmp_root = "$topdir/src/bin/pg_upgrade/tmp_check";
 	(mkdir $tmp_root || die $!) unless -d $tmp_root;
 	my $tmp_install = "$tmp_root/install";
 	print "Setting up temp install\n\n";
@@ -282,7 +282,7 @@ sub upgradecheck
 	$ENV{PATH} = "$bindir;$ENV{PATH}";
 	my $data = "$tmp_root/data";
 	$ENV{PGDATA} = "$data.old";
-	my $logdir = "$topdir/contrib/pg_upgrade/log";
+	my $logdir = "$topdir/src/bin/pg_upgrade/log";
 	(mkdir $logdir || die $!) unless -d $logdir;
 	print "\nRunning initdb on old cluster\n\n";
 	standard_initdb() or exit 1;
@@ -292,7 +292,7 @@ sub upgradecheck
 	installcheck();
 
 	# now we can chdir into the source dir
-	chdir "$topdir/contrib/pg_upgrade";
+	chdir "$topdir/src/bin/pg_upgrade";
 	print "\nDumping old cluster\n\n";
 	system("pg_dumpall -f $tmp_root/dump1.sql") == 0 or exit 1;
 	print "\nStopping old cluster\n\n";