summaryrefslogtreecommitdiff
diff options
authorChristoph Berg <[email protected]>2016-04-04 12:08:06 +0200
committerChristoph Berg <[email protected]>2016-04-04 13:01:48 +0200
commit89777ea0f37e7d6dbebd82330ce59b3a82a5d871 (patch)
treee579cd21f294e6e6f3f1f7302771abf4c712db6d
parent9f15a89ee3deb26e02ffa2fa07c99270568e6343 (diff)
parent4d758754408ed9256b8a1b257e0ce493c4b8ee3d (diff)
Merge tag 'debian/3.1.1-1' into wheezy-backports-sloppydebian/3.1.1-1.bpo70+1wheezy-backports-sloppy
repmgr Debian release 3.1.1-1
-rw-r--r--CONTRIBUTING.md2
-rw-r--r--COPYRIGHT2
-rw-r--r--FAILOVER.rst239
-rw-r--r--FAQ.md16
-rw-r--r--HISTORY11
-rw-r--r--Makefile21
-rw-r--r--QUICKSTART.md119
-rw-r--r--README.md1386
-rw-r--r--TODO10
-rw-r--r--check_dir.c2
-rw-r--r--check_dir.h2
-rw-r--r--config.c63
-rw-r--r--config.h5
-rw-r--r--dbutils.c302
-rw-r--r--dbutils.h32
-rw-r--r--debian/changelog13
-rw-r--r--debian/control2
-rw-r--r--debian/control.in2
-rw-r--r--errcode.h2
-rw-r--r--log.c2
-rw-r--r--log.h2
-rw-r--r--repmgr.c1957
-rw-r--r--repmgr.conf.sample52
-rw-r--r--repmgr.h25
-rw-r--r--repmgr.sql11
-rw-r--r--repmgrd.c72
-rw-r--r--sql/Makefile2
-rw-r--r--sql/repmgr3.0_repmgr3.1.sql35
-rw-r--r--sql/repmgr_funcs.sql.in2
-rw-r--r--sql/uninstall_repmgr_funcs.sql2
-rw-r--r--strutil.c2
-rw-r--r--strutil.h2
-rw-r--r--uninstall_repmgr.sql2
-rw-r--r--version.h2
34 files changed, 3237 insertions, 1164 deletions
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c43d567..819f6a6 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -2,7 +2,7 @@ License and Contributions
=========================
`repmgr` is licensed under the GPL v3. All of its code and documentation is
-Copyright 2010-2015, 2ndQuadrant Limited. See the files COPYRIGHT and LICENSE for
+Copyright 2010-2016, 2ndQuadrant Limited. See the files COPYRIGHT and LICENSE for
details.
The development of repmgr has primarily been sponsored by 2ndQuadrant customers.
diff --git a/COPYRIGHT b/COPYRIGHT
index 5d7a352..dd2cc12 100644
--- a/COPYRIGHT
+++ b/COPYRIGHT
@@ -1,4 +1,4 @@
-Copyright (c) 2010-2015, 2ndQuadrant Limited
+Copyright (c) 2010-2016, 2ndQuadrant Limited
All rights reserved.
This program is free software: you can redistribute it and/or modify
diff --git a/FAILOVER.rst b/FAILOVER.rst
index 7212863..5fa7004 100644
--- a/FAILOVER.rst
+++ b/FAILOVER.rst
@@ -1,238 +1 @@
-====================================================
- PostgreSQL Automatic Failover - User Documentation
-====================================================
-
-Automatic Failover
-==================
-
-repmgr allows for automatic failover when it detects the failure of the master node.
-Following is a quick setup for this.
-
-Installation
-============
-
-For convenience, we define:
-
-**node1**
- is the fully qualified domain name of the Master server, IP 192.168.1.10
-**node2**
- is the fully qualified domain name of the Standby server, IP 192.168.1.11
-**witness**
- is the fully qualified domain name of the server used as a witness, IP 192.168.1.12
-
-**Note:** We don't recommend using names with the status of a server like «masterserver»,
-because it would be confusing once a failover takes place and the Master is
-now on the «standbyserver».
-
-Summary
--------
-
-2 PostgreSQL servers are involved in the replication. Automatic failover needs
-a vote to decide what server it should promote, so an odd number is required.
-A witness-repmgrd is installed in a third server where it uses a PostgreSQL
-cluster to communicate with other repmgrd daemons.
-
-1. Install PostgreSQL in all the servers involved (including the witness server)
-
-2. Install repmgr in all the servers involved (including the witness server)
-
-3. Configure the Master PostreSQL
-
-4. Clone the Master to the Standby using "repmgr standby clone" command
-
-5. Configure repmgr in all the servers involved (including the witness server)
-
-6. Register Master and Standby nodes
-
-7. Initiate witness server
-
-8. Start the repmgrd daemons in all nodes
-
-**Note** A complete High-Availability design needs at least 3 servers to still have
-a backup node after a first failure.
-
-Install PostgreSQL
-------------------
-
-You can install PostgreSQL using any of the recommended methods. You should ensure
-it's 9.0 or later.
-
-Install repmgr
---------------
-
-Install repmgr following the steps in the README file.
-
-Configure PostreSQL
--------------------
-
-Log in to node1.
-
-Edit the file postgresql.conf and modify the parameters::
-
- listen_addresses='*'
- wal_level = 'hot_standby'
- archive_mode = on
- archive_command = 'cd .' # we can also use exit 0, anything that
- # just does nothing
- max_wal_senders = 10
- wal_keep_segments = 5000 # 80 GB required on pg_xlog
- hot_standby = on
- shared_preload_libraries = 'repmgr_funcs'
-
-Edit the file pg_hba.conf and add lines for the replication::
-
- host repmgr repmgr 127.0.0.1/32 trust
- host repmgr repmgr 192.168.1.10/30 trust
- host replication all 192.168.1.10/30 trust
-
-**Note:** It is also possible to use a password authentication (md5), .pgpass file
-should be edited to allow connection between each node.
-
-Create the user and database to manage replication::
-
- su - postgres
- createuser -s repmgr
- createdb -O repmgr repmgr
-
-Restart the PostgreSQL server::
-
- pg_ctl -D $PGDATA restart
-
-And check everything is fine in the server log.
-
-Create the ssh-key for the postgres user and copy it to other servers::
-
- su - postgres
- ssh-keygen # /!\ do not use a passphrase /!\
- cat ~/.ssh/id_rsa.pub > ~/.ssh/authorized_keys
- chmod 600 ~/.ssh/authorized_keys
- exit
- rsync -avz ~postgres/.ssh/authorized_keys node2:~postgres/.ssh/
- rsync -avz ~postgres/.ssh/authorized_keys witness:~postgres/.ssh/
- rsync -avz ~postgres/.ssh/id_rsa* node2:~postgres/.ssh/
- rsync -avz ~postgres/.ssh/id_rsa* witness:~postgres/.ssh/
-
-Clone Master
-------------
-
-Log in to node2.
-
-Clone node1 (the current Master)::
-
- su - postgres
- repmgr -d repmgr -U repmgr -h node1 standby clone
-
-Start the PostgreSQL server::
-
- pg_ctl -D $PGDATA start
-
-And check everything is fine in the server log.
-
-Configure repmgr
-----------------
-
-Log in to each server and configure repmgr by editing the file
-/etc/repmgr/repmgr.conf::
-
- cluster=my_cluster
- node=1
- node_name=earth
- conninfo='host=192.168.1.10 dbname=repmgr user=repmgr'
- master_response_timeout=60
- reconnect_attempts=6
- reconnect_interval=10
- failover=automatic
- promote_command='promote_command.sh'
- follow_command='repmgr standby follow -f /etc/repmgr/repmgr.conf'
-
-**cluster**
- is the name of the current replication.
-**node**
- is the number of the current node (1, 2 or 3 in the current example).
-**node_name**
- is an identifier for every node.
-**conninfo**
- is used to connect to the local PostgreSQL server (where the configuration file is) from any node. In the witness server configuration you need to add a 'port=5499' to the conninfo.
-**master_response_timeout**
- is the maximum amount of time we are going to wait before deciding the master has died and start the failover procedure.
-**reconnect_attempts**
- is the number of times we will try to reconnect to master after a failure has been detected and before start the failover procedure.
-**reconnect_interval**
- is the amount of time between retries to reconnect to master after a failure has been detected and before start the failover procedure.
-**failover**
- configure behavior: *manual* or *automatic*.
-**promote_command**
- the command executed to do the failover (including the PostgreSQL failover itself). The command must return 0 on success.
-**follow_command**
- the command executed to address the current standby to another Master. The command must return 0 on success.
-
-Register Master and Standby
----------------------------
-
-Log in to node1.
-
-Register the node as master::
-
- su - postgres
- repmgr -f /etc/repmgr/repmgr.conf master register
-
-This will also create the repmgr schema and functions.
-
-Log in to node2. Register it as a standby::
-
- su - postgres
- repmgr -f /etc/repmgr/repmgr.conf standby register
-
-Initialize witness server
--------------------------
-
-Log in to witness.
-
-Initialize the witness server::
-
- su - postgres
- repmgr -d repmgr -U repmgr -h 192.168.1.10 -D $WITNESS_PGDATA -f /etc/repmgr/repmgr.conf witness create
-
-The witness server needs the following information from the command
-line:
-
-* Connection details for the current master, to copy the cluster
- configuration.
-* A location for initializing its own $PGDATA.
-
-repmgr will also ask for the superuser password on the witness database so
-it can reconnect when needed (the command line option --initdb-no-pwprompt
-will set up a password-less superuser).
-
-By default the witness server will listen on port 5499; this value can be
-overridden by explicitly providing the port number in the conninfo string
-in repmgr.conf. (Note that it is also possible to specify the port number
-with the -l/--local-port option, however this option is now deprecated and
-will be overridden by a port setting in the conninfo string).
-
-Start the repmgrd daemons
--------------------------
-
-Log in to node2 and witness::
-
- su - postgres
- repmgrd -f /etc/repmgr/repmgr.conf --daemonize -> /var/log/postgresql/repmgr.log 2>&1
-
-**Note:** The Master does not need a repmgrd daemon.
-
-Suspend Automatic behavior
-==========================
-
-Edit the repmgr.conf of the node to remove from automatic processing and change::
-
- failover=manual
-
-Then, signal repmgrd daemon::
-
- su - postgres
- kill -HUP $(pidof repmgrd)
-
-Usage
-=====
-
-The repmgr documentation is in the README file (how to build, options, etc.)
+The contents of this file have been incorporated into the main README.md document.
diff --git a/FAQ.md b/FAQ.md
index eb9d4c2..0ecca1e 100644
--- a/FAQ.md
+++ b/FAQ.md
@@ -120,6 +120,22 @@ General
permission is for PostgreSQL's streaming replication and doesn't
necessarily need to be the `repmgr` user.
+- When cloning a standby, why do I need to provide the connection parameters
+ for the primary server on the command line, not in the configuration file?
+
+ Cloning a standby is a one-time action; the role of the server being cloned
+ from could change, so fixing it in the configuration file would create
+ confusion. If `repmgr` needs to establish a connection to the primary
+ server, it can retrieve this from the `repl_nodes` table or if necessary
+ scan the replication cluster until it locates the active primary.
+
+- Why is there no foreign key on the `node_id` column in the `repl_events`
+ table?
+
+ Under some circumstances event notifications can be generated for servers
+ which have not yet been registered; it's also useful to retain a record
+ of events which includes servers removed from the replication cluster
+ which no longer have an entry in the `repl_nodes` table.
`repmgrd`
---------
diff --git a/HISTORY b/HISTORY
index 2311637..3796111 100644
--- a/HISTORY
+++ b/HISTORY
@@ -1,3 +1,14 @@
+3.1.1 2016-02-
+ Add '-P/--pwprompt' option for "repmgr create witness" (Ian)
+ Prevent repmgr/repmgrd running as root (Ian)
+
+3.1.0 2016-02-01
+ Add "repmgr standby switchover" command (Ian)
+ Revised README file (Ian)
+ Remove requirement for 'archive_mode' to be enabled (Ian)
+ Improve -?/--help output, showing default values if relevant (Ian)
+ Various bugfixes to command line/configuration parameter handling (Ian)
+
3.0.3 2016-01-04
Create replication slot if required before base backup is run (Abhijit)
standy clone: when using rsync, clean up "pg_replslot" directory (Ian)
diff --git a/Makefile b/Makefile
index e4e48e3..d67e8fa 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
#
# Makefile
-# Copyright (c) 2ndQuadrant, 2010-2015
+# Copyright (c) 2ndQuadrant, 2010-2016
repmgrd_OBJS = dbutils.o config.o repmgrd.o log.o strutil.o
repmgr_OBJS = dbutils.o check_dir.o config.o repmgr.o log.o strutil.o
@@ -67,16 +67,21 @@ clean:
rm -f repmgr
$(MAKE) -C sql clean
+# Get correct version numbers and install paths, depending on your postgres version
+PG_VERSION = $(shell pg_config --version | cut -d ' ' -f 2 | cut -d '.' -f 1,2)
+REPMGR_VERSION = $(shell grep REPMGR_VERSION version.h | cut -d ' ' -f 3 | cut -d '"' -f 2)
+PKGLIBDIR = $(shell pg_config --pkglibdir)
+SHAREDIR = $(shell pg_config --sharedir)
+
deb: repmgrd repmgr
mkdir -p ./debian/usr/bin
cp repmgrd repmgr ./debian/usr/bin/
- mkdir -p ./debian/usr/share/postgresql/9.0/contrib/
- cp sql/repmgr_funcs.sql ./debian/usr/share/postgresql/9.0/contrib/
- cp sql/uninstall_repmgr_funcs.sql ./debian/usr/share/postgresql/9.0/contrib/
- mkdir -p ./debian/usr/lib/postgresql/9.0/lib/
- cp sql/repmgr_funcs.so ./debian/usr/lib/postgresql/9.0/lib/
+ mkdir -p ./debian$(SHAREDIR)/contrib/
+ cp sql/repmgr_funcs.sql ./debian$(SHAREDIR)/contrib/
+ cp sql/uninstall_repmgr_funcs.sql ./debian$(SHAREDIR)/contrib/
+ mkdir -p ./debian$(PKGLIBDIR)/
+ cp sql/repmgr_funcs.so ./debian$(PKGLIBDIR)/
dpkg-deb --build debian
- mv debian.deb ../postgresql-repmgr-9.0_1.0.0.deb
+ mv debian.deb ../postgresql-repmgr-$(PG_VERSION)_$(REPMGR_VERSION).deb
rm -rf ./debian/usr
-
diff --git a/QUICKSTART.md b/QUICKSTART.md
index 2dd3e49..5fa7004 100644
--- a/QUICKSTART.md
+++ b/QUICKSTART.md
@@ -1,118 +1 @@
-repmgr quickstart guide
-=======================
-
-This quickstart guide provides some annotated examples on basic
-`repmgr` setup. It assumes you are familiar with PostgreSQL replication
-concepts setup and Linux/UNIX system administration.
-
-For the purposes of this guide, we'll assume the database user will be
-`repmgr_usr` and the database will be `repmgr_db`.
-
-
-Master setup
-------------
-
-1. Configure PostgreSQL
-
- - create user and database:
-
- ```
- CREATE ROLE repmgr_usr LOGIN SUPERUSER;
- CREATE DATABASE repmgr_db OWNER repmgr_usr;
- ```
-
- - configure `postgresql.conf` for replication (see README.md for sample
- settings)
-
- - update `pg_hba.conf`, e.g.:
-
- ```
- host repmgr_db repmgr_usr 192.168.1.0/24 trust
- host replication repmgr_usr 192.168.1.0/24 trust
- ```
-
- Restart the PostgreSQL server after making these changes.
-
-2. Create the `repmgr` configuration file:
-
- $ cat /path/to/repmgr/node1/repmgr.conf
- cluster=test
- node=1
- node_name=node1
- conninfo='host=repmgr_node1 user=repmgr_usr dbname=repmgr_db'
- pg_bindir=/path/to/postgres/bin
-
- (For an annotated `repmgr.conf` file, see `repmgr.conf.sample` in the
- repository's root directory).
-
-3. Register the master node with `repmgr`:
-
- $ repmgr -f /path/to/repmgr/node1/repmgr.conf --verbose master register
- [2015-03-03 17:45:53] [INFO] repmgr connecting to master database
- [2015-03-03 17:45:53] [INFO] repmgr connected to master, checking its state
- [2015-03-03 17:45:53] [INFO] master register: creating database objects inside the repmgr_test schema
- [2015-03-03 17:45:53] [NOTICE] Master node correctly registered for cluster test with id 1 (conninfo: host=localhost user=repmgr_usr dbname=repmgr_db)
-
-Standby setup
--------------
-
-1. Use `repmgr standby clone` to clone a standby from the master:
-
- repmgr -D /path/to/standby/data -d repmgr_db -U repmgr_usr --verbose standby clone 192.168.1.2
- [2015-03-03 18:18:21] [NOTICE] No configuration file provided and default file './repmgr.conf' not found - continuing with default values
- [2015-03-03 18:18:21] [NOTICE] repmgr Destination directory ' /path/to/standby/data' provided
- [2015-03-03 18:18:21] [INFO] repmgr connecting to upstream node
- [2015-03-03 18:18:21] [INFO] repmgr connected to upstream node, checking its state
- [2015-03-03 18:18:21] [INFO] Successfully connected to upstream node. Current installation size is 27 MB
- [2015-03-03 18:18:21] [NOTICE] Starting backup...
- [2015-03-03 18:18:21] [INFO] creating directory " /path/to/standby/data"...
- [2015-03-03 18:18:21] [INFO] Executing: 'pg_basebackup -l "repmgr base backup" -h localhost -p 9595 -U repmgr_usr -D /path/to/standby/data '
- NOTICE: pg_stop_backup complete, all required WAL segments have been archived
- [2015-03-03 18:18:23] [NOTICE] repmgr standby clone (using pg_basebackup) complete
- [2015-03-03 18:18:23] [NOTICE] HINT: You can now start your postgresql server
- [2015-03-03 18:18:23] [NOTICE] for example : pg_ctl -D /path/to/standby/data start
-
- Note that the `repmgr.conf` file is not required when cloning a standby.
- However we recommend providing a valid `repmgr.conf` if you wish to use
- replication slots, or want `repmgr` to log the clone event to the
- `repl_events` table.
-
- This will clone the PostgreSQL database files from the master, including its
- `postgresql.conf` and `pg_hba.conf` files, and additionally automatically create
- the `recovery.conf` file containing the correct parameters to start streaming
- from the primary node.
-
-2. Start the PostgreSQL server
-
-3. Create the `repmgr` configuration file:
-
- $ cat /path/node2/repmgr/repmgr.conf
- cluster=test
- node=2
- node_name=node2
- conninfo='host=repmgr_node2 user=repmgr_usr dbname=repmgr_db'
- pg_bindir=/path/to/postgres/bin
-
-4. Register the standby node with `repmgr`:
-
- $ repmgr -f /path/to/repmgr/node2/repmgr.conf --verbose standby register
- [2015-03-03 18:24:34] [NOTICE] Opening configuration file: /path/to/repmgr/node2/repmgr.conf
- [2015-03-03 18:24:34] [INFO] repmgr connecting to standby database
- [2015-03-03 18:24:34] [INFO] repmgr connecting to master database
- [2015-03-03 18:24:34] [INFO] finding node list for cluster 'test'
- [2015-03-03 18:24:34] [INFO] checking role of cluster node '1'
- [2015-03-03 18:24:34] [INFO] repmgr connected to master, checking its state
- [2015-03-03 18:24:34] [INFO] repmgr registering the standby
- [2015-03-03 18:24:34] [INFO] repmgr registering the standby complete
- [2015-03-03 18:24:34] [NOTICE] Standby node correctly registered for cluster test with id 2 (conninfo: host=localhost user=repmgr_usr dbname=repmgr_db)
-
-
-This concludes the basic `repmgr` setup of master and standby. The records
-created in the `repl_nodes` table should look something like this:
-
- repmgr_db=# SELECT * from repmgr_test.repl_nodes;
- id | type | upstream_node_id | cluster | name | conninfo | slot_name | priority | active
- ----+---------+------------------+---------+-------+----------------------------------------------------+-----------+----------+--------
- 1 | primary | | test | node1 | host=repmgr_node1 user=repmgr_usr dbname=repmgr_db | | 0 | t
- 2 | standby | 1 | test | node2 | host=repmgr_node2 user=repmgr_usr dbname=repmgr_db | | 0 | t
- (2 rows)
+The contents of this file have been incorporated into the main README.md document.
diff --git a/README.md b/README.md
index 6ce9dcb..e76335b 100644
--- a/README.md
+++ b/README.md
@@ -1,278 +1,991 @@
repmgr: Replication Manager for PostgreSQL
==========================================
-`repmgr` is an open-source tool to manage replication and failover
-between multiple PostgreSQL servers. It enhances PostgreSQL's built-in
-hot-standby capabilities with tools to set up standby servers, monitor
-replication, and perform administrative tasks such as failover or manual
-switchover operations.
+`repmgr` is a suite of open-source tools to manage replication and failover
+within a cluster of PostgreSQL servers. It enhances PostgreSQL's built-in
+replication capabilities with utilities to set up standby servers, monitor
+replication, and perform administrative tasks such as failover or switchover
+operations.
-This document covers `repmgr 3`, which supports PostgreSQL 9.3 and later.
-This version can use `pg_basebackup` to clone standby servers, supports
-replication slots and cascading replication, doesn't require a restart
-after promotion, and has many usability improvements.
-
-Please continue to use `repmgr 2` with PostgreSQL 9.2 and earlier.
-For a list of changes since `repmgr 2` and instructions on upgrading to
-`repmgr 3`, see the "Upgrading from repmgr 2" section below.
-
-For a list of frequently asked questions about `repmgr`, please refer
-to the file `FAQ.md`.
Overview
--------
-The `repmgr` command-line tool is used to perform administrative tasks,
-and the `repmgrd` daemon is used to optionally monitor replication and
-manage automatic failover.
+The `repmgr` suite provides two main tools:
-To get started, each PostgreSQL node in your cluster must have a
-`repmgr.conf` file. The current master node must be registered using
-`repmgr master register`. Existing standby servers can be registered
-using `repmgr standby register`. A new standby server can be created
-using `repmgr standby clone` followed by `repmgr standby register`.
+- `repmgr` - a command-line tool used to perform administrative tasks such as:
+ - setting up standby servers
+ - promoting a standby server to master
+ - switching over master and standby servers
+ - displaying the status of servers in the replication cluster
-See the `QUICKSTART.md` file for examples of how to use these commands.
+- `repmgrd` is a daemon which actively monitors servers in a replication cluster
+ and performs the following tasks:
+ - monitoring and recording replication performance
+ - performing failover by detecting failure of the master and
+ promoting the most suitable standby server
+ - provide notifications about events in the cluster to a user-defined
+ script which can perform tasks such as sending alerts by email
-Once the cluster is in operation, run `repmgr cluster show` to see the
-status of the registered primary and standby nodes. Any standby can be
-manually promoted using `repmgr standby promote`. Other standby nodes
-can be told to follow the new master using `repmgr standby follow`. We
-show examples of these commands below.
-Next, for detailed monitoring, you must run `repmgrd` (with the same
-configuration file) on all your nodes. Replication status information is
-stored in a custom schema along with information about registered nodes.
-You also need `repmgrd` to configure automatic failover in your cluster.
+`repmgr` supports and enhances PostgreSQL's built-in streaming replication, which
+provides a single read/write master server and one or more read-only standbys
+containing near-real time copies of the master server's database.
-See the `FAILOVER.rst` file for an explanation of how to set up
-automatic failover.
+For a multi-master replication solution, please see 2ndQuadrant's BDR
+(bi-directional replication) extension.
-Requirements
-------------
+http://2ndquadrant.com/en-us/resources/bdr/
+
+For selective replication, e.g. of individual tables or databases from one server
+to another, please see 2ndQuadrant's pglogical extension.
+
+http://2ndquadrant.com/en-us/resources/pglogical/
+
+### Concepts
+
+This guide assumes that you are familiar with PostgreSQL administration and
+streaming replication concepts. For further details on streaming
+replication, see this link:
+
+ http://www.postgresql.org/docs/current/interactive/warm-standby.html#STREAMING-REPLICATION
+
+The following terms are used throughout the `repmgr` documentation.
+
+- `replication cluster`
+
+In the `repmgr` documentation, "replication cluster" refers to the network
+of PostgreSQL servers connected by streaming replication.
+
+- `node`
+
+A `node` is a server within a replication cluster.
+
+- `upstream node`
+
+This is the node a standby server is connected to; either the master server or in
+the case of cascading replication, another standby.
+
+- `failover`
+
+This is the action which occurs if a master server fails and a suitable standby
+is promoted as the new master. The `repmgrd` daemon supports automatic failover
+to minimise downtime.
-`repmgr` is developed and tested on Linux and OS X, but it should work
-on any UNIX-like system which PostgreSQL itself supports.
+- `switchover`
-All nodes must be running the same major version of PostgreSQL, and we
-recommend that they also run the same minor version. This version of
-`repmgr` (v3) supports PostgreSQL 9.3 and later.
+In certain circumstances, such as hardware or operating system maintenance,
+it's necessary to take a master server offline; in this case a controlled
+switchover is necessary, whereby a suitable standby is promoted and the
+existing master removed from the replication cluster in a controlled manner.
+The `repmgr` command line client provides this functionality.
-Earlier versions of `repmgr` needed password-less SSH access between
-nodes in order to clone standby servers using `rsync`. `repmgr 3` can
-use `pg_basebackup` instead in most circumstances; ssh is not required.
+- `witness server`
-You will need to use rsync only if your PostgreSQL configuration files
-are outside your data directory (as on Debian) and you wish these to
-be copied by `repmgr`. See the `SSH-RSYNC.md` file for details on
-configuring password-less SSH between your nodes.
+`repmgr` provides functionality to set up a so-called "witness server" to
+assist in determining a new master server in a failover situation with more
+than one standby. The witness server itself is not part of the replication
+cluster, although it does contain a copy of the repmgr metadata schema
+(see below).
+
+The purpose of a witness server is to provide a "casting vote" where servers
+in the replication cluster are split over more than one location. In the event
+of a loss of connectivity between locations, the presence or absence of
+the witness server will decide whether a server at that location is promoted
+to master; this is to prevent a "split-brain" situation where an isolated
+location interprets a network outage as a failure of the (remote) master and
+promotes a (local) standby.
+
+A witness server only needs to be created if `repmgrd` is in use.
+
+### repmgr user and metadata
+
+In order to effectively manage a replication cluster, `repmgr` needs to store
+information about the servers in the cluster in a dedicated database schema.
+This schema is automatically created during the first step in initialising
+a `repmgr`-controlled cluster (`repmgr master register`) and contains the
+following objects:
+
+tables:
+ - `repl_events`: records events of interest
+ - `repl_nodes`: connection and status information for each server in the
+ replication cluster
+ - `repl_monitor`: historical standby monitoring information written by `repmgrd`
+
+views:
+ - `repl_show_nodes`: based on the table `repl_nodes`, additionally showing the
+ name of the server's upstream node
+ - `repl_status`: when `repmgrd`'s monitoring is enabled, shows current monitoring
+ status for each node
+
+The `repmgr` metadata schema can be stored in an existing database or in its own
+dedicated database.
+
+A dedicated database superuser is required to own the meta-database as well as carry
+out administrative actions.
Installation
------------
-`repmgr` must be installed on each PostgreSQL server node.
+### System requirements
+
+`repmgr` is developed and tested on Linux and OS X, but should work on any
+UNIX-like system supported by PostgreSQL itself.
+
+Current versions of `repmgr` support PostgreSQL from version 9.3. If you are
+interested in using `repmgr` on earlier versions of PostgreSQL you can download
+version 2.1 which supports PostgreSQL from version 9.1.
+
+All servers in the replication cluster must be running the same major version of
+PostgreSQL, and we recommend that they also run the same minor version.
+
+The `repmgr` tools must be installed on each server in the replication cluster.
+
+A dedicated system user for `repmgr` is *not* required; as many `repmgr` and
+`repmgrd` actions require direct access to the PostgreSQL data directory,
+it should be executed by the `postgres` user.
+
+Additionally, we recommend installing `rsync` and enabling passwordless
+`ssh` connectivity between all servers in the replication cluster.
+
+### Packages
+
+We recommend installing `repmgr` using the available packages for your
+system.
+
+- RedHat/CentOS: RPM packages for `repmgr` are available via Yum through
+ the PostgreSQL Global Development Group RPM repository ( http://yum.postgresql.org/ ).
+ You need to follow the instructions for your distribution (RedHat, CentOS,
+ Fedora, etc.) and architecture as detailed at yum.postgresql.org.
-* Packages
- - PGDG publishes RPM packages for RedHat-based distributions
- - Debian/Ubuntu provide .deb packages.
- - See `PACKAGES.md` for details on building .deb and .rpm packages
- from the `repmgr` source code.
+- Debian/Ubuntu: the most recent `repmgr` packages are available from the
+ PostgreSQL Community APT repository ( http://apt.postgresql.org/ ).
+ Instructions can be found in the APT section of the PostgreSQL Wiki
+ ( https://wiki.postgresql.org/wiki/Apt ).
+
+See `PACKAGES.md` for details on building .deb and .rpm packages from the
+`repmgr` source code.
+
+
+### Source installation
+
+`repmgr` source code can be obtained directly from the project GitHub repository:
+
+ git clone https://github.com/2ndQuadrant/repmgr
+
+Release tarballs are also available:
-* Source installation
- - `git clone https://github.com/2ndQuadrant/repmgr`
- - Or download tar.gz files from
https://github.com/2ndQuadrant/repmgr/releases
- - To install from source, run `sudo make USE_PGXS=1 install`
+ http://repmgr.org/downloads.php
+
+`repmgr` is compiled in the same way as a PostgreSQL extension using the PGXS
+infrastructure, e.g.:
+
+ sudo make USE_PGXS=1 install
+
+`repmgr` can be built from source in any environment suitable for building
+PostgreSQL itself.
+
+
+### Configuration
+
+`repmgr` and `repmgrd` use a common configuration file, by default called
+`repmgr.conf` (although any name can be used if explicitly specified).
+At the very least, `repmgr.conf` must contain the connection parameters
+for the local `repmgr` database; see `repmgr configuration file` below
+for more details.
+
+The configuration file will be searched for in the following locations:
+
+- a configuration file specified by the `-f/--config-file` command line option
+- `repmgr.conf` in the local directory
+- `/etc/repmgr.conf`
+- the directory reported by `pg_config --sysconfdir`
+
+Note that if a file is explicitly specified with `-f/--config-file`, an error will
+be raised if it is not found or not readable and no attempt will be made to check
+default locations; this is to prevent `repmgr` reading the wrong file.
+
+For a full list of annotated configuration items, see the file `repmgr.conf.sample`.
+
+The following parameters in the configuration file can be overridden with
+command line options:
-After installation, you should be able to run `repmgr --version` and
-`repmgrd --version`. These binaries should be installed in the same
-directory as other PostgreSQL binaries, such as `psql`.
+- `-L/--log-level`
+- `-b/--pg_bindir`
-Configuration
--------------
-### Server configuration
+Setting up a simple replication cluster with repmgr
+---------------------------------------------------
-By default, `repmgr` uses PostgreSQL's built-in replication protocol to
-clone a primary and create a standby server. If your configuration files
-live outside your data directory, however, you will still need to set up
-password-less SSH so that rsync can be used. See the `SSH-RSYNC.md` file
-for details.
+The following section will describe how to set up a basic replication cluster
+with a master and a standby server using the `repmgr` command line tool.
+It is assumed PostgreSQL is installed on both servers in the cluster,
+`rsync` is available and password-less SSH connections are possible between
+both servers.
+
+* * *
+
+> *TIP*: for testing `repmgr`, it's possible to use multiple PostgreSQL
+> instances running on different ports on the same computer, with
+> password-less SSH access to `localhost` enabled.
+
+* * *
### PostgreSQL configuration
-The primary server needs to be configured for replication with settings
-like the following in `postgresql.conf`:
+On the master server, a PostgreSQL instance must be initialised and running.
+The following replication settings must be included in `postgresql.conf`:
- # Allow read-only queries on standby servers. The number of WAL
- # senders should be larger than the number of standby servers.
+ # Ensure WAL files contain enough information to enable read-only queries
+ # on the standby
- hot_standby = on
wal_level = 'hot_standby'
+
+ # Enable up to 10 replication connections
+
max_wal_senders = 10
- # How much WAL to retain on the primary to allow a temporarily
+ # How much WAL to retain on the master to allow a temporarily
# disconnected standby to catch up again. The larger this is, the
# longer the standby can be disconnected. This is needed only in
# 9.3; from 9.4, replication slots can be used instead (see below).
wal_keep_segments = 5000
- # Enable archiving, but leave it unconfigured (so that it can be
- # configured without a restart later). Recommended, not required.
+ # Enable read-only queries on a standby
+ # (Note: this will be ignored on a master but we recommend including
+ # it anyway)
- archive_mode = on
- archive_command = 'cd .'
+ hot_standby = on
- # If you plan to use repmgrd, ensure that shared_preload_libraries
- # is configured to load 'repmgr_funcs'
- shared_preload_libraries = 'repmgr_funcs'
+* * *
-PostgreSQL 9.4 makes it possible to use replication slots, which means
-the value of `wal_keep_segments` need no longer be set. See section
-"Replication slots" below for more details.
+> *TIP*: rather than editing these settings in the default `postgresql.conf`
+> file, create a separate file such as `postgresql.replication.conf` and
+> include it from the end of the main configuration file with:
+> `include 'postgresql.replication.conf'`
-With PostgreSQL 9.3, `repmgr` expects `wal_keep_segments` to be set to
-at least 5000 (= 80GB of WAL) by default, though this can be overriden
-with the `-w N` argument.
+* * *
-A dedicated PostgreSQL superuser account and a database in which to
-store monitoring and replication data are required. Create them by
-running the following commands:
+Create a dedicated PostgreSQL superuser account and a database for
+the `repmgr` metadata, e.g.
createuser -s repmgr
createdb repmgr -O repmgr
-We recommend using the name `repmgr` for both user and database, but you
-can use whatever name you like (and you need to set the names you chose
-in the `conninfo` string in `repmgr.conf`; see below). We also recommend
-that you set the `repmgr` user's search path to include the `repmgr` schema
-for convenience when querying the metadata tables and views.
+For the examples in this document, the name `repmgr` will be used for both
+user and database, but any names can be used.
+
+Ensure the `repmgr` user has appropriate permissions in `pg_hba.conf` and
+can connect in replication mode; `pg_hba.conf` should contain entries
+similar to the following:
-The `repmgr` application will create its metadata schema in the `repmgr`
-database when the master server is registered.
+ local replication repmgr trust
+ host replication repmgr 127.0.0.1/32 trust
+    host     replication   repmgr      192.168.1.0/24         trust
-### repmgr configuration
+ local repmgr repmgr trust
+ host repmgr repmgr 127.0.0.1/32 trust
+    host     repmgr        repmgr      192.168.1.0/24         trust
-Create a `repmgr.conf` file on each server. Here's a minimal sample:
+Adjust according to your network environment and authentication requirements.
+
+On the standby, do not create a PostgreSQL instance, but do ensure an empty
+directory is available for the `postgres` system user to create a data
+directory.
+
+
+### repmgr configuration file
+
+Create a `repmgr.conf` file on the master server. The file must contain at
+least the following parameters:
cluster=test
node=1
node_name=node1
conninfo='host=repmgr_node1 user=repmgr dbname=repmgr'
-The `cluster` name must be the same on all nodes. The `node` (an
-integer) and `node_name` must be unique to each node.
+- `cluster`: an arbitrary name for the replication cluster; this must be identical
+ on all nodes
+- `node`: a unique integer identifying the node
+- `node_name`: a unique string identifying the node; we recommend a name
+ specific to the server (e.g. 'server_1'); avoid names indicating the
+ current replication role like 'master' or 'standby' as the server's
+ role could change.
+- `conninfo`: a valid connection string for the `repmgr` database on the
+ *current* server. (On the standby, the database will not yet exist, but
+ `repmgr` needs to know the connection details to complete the setup
+ process).
-The `conninfo` string must point to repmgr's database *on this node*.
-The host must be an IP or a name that all the nodes in the cluster can
-resolve (not `localhost`!). All nodes must use the same username and
-database name, but other parameters, such as the port, can vary between
-nodes.
+`repmgr.conf` should not be stored inside the PostgreSQL data directory,
+as it could be overwritten when setting up or reinitialising the PostgreSQL
+server. See section `Configuration` above for further details about `repmgr.conf`.
-Your `repmgr.conf` should not be stored inside the PostgreSQL data
-directory. We recommend `/etc/repmgr/repmgr.conf`, but you can place it
-anywhere and use the `-f /path/to/repmgr.conf` option to tell `repmgr`
-where it is. If not specified, `repmgr` will search for `repmgr.conf` in
-the current working directory.
+`repmgr` will create a schema named after the cluster and prefixed with `repmgr_`,
+e.g. `repmgr_test`; we also recommend that you set the `repmgr` user's search path
+to include this schema name, e.g.
-If your PostgreSQL binaries (`pg_ctl`, `pg_basebackup`) are not in your
-`PATH`, you can specify an alternate location in `repmgr.conf`:
+ ALTER USER repmgr SET search_path TO repmgr_test, "$user", public;
- pg_bindir=/path/to/postgres/bin
+### Initialise the master server
-See `repmgr.conf.sample` for an example configuration file with all
-available configuration settings annotated.
+To enable `repmgr` to support a replication cluster, the master node must
+be registered with `repmgr`, which creates the `repmgr` database and adds
+a metadata record for the server:
-### Starting up
+ $ repmgr -f repmgr.conf master register
+ [2016-01-07 16:56:46] [NOTICE] master node correctly registered for cluster test with id 1 (conninfo: host=repmgr_node1 user=repmgr dbname=repmgr)
-The master node must be registered first using `repmgr master register`,
-and standby servers must be registered using `repmgr standby register`;
-this inserts details about each node into the control database. Use
-`repmgr cluster show` to see the result.
+The metadata record looks like this:
-See the `QUICKSTART.md` file for examples of how to use these commands.
+ repmgr=# SELECT * FROM repmgr_test.repl_nodes;
+ id | type | upstream_node_id | cluster | name | conninfo | slot_name | priority | active
+ ----+---------+------------------+---------+-------+---------------------------------------------+-----------+----------+--------
+ 1 | master | | test | node1 | host=repmgr_node1 dbname=repmgr user=repmgr | | 100 | t
+ (1 row)
-Failover
---------
+Each server in the replication cluster will have its own record and will be updated
+when its status or role changes.
+
+### Clone the standby server
+
+Create a `repmgr.conf` file on the standby server. It must contain at
+least the same parameters as the master's `repmgr.conf`, but with
+the values `node`, `node_name` and `conninfo` adjusted accordingly, e.g.:
+
+ cluster=test
+ node=2
+ node_name=node2
+ conninfo='host=repmgr_node2 user=repmgr dbname=repmgr'
+
+Clone the standby with:
+
+ $ repmgr -h repmgr_node1 -U repmgr -d repmgr -D /path/to/node2/data/ -f /etc/repmgr.conf standby clone
+ [2016-01-07 17:21:26] [NOTICE] destination directory '/path/to/node2/data/' provided
+ [2016-01-07 17:21:26] [NOTICE] starting backup...
+ [2016-01-07 17:21:26] [HINT] this may take some time; consider using the -c/--fast-checkpoint option
+ NOTICE: pg_stop_backup complete, all required WAL segments have been archived
+ [2016-01-07 17:21:28] [NOTICE] standby clone (using pg_basebackup) complete
+ [2016-01-07 17:21:28] [NOTICE] you can now start your PostgreSQL server
+ [2016-01-07 17:21:28] [HINT] for example : pg_ctl -D /path/to/node2/data/ start
+
+This will clone the PostgreSQL data directory files from the master at repmgr_node1
+using PostgreSQL's pg_basebackup utility. A `recovery.conf` file containing the
+correct parameters to start streaming from this master server will be created
+automatically, and unless otherwise specified the `postgresql.conf` and `pg_hba.conf`
+files will be copied from the master.
+
+Make any adjustments to the PostgreSQL configuration files now, then start the
+standby server.
+
+* * *
+
+> *NOTE*: `repmgr standby clone` does not require `repmgr.conf`, however we
+> recommend providing this as `repmgr` will set the `application_name` parameter
+> in `recovery.conf` as the value provided in `node_name`, making it easier to
+> identify the node in `pg_stat_replication`. It's also possible to provide some
+> advanced options for controlling the standby cloning process; see next section
+> for details.
+
+* * *
+
+### Verify replication is functioning
+
+Connect to the master server and execute:
+
+ repmgr=# SELECT * FROM pg_stat_replication;
+ -[ RECORD 1 ]----+------------------------------
+ pid | 7704
+ usesysid | 16384
+ usename | repmgr
+ application_name | node2
+ client_addr | 192.168.1.2
+ client_hostname |
+ client_port | 46196
+ backend_start | 2016-01-07 17:32:58.322373+09
+ backend_xmin |
+ state | streaming
+ sent_location | 0/3000220
+ write_location | 0/3000220
+ flush_location | 0/3000220
+ replay_location | 0/3000220
+ sync_priority | 0
+ sync_state | async
+
+
+### Register the standby
+
+Register the standby server with:
+
+ repmgr -f /etc/repmgr.conf standby register
+ [2016-01-08 11:13:16] [NOTICE] standby node correctly registered for cluster test with id 2 (conninfo: host=repmgr_node2 user=repmgr dbname=repmgr)
+
+Connect to the standby server's `repmgr` database and check the `repl_nodes`
+table:
+
+ repmgr=# SELECT * FROM repmgr_test.repl_nodes ORDER BY id;
+ id | type | upstream_node_id | cluster | name | conninfo | slot_name | priority | active
+ ----+---------+------------------+---------+-------+---------------------------------------------+-----------+----------+--------
+ 1 | master | | test | node1 | host=repmgr_node1 dbname=repmgr user=repmgr | | 100 | t
+ 2 | standby | 1 | test | node2 | host=repmgr_node2 dbname=repmgr user=repmgr | | 100 | t
+ (2 rows)
+
+The standby server now has a copy of the records for all servers in the
+replication cluster. Note that the relationship between master and standby is
+explicitly defined via the `upstream_node_id` value, which shows here that the
+standby's upstream server is the replication cluster master. While of limited
+use in a simple master/standby replication cluster, this information is required
+to effectively manage cascading replication (see below).
+
+
+Advanced options for cloning a standby
+--------------------------------------
+
+The above section demonstrates the simplest possible way to clone a standby
+server. Depending on your circumstances, finer-grained control over the cloning
+process may be necessary.
+
+### pg_basebackup options when cloning a standby
+
+By default, `pg_basebackup` performs a checkpoint before beginning the backup
+process. However, a normal checkpoint may take some time to complete;
+a fast checkpoint can be forced with the `-c/--fast-checkpoint` option.
+However this may impact performance of the server being cloned from
+so should be used with care.
+
+Further options can be passed to the `pg_basebackup` utility via
+the setting `pg_basebackup_options` in `repmgr.conf`. See the PostgreSQL
+documentation for more details of available options:
+ http://www.postgresql.org/docs/current/static/app-pgbasebackup.html
+
+### Using rsync to clone a standby
+
+By default `repmgr` uses the `pg_basebackup` utility to clone a standby's
+data directory from the master. Under some circumstances it may be
+desirable to use `rsync` to do this, such as when resyncing the data
+directory of a failed server with an active replication node.
+
+To use `rsync` instead of `pg_basebackup`, provide the `-r/--rsync-only`
+option when executing `repmgr standby clone`.
+
+Note that `repmgr` forces `rsync` to use `--checksum` mode to ensure that all
+the required files are copied. This results in additional I/O on both source
+and destination server as the contents of files existing on both servers need
+to be compared, meaning this method is not necessarily faster than making a
+fresh clone with `pg_basebackup`.
+
+
+### Dealing with PostgreSQL configuration files
+
+By default, `repmgr` will attempt to copy the standard configuration files
+(`postgresql.conf`, `pg_hba.conf` and `pg_ident.conf`) even if they are located
+outside of the data directory (though currently they will be copied
+into the standby's data directory). To prevent this happening, when executing
+`repmgr standby clone` provide the `--ignore-external-config-files` option.
+
+If using `rsync` to clone a standby, additional control over which files
+not to transfer is possible by configuring `rsync_options` in `repmgr.conf`,
+which enables any valid `rsync` options to be passed to that command, e.g.:
+
+ rsync_options='--exclude=postgresql.local.conf'
+
+
+Setting up cascading replication with repmgr
+--------------------------------------------
+
+Cascading replication, introduced with PostgreSQL 9.2, enables a standby server
+to replicate from another standby server rather than directly from the master,
+meaning replication changes "cascade" down through a hierarchy of servers. This
+can be used to reduce load on the master and minimize bandwidth usage between
+sites.
+
+`repmgr` supports cascading replication. When cloning a standby, in `repmgr.conf`
+set the parameter `upstream_node` to the id of the server the standby
+should connect to, and `repmgr` will perform the clone using this server
+and create `recovery.conf` to point to it. Note that if `upstream_node`
+is not explicitly provided, `repmgr` will use the master as the server
+to clone from.
+
+To demonstrate cascading replication, ensure you have a master and standby
+set up as shown above in the section "Setting up a simple replication cluster
+with repmgr". Create an additional standby server with `repmgr.conf` looking
+like this:
+
+ cluster=test
+ node=3
+ node_name=node3
+ conninfo='host=repmgr_node3 user=repmgr dbname=repmgr'
+ upstream_node=2
+
+Ensure `upstream_node` contains the `node` id of the previously
+created standby. Clone this standby (using the connection parameters
+for the existing standby) and register it:
+
+ $ repmgr -h repmgr_node2 -U repmgr -d repmgr -D /path/to/node3/data/ -f /etc/repmgr.conf standby clone
+ [2016-01-08 13:44:52] [NOTICE] destination directory 'node_3/data/' provided
+ [2016-01-08 13:44:52] [NOTICE] starting backup (using pg_basebackup)...
+ [2016-01-08 13:44:52] [HINT] this may take some time; consider using the -c/--fast-checkpoint option
+ [2016-01-08 13:44:52] [NOTICE] standby clone (using pg_basebackup) complete
+ [2016-01-08 13:44:52] [NOTICE] you can now start your PostgreSQL server
+ [2016-01-08 13:44:52] [HINT] for example : pg_ctl -D /path/to/node_3/data start
+
+ $ repmgr -f /etc/repmgr.conf standby register
+ [2016-01-08 14:04:32] [NOTICE] standby node correctly registered for cluster test with id 3 (conninfo: host=repmgr_node3 dbname=repmgr user=repmgr)
+
+After starting the standby, the `repl_nodes` table will look like this:
+
+ repmgr=# SELECT * FROM repmgr_test.repl_nodes ORDER BY id;
+ id | type | upstream_node_id | cluster | name | conninfo | slot_name | priority | active
+ ----+---------+------------------+---------+-------+---------------------------------------------+-----------+----------+--------
+ 1 | master | | test | node1 | host=repmgr_node1 dbname=repmgr user=repmgr | | 100 | t
+ 2 | standby | 1 | test | node2 | host=repmgr_node2 dbname=repmgr user=repmgr | | 100 | t
+ 3 | standby | 2 | test | node3 | host=repmgr_node3 dbname=repmgr user=repmgr | | 100 | t
+ (3 rows)
+
+
+Using replication slots with repmgr
+-----------------------------------
+
+Replication slots were introduced with PostgreSQL 9.4 and are designed to ensure
+that any standby connected to the master using a replication slot will always
+be able to retrieve the required WAL files. This removes the need to manually
+manage WAL file retention by estimating the number of WAL files that need to
+be maintained on the master using `wal_keep_segments`. Do however be aware
+that if a standby is disconnected, WAL will continue to accumulate on the master
+until either the standby reconnects or the replication slot is dropped.
+
+To enable `repmgr` to use replication slots, set the boolean parameter
+`use_replication_slots` in `repmgr.conf`:
+
+ use_replication_slots=1
+
+Note that `repmgr` will fail with an error if this option is specified when
+working with PostgreSQL 9.3.
+
+When cloning a standby, `repmgr` will automatically generate an appropriate
+slot name, which is stored in the `repl_nodes` table, and create the slot
+on the master:
+
+ repmgr=# SELECT * from repl_nodes ORDER BY id;
+ id | type | upstream_node_id | cluster | name | conninfo | slot_name | priority | active
+ ----+---------+------------------+---------+-------+------------------------------------------+---------------+----------+--------
+ 1 | master | | test | node1 | host=localhost dbname=repmgr user=repmgr | repmgr_slot_1 | 100 | t
+ 2 | standby | 1 | test | node2 | host=localhost dbname=repmgr user=repmgr | repmgr_slot_2 | 100 | t
+ 3 | standby | 1 | test | node3 | host=localhost dbname=repmgr user=repmgr | repmgr_slot_3 | 100 | t
+
+ repmgr=# SELECT * FROM pg_replication_slots ;
+ slot_name | plugin | slot_type | datoid | database | active | active_pid | xmin | catalog_xmin | restart_lsn
+ ---------------+--------+-----------+--------+----------+--------+------------+------+--------------+-------------
+ repmgr_slot_3 | | physical | | | t | 26060 | | | 0/50028F0
+ repmgr_slot_2 | | physical | | | t | 26079 | | | 0/50028F0
+ (2 rows)
+
+Note that a slot name will be created by default for the master but not
+actually used unless the master is converted to a standby using e.g.
+`repmgr standby switchover`.
+
+Be aware that when initially cloning a standby, you will need to ensure
+that all required WAL files remain available while the cloning is taking
+place. If using the default `pg_basebackup` method, we recommend setting
+`pg_basebackup`'s `--xlog-method` parameter to `stream` like this:
+
+ pg_basebackup_options='--xlog-method=stream'
+
+See the `pg_basebackup` documentation for details:
+ http://www.postgresql.org/docs/current/static/app-pgbasebackup.html
+
+Otherwise it's necessary to set `wal_keep_segments` to an appropriately high
+value.
+
+Further information on replication slots in the PostgreSQL documentation:
+ http://www.postgresql.org/docs/current/interactive/warm-standby.html#STREAMING-REPLICATION-SLOTS
+
+
+Promoting a standby server with repmgr
+--------------------------------------
+
+If a master server fails or needs to be removed from the replication cluster,
+a new master server must be designated, to ensure the cluster continues
+working correctly. This can be done with `repmgr standby promote`, which promotes
+the standby on the current server to master.
-To promote a standby to master, on the standby execute e.g.:
+To demonstrate this, set up a replication cluster with a master and two attached
+standby servers so that the `repl_nodes` table looks like this:
- repmgr -f /etc/repmgr/repmgr.conf --verbose standby promote
+ repmgr=# SELECT * FROM repmgr_test.repl_nodes ORDER BY id;
+ id | type | upstream_node_id | cluster | name | conninfo | slot_name | priority | active
+ ----+---------+------------------+---------+-------+---------------------------------------------+-----------+----------+--------
+ 1 | master | | test | node1 | host=repmgr_node1 dbname=repmgr user=repmgr | | 100 | t
+ 2 | standby | 1 | test | node2 | host=repmgr_node2 dbname=repmgr user=repmgr | | 100 | t
+ 3 | standby | 1 | test | node3 | host=repmgr_node3 dbname=repmgr user=repmgr | | 100 | t
+ (3 rows)
+
+Stop the current master with e.g.:
+
+ $ pg_ctl -D /path/to/node_1/data -m fast stop
-`repmgr` will attempt to connect to the current master to verify that it
-is not available (if it is, `repmgr` will not promote the standby).
+At this point the replication cluster will be in a partially disabled state with
+both standbys accepting read-only connections while attempting to connect to the
+stopped master. Note that the `repl_nodes` table will not yet have been updated
+and will still show the master as active.
-Other standby servers need to be told to follow the new master with e.g.:
+Promote the first standby with:
- repmgr -f /etc/repmgr/repmgr.conf --verbose standby follow
+ $ repmgr -f /etc/repmgr.conf standby promote
-See file `FAILOVER.rst` for details on setting up automated failover.
+This will produce output similar to the following:
+ [2016-01-08 16:07:31] [ERROR] connection to database failed: could not connect to server: Connection refused
+ Is the server running on host "repmgr_node1" (192.161.2.1) and accepting
+ TCP/IP connections on port 5432?
+ could not connect to server: Connection refused
+ Is the server running on host "repmgr_node1" (192.161.2.1) and accepting
+ TCP/IP connections on port 5432?
-Converting a failed master to a standby
----------------------------------------
+ [2016-01-08 16:07:31] [NOTICE] promoting standby
+ [2016-01-08 16:07:31] [NOTICE] promoting server using '/usr/bin/postgres/pg_ctl -D /path/to/node_2/data promote'
+ server promoting
+ [2016-01-08 16:07:33] [NOTICE] STANDBY PROMOTE successful
-Often it's desirable to bring a failed master back into replication
-as a standby. First, ensure that the master's PostgreSQL server is
-no longer running; then use `repmgr standby clone` to re-sync its
-data directory with the current master, e.g.:
+Note: the first `[ERROR]` is `repmgr` attempting to connect to the current
+master to verify that it has failed. If a valid master is found, `repmgr`
+will refuse to promote a standby.
- repmgr -f /etc/repmgr/repmgr.conf \
- --force --rsync-only \
- -h node2 -d repmgr -U repmgr --verbose \
- standby clone
+The `repl_nodes` table will now look like this:
+
+ id | type | upstream_node_id | cluster | name | conninfo | slot_name | priority | active
+ ----+---------+------------------+---------+-------+---------------------------------------------+-----------+----------+--------
+ 1 | master | | test | node1 | host=repmgr_node1 dbname=repmgr user=repmgr | | 100 | f
+ 2 | master | | test | node2 | host=repmgr_node2 dbname=repmgr user=repmgr | | 100 | t
+ 3 | standby | 1 | test | node3 | host=repmgr_node3 dbname=repmgr user=repmgr | | 100 | t
+ (3 rows)
-Here it's essential to use the command line options `--force`, to
-ensure `repmgr` will re-use the existing data directory, and
-`--rsync-only`, which causes `repmgr` to use `rsync` rather than
-`pg_basebackup`, as the latter can only be used to clone a fresh
-standby.
+The previous master has been marked as inactive, and `node2`'s `upstream_node_id`
+has been cleared as it's now the "topmost" server in the replication cluster.
-The node can then be restarted.
+However the sole remaining standby is still trying to replicate from the failed
+master; `repmgr standby follow` must now be executed to rectify this situation.
-The node will then need to be re-registered with `repmgr`; again
-the `--force` option is required to update the existing record:
- repmgr -f /etc/repmgr/repmgr.conf \
- --force \
- standby register
+Following a new master server with repmgr
+-----------------------------------------
+Following the failure or removal of the replication cluster's existing master
+server, `repmgr standby follow` can be used to make 'orphaned' standbys
+follow the new master and catch up to its current state.
+To demonstrate this, assuming a replication cluster in the same state as the
+end of the preceding section ("Promoting a standby server with repmgr"),
+execute this:
+
+ $ repmgr -f /etc/repmgr.conf -D /path/to/node_3/data/ -h repmgr_node2 -U repmgr -d repmgr standby follow
+ [2016-01-08 16:57:06] [NOTICE] restarting server using '/usr/bin/postgres/pg_ctl -D /path/to/node_3/data/ -w -m fast restart'
+ waiting for server to shut down.... done
+ server stopped
+ waiting for server to start.... done
+ server started
+
+The standby is now replicating from the new master and `repl_nodes` has been
+updated to reflect this:
+
+ id | type | upstream_node_id | cluster | name | conninfo | slot_name | priority | active
+ ----+---------+------------------+---------+-------+---------------------------------------------+-----------+----------+--------
+ 1 | master | | test | node1 | host=repmgr_node1 dbname=repmgr user=repmgr | | 100 | f
+ 2 | master | | test | node2 | host=repmgr_node2 dbname=repmgr user=repmgr | | 100 | t
+ 3 | standby | 2 | test | node3 | host=repmgr_node3 dbname=repmgr user=repmgr | | 100 | t
+ (3 rows)
-Replication management with repmgrd
+
+Note that with cascading replication, `repmgr standby follow` can also be
+used to detach a standby from its current upstream server and follow another
+upstream server, including the master.
+
+
+Performing a switchover with repmgr
-----------------------------------
+A typical use-case for replication is a combination of master and standby
+server, with the standby serving as a backup which can easily be activated
+in case of a problem with the master. Such an unplanned failover would
+normally be handled by promoting the standby, after which an appropriate
+action must be taken to restore the old master.
+
+In some cases however it's desirable to promote the standby in a planned
+way, e.g. so maintenance can be performed on the master; this kind of switchover
+is supported by the `repmgr standby switchover` command.
+
+`repmgr standby switchover` differs from other `repmgr` actions in that it
+also performs actions on another server, for which reason you must provide
+both passwordless SSH access and the path of `repmgr.conf` on that server.
+
+* * *
+
+> *NOTE* `repmgr standby switchover` performs a relatively complex series
+> of operations on two servers, and should therefore be performed after
+> careful preparation and with adequate attention. In particular you should
+> be confident that your network environment is stable and reliable.
+>
+> We recommend running `repmgr standby switchover` at the most verbose
+> logging level (`--log-level DEBUG --verbose`) and capturing all output
+> to assist troubleshooting any problems.
+>
+> Please also read carefully the list of caveats below.
+
+* * *
+
+To demonstrate switchover, we will assume a replication cluster running on
+PostgreSQL 9.5 or later with a master (`node1`) and a standby (`node2`);
+after the switchover `node2` should become the master with `node1` following it.
+
+The switchover command must be run from the standby which is to be promoted,
+and in its simplest form looks like this:
+
+ repmgr -f /etc/repmgr.conf -C /etc/repmgr.conf standby switchover
+
+`-f /etc/repmgr.conf` is, as usual, the local `repmgr` node's configuration file.
+`-C /etc/repmgr.conf` is the path to the configuration file on the current
+master, which is required to execute `repmgr` remotely on that server;
+if it is not provided with `-C`, `repmgr` will check the same path as on the
+local server, as well as the normal default locations. `repmgr` will check
+this file can be found before performing any further actions.
+
+ $ repmgr -f /etc/repmgr.conf -C /etc/repmgr.conf standby switchover -v
+ [2016-01-27 16:38:33] [NOTICE] using configuration file "/etc/repmgr.conf"
+ [2016-01-27 16:38:33] [NOTICE] switching current node 2 to master server and demoting current master to standby...
+ [2016-01-27 16:38:34] [NOTICE] 5 files copied to /tmp/repmgr-node1-archive
+ [2016-01-27 16:38:34] [NOTICE] connection to database failed: FATAL: the database system is shutting down
+
+ [2016-01-27 16:38:34] [NOTICE] current master has been stopped
+ [2016-01-27 16:38:34] [ERROR] connection to database failed: FATAL: the database system is shutting down
+
+ [2016-01-27 16:38:34] [NOTICE] promoting standby
+ [2016-01-27 16:38:34] [NOTICE] promoting server using '/usr/local/bin/pg_ctl -D /var/lib/postgresql/9.5/node_2/data promote'
+ server promoting
+ [2016-01-27 16:38:36] [NOTICE] STANDBY PROMOTE successful
+ [2016-01-27 16:38:36] [NOTICE] Executing pg_rewind on old master server
+ [2016-01-27 16:38:36] [NOTICE] 5 files copied to /var/lib/postgresql/9.5/data
+ [2016-01-27 16:38:36] [NOTICE] restarting server using '/usr/local/bin/pg_ctl -w -D /var/lib/postgresql/9.5/node_1/data -m fast restart'
+ pg_ctl: PID file "/var/lib/postgresql/9.5/node_1/data/postmaster.pid" does not exist
+ Is server running?
+ starting server anyway
+ [2016-01-27 16:38:37] [NOTICE] node 1 is replicating in state "streaming"
+ [2016-01-27 16:38:37] [NOTICE] switchover was successful
+
+Messages containing the line `connection to database failed: FATAL: the database
+system is shutting down` are not errors - `repmgr` is polling the old master database
+to make sure it has shut down correctly. `repmgr` will also archive any
+configuration files in the old master's data directory as they will otherwise
+be overwritten by `pg_rewind`; they are restored once the `pg_rewind` operation
+has completed.
+
+The old master is now replicating as a standby from the new master and `repl_nodes`
+should have been updated to reflect this:
+
+ repmgr=# SELECT * from repl_nodes ORDER BY id;
+ id | type | upstream_node_id | cluster | name | conninfo | slot_name | priority | active
+ ----+---------+------------------+---------+-------+------------------------------------------+-----------+----------+--------
+ 1 | standby | 2 | test | node1 | host=localhost dbname=repmgr user=repmgr | | 100 | t
+ 2 | master | | test | node2 | host=localhost dbname=repmgr user=repmgr | | 100 | t
+ (2 rows)
+
+
+### Caveats
+
+- the functionality provided by `repmgr standby switchover` is primarily aimed
+ at a two-server master/standby replication cluster and currently does
+ not support additional standbys.
+- `repmgr standby switchover` is designed to use the `pg_rewind` utility,
+  standard in 9.5 and later and available separately in 9.3 and 9.4
+ (see note below)
+- `pg_rewind` *requires* that either `wal_log_hints` is enabled, or that
+ data checksums were enabled when the cluster was initialized. See the
+ `pg_rewind` documentation for details:
+ http://www.postgresql.org/docs/current/static/app-pgrewind.html
+- `repmgrd` should not be running when a switchover is carried out, otherwise
+  `repmgrd` may try to promote a standby by itself.
+- Any other standbys attached to the old master will need to be manually
+ instructed to point to the new master (e.g. with `repmgr standby follow`).
+
+We hope to remove some of these restrictions in future versions of `repmgr`.
+
+
+### Switchover and PostgreSQL 9.3/9.4
+
+In order to efficiently reintegrate a demoted master into the replication
+cluster as a standby, it's necessary to resynchronise its data directory
+with that of the current master, as it's very likely that their timelines
+will have diverged slightly following the shutdown of the old master.
+
+The utility `pg_rewind` provides an efficient way of doing this, however
+is not included in the core PostgreSQL distribution for versions 9.3 and 9.4.
+However, `pg_rewind` is available separately for these versions and we
+strongly recommend its installation. To use it with versions 9.3 and 9.4,
+provide the command line option `--pg_rewind`, optionally with the
+path to the `pg_rewind` binary location if not installed in the PostgreSQL
+`bin` directory.
+
+`pg_rewind` for versions 9.3 and 9.4 can be obtained from:
+ https://github.com/vmware/pg_rewind
+
+If `pg_rewind` is not available, as a fallback `repmgr` will use `repmgr
+standby clone` to resynchronise the old master's data directory using
+`rsync`. However, in order to ensure all files are synchronised, the
+entire data directory on both servers must be scanned, a process which
+can take some time on larger databases, in which case you should
+consider making a fresh standby clone.
+
+
+Unregistering a standby from a replication cluster
+--------------------------------------------------
+
+To unregister a running standby, execute:
+
+ repmgr standby unregister -f /etc/repmgr.conf
+
+This will remove the standby record from `repmgr`'s internal metadata
+table (`repl_nodes`). A `standby_unregister` event notification will be
+recorded in the `repl_events` table.
+
+Note that this command will not stop the server itself or remove
+it from the replication cluster.
+
+If the standby is not running, the standby record must be manually
+removed from the `repl_nodes` table with e.g.:
+
+ DELETE FROM repmgr_test.repl_nodes WHERE id = 3;
+
+Adjust schema and node ID accordingly. A future `repmgr` release
+will make it possible to unregister failed standbys.
+
+
+Automatic failover with repmgrd
+-------------------------------
+
`repmgrd` is a management and monitoring daemon which runs on standby nodes
and which can automate actions such as failover and updating standbys to
-follow the new master.`repmgrd` can be started simply with e.g.:
+follow the new master.
+
+To use `repmgrd` for automatic failover, the following `repmgrd` options must
+be set in `repmgr.conf`:
+
+ failover=automatic
+ promote_command='repmgr standby promote -f /etc/repmgr/repmgr.conf'
+ follow_command='repmgr standby follow -f /etc/repmgr/repmgr.conf'
- repmgrd -f /etc/repmgr/repmgr.conf --verbose > $HOME/repmgr/repmgr.log 2>&1
+(See `repmgr.conf.sample` for further `repmgrd`-specific settings).
-or alternatively:
+When `failover` is set to `automatic`, upon detecting failure of the current
+master, `repmgrd` will execute one of `promote_command` or `follow_command`,
+depending on whether the current server is becoming the new master or
+needs to follow another server which has become the new master. Note that
+these commands can be any valid shell script which results in one of these
+actions happening, but we strongly recommend executing `repmgr` directly.
- repmgrd -f /etc/repmgr/repmgr.conf --verbose --monitoring-history > $HOME/repmgr/repmgrd.log 2>&1
+`repmgrd` can be started simply with e.g.:
-which will track replication advance or lag on all registered standbys.
+ repmgrd -f /etc/repmgr.conf --verbose > $HOME/repmgr/repmgr.log 2>&1
For permanent operation, we recommend using the options `-d/--daemonize` to
detach the `repmgrd` process, and `-p/--pid-file` to write the process PID
to a file.
-Example log output (at default log level):
+Note that currently `repmgrd` is not required to run on the master server.
+
+To demonstrate automatic failover, set up a 3-node replication cluster (one master
+and two standbys streaming directly from the master) so that the `repl_nodes`
+table looks like this:
+
+ repmgr=# SELECT * FROM repmgr_test.repl_nodes ORDER BY id;
+ id | type | upstream_node_id | cluster | name | conninfo | slot_name | priority | active
+ ----+---------+------------------+---------+-------+---------------------------------------------+-----------+----------+--------
+ 1 | master | | test | node1 | host=repmgr_node1 dbname=repmgr user=repmgr | | 100 | t
+ 2 | standby | 1 | test | node2 | host=repmgr_node2 dbname=repmgr user=repmgr | | 100 | t
+ 3 | standby | 1 | test | node3 | host=repmgr_node3 dbname=repmgr user=repmgr | | 100 | t
+ (3 rows)
+
+
+Start `repmgrd` on each standby and verify that it's running by examining
+the log output, which at default log level will look like this:
+
+ [2016-01-05 13:15:40] [INFO] checking cluster configuration with schema 'repmgr_test'
+ [2016-01-05 13:15:40] [INFO] checking node 2 in cluster 'test'
+ [2016-01-05 13:15:40] [INFO] reloading configuration file and updating repmgr tables
+ [2016-01-05 13:15:40] [INFO] starting continuous standby node monitoring
+
+Each `repmgrd` should also have noted its successful startup in the `repl_events`
+table:
+
+ repmgr=# SELECT * FROM repl_events WHERE event = 'repmgrd_start';
+ node_id | event | successful | event_timestamp | details
+ ---------+---------------+------------+-------------------------------+---------
+ 2 | repmgrd_start | t | 2016-01-27 18:22:38.080231+09 |
+ 3 | repmgrd_start | t | 2016-01-27 18:22:38.08756+09 |
+ (2 rows)
+
+Now stop the current master server with e.g.:
+
+ pg_ctl -D /path/to/node1/data -m immediate stop
+
+This will force the master node to shut down straight away, aborting all
+processes and transactions. This will cause a flurry of activity in
+the `repmgrd` log files as each `repmgrd` detects the failure of the master
+and a failover decision is made. Here extracts from the standby server
+promoted to new master:
+
+ [2016-01-06 18:32:58] [WARNING] connection to upstream has been lost, trying to recover... 15 seconds before failover decision
+ [2016-01-06 18:33:03] [WARNING] connection to upstream has been lost, trying to recover... 10 seconds before failover decision
+ [2016-01-06 18:33:08] [WARNING] connection to upstream has been lost, trying to recover... 5 seconds before failover decision
+ ...
+ [2016-01-06 18:33:18] [NOTICE] this node is the best candidate to be the new master, promoting...
+ ...
+ [2016-01-06 18:33:20] [NOTICE] STANDBY PROMOTE successful
+
+and here from the standby server which is now following the new master:
+
+ [2016-01-06 18:32:58] [WARNING] connection to upstream has been lost, trying to recover... 15 seconds before failover decision
+ [2016-01-06 18:33:03] [WARNING] connection to upstream has been lost, trying to recover... 10 seconds before failover decision
+ [2016-01-06 18:33:08] [WARNING] connection to upstream has been lost, trying to recover... 5 seconds before failover decision
+ ...
+ [2016-01-06 18:33:23] [NOTICE] node 2 is the best candidate for new master, attempting to follow...
+ [2016-01-06 18:33:23] [INFO] changing standby's master
+ ...
+ [2016-01-06 18:33:25] [NOTICE] node 3 now following new upstream node 2
+
+The `repl_nodes` table should have been updated to reflect the new situation,
+with the original master (`node1`) marked as inactive, and standby `node3`
+now following the new master (`node2`):
+
+ repmgr=# SELECT * from repl_nodes ORDER BY id;
+ id | type | upstream_node_id | cluster | name | conninfo | slot_name | priority | active
+ ----+---------+------------------+---------+-------+------------------------------------------+-----------+----------+--------
+ 1 | master | | test | node1 | host=localhost dbname=repmgr user=repmgr | | 100 | f
+ 2 | master | | test | node2 | host=localhost dbname=repmgr user=repmgr | | 100 | t
+ 3 | standby | 2 | test | node3 | host=localhost dbname=repmgr user=repmgr | | 100 | t
+ (3 rows)
- [2015-03-11 13:15:40] [INFO] checking cluster configuration with schema 'repmgr_test'
- [2015-03-11 13:15:40] [INFO] checking node 2 in cluster 'test'
- [2015-03-11 13:15:40] [INFO] reloading configuration file and updating repmgr tables
- [2015-03-11 13:15:40] [INFO] starting continuous standby node monitoring
+The `repl_events` table will contain a summary of what happened to each server
+during the failover:
+
+ repmgr=# SELECT * from repmgr_test.repl_events where event_timestamp>='2016-01-06 18:30';
+ node_id | event | successful | event_timestamp | details
+ ---------+--------------------------+------------+-------------------------------+----------------------------------------------------------
+ 2 | standby_promote | t | 2016-01-06 18:33:20.061736+09 | node 2 was successfully promoted to master
+ 2 | repmgrd_failover_promote | t | 2016-01-06 18:33:20.067132+09 | node 2 promoted to master; old master 1 marked as failed
+ 3 | repmgrd_failover_follow | t | 2016-01-06 18:33:25.331012+09 | node 3 now following new upstream node 2
+ (3 rows)
+
+
+repmgrd log rotation
+--------------------
Note that currently `repmgrd` does not provide logfile rotation. To ensure
the current logfile does not grow indefinitely, configure your system's `logrotate`
to do this. Sample configuration to rotate logfiles weekly with retention
for up to 52 weeks and rotation forced if a file grows beyond 100Mb:
- /var/log/postgresql/repmgr-9.4.log {
+ /var/log/postgresql/repmgr-9.5.log {
missingok
compress
rotate 52
@@ -281,31 +994,6 @@ for up to 52 weeks and rotation forced if a file grows beyond 100Mb:
create 0600 postgres postgres
}
-
-Witness server
---------------
-
-In a situation caused e.g. by a network interruption between two
-data centres, it's important to avoid a "split-brain" situation where
-both sides of the network assume they are the active segment and the
-side without an active master unilaterally promotes one of its standbys.
-
-To prevent this situation happening, it's essential to ensure that one
-network segment has a "voting majority", so other segments will know
-they're in the minority and not attempt to promote a new master. Where
-an odd number of servers exists, this is not an issue. However, if each
-network has an even number of nodes, it's necessary to provide some way
-of ensuring a majority, which is where the witness server becomes useful.
-
-This is not a fully-fledged standby node and is not integrated into
-replication, but it effectively represents the "casting vote" when
-deciding which network segment has a majority. A witness server can
-be set up using `repmgr witness create` (see below for details) and
-can run on a dedicated server or an existing node. Note that it only
-makes sense to create a witness server in conjunction with running
-`repmgrd`; the witness server will require its own `repmgrd` instance.
-
-
Monitoring
----------
@@ -320,7 +1008,7 @@ be queried easily using the view `repl_status`:
standby_name | node2
node_type | standby
active | t
- last_monitor_time | 2015-03-11 14:02:34.51713+09
+ last_monitor_time | 2016-01-05 14:02:34.51713+09
last_wal_primary_location | 0/3012AF0
last_wal_standby_location | 0/3012AF0
replication_lag | 0 bytes
@@ -328,61 +1016,41 @@ be queried easily using the view `repl_status`:
apply_lag | 0 bytes
communication_time_lag | 00:00:00.955385
+The interval in which monitoring history is written is controlled by the
+configuration parameter `monitor_interval_secs`; default is 2.
-Event logging and notifications
--------------------------------
-
-To help understand what significant events (e.g. failure of a node) happened
-when and for what reason, `repmgr` logs such events into the `repl_events`
-table, e.g.:
-
- repmgr_db=# SELECT * from repmgr_test.repl_events ;
- node_id | event | successful | event_timestamp | details
- ---------+------------------+------------+-------------------------------+-----------------------------------------------------------------------------------
- 1 | master_register | t | 2015-03-16 17:36:21.711796+09 |
- 2 | standby_clone | t | 2015-03-16 17:36:31.286934+09 | Cloned from host 'localhost', port 5500; backup method: pg_basebackup; --force: N
- 2 | standby_register | t | 2015-03-16 17:36:32.391567+09 |
- (3 rows)
-
-
-Additionally `repmgr` can execute an external program each time an event is
-logged. This program is defined with the configuration variable
-`event_notification_command`; the command string can contain the following
-placeholders, which will be replaced with the same content which is
-written to the `repl_events` table:
+As this can generate a large amount of monitoring data in the `repl_monitor`
+table, it's advisable to regularly purge historical data with
+`repmgr cluster cleanup`; use the `-k/--keep-history` option to specify how
+many days' worth of data should be retained.
- %n - node id
- %e - event type
- %s - success (1 or 0)
- %t - timestamp
- %d - description
-Example:
+Using a witness server with repmgrd
+------------------------------------
- event_notification_command=/path/to/some-script %n %e %s "%t" "%d"
-
-By default the program defined with `event_notification_command` will be
-executed for every event; to restrict execution to certain events, list
-these in the parameter `event_notifications`
-
- event_notifications=master_register,standby_register
+In a situation caused e.g. by a network interruption between two
+data centres, it's important to avoid a "split-brain" situation where
+both sides of the network assume they are the active segment and the
+side without an active master unilaterally promotes one of its standbys.
-Following event types currently exist:
+To prevent this situation happening, it's essential to ensure that one
+network segment has a "voting majority", so other segments will know
+they're in the minority and not attempt to promote a new master. Where
+an odd number of servers exists, this is not an issue. However, if each
+network has an even number of nodes, it's necessary to provide some way
+of ensuring a majority, which is where the witness server becomes useful.
- master_register
- standby_register
- standby_unregister
- standby_clone
- standby_promote
- witness_create
- repmgrd_start
- repmgrd_monitor
- repmgrd_failover_promote
- repmgrd_failover_follow
+This is not a fully-fledged standby node and is not integrated into
+replication, but it effectively represents the "casting vote" when
+deciding which network segment has a majority. A witness server can
+be set up using `repmgr witness create` (see below for details) and
+can run on a dedicated server or an existing node. Note that it only
+makes sense to create a witness server in conjunction with running
+`repmgrd`; the witness server will require its own `repmgrd` instance.
-Cascading replication
----------------------
+repmgrd and cascading replication
+---------------------------------
Cascading replication - where a standby can connect to an upstream node and not
the master server itself - was introduced in PostgreSQL 9.2. `repmgr` and
@@ -396,79 +1064,114 @@ and continue working as normal (even if the upstream standby it's connected
to becomes the master node). If however the node's direct upstream fails,
the "cascaded standby" will attempt to reconnect to that node's parent.
-To configure standby servers for cascading replication, add the parameter
-`upstream_node` to `repmgr.conf` and set it to the id of the node it should
-connect to, e.g.:
- cluster=test
- node=2
- node_name=node2
- upstream_node=1
+Generating event notifications with repmgr/repmgrd
+--------------------------------------------------
-Replication slots
------------------
+Each time `repmgr` or `repmgrd` perform a significant event, a record
+of that event is written into the `repl_events` table together with
+a timestamp, an indication of failure or success, and further details
+if appropriate. This is useful for gaining an overview of events
+affecting the replication cluster. However note that this table has
+advisory character and should be used in combination with the `repmgr`
+and PostgreSQL logs to obtain details of any events.
-Replication slots were introduced with PostgreSQL 9.4 and enable standbys to
-notify the master of their WAL consumption, ensuring that the master will
-not remove any WAL files until they have been received by all standbys.
-This mitigates the requirement to manage WAL file retention using
-`wal_keep_segments` etc., with the caveat that if a standby fails, no WAL
-files will be removed until the standby's replication slot is deleted.
+Example output after a master was registered and a standby cloned
+and registered:
-To enable replication slots, set the boolean parameter `use_replication_slots`
-in `repmgr.conf`:
+ repmgr=# SELECT * from repmgr_test.repl_events ;
+ node_id | event | successful | event_timestamp | details
+ ---------+------------------+------------+-------------------------------+-------------------------------------------------------------------------------------
+ 1 | master_register | t | 2016-01-08 15:04:39.781733+09 |
+ 2 | standby_clone | t | 2016-01-08 15:04:49.530001+09 | Cloned from host 'repmgr_node1', port 5432; backup method: pg_basebackup; --force: N
+ 2 | standby_register | t | 2016-01-08 15:04:50.621292+09 |
+ (3 rows)
- use_replication_slots=1
+Additionally, event notifications can be passed to a user-defined program
+or script which can take further action, e.g. send email notifications.
+This is done by setting the `event_notification_command` parameter in
+`repmgr.conf`.
-`repmgr` will automatically generate an appropriate slot name, which is
-stored in the `repl_nodes` table.
+This parameter accepts the following format placeholders:
-Note that `repmgr` will fail with an error if this option is specified when
-working with PostgreSQL 9.3.
+ %n - node ID
+ %e - event type
+ %s - success (1 or 0)
+ %t - timestamp
+ %d - details
-Be aware that when initially cloning a standby, you will need to ensure
-that all required WAL files remain available while the cloning is taking
-place. If using the default `pg_basebackup` method, we recommend setting
-`pg_basebackup`'s `--xlog-method` parameter to `stream` like this:
+The values provided for "%t" and "%d" will probably contain spaces,
+so should be quoted in the provided command configuration, e.g.:
- pg_basebackup_options='--xlog-method=stream'
+ event_notification_command='/path/to/some/script %n %e %s "%t" "%d"'
-See the `pg_basebackup` documentation [*] for details. Otherwise you'll need
-to set `wal_keep_segments` to an appropriately high value.
+By default, all notifications will be passed; the notification types
+can be filtered to explicitly named ones:
-[*] http://www.postgresql.org/docs/current/static/app-pgbasebackup.html
+ event_notifications=master_register,standby_register,witness_create
-Further reading:
- * http://www.postgresql.org/docs/current/interactive/warm-standby.html#STREAMING-REPLICATION-SLOTS
- * http://blog.2ndquadrant.com/postgresql-9-4-slots/
+The following event types are available:
-Upgrading from repmgr 2
------------------------
+ * `master_register`
+ * `standby_register`
+ * `standby_unregister`
+ * `standby_clone`
+ * `standby_promote`
+ * `standby_follow`
+ * `standby_switchover`
+ * `witness_create`
+ * `repmgrd_start`
+ * `repmgrd_shutdown`
+ * `repmgrd_failover_promote`
+ * `repmgrd_failover_follow`
-`repmgr 3` is largely compatible with `repmgr 2`; the only step required
-to upgrade is to update the `repl_nodes` table to the definition needed
-by `repmgr 3`. See the file `sql/repmgr2_repmgr3.sql` for details on how
-to do this.
+Note that under some circumstances (e.g. no replication cluster master could
+be located), it will not be possible to write an entry into the `repl_events`
+table, in which case `event_notification_command` can serve as a fallback.
-`repmgrd` must *not* be running while `repl_nodes` is being updated.
-Existing `repmgr.conf` files can be retained as-is.
+Upgrading repmgr
+----------------
----------------------------------------
+`repmgr` is updated regularly with point releases (e.g. 3.0.2 to 3.0.3)
+containing bugfixes and other minor improvements. Any substantial new
+functionality will be included in a feature release (e.g. 3.0.x to 3.1.x).
+
+In general `repmgr` can be upgraded as-is without any further action required,
+however feature releases may require the `repmgr` database to be upgraded.
+An SQL script will be provided - please check the release notes for details.
Reference
---------
-### repmgr command reference
+### Default values
+
+For some command line and most configuration file parameters, `repmgr` falls
+back to default values if values for these are not explicitly provided.
-Not all of these commands need the ``repmgr.conf`` file, but they need to be able to
-connect to the remote and local databases.
+The file `repmgr.conf.sample` documents the default value of configuration
+parameters if one is set. Of particular note is the log level, which
+defaults to NOTICE; particularly when using repmgr from the command line
+it may be useful to set this to a higher level with `-L/--log-level`. e.g.
+to `INFO`.
+
+Execute `repmgr --help` to see the default values for various command
+line parameters, particularly database connection parameters.
+
+See the section `Configuration` above for information on how the
+configuration file is located if `-f/--config-file` is not supplied.
+
+### repmgr commands
+
+The `repmgr` command line tool accepts commands for specific servers in the
+replication cluster in the format "`server_type` `action`", or for the entire
+replication cluster in the format "`cluster` `action`". Each command is
+described below.
+
+In general, each command needs to be provided with the path to `repmgr.conf`,
+which contains connection details for the local database.
-You can teach it which is the remote database by using the -h parameter or
-as a last parameter in standby clone and standby follow. If you need to specify
-a port different then the default 5432 you can specify a -p parameter.
-Standby is always considered as localhost and a second -p parameter will indicate
-its port if is different from the default one.
* `master register`
@@ -486,7 +1189,7 @@ its port if is different from the default one.
* `standby unregister`
Unregisters a standby with `repmgr`. This command does not affect the actual
- replication.
+ replication, just removes the standby's entry from the `repl_nodes` table.
* `standby clone [node to be cloned]`
@@ -520,6 +1223,27 @@ its port if is different from the default one.
This command will not function if the current master is still running.
+* `standby switchover`
+
+ Promotes a standby to master and demotes the existing master to a standby.
+ This command must be run on the standby to be promoted, and requires a
+ password-less SSH connection to the current master. Additionally the
+ location of the master's `repmgr.conf` file must be provided with
+ `-C/--remote-config-file`.
+
+ `repmgrd` should not be active if a switchover is attempted. This
+ restriction may be lifted in a later version.
+
+* `standby follow`
+
+ Attaches the standby to a new master. This command requires a valid
+ `repmgr.conf` file for the standby, either specified explicitly with
+ `-f/--config-file` or located in the current working directory; no
+ additional arguments are required.
+
+ This command will force a restart of the standby server. It can only be used
+ to attach a standby to a new master node.
+
* `witness create`
Creates a witness server as a separate PostgreSQL instance. This instance
@@ -529,41 +1253,44 @@ its port if is different from the default one.
time a failover occurs.
Note that it only makes sense to create a witness server if `repmgrd`
- is in use; see section "witness server" above.
+ is in use; see section "Using a witness server" above.
+
+ This command requires a `repmgr.conf` file containing a valid conninfo
+ string for the server to be created, as well as the other minimum required
+ parameters detailed in the section `repmgr configuration file` above.
By default the witness server will use port 5499 to facilitate easier setup
- on a server running an existing node.
+ on a server running an existing node. To use a different port, supply
+ this explicitly in the `repmgr.conf` conninfo string.
-* `standby follow`
+ This command also requires the location of the witness server's data
+ directory to be provided (`-D/--datadir`) as well as valid connection
+ parameters for the master server.
- Attaches the standby to a new master. This command requires a valid
- `repmgr.conf` file for the standby, either specified explicitly with
- `-f/--config-file` or located in the current working directory; no
- additional arguments are required.
-
- This command will force a restart of the standby server. It can only be used
- to attach a standby to a new master node.
+ By default this command will create a superuser and a repmgr user.
+ The `repmgr` user name will be extracted from the `conninfo` string
+ in `repmgr.conf`.
* `cluster show`
- Displays information about each node in the replication cluster. This
+ Displays information about each active node in the replication cluster. This
command polls each registered server and shows its role (master / standby /
- witness) or "FAILED" if the node doesn't respond. It polls each server
+ witness) or `FAILED` if the node doesn't respond. It polls each server
directly and can be run on any node in the cluster; this is also useful
when analyzing connectivity from a particular node.
- This command requires a valid `repmgr.conf` file for the node on which it is
- executed, either specified explicitly with `-f/--config-file` or located in
- the current working directory; no additional arguments are required.
+ This command requires a valid `repmgr.conf` file to be provided; no
+ additional arguments are required.
Example:
- repmgr -f /path/to/repmgr.conf cluster show
- Role | Connection String
- * master | host=node1 dbname=repmgr user=repmgr
- standby | host=node2 dbname=repmgr user=repmgr
- standby | host=node3 dbname=repmgr user=repmgr
+ $ repmgr -f /etc/repmgr.conf cluster show
+ Role | Name | Upstream | Connection String
+ ----------+-------|----------|--------------------------------------------
+ * master | node1 | | host=repmgr_node1 dbname=repmgr user=repmgr
+ standby | node2 | node1 | host=repmgr_node1 dbname=repmgr user=repmgr
+ standby | node3 | node2 | host=repmgr_node1 dbname=repmgr user=repmgr
* `cluster cleanup`
@@ -576,31 +1303,6 @@ its port if is different from the default one.
executed, either specified explicitly with `-f/--config-file` or located in
the current working directory; no additional arguments are required.
-### repmgr configuration file
-
-See `repmgr.conf.sample` for an example configuration file with available
-configuration settings annotated.
-
-### repmgr database schema
-
-`repmgr` creates a small schema for its own use in the database specified in
-each node's `conninfo` configuration parameter. This database can in principle
-be any database. The schema name is the global `cluster` name prefixed
-with `repmgr_`, so for the example setup above the schema name is
-`repmgr_test`.
-
-The schema contains two tables:
-
-* `repl_nodes`
- stores information about all registered servers in the cluster
-* `repl_monitor`
- stores monitoring information about each node (generated by `repmgrd` with
- `-m/--monitoring-history` option enabled)
-
-and one view:
-* `repl_status`
- summarizes the latest monitoring information for each node (generated by `repmgrd` with
- `-m/--monitoring-history` option enabled)
### Error codes
@@ -625,17 +1327,22 @@ exit:
Support and Assistance
----------------------
-2ndQuadrant provides 24x7 production support for repmgr, including
+2ndQuadrant provides 24x7 production support for `repmgr`, including
configuration assistance, installation verification and training for
running a robust replication cluster. For further details see:
* http://2ndquadrant.com/en/support/
-There is a mailing list/forum to discuss contributions or issues
-http://groups.google.com/group/repmgr
+There is a mailing list/forum to discuss contributions or issues:
+
+* http://groups.google.com/group/repmgr
The IRC channel #repmgr is registered with freenode.
+Please report bugs and other issues to:
+
+* https://github.com/2ndQuadrant/repmgr
+
Further information is available at http://www.repmgr.org/
We'd love to hear from you about how you use repmgr. Case studies and
@@ -661,6 +1368,5 @@ Thanks from the repmgr core team.
Further reading
---------------
-* http://blog.2ndquadrant.com/announcing-repmgr-2-0/
* http://blog.2ndquadrant.com/managing-useful-clusters-repmgr/
* http://blog.2ndquadrant.com/easier_postgresql_90_clusters/
diff --git a/TODO b/TODO
index 3e377a3..4ec153c 100644
--- a/TODO
+++ b/TODO
@@ -40,13 +40,6 @@ Planned feature improvements
* make old master node ID available for event notification commands
(See github issue #80).
-* Have pg_basebackup use replication slots, if and when support for
- this is added; see:
- http://www.postgresql.org/message-id/[email protected]
-
-* use "primary/standby" terminology in place of "master/slave" for consistency
- with main PostrgreSQL usage
-
* repmgr standby clone: possibility to use barman instead of performing a new base backup
* possibility to transform a failed master into a new standby with pg_rewind
@@ -60,6 +53,9 @@ Planned feature improvements
requested, activate the replication slot using pg_receivexlog to negate the
need to set `wal_keep_segments` just for the initial clone (9.4 and 9.5).
+* Take into account the fact that a standby can obtain WAL from an archive,
+ so even if direct streaming replication is interrupted, it may be up-to-date
+
Usability improvements
======================
diff --git a/check_dir.c b/check_dir.c
index adf446b..0f363e1 100644
--- a/check_dir.c
+++ b/check_dir.c
@@ -1,6 +1,6 @@
/*
* check_dir.c - Directories management functions
- * Copyright (C) 2ndQuadrant, 2010-2015
+ * Copyright (C) 2ndQuadrant, 2010-2016
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/check_dir.h b/check_dir.h
index b3d4413..5e38d05 100644
--- a/check_dir.h
+++ b/check_dir.h
@@ -1,6 +1,6 @@
/*
* check_dir.h
- * Copyright (c) 2ndQuadrant, 2010-2015
+ * Copyright (c) 2ndQuadrant, 2010-2016
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/config.c b/config.c
index 484c562..46ab9c2 100644
--- a/config.c
+++ b/config.c
@@ -1,6 +1,6 @@
/*
* config.c - Functions to parse the config file
- * Copyright (C) 2ndQuadrant, 2010-2015
+ * Copyright (C) 2ndQuadrant, 2010-2016
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -31,7 +31,7 @@ static void exit_with_errors(ErrorList *config_errors);
const static char *_progname = '\0';
static char config_file_path[MAXPGPATH];
static bool config_file_provided = false;
-static bool config_file_found = false;
+bool config_file_found = false;
void
@@ -149,7 +149,7 @@ load_config(const char *config_file, bool verbose, t_configuration_options *opti
if (verbose == true)
{
- log_notice(_("looking for configuration file in %s"), sysconf_etc_path);
+ log_notice(_("looking for configuration file in %s\n"), sysconf_etc_path);
}
snprintf(config_file_path, MAXPGPATH, "%s/%s", sysconf_etc_path, CONFIG_FILE_NAME);
@@ -198,11 +198,13 @@ parse_config(t_configuration_options *options)
/* For sanity-checking provided conninfo string */
PQconninfoOption *conninfo_options;
- char *conninfo_errmsg = NULL;
+ char *conninfo_errmsg = NULL;
/* Collate configuration file errors here for friendlier reporting */
static ErrorList config_errors = { NULL, NULL };
+ bool node_found = false;
+
/* Initialize configuration options with sensible defaults
* note: the default log level is set in log.c and does not need
* to be initialised here
@@ -244,7 +246,7 @@ parse_config(t_configuration_options *options)
*/
if (config_file_found == false)
{
- log_notice(_("no configuration file provided and no default file found - "
+ log_verbose(LOG_NOTICE, _("no configuration file provided and no default file found - "
"continuing with default values\n"));
return true;
}
@@ -290,9 +292,12 @@ parse_config(t_configuration_options *options)
if (strcmp(name, "cluster") == 0)
strncpy(options->cluster_name, value, MAXLEN);
else if (strcmp(name, "node") == 0)
- options->node = repmgr_atoi(value, "node", &config_errors);
+ {
+ options->node = repmgr_atoi(value, "node", &config_errors, false);
+ node_found = true;
+ }
else if (strcmp(name, "upstream_node") == 0)
- options->upstream_node = repmgr_atoi(value, "upstream_node", &config_errors);
+ options->upstream_node = repmgr_atoi(value, "upstream_node", &config_errors, false);
else if (strcmp(name, "conninfo") == 0)
strncpy(options->conninfo, value, MAXLEN);
else if (strcmp(name, "rsync_options") == 0)
@@ -323,7 +328,7 @@ parse_config(t_configuration_options *options)
}
}
else if (strcmp(name, "priority") == 0)
- options->priority = repmgr_atoi(value, "priority", &config_errors);
+ options->priority = repmgr_atoi(value, "priority", &config_errors, true);
else if (strcmp(name, "node_name") == 0)
strncpy(options->node_name, value, MAXLEN);
else if (strcmp(name, "promote_command") == 0)
@@ -331,16 +336,16 @@ parse_config(t_configuration_options *options)
else if (strcmp(name, "follow_command") == 0)
strncpy(options->follow_command, value, MAXLEN);
else if (strcmp(name, "master_response_timeout") == 0)
- options->master_response_timeout = repmgr_atoi(value, "master_response_timeout", &config_errors);
+ options->master_response_timeout = repmgr_atoi(value, "master_response_timeout", &config_errors, false);
/* 'primary_response_timeout' as synonym for 'master_response_timeout' -
* we'll switch terminology in a future release (3.1?)
*/
else if (strcmp(name, "primary_response_timeout") == 0)
- options->master_response_timeout = repmgr_atoi(value, "primary_response_timeout", &config_errors);
+ options->master_response_timeout = repmgr_atoi(value, "primary_response_timeout", &config_errors, false);
else if (strcmp(name, "reconnect_attempts") == 0)
- options->reconnect_attempts = repmgr_atoi(value, "reconnect_attempts", &config_errors);
+ options->reconnect_attempts = repmgr_atoi(value, "reconnect_attempts", &config_errors, false);
else if (strcmp(name, "reconnect_interval") == 0)
- options->reconnect_interval = repmgr_atoi(value, "reconnect_interval", &config_errors);
+ options->reconnect_interval = repmgr_atoi(value, "reconnect_interval", &config_errors, false);
else if (strcmp(name, "pg_bindir") == 0)
strncpy(options->pg_bindir, value, MAXLEN);
else if (strcmp(name, "pg_ctl_options") == 0)
@@ -350,12 +355,12 @@ parse_config(t_configuration_options *options)
else if (strcmp(name, "logfile") == 0)
strncpy(options->logfile, value, MAXLEN);
else if (strcmp(name, "monitor_interval_secs") == 0)
- options->monitor_interval_secs = repmgr_atoi(value, "monitor_interval_secs", &config_errors);
+ options->monitor_interval_secs = repmgr_atoi(value, "monitor_interval_secs", &config_errors, false);
else if (strcmp(name, "retry_promote_interval_secs") == 0)
- options->retry_promote_interval_secs = repmgr_atoi(value, "retry_promote_interval_secs", &config_errors);
+ options->retry_promote_interval_secs = repmgr_atoi(value, "retry_promote_interval_secs", &config_errors, false);
else if (strcmp(name, "use_replication_slots") == 0)
/* XXX we should have a dedicated boolean argument format */
- options->use_replication_slots = repmgr_atoi(value, "use_replication_slots", &config_errors);
+ options->use_replication_slots = repmgr_atoi(value, "use_replication_slots", &config_errors, false);
else if (strcmp(name, "event_notification_command") == 0)
strncpy(options->event_notification_command, value, MAXLEN);
else if (strcmp(name, "event_notifications") == 0)
@@ -387,29 +392,17 @@ parse_config(t_configuration_options *options)
fclose(fp);
- /* Check config settings */
-
- /* The following checks are for the presence of the parameter */
- if (*options->cluster_name == '\0')
- {
- error_list_append(&config_errors, _("\"cluster\": parameter was not found\n"));
- }
- if (options->node == -1)
+ if (node_found == false)
{
- error_list_append(&config_errors, _("\"node\": parameter was not found\n"));
+ error_list_append(&config_errors, _("\"node\": parameter was not found"));
}
-
- if (*options->node_name == '\0')
+ else if (options->node == 0)
{
- error_list_append(&config_errors, _("\"node_name\": parameter was not found\n"));
+ error_list_append(&config_errors, _("\"node\": must be greater than zero"));
}
- if (*options->conninfo == '\0')
- {
- error_list_append(&config_errors, _("\"conninfo\": parameter was not found\n"));
- }
- else
+ if (strlen(options->conninfo))
{
/* Sanity check the provided conninfo string
@@ -791,7 +784,7 @@ error_list_append(ErrorList *error_list, char *error_message)
* otherwise exit
*/
int
-repmgr_atoi(const char *value, const char *config_item, ErrorList *error_list)
+repmgr_atoi(const char *value, const char *config_item, ErrorList *error_list, bool allow_negative)
{
char *endptr;
long longval = 0;
@@ -822,8 +815,8 @@ repmgr_atoi(const char *value, const char *config_item, ErrorList *error_list)
}
}
- /* Currently there are no values which could be negative */
- if (longval < 0)
+ /* Disallow negative values for most parameters */
+ if (allow_negative == false && longval < 0)
{
snprintf(error_message_buf,
MAXLEN,
diff --git a/config.h b/config.h
index 4307802..3d65637 100644
--- a/config.h
+++ b/config.h
@@ -1,6 +1,6 @@
/*
* config.h
- * Copyright (c) 2ndQuadrant, 2010-2015
+ * Copyright (c) 2ndQuadrant, 2010-2016
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -106,6 +106,7 @@ char *trim(char *s);
void error_list_append(ErrorList *error_list, char *error_message);
int repmgr_atoi(const char *s,
const char *config_item,
- ErrorList *error_list);
+ ErrorList *error_list,
+ bool allow_negative);
#endif
diff --git a/dbutils.c b/dbutils.c
index b9f8b99..0b12517 100644
--- a/dbutils.c
+++ b/dbutils.c
@@ -1,6 +1,6 @@
/*
* dbutils.c - Database connection/management functions
- * Copyright (C) 2ndQuadrant, 2010-2015
+ * Copyright (C) 2ndQuadrant, 2010-2016
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -26,11 +26,14 @@
#include "strutil.h"
#include "log.h"
+#include "catalog/pg_control.h"
+
char repmgr_schema[MAXLEN] = "";
char repmgr_schema_quoted[MAXLEN] = "";
+
PGconn *
-establish_db_connection(const char *conninfo, const bool exit_on_error)
+_establish_db_connection(const char *conninfo, const bool exit_on_error, const bool log_notice)
{
/* Make a connection to the database */
PGconn *conn = NULL;
@@ -46,8 +49,16 @@ establish_db_connection(const char *conninfo, const bool exit_on_error)
/* Check to see that the backend connection was successfully made */
if ((PQstatus(conn) != CONNECTION_OK))
{
- log_err(_("connection to database failed: %s\n"),
- PQerrorMessage(conn));
+ if (log_notice)
+ {
+ log_notice(_("connection to database failed: %s\n"),
+ PQerrorMessage(conn));
+ }
+ else
+ {
+ log_err(_("connection to database failed: %s\n"),
+ PQerrorMessage(conn));
+ }
if (exit_on_error)
{
@@ -60,6 +71,19 @@ establish_db_connection(const char *conninfo, const bool exit_on_error)
}
PGconn *
+establish_db_connection(const char *conninfo, const bool exit_on_error)
+{
+ return _establish_db_connection(conninfo, exit_on_error, false);
+}
+
+PGconn *
+test_db_connection(const char *conninfo, const bool exit_on_error)
+{
+ return _establish_db_connection(conninfo, exit_on_error, true);
+}
+
+
+PGconn *
establish_db_connection_by_params(const char *keywords[], const char *values[],
const bool exit_on_error)
{
@@ -308,7 +332,7 @@ get_master_node_id(PGconn *conn, char *cluster)
}
else if (PQntuples(res) == 0)
{
- log_warning(_("get_master_node_id(): no active primary found\n"));
+ log_verbose(LOG_WARNING, _("get_master_node_id(): no active primary found\n"));
retval = NODE_NOT_FOUND;
}
else
@@ -445,7 +469,6 @@ get_cluster_size(PGconn *conn, char *size)
}
-
bool
get_pg_setting(PGconn *conn, const char *setting, char *output)
{
@@ -488,7 +511,7 @@ get_pg_setting(PGconn *conn, const char *setting, char *output)
if (success == true)
{
- log_debug(_("get_pg_setting(): returned value is \"%s\"\n"), output);
+ log_verbose(LOG_DEBUG, _("get_pg_setting(): returned value is \"%s\"\n"), output);
}
PQclear(res);
@@ -498,6 +521,48 @@ get_pg_setting(PGconn *conn, const char *setting, char *output)
/*
+ * get_conninfo_value()
+ *
+ * Extract the value represented by 'keyword' in 'conninfo' and copy
+ * it to the 'output' buffer.
+ *
+ * Returns true on success, or false on failure (conninfo string could
+ * not be parsed, or provided keyword not found).
+ */
+
+bool
+get_conninfo_value(const char *conninfo, const char *keyword, char *output)
+{
+ PQconninfoOption *conninfo_options;
+ PQconninfoOption *conninfo_option;
+
+ conninfo_options = PQconninfoParse(conninfo, NULL);
+
+ if (conninfo_options == false)
+ {
+ log_err(_("Unable to parse provided conninfo string \"%s\""), conninfo);
+ return false;
+ }
+
+ for (conninfo_option = conninfo_options; conninfo_option->keyword != NULL; conninfo_option++)
+ {
+ if (strcmp(conninfo_option->keyword, keyword) == 0)
+ {
+ if (conninfo_option->val != NULL && conninfo_option->val[0] != '\0')
+ {
+ strncpy(output, conninfo_option->val, MAXLEN);
+ break;
+ }
+ }
+ }
+
+ PQconninfoFree(conninfo_options);
+
+ return true;
+}
+
+
+/*
* get_upstream_connection()
*
* Returns connection to node's upstream node
@@ -598,6 +663,13 @@ get_master_connection(PGconn *standby_conn, char *cluster,
int i,
node_id;
+ /*
+ * If the caller wanted to get a copy of the connection info string, sub
+ * out the local stack pointer for the pointer passed by the caller.
+ */
+ if (master_conninfo_out != NULL)
+ remote_conninfo = master_conninfo_out;
+
if (master_id != NULL)
{
*master_id = NODE_NOT_FOUND;
@@ -819,8 +891,10 @@ get_repmgr_schema_quoted(PGconn *conn)
bool
create_replication_slot(PGconn *conn, char *slot_name)
{
- char sqlquery[QUERY_STR_LEN];
- PGresult *res;
+ char sqlquery[QUERY_STR_LEN];
+ int query_res;
+ PGresult *res;
+ t_replication_slot slot_info;
/*
* Check whether slot exists already; if it exists and is active, that
@@ -828,40 +902,25 @@ create_replication_slot(PGconn *conn, char *slot_name)
* if not we can reuse it as-is
*/
- sqlquery_snprintf(sqlquery,
- "SELECT active, slot_type "
- " FROM pg_replication_slots "
- " WHERE slot_name = '%s' ",
- slot_name);
-
- log_verbose(LOG_DEBUG, "create_replication_slot():\n%s\n", sqlquery);
+ query_res = get_slot_record(conn, slot_name, &slot_info);
- res = PQexec(conn, sqlquery);
- if (!res || PQresultStatus(res) != PGRES_TUPLES_OK)
+ if (query_res)
{
- log_err(_("unable to query pg_replication_slots: %s\n"),
- PQerrorMessage(conn));
- PQclear(res);
- return false;
- }
-
- if (PQntuples(res))
- {
- if (strcmp(PQgetvalue(res, 0, 1), "physical") != 0)
+ if (strcmp(slot_info.slot_type, "physical") != 0)
{
log_err(_("Slot '%s' exists and is not a physical slot\n"),
slot_name);
- PQclear(res);
+ return false;
}
- if (strcmp(PQgetvalue(res, 0, 0), "f") == 0)
+
+ if (slot_info.active == false)
{
- PQclear(res);
log_debug("Replication slot '%s' exists but is inactive; reusing\n",
slot_name);
return true;
}
- PQclear(res);
+
log_err(_("Slot '%s' already exists as an active slot\n"),
slot_name);
return false;
@@ -888,6 +947,46 @@ create_replication_slot(PGconn *conn, char *slot_name)
return true;
}
+
+int
+get_slot_record(PGconn *conn, char *slot_name, t_replication_slot *record)
+{
+ char sqlquery[QUERY_STR_LEN];
+ PGresult *res;
+
+ sqlquery_snprintf(sqlquery,
+ "SELECT slot_name, slot_type, active "
+ " FROM pg_replication_slots "
+ " WHERE slot_name = '%s' ",
+ slot_name);
+
+ log_verbose(LOG_DEBUG, "get_slot_record():\n%s\n", sqlquery);
+
+ res = PQexec(conn, sqlquery);
+ if (!res || PQresultStatus(res) != PGRES_TUPLES_OK)
+ {
+ log_err(_("unable to query pg_replication_slots: %s\n"),
+ PQerrorMessage(conn));
+ PQclear(res);
+ return -1;
+ }
+
+ if (!PQntuples(res))
+ {
+ return 0;
+ }
+
+ strncpy(record->slot_name, PQgetvalue(res, 0, 0), MAXLEN);
+ strncpy(record->slot_type, PQgetvalue(res, 0, 1), MAXLEN);
+ record->active = (strcmp(PQgetvalue(res, 0, 2), "t") == 0)
+ ? true
+ : false;
+
+ PQclear(res);
+
+ return 1;
+}
+
bool
drop_replication_slot(PGconn *conn, char *slot_name)
{
@@ -1427,6 +1526,7 @@ create_event_record(PGconn *conn, t_configuration_options *options, int node_id,
return success;
}
+
/*
* Update node record following change of status
* (e.g. inactive primary converted to standby)
@@ -1435,7 +1535,7 @@ bool
update_node_record_status(PGconn *conn, char *cluster_name, int this_node_id, char *type, int upstream_node_id, bool active)
{
PGresult *res;
- char sqlquery[QUERY_STR_LEN];
+ char sqlquery[QUERY_STR_LEN];
sqlquery_snprintf(sqlquery,
" UPDATE %s.repl_nodes "
@@ -1508,21 +1608,135 @@ update_node_record_set_upstream(PGconn *conn, char *cluster_name, int this_node_
}
-PGresult *
-get_node_record(PGconn *conn, char *cluster, int node_id)
+int
+get_node_record(PGconn *conn, char *cluster, int node_id, t_node_info *node_info)
{
char sqlquery[QUERY_STR_LEN];
-
- sprintf(sqlquery,
- "SELECT id, upstream_node_id, conninfo, type, slot_name, active "
- " FROM %s.repl_nodes "
- " WHERE cluster = '%s' "
- " AND id = %i",
- get_repmgr_schema_quoted(conn),
- cluster,
- node_id);
+ PGresult *res;
+ int ntuples;
+
+ sqlquery_snprintf(
+ sqlquery,
+ "SELECT id, type, upstream_node_id, name, conninfo, slot_name, priority, active"
+ " FROM %s.repl_nodes "
+ " WHERE cluster = '%s' "
+ " AND id = %i",
+ get_repmgr_schema_quoted(conn),
+ cluster,
+ node_id);
log_verbose(LOG_DEBUG, "get_node_record():\n%s\n", sqlquery);
- return PQexec(conn, sqlquery);
+ res = PQexec(conn, sqlquery);
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ {
+ return -1;
+ }
+
+ ntuples = PQntuples(res);
+
+ if (ntuples == 0)
+ {
+ log_verbose(LOG_DEBUG, "get_node_record(): no record found for node %i\n", node_id);
+ return 0;
+ }
+
+ node_info->node_id = atoi(PQgetvalue(res, 0, 0));
+ node_info->type = parse_node_type(PQgetvalue(res, 0, 1));
+ node_info->upstream_node_id = atoi(PQgetvalue(res, 0, 2));
+ strncpy(node_info->name, PQgetvalue(res, 0, 3), MAXLEN);
+ strncpy(node_info->conninfo_str, PQgetvalue(res, 0, 4), MAXLEN);
+ strncpy(node_info->slot_name, PQgetvalue(res, 0, 5), MAXLEN);
+ node_info->priority = atoi(PQgetvalue(res, 0, 6));
+ node_info->active = (strcmp(PQgetvalue(res, 0, 7), "t") == 0)
+ ? true
+ : false;
+
+ PQclear(res);
+
+ return ntuples;
+}
+
+
+int
+get_node_replication_state(PGconn *conn, char *node_name, char *output)
+{
+ char sqlquery[QUERY_STR_LEN];
+ PGresult * res;
+
+ sqlquery_snprintf(
+ sqlquery,
+ " SELECT state "
+ " FROM pg_catalog.pg_stat_replication"
+ " WHERE application_name = '%s'",
+ node_name
+ );
+
+ res = PQexec(conn, sqlquery);
+
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ {
+ PQclear(res);
+ return -1;
+ }
+
+ if (PQntuples(res) == 0)
+ {
+ PQclear(res);
+ return 0;
+ }
+
+ strncpy(output, PQgetvalue(res, 0, 0), MAXLEN);
+ PQclear(res);
+
+ return true;
+
+}
+
+t_server_type
+parse_node_type(const char *type)
+{
+ if (strcmp(type, "master") == 0)
+ {
+ return MASTER;
+ }
+ else if (strcmp(type, "standby") == 0)
+ {
+ return STANDBY;
+ }
+ else if (strcmp(type, "witness") == 0)
+ {
+ return WITNESS;
+ }
+
+ return UNKNOWN;
+}
+
+
+int
+get_data_checksum_version(const char *data_directory)
+{
+ ControlFileData control_file;
+ int fd;
+ char control_file_path[MAXPGPATH];
+
+ snprintf(control_file_path, MAXPGPATH, "%s/global/pg_control", data_directory);
+ if ((fd = open(control_file_path, O_RDONLY | PG_BINARY, 0)) == -1)
+ {
+ log_err(_("Unable to open control file \"%s\" for reading: %s\n"),
+ control_file_path, strerror(errno));
+ return -1;
+ }
+
+ if (read(fd, &control_file, sizeof(ControlFileData)) != sizeof(ControlFileData))
+ {
+ log_err(_("could not read file \"%s\": %s\n"),
+ control_file_path, strerror(errno));
+ close(fd);
+ return -1;
+ }
+
+ close(fd);
+
+ return (int)control_file.data_checksum_version;
}
diff --git a/dbutils.h b/dbutils.h
index 5232ed8..df9f106 100644
--- a/dbutils.h
+++ b/dbutils.h
@@ -1,6 +1,6 @@
/*
* dbutils.h
- * Copyright (c) 2ndQuadrant, 2010-2015
+ * Copyright (c) 2ndQuadrant, 2010-2016
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -52,6 +52,18 @@ typedef struct s_node_info
} t_node_info;
+/*
+ * Struct to store replication slot information
+ */
+
+typedef struct s_replication_slot
+{
+ char slot_name[MAXLEN];
+ char slot_type[MAXLEN];
+ bool active;
+} t_replication_slot;
+
+
#define T_NODE_INFO_INITIALIZER { \
NODE_NOT_FOUND, \
NO_UPSTREAM_NODE, \
@@ -66,8 +78,13 @@ typedef struct s_node_info
InvalidXLogRecPtr \
}
+PGconn *_establish_db_connection(const char *conninfo,
+ const bool exit_on_error,
+ const bool log_notice);
PGconn *establish_db_connection(const char *conninfo,
- const bool exit_on_error);
+ const bool exit_on_error);
+PGconn *test_db_connection(const char *conninfo,
+ const bool exit_on_error);
PGconn *establish_db_connection_by_params(const char *keywords[],
const char *values[],
const bool exit_on_error);
@@ -86,7 +103,7 @@ int guc_set(PGconn *conn, const char *parameter, const char *op,
const char *value);
int guc_set_typed(PGconn *conn, const char *parameter, const char *op,
const char *value, const char *datatype);
-
+bool get_conninfo_value(const char *conninfo, const char *keyword, char *output);
PGconn *get_upstream_connection(PGconn *standby_conn, char *cluster,
int node_id,
int *upstream_node_id_ptr,
@@ -99,17 +116,20 @@ bool cancel_query(PGconn *conn, int timeout);
char *get_repmgr_schema(void);
char *get_repmgr_schema_quoted(PGconn *conn);
bool create_replication_slot(PGconn *conn, char *slot_name);
+int get_slot_record(PGconn *conn, char *slot_name, t_replication_slot *record);
bool drop_replication_slot(PGconn *conn, char *slot_name);
-
bool start_backup(PGconn *conn, char *first_wal_segment, bool fast_checkpoint);
bool stop_backup(PGconn *conn, char *last_wal_segment);
bool set_config_bool(PGconn *conn, const char *config_param, bool state);
bool copy_configuration(PGconn *masterconn, PGconn *witnessconn, char *cluster_name);
bool create_node_record(PGconn *conn, char *action, int node, char *type, int upstream_node, char *cluster_name, char *node_name, char *conninfo, int priority, char *slot_name);
bool delete_node_record(PGconn *conn, int node, char *action);
-bool create_event_record(PGconn *conn, t_configuration_options *options, int node_id, char *event, bool successful, char *details);
+int get_node_record(PGconn *conn, char *cluster, int node_id, t_node_info *node_info);
bool update_node_record_status(PGconn *conn, char *cluster_name, int this_node_id, char *type, int upstream_node_id, bool active);
bool update_node_record_set_upstream(PGconn *conn, char *cluster_name, int this_node_id, int new_upstream_node_id);
-PGresult * get_node_record(PGconn *conn, char *cluster, int node_id);
+bool create_event_record(PGconn *conn, t_configuration_options *options, int node_id, char *event, bool successful, char *details);
+int get_node_replication_state(PGconn *conn, char *node_name, char *output);
+t_server_type parse_node_type(const char *type);
+int get_data_checksum_version(const char *data_directory);
#endif
diff --git a/debian/changelog b/debian/changelog
index 6e70832..0dbdefa 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,16 @@
+repmgr (3.1.1-1~bpo70+1) wheezy-backports-sloppy; urgency=medium
+
+ * Rebuild for wheezy-backports-sloppy.
+
+ -- Christoph Berg <[email protected]> Mon, 04 Apr 2016 11:55:30 +0200
+
+repmgr (3.1.1-1) unstable; urgency=medium
+
+ * Imported Upstream version 3.1.1
+ * Use HTTPS protocol in Vcs-Browser URI inside debian/control file
+
+ -- Marco Nenciarini <[email protected]> Tue, 23 Feb 2016 19:11:25 +0100
+
repmgr (3.0.3-2~bpo70+1) wheezy-backports-sloppy; urgency=low
* Rebuild for wheezy-backports-sloppy.
diff --git a/debian/control b/debian/control
index 1aab00d..3546581 100644
--- a/debian/control
+++ b/debian/control
@@ -11,7 +11,7 @@ Build-Depends:
libxslt1-dev
Standards-Version: 3.9.6
Homepage: http://www.repmgr.org/
-Vcs-Browser: http://anonscm.debian.org/gitweb/?p=pkg-postgresql/repmgr.git
+Vcs-Browser: https://anonscm.debian.org/gitweb/?p=pkg-postgresql/repmgr.git
Vcs-Git: https://alioth.debian.org/anonscm/git/pkg-postgresql/repmgr.git
XS-Testsuite: autopkgtest
diff --git a/debian/control.in b/debian/control.in
index 66202a1..17d81ec 100644
--- a/debian/control.in
+++ b/debian/control.in
@@ -11,7 +11,7 @@ Build-Depends:
libxslt1-dev
Standards-Version: 3.9.6
Homepage: http://www.repmgr.org/
-Vcs-Browser: http://anonscm.debian.org/gitweb/?p=pkg-postgresql/repmgr.git
+Vcs-Browser: https://anonscm.debian.org/gitweb/?p=pkg-postgresql/repmgr.git
Vcs-Git: https://alioth.debian.org/anonscm/git/pkg-postgresql/repmgr.git
XS-Testsuite: autopkgtest
diff --git a/errcode.h b/errcode.h
index b6ebd73..d1e566f 100644
--- a/errcode.h
+++ b/errcode.h
@@ -1,6 +1,6 @@
/*
* errcode.h
- * Copyright (C) 2ndQuadrant, 2010-2015
+ * Copyright (C) 2ndQuadrant, 2010-2016
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/log.c b/log.c
index 6a43c79..7acbd01 100644
--- a/log.c
+++ b/log.c
@@ -1,6 +1,6 @@
/*
* log.c - Logging methods
- * Copyright (C) 2ndQuadrant, 2010-2015
+ * Copyright (C) 2ndQuadrant, 2010-2016
*
* This module is a set of methods for logging (currently only syslog)
*
diff --git a/log.h b/log.h
index b74f1db..8d1cf10 100644
--- a/log.h
+++ b/log.h
@@ -1,6 +1,6 @@
/*
* log.h
- * Copyright (c) 2ndQuadrant, 2010-2015
+ * Copyright (c) 2ndQuadrant, 2010-2016
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/repmgr.c b/repmgr.c
index f15491f..72a0c82 100644
--- a/repmgr.c
+++ b/repmgr.c
@@ -1,6 +1,6 @@
/*
* repmgr.c - Command interpreter for the repmgr package
- * Copyright (C) 2ndQuadrant, 2010-2015
+ * Copyright (C) 2ndQuadrant, 2010-2016
*
* This module is a command-line utility to easily setup a cluster of
* hot standby servers for an HA environment
@@ -14,12 +14,17 @@
* STANDBY CLONE
* STANDBY FOLLOW
* STANDBY PROMOTE
+ * STANDBY SWITCHOVER
*
* WITNESS CREATE
*
* CLUSTER SHOW
* CLUSTER CLEANUP
*
+ * For internal use:
+ * STANDBY ARCHIVE-CONFIG
+ * STANDBY RESTORE-CONFIG
+ *
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
@@ -37,8 +42,11 @@
#include "repmgr.h"
+#include <sys/types.h>
+#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
+#include <sys/stat.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
@@ -68,10 +76,12 @@
#define STANDBY_CLONE 4
#define STANDBY_PROMOTE 5
#define STANDBY_FOLLOW 6
-#define WITNESS_CREATE 7
-#define CLUSTER_SHOW 8
-#define CLUSTER_CLEANUP 9
-
+#define STANDBY_SWITCHOVER 7
+#define STANDBY_ARCHIVE_CONFIG 8
+#define STANDBY_RESTORE_CONFIG 9
+#define WITNESS_CREATE 10
+#define CLUSTER_SHOW 11
+#define CLUSTER_CLEANUP 12
static bool create_recovery_file(const char *data_dir);
@@ -96,14 +106,21 @@ static void do_standby_unregister(void);
static void do_standby_clone(void);
static void do_standby_promote(void);
static void do_standby_follow(void);
+static void do_standby_switchover(void);
+static void do_standby_archive_config(void);
+static void do_standby_restore_config(void);
static void do_witness_create(void);
static void do_cluster_show(void);
static void do_cluster_cleanup(void);
static void do_check_upstream_config(void);
+static void do_help(void);
static void exit_with_errors(void);
static void print_error_list(ErrorList *error_list, int log_level);
-static void help(void);
+
+static bool remote_command(const char *host, const char *user, const char *command, PQExpBufferData *outputbuf);
+static void format_db_cli_params(const char *conninfo, char *output);
+static bool copy_file(const char *old_filename, const char *new_filename);
/* Global variables */
static const char *keywords[6];
@@ -113,8 +130,10 @@ static bool config_file_required = true;
/* Initialization of runtime options */
t_runtime_options runtime_options = T_RUNTIME_OPTIONS_INITIALIZER;
t_configuration_options options = T_CONFIGURATION_OPTIONS_INITIALIZER;
-static bool wal_keep_segments_used = false;
+bool wal_keep_segments_used = false;
+bool connection_param_provided = false;
+bool pg_rewind_supplied = false;
static char *server_mode = NULL;
static char *server_cmd = NULL;
@@ -140,6 +159,7 @@ main(int argc, char **argv)
{"username", required_argument, NULL, 'U'},
{"superuser", required_argument, NULL, 'S'},
{"data-dir", required_argument, NULL, 'D'},
+ /* -l/--local-port is deprecated */
{"local-port", required_argument, NULL, 'l'},
{"config-file", required_argument, NULL, 'f'},
{"remote-user", required_argument, NULL, 'R'},
@@ -153,10 +173,16 @@ main(int argc, char **argv)
{"fast-checkpoint", no_argument, NULL, 'c'},
{"log-level", required_argument, NULL, 'L'},
{"terse", required_argument, NULL, 't'},
+ {"mode", required_argument, NULL, 'm'},
+ {"remote-config-file", required_argument, NULL, 'C'},
+ /* deprecated from 3.2; replaced with -P/--pwprompt */
{"initdb-no-pwprompt", no_argument, NULL, 1},
{"check-upstream-config", no_argument, NULL, 2},
{"recovery-min-apply-delay", required_argument, NULL, 3},
{"ignore-external-config-files", no_argument, NULL, 4},
+ {"config-archive-dir", required_argument, NULL, 5},
+ {"pg_rewind", optional_argument, NULL, 6},
+ {"pwprompt", optional_argument, NULL, 7},
{"help", no_argument, NULL, '?'},
{"version", no_argument, NULL, 'V'},
{NULL, 0, NULL, 0}
@@ -168,14 +194,65 @@ main(int argc, char **argv)
bool check_upstream_config = false;
bool config_file_parsed = false;
char *ptr = NULL;
+ const char *env;
set_progname(argv[0]);
+ /* Disallow running as root to prevent directory ownership problems */
+ if (geteuid() == 0)
+ {
+ fprintf(stderr,
+ _("%s: cannot be run as root\n"
+ "Please log in (using, e.g., \"su\") as the "
+ "(unprivileged) user that owns\n"
+ "the data directory.\n"
+ ),
+ progname());
+ exit(1);
+ }
+
+ /* Initialise some defaults */
+
+ /* set default user */
+ env = getenv("PGUSER");
+ if (!env)
+ {
+ struct passwd *pw = NULL;
+ pw = getpwuid(geteuid());
+ if (pw)
+ {
+ env = pw->pw_name;
+ }
+ else
+ {
+ fprintf(stderr, _("could not get current user name: %s\n"), strerror(errno));
+ exit(ERR_BAD_CONFIG);
+ }
+ }
+ strncpy(runtime_options.username, env, MAXLEN);
+
+ /* set default database */
+ env = getenv("PGDATABASE");
+ if (!env)
+ {
+ env = runtime_options.username;
+ }
+ strncpy(runtime_options.dbname, env, MAXLEN);
+
+ /* set default port */
+
+ env = getenv("PGPORT");
+ if (!env)
+ {
+ env = DEF_PGPORT_STR;
+ }
+
+ strncpy(runtime_options.masterport, env, MAXLEN);
/* Prevent getopt_long() from printing an error message */
opterr = 0;
- while ((c = getopt_long(argc, argv, "?Vd:h:p:U:S:D:l:f:R:w:k:FWIvb:rcL:t", long_options,
+ while ((c = getopt_long(argc, argv, "?Vd:h:p:U:S:D:l:f:R:w:k:FWIvb:rcL:tm:C:", long_options,
&optindex)) != -1)
{
/*
@@ -187,25 +264,29 @@ main(int argc, char **argv)
switch (c)
{
case '?':
- help();
+ do_help();
exit(SUCCESS);
case 'V':
printf("%s %s (PostgreSQL %s)\n", progname(), REPMGR_VERSION, PG_VERSION);
exit(SUCCESS);
case 'd':
strncpy(runtime_options.dbname, optarg, MAXLEN);
+ connection_param_provided = true;
break;
case 'h':
strncpy(runtime_options.host, optarg, MAXLEN);
+ connection_param_provided = true;
break;
case 'p':
- repmgr_atoi(optarg, "-p/--port", &cli_errors);
+ repmgr_atoi(optarg, "-p/--port", &cli_errors, false);
strncpy(runtime_options.masterport,
optarg,
MAXLEN);
+ connection_param_provided = true;
break;
case 'U':
strncpy(runtime_options.username, optarg, MAXLEN);
+ connection_param_provided = true;
break;
case 'S':
strncpy(runtime_options.superuser, optarg, MAXLEN);
@@ -215,7 +296,7 @@ main(int argc, char **argv)
break;
case 'l':
/* -l/--local-port is deprecated */
- repmgr_atoi(optarg, "-l/--local-port", &cli_errors);
+ repmgr_atoi(optarg, "-l/--local-port", &cli_errors, false);
strncpy(runtime_options.localport,
optarg,
MAXLEN);
@@ -227,14 +308,14 @@ main(int argc, char **argv)
strncpy(runtime_options.remote_user, optarg, MAXLEN);
break;
case 'w':
- repmgr_atoi(optarg, "-w/--wal-keep-segments", &cli_errors);
+ repmgr_atoi(optarg, "-w/--wal-keep-segments", &cli_errors, false);
strncpy(runtime_options.wal_keep_segments,
optarg,
MAXLEN);
wal_keep_segments_used = true;
break;
case 'k':
- runtime_options.keep_history = repmgr_atoi(optarg, "-k/--keep-history", &cli_errors);
+ runtime_options.keep_history = repmgr_atoi(optarg, "-k/--keep-history", &cli_errors, false);
break;
case 'F':
runtime_options.force = true;
@@ -270,12 +351,35 @@ main(int argc, char **argv)
initPQExpBuffer(&invalid_log_level);
appendPQExpBuffer(&invalid_log_level, _("Invalid log level \"%s\" provided"), optarg);
error_list_append(&cli_errors, invalid_log_level.data);
+ termPQExpBuffer(&invalid_log_level);
}
break;
}
case 't':
runtime_options.terse = true;
break;
+ case 'm':
+ {
+ if (strcmp(optarg, "smart") == 0 ||
+ strcmp(optarg, "fast") == 0 ||
+ strcmp(optarg, "immediate") == 0
+ )
+ {
+ strncpy(runtime_options.pg_ctl_mode, optarg, MAXLEN);
+ }
+ else
+ {
+ PQExpBufferData invalid_mode;
+ initPQExpBuffer(&invalid_mode);
+ appendPQExpBuffer(&invalid_mode, _("Invalid pg_ctl shutdown mode \"%s\" provided"), optarg);
+ error_list_append(&cli_errors, invalid_mode.data);
+ termPQExpBuffer(&invalid_mode);
+ }
+ }
+ break;
+ case 'C':
+ strncpy(runtime_options.remote_config_file, optarg, MAXLEN);
+ break;
case 1:
runtime_options.initdb_no_pwprompt = true;
break;
@@ -306,6 +410,20 @@ main(int argc, char **argv)
case 4:
runtime_options.ignore_external_config_files = true;
break;
+ case 5:
+ strncpy(runtime_options.config_archive_dir, optarg, MAXLEN);
+ break;
+ case 6:
+ if (optarg != NULL)
+ {
+ strncpy(runtime_options.pg_rewind, optarg, MAXFILENAME);
+ }
+ pg_rewind_supplied = true;
+ break;
+ case 7:
+ runtime_options.witness_pwprompt = true;
+ break;
+
default:
{
PQExpBufferData unknown_option;
@@ -333,8 +451,8 @@ main(int argc, char **argv)
/*
* Now we need to obtain the action, this comes in one of these forms:
- * MASTER REGISTER |
- * STANDBY {REGISTER | UNREGISTER | CLONE [node] | PROMOTE | FOLLOW [node]} |
+ * { MASTER | PRIMARY } REGISTER |
+ * STANDBY {REGISTER | UNREGISTER | CLONE [node] | PROMOTE | FOLLOW [node] | SWITCHOVER | REWIND} |
* WITNESS CREATE |
* CLUSTER {SHOW | CLEANUP}
*
@@ -379,6 +497,12 @@ main(int argc, char **argv)
action = STANDBY_PROMOTE;
else if (strcasecmp(server_cmd, "FOLLOW") == 0)
action = STANDBY_FOLLOW;
+ else if (strcasecmp(server_cmd, "SWITCHOVER") == 0)
+ action = STANDBY_SWITCHOVER;
+ else if (strcasecmp(server_cmd, "ARCHIVE-CONFIG") == 0)
+ action = STANDBY_ARCHIVE_CONFIG;
+ else if (strcasecmp(server_cmd, "RESTORE-CONFIG") == 0)
+ action = STANDBY_RESTORE_CONFIG;
}
else if (strcasecmp(server_mode, "CLUSTER") == 0)
{
@@ -448,25 +572,6 @@ main(int argc, char **argv)
print_error_list(&cli_warnings, LOG_WARNING);
}
- if (!runtime_options.dbname[0])
- {
- if (getenv("PGDATABASE"))
- strncpy(runtime_options.dbname, getenv("PGDATABASE"), MAXLEN);
- else if (getenv("PGUSER"))
- strncpy(runtime_options.dbname, getenv("PGUSER"), MAXLEN);
- else
- strncpy(runtime_options.dbname, DEFAULT_DBNAME, MAXLEN);
- }
-
- /*
- * If no primary port (-p/--port) provided, explicitly set the
- * default PostgreSQL port.
- */
- if (!runtime_options.masterport[0])
- {
- strncpy(runtime_options.masterport, DEFAULT_MASTER_PORT, MAXLEN);
- }
-
/*
* The configuration file is not required for some actions (e.g. 'standby clone'),
* however if available we'll parse it anyway for options like 'log_level',
@@ -477,6 +582,13 @@ main(int argc, char **argv)
&options,
argv[0]);
+ /* Some configuration file items can be overridden by command line options */
+ /* Command-line parameter -L/--log-level overrides any setting in config file */
+ if (*runtime_options.loglevel != '\0')
+ {
+ strncpy(options.loglevel, runtime_options.loglevel, MAXLEN);
+ }
+
/*
* Initialise pg_bindir - command line parameter will override
* any setting in the configuration file
@@ -517,12 +629,6 @@ main(int argc, char **argv)
* logging to troubleshoot problems.
*/
- /* Command-line parameter -L/--log-level overrides any setting in config file*/
- if (*runtime_options.loglevel != '\0')
- {
- strncpy(options.loglevel, runtime_options.loglevel, MAXLEN);
- }
-
logger_init(&options, progname());
if (runtime_options.verbose)
@@ -586,7 +692,6 @@ main(int argc, char **argv)
log_verbose(LOG_DEBUG, "slot name initialised as: %s\n", repmgr_slot_name);
}
-
switch (action)
{
case MASTER_REGISTER:
@@ -607,6 +712,15 @@ main(int argc, char **argv)
case STANDBY_FOLLOW:
do_standby_follow();
break;
+ case STANDBY_SWITCHOVER:
+ do_standby_switchover();
+ break;
+ case STANDBY_ARCHIVE_CONFIG:
+ do_standby_archive_config();
+ break;
+ case STANDBY_RESTORE_CONFIG:
+ do_standby_restore_config();
+ break;
case WITNESS_CREATE:
do_witness_create();
break;
@@ -635,15 +749,23 @@ do_cluster_show(void)
char sqlquery[QUERY_STR_LEN];
char node_role[MAXLEN];
int i;
+ char name_header[MAXLEN];
+ char upstream_header[MAXLEN];
+ int name_length,
+ upstream_length,
+ conninfo_length = 0;
/* We need to connect to check configuration */
log_info(_("connecting to database\n"));
conn = establish_db_connection(options.conninfo, true);
sqlquery_snprintf(sqlquery,
- "SELECT conninfo, type "
- " FROM %s.repl_nodes ",
- get_repmgr_schema_quoted(conn));
+ "SELECT conninfo, type, name, upstream_node_name"
+ " FROM %s.repl_show_nodes",
+ get_repmgr_schema_quoted(conn));
+
+ log_verbose(LOG_DEBUG, "do_cluster_show(): \n%s\n",sqlquery );
+
res = PQexec(conn, sqlquery);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
@@ -658,7 +780,51 @@ do_cluster_show(void)
}
PQfinish(conn);
- printf("Role | Connection String\n");
+ /* Format header nicely */
+
+ strncpy(name_header, _("Name"), MAXLEN);
+ strncpy(upstream_header, _("Upstream"), MAXLEN);
+
+ /*
+ * XXX if repmgr is ever localized into non-ASCII locales,
+ * use pg_wcssize() or similar to establish printed column length
+ */
+ name_length = strlen(name_header);
+ upstream_length = strlen(upstream_header);
+
+ for (i = 0; i < PQntuples(res); i++)
+ {
+ int conninfo_length_cur, name_length_cur, upstream_length_cur;
+
+ conninfo_length_cur = strlen(PQgetvalue(res, i, 0));
+ if (conninfo_length_cur > conninfo_length)
+ conninfo_length = conninfo_length_cur;
+
+ name_length_cur = strlen(PQgetvalue(res, i, 2));
+ if (name_length_cur > name_length)
+ name_length = name_length_cur;
+
+ upstream_length_cur = strlen(PQgetvalue(res, i, 3));
+ if (upstream_length_cur > upstream_length)
+ upstream_length = upstream_length_cur;
+ }
+
+ printf("Role | %-*s | %-*s | Connection String\n", name_length, name_header, upstream_length, upstream_header);
+ printf("----------+-");
+
+ for (i = 0; i < name_length; i++)
+ printf("-");
+
+ printf("-|-");
+ for (i = 0; i < upstream_length; i++)
+ printf("-");
+
+ printf("-|-");
+ for (i = 0; i < conninfo_length; i++)
+ printf("-");
+
+ printf("\n");
+
for (i = 0; i < PQntuples(res); i++)
{
conn = establish_db_connection(PQgetvalue(res, i, 0), false);
@@ -672,6 +838,8 @@ do_cluster_show(void)
strcpy(node_role, "* master");
printf("%-10s", node_role);
+ printf("| %-*s ", name_length, PQgetvalue(res, i, 2));
+ printf("| %-*s ", upstream_length, PQgetvalue(res, i, 3));
printf("| %s\n", PQgetvalue(res, i, 0));
PQfinish(conn);
@@ -872,11 +1040,12 @@ do_master_register(void)
/* Delete any existing record for this node if --force set */
if (runtime_options.force)
{
- PGresult *res;
bool node_record_deleted;
+ t_node_info node_info = T_NODE_INFO_INITIALIZER;
+
+ begin_transaction(conn);
- res = get_node_record(conn, options.cluster_name, options.node);
- if (PQntuples(res))
+ if (get_node_record(conn, options.cluster_name, options.node, &node_info))
{
log_notice(_("deleting existing master record with id %i\n"), options.node);
@@ -890,10 +1059,8 @@ do_master_register(void)
exit(ERR_BAD_CONFIG);
}
}
-
}
-
/* Now register the master */
record_created = create_node_record(conn,
"master register",
@@ -1016,7 +1183,7 @@ do_standby_register(void)
log_hint(_("use option -F/--force to overwrite an existing node record\n"));
}
- // XXX log registration failure?
+ /* XXX log registration failure? */
PQfinish(master_conn);
PQfinish(conn);
exit(ERR_BAD_CONFIG);
@@ -1122,6 +1289,7 @@ do_standby_unregister(void)
static void
do_standby_clone(void)
{
+ PGconn *primary_conn = NULL;
PGconn *upstream_conn;
PGresult *res;
@@ -1197,6 +1365,23 @@ do_standby_clone(void)
cluster_size);
/*
+ * If the upstream node is a standby, try to connect to the primary too so we
+ * can write an event record
+ */
+ if (is_standby(upstream_conn))
+ {
+ if (strlen(options.cluster_name))
+ {
+ primary_conn = get_master_connection(upstream_conn, options.cluster_name,
+ NULL, NULL);
+ }
+ }
+ else
+ {
+ primary_conn = upstream_conn;
+ }
+
+ /*
* If --recovery-min-apply-delay was passed, check that
* we're connected to PostgreSQL 9.4 or later
*/
@@ -1408,10 +1593,15 @@ do_standby_clone(void)
}
}
- log_notice(_("starting backup...\n"));
- if (runtime_options.fast_checkpoint == false)
+ if (runtime_options.rsync_only)
{
- log_hint(_("this may take some time; consider using the -c/--fast-checkpoint option\n"));
+ log_notice(_("starting backup (using rsync)...\n"));
+ }
+ else
+ {
+ log_notice(_("starting backup (using pg_basebackup)...\n"));
+ if (runtime_options.fast_checkpoint == false)
+ log_hint(_("this may take some time; consider using the -c/--fast-checkpoint option\n"));
}
if (runtime_options.rsync_only)
@@ -1637,7 +1827,7 @@ do_standby_clone(void)
}
/*
- * If configuration files were not inside the data directory, we;ll need to
+ * If configuration files were not inside the data directory, we'll need to
* copy them via SSH (unless `--ignore-external-config-files` was provided)
*
* TODO: add option to place these files in the same location on the
@@ -1839,29 +2029,33 @@ stop_backup:
log_hint(_("for example : /etc/init.d/postgresql start\n"));
}
- /* Log the event */
- initPQExpBuffer(&event_details);
+ /* Log the event - if we could connect to the primary */
- /* Add details about relevant runtime options used */
- appendPQExpBuffer(&event_details,
- _("Cloned from host '%s', port %s"),
- runtime_options.host,
- runtime_options.masterport);
+ if (primary_conn != NULL)
+ {
+ initPQExpBuffer(&event_details);
- appendPQExpBuffer(&event_details,
- _("; backup method: %s"),
- runtime_options.rsync_only ? "rsync" : "pg_basebackup");
+ /* Add details about relevant runtime options used */
+ appendPQExpBuffer(&event_details,
+ _("Cloned from host '%s', port %s"),
+ runtime_options.host,
+ runtime_options.masterport);
- appendPQExpBuffer(&event_details,
- _("; --force: %s"),
- runtime_options.force ? "Y" : "N");
+ appendPQExpBuffer(&event_details,
+ _("; backup method: %s"),
+ runtime_options.rsync_only ? "rsync" : "pg_basebackup");
- create_event_record(upstream_conn,
- &options,
- options.node,
- "standby_clone",
- true,
- event_details.data);
+ appendPQExpBuffer(&event_details,
+ _("; --force: %s"),
+ runtime_options.force ? "Y" : "N");
+
+ create_event_record(primary_conn,
+ &options,
+ options.node,
+ "standby_clone",
+ true,
+ event_details.data);
+ }
PQfinish(upstream_conn);
exit(retval);
@@ -1998,7 +2192,7 @@ do_standby_promote(void)
initPQExpBuffer(&details);
appendPQExpBuffer(&details,
- "Node %i was successfully promoted to master",
+ "node %i was successfully promoted to master",
options.node);
log_notice(_("STANDBY PROMOTE successful\n"));
@@ -2017,6 +2211,17 @@ do_standby_promote(void)
}
+/*
+ * Follow a new primary.
+ *
+ * This function has two "modes":
+ * 1) no primary info provided - determine primary from standby metadata
+ * 2) primary info provided - use that info to connect to the primary.
+ *
+ * (2) is mainly for when a node has been stopped as part of a switchover
+ * and needs to be started with recovery.conf correctly configured.
+ */
+
static void
do_standby_follow(void)
{
@@ -2025,55 +2230,96 @@ do_standby_follow(void)
char script[MAXLEN];
char master_conninfo[MAXLEN];
PGconn *master_conn;
- int master_id;
+ int master_id = 0;
int r,
retval;
- char data_dir[MAXLEN];
+ char data_dir[MAXFILENAME];
bool success;
+ log_debug("do_standby_follow()\n");
- /* We need to connect to check configuration */
- log_info(_("connecting to standby database\n"));
- conn = establish_db_connection(options.conninfo, true);
- log_verbose(LOG_INFO, _("connected to standby, checking its state\n"));
-
- /* Check we are in a standby node */
- retval = is_standby(conn);
- if (retval == 0 || retval == -1)
+ /*
+ * If -h/--host wasn't provided, attempt to connect to standby
+ * to determine primary, and carry out some other checks while we're
+ * at it.
+ */
+ if ( *runtime_options.host == '\0')
{
- log_err(_(retval == 0 ? "this command should be executed on a standby node\n" :
- "connection to node lost!\n"));
+ /* We need to connect to check configuration */
+ log_info(_("connecting to standby database\n"));
+ conn = establish_db_connection(options.conninfo, true);
+ log_verbose(LOG_INFO, _("connected to standby, checking its state\n"));
- PQfinish(conn);
- exit(ERR_BAD_CONFIG);
- }
+ /* Check we are in a standby node */
+ retval = is_standby(conn);
+ if (retval == 0 || retval == -1)
+ {
+ log_err(_(retval == 0 ? "this command should be executed on a standby node\n" :
+ "connection to node lost!\n"));
- /*
- * we also need to check if there is any master in the cluster or wait for
- * one to appear if we have set the wait option
- */
- log_info(_("discovering new master...\n"));
+ PQfinish(conn);
+ exit(ERR_BAD_CONFIG);
+ }
- do
- {
- if (!is_pgup(conn, options.master_response_timeout))
+ /* Get the data directory full path */
+ success = get_pg_setting(conn, "data_directory", data_dir);
+
+ if (success == false)
{
- conn = establish_db_connection(options.conninfo, true);
+ log_err(_("unable to determine data directory\n"));
+ exit(ERR_BAD_CONFIG);
}
- master_conn = get_master_connection(conn,
- options.cluster_name, &master_id, (char *) &master_conninfo);
- }
- while (master_conn == NULL && runtime_options.wait_for_master);
+ /*
+ * we also need to check if there is any master in the cluster or wait for
+ * one to appear if we have set the wait option
+ */
+ log_info(_("discovering new master...\n"));
+
+ do
+ {
+ if (!is_pgup(conn, options.master_response_timeout))
+ {
+ conn = establish_db_connection(options.conninfo, true);
+ }
+
+ master_conn = get_master_connection(conn,
+ options.cluster_name, &master_id, (char *) &master_conninfo);
+ }
+ while (master_conn == NULL && runtime_options.wait_for_master);
+
+ if (master_conn == NULL)
+ {
+ log_err(_("unable to determine new master node\n"));
+ PQfinish(conn);
+ exit(ERR_BAD_CONFIG);
+ }
+
+ /*
+ * Verify that standby and master are supported and compatible server
+ * versions
+ */
+ check_master_standby_version_match(conn, master_conn);
- if (master_conn == NULL)
- {
- log_err(_("unable to determine new master node\n"));
PQfinish(conn);
- exit(ERR_BAD_CONFIG);
}
+ /* primary server info explicitly provided - attempt to connect to that */
+ else
+ {
+ keywords[0] = "host";
+ values[0] = runtime_options.host;
+ keywords[1] = "port";
+ values[1] = runtime_options.masterport;
+
+ master_conn = establish_db_connection_by_params(keywords, values, true);
+
+ master_id = get_master_node_id(master_conn, options.cluster_name);
+
+ strncpy(data_dir, runtime_options.dest_dir, MAXFILENAME);
+ }
+
/* Check we are going to point to a master */
retval = is_standby(master_conn);
@@ -2082,16 +2328,10 @@ do_standby_follow(void)
log_err(_(retval == 1 ? "the node to follow should be a master\n" :
"connection to node lost!\n"));
- PQfinish(conn);
PQfinish(master_conn);
exit(ERR_BAD_CONFIG);
}
- /*
- * Verify that standby and master are supported and compatible server
- * versions
- */
- check_master_standby_version_match(conn, master_conn);
/*
* set the host and masterport variables with the master ones before
@@ -2124,29 +2364,18 @@ do_standby_follow(void)
create_event_record(master_conn,
&options,
options.node,
- "repmgr_follow",
+ "standby_follow",
false,
event_details.data);
- PQfinish(conn);
PQfinish(master_conn);
exit(ERR_DB_QUERY);
}
}
-
+ /* XXX add more detail! */
log_info(_("changing standby's master\n"));
- /* Get the data directory full path */
- success = get_pg_setting(conn, "data_directory", data_dir);
- PQfinish(conn);
-
- if (success == false)
- {
- log_err(_("unable to determine data directory\n"));
- exit(ERR_BAD_CONFIG);
- }
-
/* write the recovery.conf file */
if (!create_recovery_file(data_dir))
exit(ERR_BAD_CONFIG);
@@ -2165,20 +2394,972 @@ do_standby_follow(void)
exit(ERR_NO_RESTART);
}
- if (update_node_record_set_upstream(master_conn, options.cluster_name,
- options.node, master_id) == false)
- {
- log_err(_("unable to update upstream node"));
+ /*
+ * It's possible this node was an inactive primary - update the
+ * relevant fields to ensure it's marked as an active standby
+ */
+ if (update_node_record_status(master_conn,
+ options.cluster_name,
+ options.node,
+ "standby",
+ master_id,
+ true) == false)
+ {
+ log_err(_("unable to update upstream node\n"));
PQfinish(master_conn);
exit(ERR_BAD_CONFIG);
}
+
+ /* XXX add event record - possible move from repmgrd? */
PQfinish(master_conn);
return;
}
+/*
+ * Perform a switchover by:
+ * - stopping current primary node
+ * - promoting this standby node to primary
+ * - forcing previous primary node to follow this node
+ *
+ * Caveats:
+ * - repmgrd must not be running, otherwise it may
+ * attempt a failover
+ * (TODO: find some way of notifying repmgrd of planned
+ * activity like this)
+ * - currently only set up for two-node operation; any other
+ * standbys will probably become downstream cascaded standbys
+ * of the old primary once it's restarted
+ * - as we're executing repmgr remotely (on the old primary),
+ * we'll need the location of its configuration file; this
+ * can be provided explicitly with -C/--remote-config-file,
+ * otherwise repmgr will look in default locations on the
+ * remote server
+ *
+ * TODO:
+ * - make connection test timeouts/intervals configurable (see below)
+ */
+
+static void
+do_standby_switchover(void)
+{
+ PGconn *local_conn;
+ PGconn *remote_conn;
+ int server_version_num;
+ bool use_pg_rewind;
+
+ /* the remote server is the primary which will be demoted */
+ char remote_conninfo[MAXCONNINFO] = "";
+ char remote_host[MAXLEN];
+ char remote_data_directory[MAXLEN];
+ int remote_node_id;
+ char remote_node_replication_state[MAXLEN] = "";
+ char remote_archive_config_dir[MAXLEN];
+ char remote_pg_rewind[MAXLEN];
+ int i,
+ r = 0;
+
+ char command[MAXLEN];
+ PQExpBufferData command_output;
+
+ char repmgr_db_cli_params[MAXLEN] = "";
+ int query_result;
+ t_node_info remote_node_record;
+ bool connection_success;
+
+
+ /*
+ * SANITY CHECKS
+ *
+ * We'll be doing a bunch of operations on the remote server (primary
+ * to be demoted) - careful checks needed before proceeding.
+ */
+
+ log_notice(_("switching current node %i to master server and demoting current master to standby...\n"), options.node);
+
+ local_conn = establish_db_connection(options.conninfo, true);
+
+ /* Check that this is a standby */
+
+ if (!is_standby(local_conn))
+ {
+ log_err(_("switchover must be executed from the standby node to be promoted\n"));
+ PQfinish(local_conn);
+
+ exit(ERR_BAD_CONFIG);
+ }
+
+ server_version_num = check_server_version(local_conn, "master", true, NULL);
+
+ /*
+ * Add a friendly notice if --pg_rewind supplied for 9.5 and later - we'll
+ * be ignoring it anyway
+ */
+ if (pg_rewind_supplied == true && server_version_num >= 90500)
+ {
+ log_notice(_("--pg_rewind not required for PostgreSQL 9.5 and later\n"));
+ }
+
+ /*
+ * TODO: check that standby's upstream node is the primary
+ * (it's probably not feasible to switch over to a cascaded standby)
+ */
+
+ /* Check that primary is available */
+ remote_conn = get_master_connection(local_conn, options.cluster_name, &remote_node_id, remote_conninfo);
+
+ if (remote_conn == NULL)
+ {
+ log_err(_("unable to connect to current master node\n"));
+ log_hint(_("check that the cluster is correctly configured and this standby is registered\n"));
+ PQfinish(local_conn);
+ exit(ERR_DB_CON);
+ }
+
+ /* Get the remote's node record */
+ query_result = get_node_record(remote_conn, options.cluster_name, remote_node_id, &remote_node_record);
+
+ if (query_result < 1)
+ {
+ log_err(_("unable to retrieve node record for node %i\n"), remote_node_id);
+
+ PQfinish(local_conn);
+
+ exit(ERR_DB_QUERY);
+ }
+
+ log_debug("remote node name is \"%s\"\n", remote_node_record.name);
+
+ /*
+ * Check that we can connect by SSH to the remote (current primary) server,
+ * and read its data directory
+ *
+ * TODO: check we can read contents of PG_VERSION??
+ * -> assuming the remote user/directory is set up correctly,
+ * we should only be able to see the file as the PostgreSQL
+ * user, so it should be readable anyway
+ */
+ get_conninfo_value(remote_conninfo, "host", remote_host);
+
+ r = test_ssh_connection(remote_host, runtime_options.remote_user);
+
+ if (r != 0)
+ {
+ log_err(_("unable to connect via ssh to host %s, user %s\n"), remote_host, runtime_options.remote_user);
+ }
+
+ if (get_pg_setting(remote_conn, "data_directory", remote_data_directory) == false)
+ {
+ log_err(_("unable to retrieve master's data directory location\n"));
+ PQfinish(remote_conn);
+ PQfinish(local_conn);
+ exit(ERR_DB_CON);
+ }
+
+ log_debug("master's data directory is: %s\n", remote_data_directory);
+
+ maxlen_snprintf(command,
+ "ls %s/PG_VERSION >/dev/null 2>&1 && echo 1 || echo 0",
+ remote_data_directory);
+ initPQExpBuffer(&command_output);
+
+ (void)remote_command(
+ remote_host,
+ runtime_options.remote_user,
+ command,
+ &command_output);
+
+ if (*command_output.data == '1')
+ {
+ log_verbose(LOG_DEBUG, "PG_VERSION found in %s\n", remote_data_directory);
+ }
+ else if (*command_output.data == '0')
+ {
+ log_err(_("%s is not a PostgreSQL data directory or is not accessible to user %s\n"), remote_data_directory, runtime_options.remote_user);
+ PQfinish(remote_conn);
+ PQfinish(local_conn);
+ exit(ERR_BAD_CONFIG);
+ }
+ else
+ {
+ log_err(_("Unexpected output from remote command:\n%s\n"), command_output.data);
+ PQfinish(remote_conn);
+ PQfinish(local_conn);
+ exit(ERR_BAD_CONFIG);
+ }
+
+ termPQExpBuffer(&command_output);
+
+
+ if (server_version_num >= 90500)
+ {
+ /* 9.5 and later have pg_rewind built-in - always use that */
+ use_pg_rewind = true;
+ maxlen_snprintf(remote_pg_rewind,
+ "%s/pg_rewind",
+ pg_bindir);
+ }
+ else
+ {
+ /* 9.3/9.4 - user can use separately-compiled pg_rewind */
+ if (pg_rewind_supplied == true)
+ {
+ use_pg_rewind = true;
+
+ /* User has specified pg_rewind path */
+ if (strlen(runtime_options.pg_rewind))
+ {
+ maxlen_snprintf(remote_pg_rewind,
+ "%s",
+ runtime_options.pg_rewind);
+ }
+ /* No path supplied - assume in normal bindir */
+ else
+ {
+ maxlen_snprintf(remote_pg_rewind,
+ "%s/pg_rewind",
+ pg_bindir);
+ }
+ }
+ else
+ {
+ use_pg_rewind = false;
+ }
+ }
+
+ /* Sanity checks so we're sure pg_rewind can be used */
+ if (use_pg_rewind == true)
+ {
+ bool wal_log_hints = false;
+
+ /* check pg_rewind actually exists on remote */
+
+ maxlen_snprintf(command,
+ "ls -1 %s >/dev/null 2>&1 && echo 1 || echo 0",
+ remote_pg_rewind);
+
+ initPQExpBuffer(&command_output);
+
+ (void)remote_command(
+ remote_host,
+ runtime_options.remote_user,
+ command,
+ &command_output);
+
+ if (*command_output.data == '0')
+ {
+ log_err(_("unable to find pg_rewind on the remote server\n"));
+ log_err(_("expected location is: %s\n"), remote_pg_rewind);
+ exit(ERR_BAD_CONFIG);
+ }
+
+ /* check that server is appropriately configured */
+
+ /*
+ * "full_page_writes" must be enabled in any case
+ */
+
+ if (guc_set(remote_conn, "full_page_writes", "=", "off"))
+ {
+ log_err(_("\"full_page_writes\" must be set to \"on\""));
+ exit(ERR_BAD_CONFIG);
+ }
+
+ /*
+ * Check whether wal_log_hints is on - if so we're fine and don't need
+ * to check for checksums
+ */
+
+ wal_log_hints = guc_set(remote_conn, "wal_log_hints", "=", "on");
+
+ if (wal_log_hints == false)
+ {
+ char local_data_directory[MAXLEN];
+ int data_checksum_version;
+
+ /*
+ * check the *local* server's control data for the data checksum
+ * version - much easier than doing it on the remote server
+ */
+
+ if (get_pg_setting(local_conn, "data_directory", local_data_directory) == false)
+ {
+ log_err(_("unable to retrieve standby's data directory location\n"));
+ PQfinish(remote_conn);
+ PQfinish(local_conn);
+ exit(ERR_DB_CON);
+ }
+
+ data_checksum_version = get_data_checksum_version(local_data_directory);
+
+ if (data_checksum_version == 0)
+ {
+ log_err(_("pg_rewind cannot be used - data checksums are not enabled for this cluster and \"wal_log_hints\" is \"off\"\n"));
+ exit(ERR_BAD_CONFIG);
+ }
+ }
+ }
+
+ PQfinish(local_conn);
+ PQfinish(remote_conn);
+
+ /* Determine the remote's configuration file location */
+
+ /* Remote configuration file provided - check it exists */
+ if (runtime_options.remote_config_file[0])
+ {
+ log_verbose(LOG_INFO, _("looking for file \"%s\" on remote server \"%s\"\n"),
+ runtime_options.remote_config_file,
+ remote_host);
+
+ maxlen_snprintf(command,
+ "ls -1 %s >/dev/null 2>&1 && echo 1 || echo 0",
+ runtime_options.remote_config_file);
+
+ initPQExpBuffer(&command_output);
+
+ (void)remote_command(
+ remote_host,
+ runtime_options.remote_user,
+ command,
+ &command_output);
+
+ if (*command_output.data == '0')
+ {
+ log_err(_("unable to find the specified repmgr configuration file on remote server\n"));
+ exit(ERR_BAD_CONFIG);
+ }
+
+ termPQExpBuffer(&command_output);
+
+ log_verbose(LOG_INFO, _("remote configuration file \"%s\" found on remote server\n"),
+ runtime_options.remote_config_file);
+
+ termPQExpBuffer(&command_output);
+ }
+ /*
+ * No remote configuration file provided - check some default locations:
+ * - path of configuration file for this repmgr
+ * - /etc/repmgr.conf
+ */
+ else
+ {
+ int i;
+ bool config_file_found = false;
+
+ const char *config_paths[] = {
+ runtime_options.config_file,
+ "/etc/repmgr.conf",
+ NULL
+ };
+
+ log_verbose(LOG_INFO, _("no remote configuration file provided - checking default locations\n"));
+
+ for(i = 0; config_paths[i] && config_file_found == false; ++i)
+ {
+ log_verbose(LOG_INFO, _("checking \"%s\"\n"), config_paths[i]);
+
+ maxlen_snprintf(command,
+ "ls -1 %s >/dev/null 2>&1 && echo 1 || echo 0",
+ config_paths[i]);
+
+ initPQExpBuffer(&command_output);
+
+ (void)remote_command(
+ remote_host,
+ runtime_options.remote_user,
+ command,
+ &command_output);
+
+ if (*command_output.data == '1')
+ {
+ strncpy(runtime_options.remote_config_file, config_paths[i], MAXLEN);
+ log_verbose(LOG_INFO, _("configuration file \"%s\" found on remote server\n"),
+ runtime_options.remote_config_file);
+ config_file_found = true;
+ }
+
+ termPQExpBuffer(&command_output);
+ }
+
+ if (config_file_found == false)
+ {
+ log_err(_("no remote configuration file supplied or found in a default location - terminating\n"));
+ log_hint(_("specify the remote configuration file with -C/--remote-config-file\n"));
+ exit(ERR_BAD_CONFIG);
+ }
+ }
+
+
+
+ /*
+ * Sanity checks completed - prepare for the switchover
+ */
+
+ /*
+ * When using pg_rewind (the preferable option, and default from 9.5
+ * onwards), we need to archive any configuration files in the remote
+ * server's data directory as they'll be overwritten by pg_rewind
+ *
+ * Possible todo item: enable the archive location to be specified
+ * by the user
+ */
+ if (use_pg_rewind == true)
+ {
+ maxlen_snprintf(remote_archive_config_dir,
+ "/tmp/repmgr-%s-archive",
+ remote_node_record.name);
+
+ log_verbose(LOG_DEBUG, "remote_archive_config_dir: %s\n", remote_archive_config_dir);
+
+ maxlen_snprintf(command,
+ "%s/repmgr standby archive-config -f %s --config-archive-dir=%s",
+ pg_bindir,
+ runtime_options.remote_config_file,
+ remote_archive_config_dir);
+
+ log_debug("Executing:\n%s\n", command);
+
+ initPQExpBuffer(&command_output);
+
+ (void)remote_command(
+ remote_host,
+ runtime_options.remote_user,
+ command,
+ &command_output);
+
+ termPQExpBuffer(&command_output);
+ }
+
+ /*
+ * Stop the remote primary
+ *
+ * We'll issue the pg_ctl command in no-wait mode; we'll check
+ * the connection from here - and error out if no shutdown is detected
+ * after a certain time.
+ *
+ * XXX currently we assume the same Postgres binary path on the primary
+ * as configured on the local standby; we may need to add a command
+ * line option to provide an explicit path (--remote-pg-bindir)?
+ */
+
+ /*
+ * TODO
+ * - notify repmgrd instances that this is a controlled
+ * event so they don't initiate failover
+ * - optional "immediate" shutdown?
+ * -> use -F/--force?
+ */
+
+ maxlen_snprintf(command,
+ "%s/pg_ctl -D %s -m %s -W stop >/dev/null 2>&1 && echo 1 || echo 0",
+ pg_bindir,
+ remote_data_directory,
+ runtime_options.pg_ctl_mode);
+
+ initPQExpBuffer(&command_output);
+
+ // XXX handle failure
+
+ (void)remote_command(
+ remote_host,
+ runtime_options.remote_user,
+ command,
+ &command_output);
+
+ termPQExpBuffer(&command_output);
+
+ connection_success = false;
+
+ /* loop for timeout waiting for current primary to stop */
+
+ for(i = 0; i < options.reconnect_attempts; i++)
+ {
+ /* Check whether primary is available */
+
+ remote_conn = test_db_connection(remote_conninfo, false); /* don't fail on error */
+
+ /* XXX failure to connect doesn't mean the server is necessarily
+ * completely stopped - we need to better detect the reason for
+ * connection failure ("server not listening" vs "shutting down")
+ *
+ * -> check is_pgup()
+ */
+ if (PQstatus(remote_conn) != CONNECTION_OK)
+ {
+ connection_success = true;
+
+ log_notice(_("current master has been stopped\n"));
+ break;
+ }
+ PQfinish(remote_conn);
+
+ // configurable?
+ sleep(options.reconnect_interval);
+ i++;
+ }
+
+ if (connection_success == false)
+ {
+ log_err(_("master server did not shut down\n"));
+ log_hint(_("check the master server status before performing any further actions"));
+ exit(ERR_FAILOVER_FAIL);
+ }
+
+ /* promote this standby */
+
+ do_standby_promote();
+
+ /*
+ * TODO: optionally have any other downstream nodes from old primary
+ * follow new primary? Currently they'll just latch onto the old
+ * primary as cascaded standbys.
+ */
+
+ /* restore old primary */
+
+ /* TODO: additional check old primary is shut down */
+
+ if (use_pg_rewind == true)
+ {
+ PQExpBufferData recovery_done_remove;
+
+ /* Execute pg_rewind */
+ maxlen_snprintf(command,
+ "%s/pg_rewind -D %s --source-server=\\'%s\\'",
+ pg_bindir,
+ remote_data_directory,
+ options.conninfo);
+
+ log_notice("Executing pg_rewind on old master server\n");
+ log_debug("pg_rewind command is:\n%s\n", command);
+
+ initPQExpBuffer(&command_output);
+
+ // XXX handle failure
+
+ (void)remote_command(
+ remote_host,
+ runtime_options.remote_user,
+ command,
+ &command_output);
+
+ termPQExpBuffer(&command_output);
+
+ /* Restore any previously archived config files */
+ maxlen_snprintf(command,
+ "%s/repmgr standby restore-config -D %s --config-archive-dir=%s",
+ pg_bindir,
+ remote_data_directory,
+ remote_archive_config_dir);
+
+ initPQExpBuffer(&command_output);
+
+ // XXX handle failure
+
+ (void)remote_command(
+ remote_host,
+ runtime_options.remote_user,
+ command,
+ &command_output);
+
+ termPQExpBuffer(&command_output);
+
+ /* remove any recovery.done file copied in by pg_rewind */
+
+ initPQExpBuffer(&recovery_done_remove);
+
+ appendPQExpBuffer(&recovery_done_remove,
+ "test -e %s/recovery.done && rm -f %s/recovery.done",
+ remote_data_directory,
+ remote_data_directory);
+ initPQExpBuffer(&command_output);
+
+ // XXX handle failure
+
+ (void)remote_command(
+ remote_host,
+ runtime_options.remote_user,
+ recovery_done_remove.data,
+ &command_output);
+
+ termPQExpBuffer(&command_output);
+ termPQExpBuffer(&recovery_done_remove);
+
+
+
+ }
+ else
+ {
+ /*
+ * For 9.3/9.4, if pg_rewind is not available on the remote server,
+ * we'll need to force a reclone of the standby using rsync - this may
+ * take some time on larger databases, so use with care!
+ *
+ * Note that following this clone we'll be using `repmgr standby follow`
+ * to start the server - that will mean recovery.conf will be created
+ * for a second time, but that is acceptable as a workaround for the absence
+ * of pg_rewind. It's preferable to have `repmgr standby follow` start
+ * the remote database as it can access the remote config file
+ * directly.
+ */
+
+ format_db_cli_params(options.conninfo, repmgr_db_cli_params);
+ maxlen_snprintf(command,
+ "%s/repmgr -D %s -f %s %s --rsync-only --force --ignore-external-config-files standby clone",
+ pg_bindir,
+ remote_data_directory,
+ runtime_options.remote_config_file,
+ repmgr_db_cli_params
+ );
+
+ log_debug("Executing:\n%s\n", command);
+
+ initPQExpBuffer(&command_output);
+
+ (void)remote_command(
+ remote_host,
+ runtime_options.remote_user,
+ command,
+ &command_output);
+
+ termPQExpBuffer(&command_output);
+ }
+
+ /*
+ * Execute `repmgr standby follow` to create recovery.conf and start
+ * the remote server
+ */
+ format_db_cli_params(options.conninfo, repmgr_db_cli_params);
+ maxlen_snprintf(command,
+ "%s/repmgr -D %s -f %s %s standby follow",
+ pg_bindir,
+ remote_data_directory,
+ runtime_options.remote_config_file,
+ repmgr_db_cli_params
+ );
+
+ log_debug("Executing:\n%s\n", command);
+
+ initPQExpBuffer(&command_output);
+
+ (void)remote_command(
+ remote_host,
+ runtime_options.remote_user,
+ command,
+ &command_output);
+
+ termPQExpBuffer(&command_output);
+
+ /* verify that new standby is connected and replicating */
+
+ connection_success = false;
+
+ for(i = 0; i < options.reconnect_attempts; i++)
+ {
+ /* Check whether primary is available */
+
+ remote_conn = test_db_connection(remote_conninfo, false); /* don't fail on error */
+
+ if (PQstatus(remote_conn) == CONNECTION_OK)
+ {
+ log_debug("connected to new standby (old master)\n");
+ if (is_standby(remote_conn) == 0)
+ {
+ log_err(_("new standby (old master) is not a standby\n"));
+ exit(ERR_FAILOVER_FAIL);
+ }
+ connection_success = true;
+ break;
+ }
+ PQfinish(remote_conn);
+
+ sleep(options.reconnect_interval);
+ i++;
+ }
+
+ if (connection_success == false)
+ {
+ log_err(_("unable to connect to new standby (old master)\n"));
+ exit(ERR_FAILOVER_FAIL);
+ }
+
+ log_debug("new standby is in recovery\n");
+
+ /* Check for entry in pg_stat_replication */
+
+ local_conn = establish_db_connection(options.conninfo, true);
+
+
+ query_result = get_node_replication_state(local_conn, remote_node_record.name, remote_node_replication_state);
+ if (query_result == -1)
+ {
+ log_err(_("unable to retrieve replication status for node %i\n"), remote_node_id);
+ PQfinish(local_conn);
+
+ // errcode?
+ exit(ERR_DB_QUERY);
+ }
+
+ if (query_result == 0)
+ {
+ log_err(_("node %i not replicating\n"), remote_node_id);
+ }
+ else
+ {
+ /* XXX other valid values? */
+ /* XXX we should poll for a while in case the node takes time to connect to the primary */
+ if (strcmp(remote_node_replication_state, "streaming") == 0 ||
+ strcmp(remote_node_replication_state, "catchup") == 0)
+ {
+ log_verbose(LOG_NOTICE, _("node %i is replicating in state \"%s\"\n"), remote_node_id, remote_node_replication_state);
+ }
+ else
+ {
+ log_err(_("node %i replication state is \"%s\"\n"), remote_node_id, remote_node_replication_state);
+ PQfinish(local_conn);
+ exit(ERR_DB_QUERY);
+ }
+ }
+
+ /*
+ * If replication slots are in use, and an inactive one for this node
+ * (a former standby) exists on the remote node (a former primary),
+ * drop it.
+ */
+
+ if (options.use_replication_slots)
+ {
+ t_node_info local_node_record;
+
+ query_result = get_node_record(local_conn, options.cluster_name, options.node, &local_node_record);
+
+ remote_conn = establish_db_connection(remote_conninfo, false);
+
+ if (PQstatus(remote_conn) != CONNECTION_OK)
+ {
+ log_warning(_("unable to connect to former master to clean up replication slots \n"));
+ }
+ else
+ {
+ t_replication_slot slot_info;
+ int query_res;
+
+ query_res = get_slot_record(remote_conn, local_node_record.slot_name, &slot_info);
+
+ if (query_res)
+ {
+ if (slot_info.active == false)
+ {
+ if (drop_replication_slot(remote_conn, local_node_record.slot_name) == true)
+ {
+ log_notice(_("replication slot \"%s\" deleted on former master\n"), local_node_record.slot_name);
+ }
+ else
+ {
+ log_err(_("unable to delete replication slot \"%s\" on former master\n"), local_node_record.slot_name);
+ }
+ }
+ /* if active replication slot exists, call Houston as we have a problem */
+ else
+ {
+ log_err(_("replication slot \"%s\" is still active on former master\n"), local_node_record.slot_name);
+ }
+ }
+ }
+
+ PQfinish(remote_conn);
+ }
+
+ /* TODO: verify this node's record was updated correctly */
+
+ PQfinish(local_conn);
+
+ log_notice(_("switchover was successful\n"));
+ return;
+}
+
+
+/*
+ * Intended mainly for "internal" use by `standby switchover`, which
+ * calls this on the target server to archive any configuration files
+ * in the data directory, which may be overwritten by an operation
+ * like pg_rewind
+ */
+static void
+do_standby_archive_config(void)
+{
+ PGconn *local_conn = NULL;
+ char sqlquery[QUERY_STR_LEN];
+ PGresult *res;
+ int i, copied_count = 0;
+
+ if (mkdir(runtime_options.config_archive_dir, S_IRWXU) != 0 && errno != EEXIST)
+ {
+ log_err(_("unable to create temporary directory\n"));
+ exit(ERR_BAD_CONFIG);
+ }
+
+ // XXX check if directory is directory and we own it
+ // XXX delete any files in dir in case it existed already
+
+ local_conn = establish_db_connection(options.conninfo, true);
+
+ /*
+ * Detect which config files are actually inside the data directory;
+ * this query will include any settings from included files too
+ */
+ sqlquery_snprintf(sqlquery,
+ "WITH files AS ( "
+ " WITH dd AS ( "
+ " SELECT setting "
+ " FROM pg_settings "
+ " WHERE name = 'data_directory') "
+ " SELECT distinct(sourcefile) AS config_file"
+ " FROM dd, pg_settings ps "
+ " WHERE ps.sourcefile IS NOT NULL "
+ " AND ps.sourcefile ~ ('^' || dd.setting) "
+ " UNION "
+ " SELECT ps.setting AS config_file"
+ " FROM dd, pg_settings ps "
+ " WHERE ps.name IN ( 'config_file', 'hba_file', 'ident_file') "
+ " AND ps.setting ~ ('^' || dd.setting) "
+ ") "
+ " SELECT config_file, "
+ " regexp_replace(config_file, '^.*\\/','') AS filename "
+ " FROM files "
+ "ORDER BY config_file");
+
+ log_verbose(LOG_DEBUG, "do_standby_archive_config(): %s\n", sqlquery);
+
+ res = PQexec(local_conn, sqlquery);
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ {
+ log_err(_("unable to query config file locations\n"));
+ PQclear(res);
+ PQfinish(local_conn);
+ exit(ERR_DB_QUERY);
+ }
+
+ /* Copy any configuration files to the specified directory */
+ for (i = 0; i < PQntuples(res); i++)
+ {
+ PQExpBufferData buf;
+
+ initPQExpBuffer(&buf);
+ appendPQExpBuffer(&buf, "%s/%s",
+ runtime_options.config_archive_dir, PQgetvalue(res, i, 1));
+
+ log_verbose(LOG_DEBUG, "Copying %s to %s/\n", PQgetvalue(res, i, 0), buf.data);
+ /* XXX check result */
+ copy_file(PQgetvalue(res, i, 0), buf.data);
+
+ termPQExpBuffer(&buf);
+
+ copied_count++;
+ }
+
+ PQclear(res);
+
+ PQfinish(local_conn);
+
+ log_notice(_("%i files copied to %s\n"), copied_count, runtime_options.config_archive_dir);
+}
+
+/*
+ * Intended mainly for "internal" use by `standby switchover`, which
+ * calls this on the target server to restore any configuration files
+ * to the data directory, which may have been overwritten by an operation
+ * like pg_rewind
+ *
+ * Not designed to be called if the instance is running, but does
+ * not currently check.
+ *
+ * Requires -D/--data-dir and --config_archive_dir
+ *
+ * Removes --config_archive_dir after successful copy
+ */
+static void
+do_standby_restore_config(void)
+{
+ DIR *arcdir;
+ struct dirent *arcdir_ent;
+ int copied_count = 0;
+ bool copy_ok = true;
+
+ arcdir = opendir(runtime_options.config_archive_dir);
+ if (arcdir == NULL)
+ {
+ log_err(_("Unable to open directory '%s'\n"), runtime_options.config_archive_dir);
+ exit(ERR_BAD_CONFIG);
+ }
+
+ while ((arcdir_ent = readdir(arcdir)) != NULL) {
+ PQExpBufferData src_file;
+ PQExpBufferData dst_file;
+
+ if (arcdir_ent->d_type != DT_REG)
+ {
+ continue;
+ }
+ initPQExpBuffer(&src_file);
+ initPQExpBuffer(&dst_file);
+
+ appendPQExpBuffer(&src_file, "%s/%s",
+ runtime_options.config_archive_dir, arcdir_ent->d_name);
+
+ appendPQExpBuffer(&dst_file, "%s/%s",
+ runtime_options.dest_dir, arcdir_ent->d_name);
+
+ log_verbose(LOG_DEBUG, "Copying %s to %s\n", src_file.data, dst_file.data);
+
+ /* XXX check result */
+
+ if (copy_file(src_file.data, dst_file.data) == false)
+ {
+ copy_ok = false;
+ log_warning(_("Unable to copy %s from %s\n"), arcdir_ent->d_name, runtime_options.config_archive_dir);
+ }
+ else
+ {
+ unlink(src_file.data);
+ copied_count++;
+ }
+
+ termPQExpBuffer(&src_file);
+ termPQExpBuffer(&dst_file);
+ }
+
+ closedir(arcdir);
+
+
+ if (copy_ok == false)
+ {
+ log_err(_("Unable to copy all files from %s\n"), runtime_options.config_archive_dir);
+ exit(ERR_BAD_CONFIG);
+ }
+
+ log_notice(_("%i files copied to %s\n"), copied_count, runtime_options.dest_dir);
+
+ /*
+ * Finally, delete directory - it should be empty unless it's been interfered
+ * with for some reason, in which case manual attention is required
+ */
+
+	if (rmdir(runtime_options.config_archive_dir) != 0)
+ {
+ log_err(_("Unable to delete %s\n"), runtime_options.config_archive_dir);
+ exit(ERR_BAD_CONFIG);
+ }
+
+ log_verbose(LOG_NOTICE, "Directory %s deleted\n", runtime_options.config_archive_dir);
+
+ return;
+}
+
+
static void
do_witness_create(void)
{
@@ -2197,9 +3378,8 @@ do_witness_create(void)
char master_hba_file[MAXLEN];
bool success;
bool record_created;
-
- PQconninfoOption *conninfo_options;
- PQconninfoOption *conninfo_option;
+ char repmgr_user[MAXLEN];
+ char repmgr_db[MAXLEN];
/* Connection parameters for master only */
keywords[0] = "host";
@@ -2207,6 +3387,13 @@ do_witness_create(void)
keywords[1] = "port";
values[1] = runtime_options.masterport;
+ /*
+ * Extract the repmgr user and database names from the conninfo string
+ * provided in repmgr.conf
+ */
+ get_conninfo_value(options.conninfo, "user", repmgr_user);
+ get_conninfo_value(options.conninfo, "dbname", repmgr_db);
+
/* We need to connect to check configuration and copy it */
masterconn = establish_db_connection_by_params(keywords, values, true);
if (!masterconn)
@@ -2293,11 +3480,11 @@ do_witness_create(void)
if (!runtime_options.superuser[0])
strncpy(runtime_options.superuser, "postgres", MAXLEN);
- sprintf(script, "%s %s -D %s init -o \"%s-U %s\"",
- make_pg_path("pg_ctl"),
- options.pg_ctl_options, runtime_options.dest_dir,
- runtime_options.initdb_no_pwprompt ? "" : "-W ",
- runtime_options.superuser);
+ maxlen_snprintf(script, "%s %s -D %s init -o \"%s-U %s\"",
+ make_pg_path("pg_ctl"),
+ options.pg_ctl_options, runtime_options.dest_dir,
+ runtime_options.witness_pwprompt ? "-W " : "",
+ runtime_options.superuser);
log_info(_("initializing cluster for witness: %s.\n"), script);
r = system(script);
@@ -2342,26 +3529,14 @@ do_witness_create(void)
xsnprintf(buf, sizeof(buf), "\n#Configuration added by %s\n", progname());
fputs(buf, pg_conf);
-
- /* Attempt to extract a port number from the provided conninfo string
+ /*
+ * Attempt to extract a port number from the provided conninfo string.
* This will override any value provided with '-l/--local-port', as it's
* what we'll later try and connect to anyway. '-l/--local-port' should
* be deprecated.
*/
- conninfo_options = PQconninfoParse(options.conninfo, NULL);
- for (conninfo_option = conninfo_options; conninfo_option->keyword != NULL; conninfo_option++)
- {
- if (strcmp(conninfo_option->keyword, "port") == 0)
- {
- if (conninfo_option->val != NULL && conninfo_option->val[0] != '\0')
- {
- strncpy(runtime_options.localport, conninfo_option->val, MAXLEN);
- break;
- }
- }
- }
- PQconninfoFree(conninfo_options);
+ get_conninfo_value(options.conninfo, "port", runtime_options.localport);
/*
* If not specified by the user, the default port for the witness server
@@ -2385,9 +3560,9 @@ do_witness_create(void)
/* start new instance */
- sprintf(script, "%s %s -w -D %s start",
- make_pg_path("pg_ctl"),
- options.pg_ctl_options, runtime_options.dest_dir);
+ maxlen_snprintf(script, "%s %s -w -D %s start",
+ make_pg_path("pg_ctl"),
+ options.pg_ctl_options, runtime_options.dest_dir);
log_info(_("starting witness server: %s\n"), script);
r = system(script);
if (r != 0)
@@ -2406,13 +3581,18 @@ do_witness_create(void)
exit(ERR_BAD_CONFIG);
}
+
/* check if we need to create a user */
- if (runtime_options.username[0] && runtime_options.localport[0] && strcmp(runtime_options.username,"postgres") != 0)
- {
- /* create required user; needs to be superuser to create untrusted language function in c */
- sprintf(script, "%s -p %s --superuser --login -U %s %s",
- make_pg_path("createuser"),
- runtime_options.localport, runtime_options.superuser, runtime_options.username);
+ if (strcmp(repmgr_user, "postgres") != 0)
+ {
+ /* create required user; needs to be superuser to create untrusted
+ * language function in C */
+ maxlen_snprintf(script, "%s -p %s --superuser --login %s-U %s %s",
+ make_pg_path("createuser"),
+ runtime_options.localport,
+ runtime_options.witness_pwprompt ? "-P " : "",
+ runtime_options.superuser,
+ repmgr_user);
log_info(_("creating user for witness db: %s.\n"), script);
r = system(script);
@@ -2436,9 +3616,12 @@ do_witness_create(void)
if (runtime_options.dbname[0] && strcmp(runtime_options.dbname,"postgres") != 0 && runtime_options.localport[0])
{
/* create required db */
- sprintf(script, "%s -p %s -U %s --owner=%s %s",
- make_pg_path("createdb"),
- runtime_options.localport, runtime_options.superuser, runtime_options.username, runtime_options.dbname);
+ maxlen_snprintf(script, "%s -p %s -U %s --owner=%s %s",
+ make_pg_path("createdb"),
+ runtime_options.localport,
+ runtime_options.superuser,
+ repmgr_user,
+ repmgr_db);
log_info("creating database for witness db: %s.\n", script);
r = system(script);
@@ -2464,7 +3647,7 @@ do_witness_create(void)
if (success == false)
{
- char *errmsg = _("unable to retrieve location of pg_hba.conf");
+ char *errmsg = _("Unable to retrieve location of pg_hba.conf");
log_err("%s\n", errmsg);
create_event_record(masterconn,
@@ -2481,7 +3664,7 @@ do_witness_create(void)
master_hba_file, runtime_options.dest_dir, false, -1);
if (r != 0)
{
- char *errmsg = _("unable to copy pg_hba.conf from master");
+ char *errmsg = _("Unable to copy pg_hba.conf from master");
log_err("%s\n", errmsg);
create_event_record(masterconn,
@@ -2495,10 +3678,10 @@ do_witness_create(void)
exit(ERR_BAD_CONFIG);
}
- /* reload to adapt for changed pg_hba.conf */
- sprintf(script, "%s %s -w -D %s reload",
- make_pg_path("pg_ctl"),
- options.pg_ctl_options, runtime_options.dest_dir);
+ /* reload witness server to activate the copied pg_hba.conf */
+ maxlen_snprintf(script, "%s %s -w -D %s reload",
+ make_pg_path("pg_ctl"),
+ options.pg_ctl_options, runtime_options.dest_dir);
log_info(_("reloading witness server configuration: %s"), script);
r = system(script);
if (r != 0)
@@ -2517,7 +3700,47 @@ do_witness_create(void)
exit(ERR_BAD_CONFIG);
}
- /* register ourselves in the master */
+ /* establish a connection to the witness, and create the schema */
+ witnessconn = establish_db_connection(options.conninfo, false);
+
+ if (PQstatus(witnessconn) != CONNECTION_OK)
+ {
+ create_event_record(masterconn,
+ &options,
+ options.node,
+ "witness_create",
+ false,
+							_("Unable to connect to witness server"));
+ PQfinish(masterconn);
+ exit(ERR_BAD_CONFIG);
+ }
+
+
+ log_info(_("starting copy of configuration from master...\n"));
+
+ begin_transaction(witnessconn);
+
+ if (!create_schema(witnessconn))
+ {
+ rollback_transaction(witnessconn);
+ create_event_record(masterconn,
+ &options,
+ options.node,
+ "witness_create",
+ false,
+ _("Unable to create schema on witness"));
+ PQfinish(masterconn);
+ PQfinish(witnessconn);
+ exit(ERR_BAD_CONFIG);
+ }
+
+ commit_transaction(witnessconn);
+
+ /*
+ * Register new witness server on the primary
+ * Do this as late as possible to avoid having to delete
+ * the record if the server creation fails
+ */
if (runtime_options.force)
{
@@ -2556,29 +3779,6 @@ do_witness_create(void)
exit(ERR_DB_QUERY);
}
- /* establish a connection to the witness, and create the schema */
- witnessconn = establish_db_connection(options.conninfo, true);
-
- log_info(_("starting copy of configuration from master...\n"));
-
- begin_transaction(witnessconn);
-
-
- if (!create_schema(witnessconn))
- {
- rollback_transaction(witnessconn);
- create_event_record(masterconn,
- &options,
- options.node,
- "witness_create",
- false,
- _("unable to create schema on witness"));
- PQfinish(masterconn);
- PQfinish(witnessconn);
- exit(ERR_BAD_CONFIG);
- }
-
- commit_transaction(witnessconn);
/* copy configuration from master, only repl_nodes is needed */
if (!copy_configuration(masterconn, witnessconn, options.cluster_name))
@@ -2589,24 +3789,33 @@ do_witness_create(void)
"witness_create",
false,
_("Unable to copy configuration from master"));
+
+ /*
+ * delete previously created witness node record
+ * XXX maybe set inactive?
+ */
+ delete_node_record(masterconn,
+ options.node,
+ "witness create");
+
PQfinish(masterconn);
PQfinish(witnessconn);
exit(ERR_BAD_CONFIG);
}
/* drop superuser powers if needed */
- if (runtime_options.username[0] && runtime_options.localport[0] && strcmp(runtime_options.username,"postgres") != 0)
+ if (strcmp(repmgr_user, "postgres") != 0)
{
- sqlquery_snprintf(sqlquery, "ALTER ROLE %s NOSUPERUSER", runtime_options.username);
+ sqlquery_snprintf(sqlquery, "ALTER ROLE %s NOSUPERUSER", repmgr_user);
log_info(_("revoking superuser status on user %s: %s.\n"),
- runtime_options.username, sqlquery);
+ repmgr_user, sqlquery);
log_debug(_("witness create: %s\n"), sqlquery);
res = PQexec(witnessconn, sqlquery);
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
{
- log_err(_("unable to alter user privileges for user %s: %s\n"),
- runtime_options.username,
+ log_err(_("Unable to alter user privileges for user %s: %s\n"),
+ repmgr_user,
PQerrorMessage(witnessconn));
PQfinish(masterconn);
PQfinish(witnessconn);
@@ -2614,6 +3823,10 @@ do_witness_create(void)
}
}
+ /* Finished with the witness server */
+
+ PQfinish(witnessconn);
+
/* Log the event */
create_event_record(masterconn,
&options,
@@ -2623,20 +3836,21 @@ do_witness_create(void)
NULL);
PQfinish(masterconn);
- PQfinish(witnessconn);
log_notice(_("configuration has been successfully copied to the witness\n"));
}
static void
-help(void)
+do_help(void)
{
+ const char *host;
+
printf(_("%s: replication management tool for PostgreSQL\n"), progname());
printf(_("\n"));
printf(_("Usage:\n"));
printf(_(" %s [OPTIONS] master register\n"), progname());
- printf(_(" %s [OPTIONS] standby {register|unregister|clone|promote|follow}\n"),
+ printf(_(" %s [OPTIONS] standby {register|unregister|clone|promote|follow|switchover}\n"),
progname());
printf(_(" %s [OPTIONS] cluster {show|cleanup}\n"), progname());
printf(_("\n"));
@@ -2645,15 +3859,16 @@ help(void)
printf(_(" -V, --version output version information, then exit\n"));
printf(_("\n"));
printf(_("Logging options:\n"));
- printf(_(" -L, --log-level set log level (overrides configuration file)\n"));
+ printf(_(" -L, --log-level set log level (overrides configuration file; default: NOTICE)\n"));
printf(_(" -v, --verbose display additional log output (useful for debugging)\n"));
printf(_(" -t, --terse don't display hints and other non-critical output\n"));
printf(_("\n"));
printf(_("Connection options:\n"));
- printf(_(" -d, --dbname=DBNAME database to connect to\n"));
- printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
- printf(_(" -p, --port=PORT database server port\n"));
- printf(_(" -U, --username=USERNAME database user name to connect as\n"));
+ printf(_(" -d, --dbname=DBNAME database to connect to (default: \"%s\")\n"), runtime_options.dbname);
+ host = getenv("PGHOST");
+ printf(_(" -h, --host=HOSTNAME database server host or socket directory (default: \"%s\")\n"), host ? host : _("local socket"));
+ printf(_(" -p, --port=PORT database server port (default: \"%s\")\n"), runtime_options.masterport);
+ printf(_(" -U, --username=USERNAME database user name to connect as (default: \"%s\")\n"), runtime_options.username);
printf(_("\n"));
printf(_("General configuration options:\n"));
printf(_(" -b, --pg_bindir=PATH path to PostgreSQL binaries (optional)\n"));
@@ -2674,30 +3889,32 @@ help(void)
printf(_(" -w, --wal-keep-segments=VALUE (standby clone) minimum value for the GUC\n" \
" wal_keep_segments (default: %s)\n"), DEFAULT_WAL_KEEP_SEGMENTS);
printf(_(" -W, --wait (standby follow) wait for a master to appear\n"));
- printf(_(" -k, --keep-history=VALUE (cluster cleanup) retain indicated number of days of history\n"));
-
-
- printf(_(" --initdb-no-pwprompt (witness server) no superuser password prompt during initdb\n"));
-/* remove this line in the next significant release */
- printf(_(" -l, --local-port=PORT (witness server) witness server local port, default: %s \n" \
- " (DEPRECATED, put port in conninfo)\n"), WITNESS_DEFAULT_PORT);
+ printf(_(" -m, --mode (standby switchover) shutdown mode (smart|fast|immediate)\n"));
+ printf(_(" -C, --remote-config-file (standby switchover) path to the configuration file on\n" \
+ " the current master\n"));
+ printf(_(" --pg_rewind[=VALUE] (standby switchover) 9.3/9.4 only - use pg_rewind if available,\n" \
+ " optionally providing a path to the binary\n"));
+ printf(_(" -k, --keep-history=VALUE (cluster cleanup) retain indicated number of days of history (default: 0)\n"));
+/* printf(_(" --initdb-no-pwprompt (witness server) no superuser password prompt during initdb\n"));*/
+ printf(_(" -P, --pwprompt (witness server) prompt for password when creating users\n"));
printf(_(" -S, --superuser=USERNAME (witness server) superuser username for witness database\n" \
" (default: postgres)\n"));
printf(_("\n"));
printf(_("%s performs the following node management tasks:\n"), progname());
printf(_("\n"));
printf(_("COMMANDS:\n"));
- printf(_(" master register - registers the master in a cluster\n"));
- printf(_(" standby clone [node] - creates a new standby\n"));
- printf(_(" standby register - registers a standby in a cluster\n"));
- printf(_(" standby unregister - unregisters a standby in a cluster\n"));
- printf(_(" standby promote - promotes a specific standby to master\n"));
- printf(_(" standby follow - makes standby follow a new master\n"));
- printf(_(" witness create - creates a new witness server\n"));
- printf(_(" cluster show - displays information about cluster nodes\n"));
- printf(_(" cluster cleanup - prunes or truncates monitoring history\n" \
- " (monitoring history creation requires repmgrd\n" \
- " with --monitoring-history option)\n"));
+ printf(_(" master register - registers the master in a cluster\n"));
+ printf(_(" standby clone [node] - creates a new standby\n"));
+ printf(_(" standby register - registers a standby in a cluster\n"));
+ printf(_(" standby unregister - unregisters a standby in a cluster\n"));
+ printf(_(" standby promote - promotes a specific standby to master\n"));
+ printf(_(" standby follow - makes standby follow a new master\n"));
+ printf(_(" standby switchover - switch this standby with the current master\n"));
+ printf(_(" witness create - creates a new witness server\n"));
+ printf(_(" cluster show - displays information about cluster nodes\n"));
+ printf(_(" cluster cleanup - prunes or truncates monitoring history\n" \
+ " (monitoring history creation requires repmgrd\n" \
+ " with --monitoring-history option)\n"));
}
@@ -2809,14 +4026,14 @@ test_ssh_connection(char *host, char *remote_user)
for(i = 0; truebin_paths[i] && r != 0; ++i)
{
if (!remote_user[0])
- maxlen_snprintf(script, "ssh -o Batchmode=yes %s %s %s",
+ maxlen_snprintf(script, "ssh -o Batchmode=yes %s %s %s 2>/dev/null",
options.ssh_options, host, truebin_paths[i]);
else
- maxlen_snprintf(script, "ssh -o Batchmode=yes %s %s -l %s %s",
+ maxlen_snprintf(script, "ssh -o Batchmode=yes %s %s -l %s %s 2>/dev/null",
options.ssh_options, host, remote_user,
truebin_paths[i]);
- log_debug(_("command is: %s\n"), script);
+ log_verbose(LOG_DEBUG, _("test_ssh_connection(): executing %s\n"), script);
r = system(script);
}
@@ -2876,6 +4093,9 @@ copy_remote_files(char *host, char *remote_user, char *remote_path,
appendPQExpBuffer(&rsync_flags, "%s",
" --exclude=postmaster.pid --exclude=postmaster.opts --exclude=global/pg_control");
+ appendPQExpBuffer(&rsync_flags, "%s",
+ " --exclude=recovery.conf --exclude=recovery.done");
+
if (server_version_num >= 90400)
{
/*
@@ -2998,8 +4218,7 @@ check_parameters_for_action(const int action)
* parameters are at least useless and could be confusing so
* reject them
*/
- if (runtime_options.host[0] || runtime_options.masterport[0] ||
- runtime_options.username[0] || runtime_options.dbname[0])
+ if (connection_param_provided)
{
error_list_append(&cli_warnings, _("master connection parameters not required when executing MASTER REGISTER"));
}
@@ -3015,8 +4234,7 @@ check_parameters_for_action(const int action)
* need connection parameters to the master because we can detect
* the master in repl_nodes
*/
- if (runtime_options.host[0] || runtime_options.masterport[0] ||
- runtime_options.username[0] || runtime_options.dbname[0])
+ if (connection_param_provided)
{
error_list_append(&cli_warnings, _("master connection parameters not required when executing STANDBY REGISTER"));
}
@@ -3032,8 +4250,7 @@ check_parameters_for_action(const int action)
* need connection parameters to the master because we can detect
* the master in repl_nodes
*/
- if (runtime_options.host[0] || runtime_options.masterport[0] ||
- runtime_options.username[0] || runtime_options.dbname[0])
+ if (connection_param_provided)
{
error_list_append(&cli_warnings, _("master connection parameters not required when executing STANDBY UNREGISTER"));
}
@@ -3050,8 +4267,7 @@ check_parameters_for_action(const int action)
* detect the master in repl_nodes if we can't find it then the
* promote action will be cancelled
*/
- if (runtime_options.host[0] || runtime_options.masterport[0] ||
- runtime_options.username[0] || runtime_options.dbname[0])
+ if (connection_param_provided)
{
error_list_append(&cli_warnings, _("master connection parameters not required when executing STANDBY PROMOTE"));
}
@@ -3060,24 +4276,30 @@ check_parameters_for_action(const int action)
error_list_append(&cli_warnings, _("destination directory not required when executing STANDBY PROMOTE"));
}
break;
+
case STANDBY_FOLLOW:
/*
* To make a standby follow a master we only need the repmgr.conf
* we don't want connection parameters to the new master because
- * we will try to detect the master in repl_nodes if we can't find
+ * we will try to detect the master in repl_nodes; if we can't find
* it then the follow action will be cancelled
*/
- if (runtime_options.host[0] || runtime_options.masterport[0] ||
- runtime_options.username[0] || runtime_options.dbname[0])
- {
- error_list_append(&cli_warnings, _("master connection parameters not required when executing STANDBY FOLLOW"));
- }
- if (runtime_options.dest_dir[0])
+
+ if (runtime_options.host[0] || runtime_options.dest_dir[0])
{
- error_list_append(&cli_warnings, _("destination directory not required when executing STANDBY FOLLOW"));
+ if (!runtime_options.host[0])
+ {
+ error_list_append(&cli_errors, _("master hostname (-h/--host) required when executing STANDBY FOLLOW with -D/--data-dir option"));
+ }
+
+ if (!runtime_options.dest_dir[0])
+ {
+ error_list_append(&cli_errors, _("local data directory (-D/--data-dir) required when executing STANDBY FOLLOW with -h/--host option"));
+ }
}
break;
+
case STANDBY_CLONE:
/*
@@ -3091,19 +4313,40 @@ check_parameters_for_action(const int action)
error_list_append(&cli_errors, _("master hostname (-h/--host) required when executing STANDBY CLONE"));
}
- if (strcmp(runtime_options.dbname, "") == 0)
+ if (runtime_options.fast_checkpoint && runtime_options.rsync_only)
+ {
+ error_list_append(&cli_warnings, _("-c/--fast-checkpoint has no effect when using -r/--rsync-only"));
+ }
+ config_file_required = false;
+ break;
+ case STANDBY_SWITCHOVER:
+ /* allow all parameters to be supplied */
+ break;
+ case STANDBY_ARCHIVE_CONFIG:
+ if (strcmp(runtime_options.config_archive_dir, "") == 0)
{
- error_list_append(&cli_errors, _("master database name (-d/--dbname) required when executing STANDBY CLONE"));
+ error_list_append(&cli_errors, _("--config-archive-dir required when executing STANDBY ARCHIVE_CONFIG"));
+ }
+ break;
+ case STANDBY_RESTORE_CONFIG:
+ if (strcmp(runtime_options.config_archive_dir, "") == 0)
+ {
+ error_list_append(&cli_errors, _("--config-archive-dir required when executing STANDBY RESTORE_CONFIG"));
}
- if (strcmp(runtime_options.username, "") == 0)
+ if (strcmp(runtime_options.dest_dir, "") == 0)
{
- error_list_append(&cli_errors, _("master database username (-U/--username) required when executing STANDBY CLONE"));
+ error_list_append(&cli_errors, _("-D/--data-dir required when executing STANDBY RESTORE_CONFIG"));
}
config_file_required = false;
break;
case WITNESS_CREATE:
+ /* Require data directory */
+ if (strcmp(runtime_options.dest_dir, "") == 0)
+ {
+ error_list_append(&cli_errors, _("-D/--data-dir required when executing WITNESS CREATE"));
+ }
/* allow all parameters to be supplied */
break;
case CLUSTER_SHOW:
@@ -3143,6 +4386,15 @@ check_parameters_for_action(const int action)
}
}
+ /* Warn about parameters which apply to STANDBY SWITCHOVER only */
+ if (action != STANDBY_SWITCHOVER)
+ {
+ if (pg_rewind_supplied == true)
+ {
+ error_list_append(&cli_warnings, _("--pg_rewind can only be used when executing STANDBY SWITCHOVER"));
+ }
+ }
+
return;
}
@@ -3340,6 +4592,7 @@ create_schema(PGconn *conn)
}
PQclear(res);
+
/* an index to improve performance of the view */
sqlquery_snprintf(sqlquery,
"CREATE INDEX idx_repl_status_sort "
@@ -3360,6 +4613,35 @@ create_schema(PGconn *conn)
}
PQclear(res);
+
+ /* CREATE VIEW repl_show_nodes */
+ sqlquery_snprintf(sqlquery,
+ "CREATE VIEW %s.repl_show_nodes AS "
+ "SELECT rn.id, rn.conninfo, rn.type, rn.name, rn.cluster,"
+ " rn.priority, rn.active, sq.name AS upstream_node_name"
+ " FROM %s.repl_nodes as rn"
+ " LEFT JOIN %s.repl_nodes AS sq"
+ " ON sq.id=rn.upstream_node_id",
+ get_repmgr_schema_quoted(conn),
+ get_repmgr_schema_quoted(conn),
+ get_repmgr_schema_quoted(conn));
+
+ log_debug(_("master register: %s\n"), sqlquery);
+
+ res = PQexec(conn, sqlquery);
+ if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
+ {
+ log_err(_("unable to create view %s.repl_show_nodes: %s\n"),
+ get_repmgr_schema_quoted(conn), PQerrorMessage(conn));
+
+ if (res != NULL)
+ PQclear(res);
+
+ return false;
+ }
+ PQclear(res);
+
+
/*
* XXX Here we MUST try to load the repmgr_function.sql not hardcode it
* here
@@ -3667,31 +4949,18 @@ check_upstream_config(PGconn *conn, int server_version_num, bool exit_on_error)
}
}
- i = guc_set(conn, "archive_mode", "=", "on");
- if (i == 0 || i == -1)
- {
- if (i == 0)
- log_err(_("parameter 'archive_mode' must be set to 'on'\n"));
-
- if (exit_on_error == true)
- {
- PQfinish(conn);
- exit(ERR_BAD_CONFIG);
- }
-
- config_ok = false;
- }
-
/*
- * check that 'archive_command' is non empty (however it's not practical to
- * check that it's actually valid)
+ * If archive_mode is enabled, check that 'archive_command' is non empty
+ * (however it's not practical to check that it actually represents a valid
+ * command).
*
- * if 'archive_mode' is not on, pg_settings returns '(disabled)' regardless
- * of what's in 'archive_command', so until 'archive_mode' is on we can't
- * properly check it.
+ * From PostgreSQL 9.5, archive_mode can be one of 'off', 'on' or 'always'
+ * so for ease of backwards compatibility, rather than explicitly check for an
+ * enabled mode, check that it's not "off".
*/
- if (guc_set(conn, "archive_mode", "=", "on"))
+
+ if (guc_set(conn, "archive_mode", "!=", "off"))
{
i = guc_set(conn, "archive_command", "!=", "");
@@ -3713,9 +4982,11 @@ check_upstream_config(PGconn *conn, int server_version_num, bool exit_on_error)
/*
* Check that 'hot_standby' is on. This isn't strictly necessary
- * for the primary server, however the assumption is that configuration
- * should be consistent for all servers in a cluster.
+ * for the primary server, however the assumption is that we'll be
+ * cloning standbys and thus copying the primary configuration;
+ * this way the standby will be correctly configured by default.
*/
+
i = guc_set(conn, "hot_standby", "=", "on");
if (i == 0 || i == -1)
{
@@ -3893,3 +5164,145 @@ print_error_list(ErrorList *error_list, int log_level)
}
}
+
+
+/*
+ * Execute a command via ssh on the remote host.
+ *
+ * TODO: implement SSH calls using libssh2.
+ */
+static bool
+remote_command(const char *host, const char *user, const char *command, PQExpBufferData *outputbuf)
+{
+ FILE *fp;
+ char ssh_command[MAXLEN];
+ PQExpBufferData ssh_host;
+
+ char output[MAXLEN];
+
+ initPQExpBuffer(&ssh_host);
+
+ if (*user != '\0')
+ {
+ appendPQExpBuffer(&ssh_host, "%s@", user);
+ }
+
+ appendPQExpBuffer(&ssh_host, "%s",host);
+
+ maxlen_snprintf(ssh_command,
+ "ssh -o Batchmode=yes %s %s",
+ ssh_host.data,
+ command);
+
+ termPQExpBuffer(&ssh_host);
+
+ log_debug("remote_command(): %s\n", ssh_command);
+
+ fp = popen(ssh_command, "r");
+
+ if (fp == NULL)
+ {
+ log_err(_("unable to execute remote command:\n%s\n"), ssh_command);
+ return false;
+ }
+
+ /* TODO: better error handling */
+ while (fgets(output, MAXLEN, fp) != NULL)
+ {
+ appendPQExpBuffer(outputbuf, "%s", output);
+ }
+
+ pclose(fp);
+
+ log_verbose(LOG_DEBUG, "remote_command(): output returned was:\n%s", outputbuf->data);
+
+ return true;
+}
+
+
+/*
+ * Extract values from provided conninfo string and return
+ * formatted as command-line parameters suitable for passing to repmgr
+ */
+static void
+format_db_cli_params(const char *conninfo, char *output)
+{
+ PQExpBufferData buf;
+ char host[MAXLEN] = "";
+ char port[MAXLEN] = "";
+ char dbname[MAXLEN] = "";
+ char user[MAXLEN] = "";
+
+ initPQExpBuffer(&buf);
+
+ get_conninfo_value(conninfo, "host", host);
+ get_conninfo_value(conninfo, "port", port);
+ get_conninfo_value(conninfo, "dbname", dbname);
+ get_conninfo_value(conninfo, "user", user);
+
+ if (host[0])
+ {
+ appendPQExpBuffer(&buf, "-h %s ", host);
+ }
+
+ if (port[0])
+ {
+ appendPQExpBuffer(&buf, "-p %s ", port);
+ }
+
+ if (dbname[0])
+ {
+ appendPQExpBuffer(&buf, "-d %s ", dbname);
+ }
+
+ if (user[0])
+ {
+ appendPQExpBuffer(&buf, "-U %s ", user);
+ }
+
+ strncpy(output, buf.data, MAXLEN);
+
+ termPQExpBuffer(&buf);
+
+}
+
+bool
+copy_file(const char *old_filename, const char *new_filename)
+{
+ FILE *ptr_old, *ptr_new;
+ int a;
+
+ ptr_old = fopen(old_filename, "r");
+ ptr_new = fopen(new_filename, "w");
+
+ if (ptr_old == NULL)
+ return false;
+
+ if (ptr_new == NULL)
+ {
+ fclose(ptr_old);
+ return false;
+ }
+
+ chmod(new_filename, S_IRUSR | S_IWUSR);
+
+ while(1)
+ {
+ a = fgetc(ptr_old);
+
+ if (!feof(ptr_old))
+ {
+ fputc(a, ptr_new);
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ fclose(ptr_new);
+ fclose(ptr_old);
+
+ return true;
+}
+
diff --git a/repmgr.conf.sample b/repmgr.conf.sample
index bd0e5e8..cba8888 100644
--- a/repmgr.conf.sample
+++ b/repmgr.conf.sample
@@ -2,6 +2,10 @@
# Replication Manager sample configuration file
###################################################
+# Some configuration items will be set with a default value; this
+# is noted for each item. Where no default value is shown, the
+# parameter will be treated as empty or false.
+
# Required configuration items
# ============================
#
@@ -37,14 +41,12 @@ conninfo='host=192.168.204.104 dbname=repmgr_db user=repmgr_usr'
# upstream standby, specify that node's ID with 'upstream_node'. The node
# must exist before the new standby can be registered. If a standby is
# to connect directly to a primary node, this parameter is not required.
-#
-# upstream_node=1
+upstream_node=1
-# physical replication slots - PostgreSQL 9.4 and later only
+# use physical replication slots - PostgreSQL 9.4 and later only
# (default: 0)
-#
-# use_replication_slots=0
-#
+use_replication_slots=0
+
# NOTE: 'max_replication_slots' should be configured for at least the
# number of standbys which will connect to the primary.
@@ -61,7 +63,7 @@ logfacility=STDERR
# stderr can be redirected to an arbitrary file:
#
-# logfile='/var/log/repmgr.log'
+logfile='/var/log/repmgr/repmgr.log'
# event notifications can be passed to an arbitrary external program
# together with the following parameters:
@@ -75,12 +77,12 @@ logfacility=STDERR
# the values provided for "%t" and "%d" will probably contain spaces,
# so should be quoted in the provided command configuration, e.g.:
#
-# event_notification_command='/path/to/some/script %n %e %s "%t" "%d"'
+event_notification_command='/path/to/some/script %n %e %s "%t" "%d"'
# By default, all notifications will be passed; the notification types
# can be filtered to explicitly named ones:
#
-# event_notifications=master_register,standby_register,witness_create
+event_notifications=master_register,standby_register,witness_create
# Environment/command settings
@@ -88,17 +90,17 @@ logfacility=STDERR
# path to PostgreSQL binary directory (location of pg_ctl, pg_basebackup etc.)
# (if not provided, defaults to system $PATH)
-# pg_bindir=/usr/bin/
+pg_bindir=/usr/bin/
# external command options
-# rsync_options=--archive --checksum --compress --progress --rsh="ssh -o \"StrictHostKeyChecking no\""
-# ssh_options=-o "StrictHostKeyChecking no"
+rsync_options=--archive --checksum --compress --progress --rsh="ssh -o \"StrictHostKeyChecking no\""
+ssh_options=-o "StrictHostKeyChecking no"
-# external command arguments
+# external command arguments. Values shown are examples.
-# pg_ctl_options='-s'
-# pg_basebackup_options='--xlog-method=s'
+pg_ctl_options='-s'
+pg_basebackup_options='--xlog-method=s'
# Standby clone settings
@@ -114,31 +116,33 @@ logfacility=STDERR
# Failover settings (repmgrd)
# ---------------------------
#
-# These settings are only applied when repmgrd is running.
+# These settings are only applied when repmgrd is running. Values shown
+# are defaults.
# Number of seconds to wait for a response from the primary server before
-# deciding it has failed
+# deciding it has failed.
master_response_timeout=60
-# Number of times to try and reconnect to the primary before starting
-# the failover procedure
+# Number of attempts, and interval (in seconds) between attempts,
+# when connecting to a server to establish its status (e.g. the
+# master during failover)
reconnect_attempts=6
reconnect_interval=10
# Autofailover options
-failover=automatic # one of 'automatic', 'manual'
+failover=manual # one of 'automatic', 'manual'
+ # (default: manual)
priority=100 # a value of zero or less prevents the node being promoted to primary
+ # (default: 100)
promote_command='repmgr standby promote -f /path/to/repmgr.conf'
follow_command='repmgr standby follow -f /path/to/repmgr.conf -W'
# monitoring interval in seconds; default is 2
-#
-# monitor_interval_secs=2
+monitor_interval_secs=2
# change wait time for primary; before we bail out and exit when the primary
# disappears, we wait 'reconnect_attempts' * 'retry_promote_interval_secs'
# seconds; by default this would be half an hour, as 'retry_promote_interval_secs'
# default value is 300)
-#
-# retry_promote_interval_secs=300
+retry_promote_interval_secs=300
diff --git a/repmgr.h b/repmgr.h
index c5d2ecf..ad8a34d 100644
--- a/repmgr.h
+++ b/repmgr.h
@@ -1,6 +1,6 @@
/*
* repmgr.h
- * Copyright (c) 2ndQuadrant, 2010-2015
+ * Copyright (c) 2ndQuadrant, 2010-2016
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -38,8 +38,6 @@
#define DEFAULT_WAL_KEEP_SEGMENTS "5000"
#define DEFAULT_DEST_DIR "."
-#define DEFAULT_MASTER_PORT "5432"
-#define DEFAULT_DBNAME "postgres"
#define DEFAULT_REPMGR_SCHEMA_PREFIX "repmgr_"
#define DEFAULT_PRIORITY 100
#define FAILOVER_NODES_MAX_CHECK 50
@@ -69,25 +67,38 @@ typedef struct
bool force;
bool wait_for_master;
bool ignore_rsync_warn;
- bool initdb_no_pwprompt;
+ bool witness_pwprompt;
bool rsync_only;
bool fast_checkpoint;
bool ignore_external_config_files;
+ char pg_ctl_mode[MAXLEN];
char masterport[MAXLEN];
- char localport[MAXLEN];
+ /*
+ * configuration file parameters which can be overridden on the
+ * command line
+ */
char loglevel[MAXLEN];
+ /* parameter used by STANDBY SWITCHOVER */
+ char remote_config_file[MAXLEN];
+ char pg_rewind[MAXFILENAME];
+ /* parameter used by STANDBY {ARCHIVE_CONFIG | RESTORE_CONFIG} */
+ char config_archive_dir[MAXLEN];
/* parameter used by CLUSTER CLEANUP */
int keep_history;
char pg_bindir[MAXLEN];
char recovery_min_apply_delay[MAXLEN];
+
+ /* deprecated command line options */
+ char localport[MAXLEN];
+ bool initdb_no_pwprompt;
} t_runtime_options;
-#define T_RUNTIME_OPTIONS_INITIALIZER { "", "", "", "", "", "", "", DEFAULT_WAL_KEEP_SEGMENTS, false, false, false, false, false, false, false, false, false, "", "", "", 0, "", "" }
+#define T_RUNTIME_OPTIONS_INITIALIZER { "", "", "", "", "", "", "", DEFAULT_WAL_KEEP_SEGMENTS, false, false, false, false, false, false, false, false, false, "smart", "", "", "", "", "", 0, "", "", "", false }
extern char repmgr_schema[MAXLEN];
-
+extern bool config_file_found;
#endif
diff --git a/repmgr.sql b/repmgr.sql
index 52f4e58..9628d3f 100644
--- a/repmgr.sql
+++ b/repmgr.sql
@@ -1,7 +1,7 @@
/*
* repmgr.sql
*
- * Copyright (C) 2ndQuadrant, 2010-2015
+ * Copyright (C) 2ndQuadrant, 2010-2016
*
*/
@@ -59,3 +59,12 @@ WHERE (standby_node, last_monitor_time) IN (SELECT standby_node, MAX(last_monito
ALTER VIEW repl_status OWNER TO repmgr;
CREATE INDEX idx_repl_status_sort ON repl_monitor(last_monitor_time, standby_node);
+
+/*
+ * This view lists all nodes together with the name of each node's
+ * upstream node (where applicable)
+ */
+CREATE VIEW repl_show_nodes AS
+SELECT rn.id, rn.conninfo, rn.type, rn.name, rn.cluster,
+ rn.priority, rn.active, sq.name AS upstream_node_name
+FROM repl_nodes as rn LEFT JOIN repl_nodes AS sq ON sq.id=rn.upstream_node_id;
diff --git a/repmgrd.c b/repmgrd.c
index 21271a1..bb4d3d0 100644
--- a/repmgrd.c
+++ b/repmgrd.c
@@ -1,6 +1,6 @@
/*
* repmgrd.c - Replication manager daemon
- * Copyright (C) 2ndQuadrant, 2010-2015
+ * Copyright (C) 2ndQuadrant, 2010-2016
*
* This module connects to the nodes of a replication cluster and monitors
* how far are they from master
@@ -79,7 +79,6 @@ static void do_master_failover(void);
static bool do_upstream_standby_failover(t_node_info upstream_node);
static t_node_info get_node_info(PGconn *conn, char *cluster, int node_id);
-static t_server_type parse_node_type(const char *type);
static XLogRecPtr lsn_to_xlogrecptr(char *lsn, bool *format_ok);
/*
@@ -143,6 +142,20 @@ main(int argc, char **argv)
set_progname(argv[0]);
+ /* Disallow running as root to prevent directory ownership problems */
+ if (geteuid() == 0)
+ {
+ fprintf(stderr,
+ _("%s: cannot be run as root\n"
+ "Please log in (using, e.g., \"su\") as the "
+ "(unprivileged) user that owns "
+ "the data directory.\n"
+ ),
+ progname());
+ exit(1);
+ }
+
+
while ((c = getopt_long(argc, argv, "?Vf:vmdp:", long_options, &optindex)) != -1)
{
switch (c)
@@ -1322,7 +1335,7 @@ do_master_failover(void)
log_crit(
_("unable to obtain LSN from node %i"), nodes[i].node_id
);
- log_info(
+ log_hint(
_("please check that 'shared_preload_libraries=repmgr_funcs' is set in postgresql.conf\n")
);
@@ -1479,7 +1492,7 @@ do_master_failover(void)
/* wait */
sleep(10);
- log_info(_("node %d is the best candidate for new master, attempting to follow...\n"),
+ log_notice(_("node %d is the best candidate for new master, attempting to follow...\n"),
best_candidate.node_id);
/*
@@ -1525,11 +1538,11 @@ do_master_failover(void)
node_info = get_node_info(new_master_conn, local_options.cluster_name, local_options.node);
appendPQExpBuffer(&event_details,
- _("Node %i now following new upstream node %i"),
+ _("node %i now following new upstream node %i"),
node_info.node_id,
best_candidate.node_id);
- log_info("%s\n", event_details.data);
+ log_notice("%s\n", event_details.data);
create_event_record(new_master_conn,
&local_options,
@@ -1685,7 +1698,7 @@ do_upstream_standby_failover(t_node_info upstream_node)
}
appendPQExpBuffer(&event_details,
- _("Node %i is now following upstream node %i"),
+ _("node %i is now following upstream node %i"),
node_info.node_id,
upstream_node_id);
@@ -2254,13 +2267,13 @@ check_and_create_pid_file(const char *pid_file)
t_node_info
get_node_info(PGconn *conn, char *cluster, int node_id)
{
- PGresult *res;
+ int res;
t_node_info node_info = T_NODE_INFO_INITIALIZER;
- res = get_node_record(conn, cluster, node_id);
+ res = get_node_record(conn, cluster, node_id, &node_info);
- if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ if (res == -1)
{
PQExpBufferData errmsg;
initPQExpBuffer(&errmsg);
@@ -2279,47 +2292,14 @@ get_node_info(PGconn *conn, char *cluster, int node_id)
false,
errmsg.data);
- PQclear(res);
+ PQfinish(conn);
terminate(ERR_DB_QUERY);
}
- if (!PQntuples(res)) {
+ if (res == 0)
+ {
log_warning(_("No record found record for node %i\n"), node_id);
- PQclear(res);
- node_info.node_id = NODE_NOT_FOUND;
- return node_info;
}
- node_info.node_id = atoi(PQgetvalue(res, 0, 0));
- node_info.upstream_node_id = atoi(PQgetvalue(res, 0, 1));
- strncpy(node_info.conninfo_str, PQgetvalue(res, 0, 2), MAXLEN);
- node_info.type = parse_node_type(PQgetvalue(res, 0, 3));
- strncpy(node_info.slot_name, PQgetvalue(res, 0, 4), MAXLEN);
- node_info.active = (strcmp(PQgetvalue(res, 0, 5), "t") == 0)
- ? true
- : false;
-
- PQclear(res);
-
return node_info;
}
-
-
-static t_server_type
-parse_node_type(const char *type)
-{
- if (strcmp(type, "master") == 0)
- {
- return MASTER;
- }
- else if (strcmp(type, "standby") == 0)
- {
- return STANDBY;
- }
- else if (strcmp(type, "witness") == 0)
- {
- return WITNESS;
- }
-
- return UNKNOWN;
-}
diff --git a/sql/Makefile b/sql/Makefile
index 47f5271..a63ae13 100644
--- a/sql/Makefile
+++ b/sql/Makefile
@@ -1,7 +1,7 @@
#
# Makefile
#
-# Copyright (c) 2ndQuadrant, 2010-2015
+# Copyright (c) 2ndQuadrant, 2010-2016
#
MODULE_big = repmgr_funcs
diff --git a/sql/repmgr3.0_repmgr3.1.sql b/sql/repmgr3.0_repmgr3.1.sql
new file mode 100644
index 0000000..117d039
--- /dev/null
+++ b/sql/repmgr3.0_repmgr3.1.sql
@@ -0,0 +1,35 @@
+/*
+ * Update a repmgr 3.0 installation to repmgr 3.1
+ * ----------------------------------------------
+ *
+ * The new repmgr package should be installed first. Then
+ * carry out these steps:
+ *
+ * 1. (If repmgrd is used) stop any running repmgrd instances
+ * 2. On the master node, execute the SQL statements listed below
+ * 3. (If repmgrd is used) restart repmgrd
+ */
+
+/*
+ * If the repmgr schema is not included in your repmgr
+ * user's search path, please set the search path to the name
+ * of the repmgr schema to ensure objects are installed in
+ * the correct location.
+ *
+ * The repmgr schema is "repmgr_" + the cluster name defined in
+ * 'repmgr.conf'.
+ */
+
+-- SET search_path TO 'name_of_repmgr_schema';
+
+BEGIN;
+
+-- New view "repl_show_nodes" which also displays the server's
+-- upstream node
+
+CREATE VIEW repl_show_nodes AS
+SELECT rn.id, rn.conninfo, rn.type, rn.name, rn.cluster,
+ rn.priority, rn.active, sq.name AS upstream_node_name
+FROM repl_nodes as rn LEFT JOIN repl_nodes AS sq ON sq.id=rn.upstream_node_id;
+
+COMMIT;
diff --git a/sql/repmgr_funcs.sql.in b/sql/repmgr_funcs.sql.in
index 864c1c9..cf38aa3 100644
--- a/sql/repmgr_funcs.sql.in
+++ b/sql/repmgr_funcs.sql.in
@@ -1,6 +1,6 @@
/*
* repmgr_function.sql
- * Copyright (c) 2ndQuadrant, 2010-2015
+ * Copyright (c) 2ndQuadrant, 2010-2016
*
*/
diff --git a/sql/uninstall_repmgr_funcs.sql b/sql/uninstall_repmgr_funcs.sql
index d32637e..9cdf607 100644
--- a/sql/uninstall_repmgr_funcs.sql
+++ b/sql/uninstall_repmgr_funcs.sql
@@ -1,6 +1,6 @@
/*
* uninstall_repmgr_funcs.sql
- * Copyright (c) 2ndQuadrant, 2010-2015
+ * Copyright (c) 2ndQuadrant, 2010-2016
*
*/
diff --git a/strutil.c b/strutil.c
index 1234fb1..3c565c8 100644
--- a/strutil.c
+++ b/strutil.c
@@ -1,7 +1,7 @@
/*
* strutil.c
*
- * Copyright (C) 2ndQuadrant, 2010-2015
+ * Copyright (C) 2ndQuadrant, 2010-2016
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/strutil.h b/strutil.h
index 5c041cc..25d1f34 100644
--- a/strutil.h
+++ b/strutil.h
@@ -1,6 +1,6 @@
/*
* strutil.h
- * Copyright (C) 2ndQuadrant, 2010-2015
+ * Copyright (C) 2ndQuadrant, 2010-2016
*
*
* This program is free software: you can redistribute it and/or modify
diff --git a/uninstall_repmgr.sql b/uninstall_repmgr.sql
index efe327f..b1a3d17 100644
--- a/uninstall_repmgr.sql
+++ b/uninstall_repmgr.sql
@@ -1,7 +1,7 @@
/*
* uninstall_repmgr.sql
*
- * Copyright (C) 2ndQuadrant, 2010-2015
+ * Copyright (C) 2ndQuadrant, 2010-2016
*
*/
diff --git a/version.h b/version.h
index 6affd7a..6b4d55e 100644
--- a/version.h
+++ b/version.h
@@ -1,6 +1,6 @@
#ifndef _VERSION_H_
#define _VERSION_H_
-#define REPMGR_VERSION "3.0.3"
+#define REPMGR_VERSION "3.1.1"
#endif