From d4be3f72e758285d8e69517724873aa0b949c3bb Mon Sep 17 00:00:00 2001 From: Dj Walker-Morgan Date: Tue, 21 Jan 2025 17:49:05 +0000 Subject: [PATCH] Fixes and start of CLI updates Signed-off-by: Dj Walker-Morgan --- .../pgd/5/cli/command_ref/assess/index.mdx | 7 + .../pgd/5/cli/command_ref/cluster/index.mdx | 12 + .../pgd/5/cli/command_ref/cluster/show.mdx | 28 + .../pgd/5/cli/command_ref/cluster/verify.mdx | 28 + .../5/cli/command_ref/completion/index.mdx | 7 + .../pgd/5/cli/command_ref/events/index.mdx | 11 + .../pgd/5/cli/command_ref/events/show.mdx | 26 + .../pgd/5/cli/command_ref/group/index.mdx | 14 + .../pgd/5/cli/command_ref/groups/groups.mdx | 11 + .../docs/pgd/5/cli/command_ref/index.mdx | 83 +- .../docs/pgd/5/cli/command_ref/node/index.mdx | 14 + .../pgd/5/cli/command_ref/nodes/index.mdx | 11 + .../5/cli/command_ref/pgd_check-health.mdx | 73 - .../5/cli/command_ref/pgd_create-proxy.mdx | 55 - .../5/cli/command_ref/pgd_delete-proxy.mdx | 35 - .../cli/command_ref/pgd_set-group-options.mdx | 67 - .../cli/command_ref/pgd_set-node-options.mdx | 64 - .../cli/command_ref/pgd_set-proxy-options.mdx | 79 - .../5/cli/command_ref/pgd_show-clockskew.mdx | 63 - .../pgd/5/cli/command_ref/pgd_show-events.mdx | 72 - .../pgd/5/cli/command_ref/pgd_show-groups.mdx | 46 - .../pgd/5/cli/command_ref/pgd_show-nodes.mdx | 79 - .../5/cli/command_ref/pgd_show-proxies.mdx | 38 - .../pgd/5/cli/command_ref/pgd_show-raft.mdx | 53 - .../5/cli/command_ref/pgd_show-replslots.mdx | 144 - .../command_ref/pgd_show-subscriptions.mdx | 101 - .../5/cli/command_ref/pgd_show-version.mdx | 50 - .../pgd/5/cli/command_ref/pgd_switchover.mdx | 61 - .../5/cli/command_ref/pgd_verify-cluster.mdx | 54 - .../5/cli/command_ref/pgd_verify-settings.mdx | 203 - product_docs/docs/pgd/beforeworkdiff.diff | 4163 ----------------- 31 files changed, 224 insertions(+), 5528 deletions(-) create mode 100644 product_docs/docs/pgd/5/cli/command_ref/assess/index.mdx create mode 100644 product_docs/docs/pgd/5/cli/command_ref/cluster/index.mdx create mode 100644 product_docs/docs/pgd/5/cli/command_ref/cluster/show.mdx create mode 100644 product_docs/docs/pgd/5/cli/command_ref/cluster/verify.mdx create mode 100644 product_docs/docs/pgd/5/cli/command_ref/completion/index.mdx create mode 100644 product_docs/docs/pgd/5/cli/command_ref/events/index.mdx create mode 100644 product_docs/docs/pgd/5/cli/command_ref/events/show.mdx create mode 100644 product_docs/docs/pgd/5/cli/command_ref/group/index.mdx create mode 100644 product_docs/docs/pgd/5/cli/command_ref/groups/groups.mdx create mode 100644 product_docs/docs/pgd/5/cli/command_ref/node/index.mdx create mode 100644 product_docs/docs/pgd/5/cli/command_ref/nodes/index.mdx delete mode 100644 product_docs/docs/pgd/5/cli/command_ref/pgd_check-health.mdx delete mode 100644 product_docs/docs/pgd/5/cli/command_ref/pgd_create-proxy.mdx delete mode 100644 product_docs/docs/pgd/5/cli/command_ref/pgd_delete-proxy.mdx delete mode 100644 product_docs/docs/pgd/5/cli/command_ref/pgd_set-group-options.mdx delete mode 100644 product_docs/docs/pgd/5/cli/command_ref/pgd_set-node-options.mdx delete mode 100644 product_docs/docs/pgd/5/cli/command_ref/pgd_set-proxy-options.mdx delete mode 100644 product_docs/docs/pgd/5/cli/command_ref/pgd_show-clockskew.mdx delete mode 100644 product_docs/docs/pgd/5/cli/command_ref/pgd_show-events.mdx delete mode 100644 product_docs/docs/pgd/5/cli/command_ref/pgd_show-groups.mdx delete mode 100644 product_docs/docs/pgd/5/cli/command_ref/pgd_show-nodes.mdx delete mode 100644 
product_docs/docs/pgd/5/cli/command_ref/pgd_show-proxies.mdx delete mode 100644 product_docs/docs/pgd/5/cli/command_ref/pgd_show-raft.mdx delete mode 100644 product_docs/docs/pgd/5/cli/command_ref/pgd_show-replslots.mdx delete mode 100644 product_docs/docs/pgd/5/cli/command_ref/pgd_show-subscriptions.mdx delete mode 100644 product_docs/docs/pgd/5/cli/command_ref/pgd_show-version.mdx delete mode 100644 product_docs/docs/pgd/5/cli/command_ref/pgd_switchover.mdx delete mode 100644 product_docs/docs/pgd/5/cli/command_ref/pgd_verify-cluster.mdx delete mode 100644 product_docs/docs/pgd/5/cli/command_ref/pgd_verify-settings.mdx delete mode 100644 product_docs/docs/pgd/beforeworkdiff.diff
diff --git a/product_docs/docs/pgd/5/cli/command_ref/assess/index.mdx b/product_docs/docs/pgd/5/cli/command_ref/assess/index.mdx
new file mode 100644
index 00000000000..daba48e5da7
--- /dev/null
+++ b/product_docs/docs/pgd/5/cli/command_ref/assess/index.mdx
@@ -0,0 +1,7 @@
+---
+title: pgd assess
+navTitle: Assess
+---
+
+The `pgd assess` commands are used to assess the suitability of a Postgres server instance for migration to an EDB Postgres Distributed cluster.
+
diff --git a/product_docs/docs/pgd/5/cli/command_ref/cluster/index.mdx b/product_docs/docs/pgd/5/cli/command_ref/cluster/index.mdx
new file mode 100644
index 00000000000..86798da5766
--- /dev/null
+++ b/product_docs/docs/pgd/5/cli/command_ref/cluster/index.mdx
@@ -0,0 +1,12 @@
+---
+title: pgd cluster
+navTitle: Cluster
+---
+
+The `pgd cluster` commands are used to manage the EDB Postgres Distributed cluster.
+
+## Subcommands
+
+- [show](show): Show cluster-level information.
+- [verify](verify): Verify cluster-level information.
+
diff --git a/product_docs/docs/pgd/5/cli/command_ref/cluster/show.mdx b/product_docs/docs/pgd/5/cli/command_ref/cluster/show.mdx
new file mode 100644
index 00000000000..e3bc21b583a
--- /dev/null
+++ b/product_docs/docs/pgd/5/cli/command_ref/cluster/show.mdx
@@ -0,0 +1,28 @@
+---
+title: pgd cluster show
+navTitle: Show
+---
+
+## Synopsis
+
+The `pgd cluster show` command is used to display cluster-level information for the EDB Postgres Distributed cluster.
+
+## Syntax
+
+```plaintext
+pgd cluster show [OPTIONS]
+```
+
+## Options
+
+The following table lists the options available for the `pgd cluster show` command:
+
+| Short | Long          | Description                                 |
+|-------|---------------|---------------------------------------------|
+|       | --clock-drift | Only show detailed clock drift information. |
+|       | --summary     | Only show cluster summary information.      |
+|       | --health      | Only show cluster health information.       |
+
+Only one of the above options can be specified at a time.
+
+## Examples
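+
+As an illustrative sketch only, the following invocations assume that connection details are supplied through the `--dsn` global option or the default configuration file; output is omitted because it depends on the cluster.
+
+```bash
+# Show the full cluster-level report
+pgd cluster show
+
+# Show only cluster health information
+pgd cluster show --health
+```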
diff --git a/product_docs/docs/pgd/5/cli/command_ref/cluster/verify.mdx b/product_docs/docs/pgd/5/cli/command_ref/cluster/verify.mdx
new file mode 100644
index 00000000000..4ab5b79597b
--- /dev/null
+++ b/product_docs/docs/pgd/5/cli/command_ref/cluster/verify.mdx
@@ -0,0 +1,28 @@
+---
+title: pgd cluster verify
+navTitle: Verify
+---
+
+## Synopsis
+
+The `pgd cluster verify` command is used to verify the configuration of an EDB Postgres Distributed cluster.
+
+## Syntax
+
+```plaintext
+pgd cluster verify [OPTIONS]
+```
+
+## Options
+
+The following table lists the options available for the `pgd cluster verify` command:
+
+| Short | Long       | Description                              |
+|-------|------------|------------------------------------------|
+|       | --settings | Verify Postgres settings in the cluster. |
+|       | --arch     | Verify the cluster architecture.         |
+|       | --verbose  | Display verbose output.                  |
+
+With no options set, both settings and architecture are verified by default, and output is not verbose.
+
+## Examples
diff --git a/product_docs/docs/pgd/5/cli/command_ref/completion/index.mdx b/product_docs/docs/pgd/5/cli/command_ref/completion/index.mdx
new file mode 100644
index 00000000000..1b536a7270f
--- /dev/null
+++ b/product_docs/docs/pgd/5/cli/command_ref/completion/index.mdx
@@ -0,0 +1,7 @@
+---
+title: pgd completion
+navTitle: Completion
+---
+
+The `pgd completion` commands are used to generate shell completion scripts for the EDB Postgres Distributed CLI.
+
diff --git a/product_docs/docs/pgd/5/cli/command_ref/events/index.mdx b/product_docs/docs/pgd/5/cli/command_ref/events/index.mdx
new file mode 100644
index 00000000000..9880dab0192
--- /dev/null
+++ b/product_docs/docs/pgd/5/cli/command_ref/events/index.mdx
@@ -0,0 +1,11 @@
+---
+title: pgd events
+navTitle: Events
+---
+
+The `pgd events` commands are used to display events in the EDB Postgres Distributed cluster.
+
+## Subcommands
+
+- [show](show): Show events.
+
diff --git a/product_docs/docs/pgd/5/cli/command_ref/events/show.mdx b/product_docs/docs/pgd/5/cli/command_ref/events/show.mdx
new file mode 100644
index 00000000000..71300d226ec
--- /dev/null
+++ b/product_docs/docs/pgd/5/cli/command_ref/events/show.mdx
@@ -0,0 +1,26 @@
+---
+title: pgd events show
+navTitle: Show
+---
+
+## Synopsis
+
+The `pgd events show` command is used to display events in the EDB Postgres Distributed cluster. With no additional flags, the command displays the 20 most recent events for all nodes and groups.
+
+## Syntax
+
+```plaintext
+pgd events show [OPTIONS]
+```
+
+## Options
+
+The following table lists the options available for the `pgd events show` command:
+
+| Short       | Long                | Description                                             |
+|-------------|---------------------|---------------------------------------------------------|
+|             | --node _nodename_   | Only show events for the node with the specified name.  |
+|             | --group _groupname_ | Only show events for the group with the specified name. |
+| -n _number_ | --number _number_   | Show the specified number of events. Defaults to 20.    |
+
+## Examples
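+
+As an illustrative sketch only, these invocations assume that connection details come from the `--dsn` global option or the configuration file, and that a group named `group_a` exists in the cluster; output is omitted because it depends on the cluster.
+
+```bash
+# Show the 10 most recent events across all nodes and groups
+pgd events show -n 10
+
+# Show only events for the group named group_a
+pgd events show --group group_a
+```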
diff --git a/product_docs/docs/pgd/5/cli/command_ref/group/index.mdx b/product_docs/docs/pgd/5/cli/command_ref/group/index.mdx
new file mode 100644
index 00000000000..83dd0aa6d73
--- /dev/null
+++ b/product_docs/docs/pgd/5/cli/command_ref/group/index.mdx
@@ -0,0 +1,14 @@
+---
+title: pgd group
+navTitle: Group
+---
+
+The `pgd group` commands are used to manage the groups in the EDB Postgres Distributed cluster.
+
+## Subcommands
+
+- [show](pgd_group_show): Show group-level information.
+- [set-option](pgd_group_set-option): Set group-level options.
+- [get-option](pgd_group_get-option): Get group-level options.
+- [set-leader](pgd_group_set-leader): Set the leader of a group (perform a switchover).
+
diff --git a/product_docs/docs/pgd/5/cli/command_ref/groups/groups.mdx b/product_docs/docs/pgd/5/cli/command_ref/groups/groups.mdx
new file mode 100644
index 00000000000..9ee14320cbb
--- /dev/null
+++ b/product_docs/docs/pgd/5/cli/command_ref/groups/groups.mdx
@@ -0,0 +1,11 @@
+---
+title: pgd groups
+navTitle: Groups
+---
+
+The `pgd groups` commands are used to display the groups in the EDB Postgres Distributed cluster.
+
+## Subcommands
+
+- [list](pgd_groups_list): List groups.
+
diff --git a/product_docs/docs/pgd/5/cli/command_ref/index.mdx b/product_docs/docs/pgd/5/cli/command_ref/index.mdx
index 87e57279b7d..364dd495570 100644
--- a/product_docs/docs/pgd/5/cli/command_ref/index.mdx
+++ b/product_docs/docs/pgd/5/cli/command_ref/index.mdx
@@ -13,35 +13,62 @@ manage your EDB Postgres Distributed cluster.
 It allows you to run commands against EDB Postgres Distributed clusters. You can use it to inspect and manage cluster resources.
+## Commands
+
+- [cluster](cluster): Cluster-level commands for managing the cluster.
+  - [show](cluster/show): Show cluster-level information.
+  - [verify](cluster/verify): Verify cluster-level information.
+- [group](group): Group-level commands for managing groups.
+  - [show](group/show): Show group-level information.
+  - [set-option](group/set-option): Set group-level options.
+  - [get-option](group/get-option): Get group-level options.
+  - [set-leader](group/set-leader): Set the leader of a group (perform a switchover).
+- [groups](groups): Group-related commands for listing groups.
+  - [list](groups/list): List groups.
+- [node](node): Node-level commands for managing nodes.
+  - [show](node/show): Show node-level information.
+  - [set-option](node/set-option): Set node-level options.
+  - [get-option](node/get-option): Get node-level options.
+  - [upgrade-postgres](node/upgrade-postgres): Perform a major version upgrade of a PGD Postgres node.
+- [nodes](nodes): Node-related commands for listing nodes.
+  - [list](nodes/list): List nodes.
+- [events](events): Event log commands for viewing events.
+  - [show](events/show): Show events.
+- [replication](replication): Replication-related commands for managing replication.
+  - [show](replication/show): Show replication information.
+- [raft](raft): Raft-related commands for managing Raft consensus.
+  - [show](raft/show): Show information about Raft state.
+- [commit-scope](commit-scope): Commit-scope-related commands for managing PGD commit scopes.
+  - [show](commit-scope/show): Show information about a commit scope.
+  - [create](commit-scope/create): Create a commit scope.
+  - [update](commit-scope/update): Update a commit scope.
+  - [drop](commit-scope/drop): Drop a commit scope.
+- [assess](assess): Assess a Postgres server's PGD compatibility.
+- [completion](completion): Generate shell completion scripts.
+
+
+
 ## Global Options
 All commands accept the following global options:
-| Short | Long             | Description |
-|-------|---------------|-----------------------------------------------------------------------------------------------|
-| | --dsn | Database connection string<br/>
For example "host=bdr-a1 port=5432 dbname=bdrdb user=postgres" | -| -f | --config-file | Name/Path to config file.
This is ignored if --dsn flag is present
Default "/etc/edb/pgd-cli/pgd-cli-config.yml" | -| -h | --help | Help for pgd - will show specific help for any command used | -| -L | --log-level | Logging level: debug, info, warn, error (default "error") | -| -o | --output | Output format: json, yaml | - -## See also - -- [check-health](pgd_check-health) -- [create-proxy](pgd_create-proxy) -- [delete-proxy](pgd_delete-proxy) -- [set-group-options](pgd_set-group-options) -- [set-node-options](pgd_set-node-options) -- [set-proxy-options](pgd_set-proxy-options) -- [show-clockskew](pgd_show-clockskew) -- [show-events](pgd_show-events) -- [show-groups](pgd_show-groups) -- [show-nodes](pgd_show-nodes) -- [show-proxies](pgd_show-proxies) -- [show-raft](pgd_show-raft) -- [show-replslots](pgd_show-replslots) -- [show-subscriptions](pgd_show-subscriptions) -- [show-version](pgd_show-version) -- [switchover](pgd_switchover) -- [verify-cluster](pgd_verify-cluster) -- [verify-settings](pgd_verify-settings) +| Short | Long             | Description | +|-------|------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------| +| -f | --config-file | Name/Path to config file.
This is ignored if --dsn flag is present
Default "/etc/edb/pgd-cli/pgd-cli-config.yml" | +| | --dsn | Database connection string
For example "host=bdr-a1 port=5432 dbname=bdrdb user=postgres" | +| -h | --help | Help for pgd - will show specific help for any command used | +| -o | --output | Output format: `json`, `ascii`, `psql`, `modern`, `markdown-table`, `default` (see [Output formats](#output-formats)) | +| | --prefer-tabular-output | Prefer tabular output over non-tabular output, even for commands which normally use a report format. Default is false. | + +## Output formats + +| Format | Description | +|----------------|---------------------------------------------------------------------| +| json | JSON format - Output as a JSON document, non-tabular | +| ascii | ASCII format - Output as an ASCII table with boxes | +| psql | PSQL format - Output as an ASCII table in the style of PSQL | +| modern | Modern format - Output as a table using box characters | +| markdown-table | Markdown table format - Output as a markdown compatible ASCII table | +| default | Default format - Output as ASCII table with no boxes | + diff --git a/product_docs/docs/pgd/5/cli/command_ref/node/index.mdx b/product_docs/docs/pgd/5/cli/command_ref/node/index.mdx new file mode 100644 index 00000000000..da3ddeff5eb --- /dev/null +++ b/product_docs/docs/pgd/5/cli/command_ref/node/index.mdx @@ -0,0 +1,14 @@ +--- +title: pgd node +navTitle: Node +--- + +The `pgd node` commands are used to manage the nodes in the EDB Postgres Distributed cluster. + +## Subcommands + +- [show](pgd_node_show): Show node-level information. +- [set-option](pgd_node_set-option): Set node-level options. +- [get-option](pgd_node_get-option): Get node-level options. +- [upgrade-postgres](pgd_node_upgrade-postgres): Perform a major version upgrade of a PGD Postgres node. + diff --git a/product_docs/docs/pgd/5/cli/command_ref/nodes/index.mdx b/product_docs/docs/pgd/5/cli/command_ref/nodes/index.mdx new file mode 100644 index 00000000000..48b574c6fd1 --- /dev/null +++ b/product_docs/docs/pgd/5/cli/command_ref/nodes/index.mdx @@ -0,0 +1,11 @@ +--- +title: pgd nodes +navTitle: Nodes +--- + +The `pgd nodes` commands are used to display the nodes in the EDB Postgres Distributed cluster. + +## Subcommands + +- [list](pgd_nodes_list): List nodes. + diff --git a/product_docs/docs/pgd/5/cli/command_ref/pgd_check-health.mdx b/product_docs/docs/pgd/5/cli/command_ref/pgd_check-health.mdx deleted file mode 100644 index 67bae92170f..00000000000 --- a/product_docs/docs/pgd/5/cli/command_ref/pgd_check-health.mdx +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: check-health -deepToC: true ---- - -Checks the health of the EDB Postgres Distributed cluster. - -### Synopsis - -Performs various checks such as if all nodes are accessible and all -replication slots are working. - -Please note that the current implementation of clock skew may return an -inaccurate skew value if the cluster is under high load while running this -command or has large number of nodes in it. - -```sh -pgd check-health [flags] -``` - -### Options - -No specific command options. See [global options](../command_ref#global-options) for global options. - -### Examples - -#### Checking health with a node down - -In this example, we have a 3 node cluster, bdr-a1 and bdr-c1 are up, bdr-b1 is down. 
- -```bash -$ pgd check-health -__OUTPUT__ -Check Status Message ------ ------ ------- -ClockSkew Critical Clockskew cannot be determined for at least 1 BDR node pair -Connection Critical The node bdr-b1 is not accessible -Raft Warning There is at least 1 node that is not accessible -Replslots Critical There is at least 1 BDR replication slot which is inactive -Version Warning There is at least 1 node that is not accessible -``` - -#### Checking health with clock skew - -In this example there is a 3 node cluster with all nodes up but the system clocks are not in sync. - -```bash -$ pgd check-health -__OUTPUT__ -Check Status Message ------ ------ ------- -ClockSkew Warning At least 1 BDR node pair has clockskew greater than 2 seconds -Connection Ok All BDR nodes are accessible -Raft Ok Raft Consensus is working correctly -Replslots Ok All BDR replication slots are working correctly -Version Ok All nodes are running same BDR versions -``` - -#### Checking health with all nodes working correctly - -In this example, there is a 3 node cluster with all nodes are up and all checks are Ok. - -```bash -$ pgd check-health -__OUTPUT__ -Check Status Message ------ ------ ------- -ClockSkew Ok All BDR node pairs have clockskew within permissible limit -Connection Ok All BDR nodes are accessible -Raft Ok Raft Consensus is working correctly -Replslots Ok All BDR replication slots are working correctly -Version Ok All nodes are running same BDR versions -``` diff --git a/product_docs/docs/pgd/5/cli/command_ref/pgd_create-proxy.mdx b/product_docs/docs/pgd/5/cli/command_ref/pgd_create-proxy.mdx deleted file mode 100644 index 4676f795297..00000000000 --- a/product_docs/docs/pgd/5/cli/command_ref/pgd_create-proxy.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: create-proxy -deepToC: true ---- - -Creates proxy in the EDB Postgres Distributed cluster. - -### Synopsis - -Creates proxy in the EDB Postgres Distributed cluster and attaches it to the -given group. The proxy name must be unique across the cluster and match with -the name given in the corresponding proxy config file. - -Use the proxy mode to route connections to Write Leader (default), Read Nodes -(read-only), or both (any). Proxy listens on 'listen_port' for Write Leader -connections while on 'read_listen_port' for Read Nodes connections. - - -```sh -pgd create-proxy [flags] -``` - -### Options - -| Flag | Description | -|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `--group-name` | Group name | -| `--proxy-mode` | Proxy mode (default, read-only, any); proxy will route connections to -
default - Write Leader
read-only - Read Nodes
any - both Write Leader and Read Nodes (default "default") | -| `--proxy-name` | Proxy name | - -See [global options](../command_ref#global-options) for global options. - -### Examples - -#### Attaching in default mode. - -In this example, we attach a new proxy called proxy-a1 to group group_a, with 'default' mode. - -```bash -$ pgd create-proxy --proxy-name proxy-a1 --group-name group_a -__OUTPUT__ -proxy created successfully -``` - -#### Attaching in any mode. - -In this example, we attach anew proxy called proxy-b1 to group group_b, with 'any' mode. - -```bash -$ pgd create-proxy --proxy-name proxy-b1 --group-name group_b --proxy-mode any -__OUTPUT__ -proxy created successfully -``` - - diff --git a/product_docs/docs/pgd/5/cli/command_ref/pgd_delete-proxy.mdx b/product_docs/docs/pgd/5/cli/command_ref/pgd_delete-proxy.mdx deleted file mode 100644 index 6edcc659c4e..00000000000 --- a/product_docs/docs/pgd/5/cli/command_ref/pgd_delete-proxy.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: delete-proxy -deepToC: true ---- - -Deletes a proxy from the EDB Postgres Distributed cluster. - -### Synopsis - -Deletes a proxy from the EDB Postgres Distributed cluster. - -```sh -pgd delete-proxy [flags] -``` - -### Options - -| Flag | Description | -|----------------|-------------| -| `--proxy-name` | proxy name | - -See [global options](../command_ref#global-options) for global options. - -### Examples - -#### Deleting a proxy - -```bash -$ pgd delete-proxy --proxy-name proxy-a1 -__OUTPUT__ -proxy deleted successfully -``` - - - diff --git a/product_docs/docs/pgd/5/cli/command_ref/pgd_set-group-options.mdx b/product_docs/docs/pgd/5/cli/command_ref/pgd_set-group-options.mdx deleted file mode 100644 index c5edd679004..00000000000 --- a/product_docs/docs/pgd/5/cli/command_ref/pgd_set-group-options.mdx +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: set-group-options -deepToC: true ---- - -Sets group options such as `enable_raft`, `enable_proxy_routing`, and `location`. - -### Synopsis - -You can set the following group options with this command: - -- `enable_raft` -- `enable_proxy_routing` -- `location` -- `route_writer_max_lag` -- `route_reader_max_lag` - -Both `enable_raft` and `enable_proxy_routing` must be true if proxy is -attached to the group. - -Use `pgd show-groups -o json` to view option values for each group. - -```sh -pgd set-group-options [flags] -``` - -### Options - -| Flag | Description | -| --- | --- | -| `--group-name` | group name | -| `--option` | option in name=value format | - -See [global options](../command_ref#global-options) for global options. - -### Examples - -#### Setting group options with multiple options - -In this example, we use comma separated multiple options. Spaces are not allowed in the option values. - -```bash -$ pgd set-group-options --group-name bdrgroup --option enable_proxy_routing=true,route_writer_max_lag=1000000 -__OUTPUT__ -group options updated successfully -``` - -#### Setting group options with multiple option flags - -In this example, we use multiple option flags. Spaces are not allowed in the option values. - -```bash -$ pgd set-group-options --group-name bdrgroup --option enable_proxy_routing=true --option route_writer_max_lag=1000000 -__OUTPUT__ -group options updated successfully -``` - -#### Setting group options with double quotes - -In this example, we use double quotes around options if the option value has spaces or special characters. 
- -```bash -$ pgd set-group-options --group-name bdrgroup --option "location = mumbai" --option "route_writer_max_lag = 1000000" -__OUTPUT__ -group options updated successfully -``` - diff --git a/product_docs/docs/pgd/5/cli/command_ref/pgd_set-node-options.mdx b/product_docs/docs/pgd/5/cli/command_ref/pgd_set-node-options.mdx deleted file mode 100644 index 176d1c62942..00000000000 --- a/product_docs/docs/pgd/5/cli/command_ref/pgd_set-node-options.mdx +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: set-node-options -deepToC: true ---- - -Sets node options such as `route_fence`, `route_priority`, and `route_writes`. - -### Synopsis - -You can set the following node options with this command: - -- `route_dsn` -- `route_fence` -- `route_priority` -- `route_writes` -- `route_reads` - -Use `pgd show-nodes -o json` to view option values for each node. - - -```sh -pgd set-node-options [flags] -``` - -### Options - -| Flag | Description | -| --- | --- | -| `--node-name` | node name | -| `--option` | option in name=value format | - -See [global options](../command_ref#global-options) for global options. - -### Examples - -#### Setting node options with multiple options - -In this example, we use comma separated multiple options. Spaces are not allowed in the option values. - -```bash -$ pgd set-node-options --node-name bdr-a1 --option route_priority=100,route_fence=true -__OUTPUT__ -node options updated successfully -``` - -#### Setting node options with multiple option flags - -In this example, we use multiple option flags. Spaces are not allowed in the option values. - -```bash -$ pgd set-node-options --node-name bdr-a1 --option route_priority=100 --option route_fence=true -__OUTPUT__ -node options updated successfully -``` - -#### Setting node options with double quotes - -In this example, we use double quotes around options if the option value has spaces or special characters. - -```bash -$ pgd set-node-options --node-name bdr-a1 --option "route_priority = 100" --option "route_fence = true" -__OUTPUT__ -node options updated successfully -``` diff --git a/product_docs/docs/pgd/5/cli/command_ref/pgd_set-proxy-options.mdx b/product_docs/docs/pgd/5/cli/command_ref/pgd_set-proxy-options.mdx deleted file mode 100644 index e78d982b1ac..00000000000 --- a/product_docs/docs/pgd/5/cli/command_ref/pgd_set-proxy-options.mdx +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: set-proxy-options -deepToC: true ---- - -Sets proxy options such as `listen_address`, `listen_port`, and `max_client_conn`. - -### Synopsis - -You can set the following proxy options with this command: - -- `listen_address` -- `listen_port` -- `max_client_conn` -- `max_server_conn` -- `server_conn_keepalive` -- `server_conn_timeout` -- `consensus_grace_period` -- `read_listen_address` -- `read_listen_port` -- `read_max_client_conn` -- `read_max_server_conn` -- `read_server_conn_keepalive` -- `read_server_conn_timeout` -- `read_consensus_grace_period` - -After updating any of these options, restart proxy. - -Set `listen_port` to non-zero value to route traffic to the Write Leader and -set `read_listen_port` to non-zero value to route traffic to Read nodes. -Setting it to zero will disable the respective routing. - -Use `pgd show-proxies -o json` to view option values for each proxy. - - -```sh -pgd set-proxy-options [flags] -``` -### Options - -| Flag | Description | -| --- | --- | -| `--proxy-name` | proxy name | -| `--option` | option in name=value format | - -See [global options](../command_ref#global-options) for global options. 
- -### Examples - -#### Setting proxy options with multiple options - -In this example, we use comma separated multiple options. Spaces are not allowed in the option values. - -```bash -$ pgd set-proxy-options --proxy-name proxy-a1 --option listen_address=0.0.0.0,listen_port=6432 -__OUTPUT__ -proxy options updated successfully, please restart proxy service -``` - -#### Setting proxy options with multiple option flags - -In this example, we use multiple option flags. Spaces are not allowed in the option values. - -```bash -$ pgd set-proxy-options --proxy-name proxy-a1 --option listen_address=0.0.0.0 --option listen_port=0 -__OUTPUT__ -proxy options updated successfully, please restart proxy service -``` - -#### Setting proxy options with double quotes - -In this example, we use double quotes around options if the option value has spaces or special characters. - -```bash -$ pgd set-proxy-options --proxy-name proxy-a1 --option "listen_address = 0.0.0.0" --option "consensus_grace_period=1h 30m 5s" -__OUTPUT__ -proxy options updated successfully, please restart proxy service -``` - diff --git a/product_docs/docs/pgd/5/cli/command_ref/pgd_show-clockskew.mdx b/product_docs/docs/pgd/5/cli/command_ref/pgd_show-clockskew.mdx deleted file mode 100644 index f670da21f43..00000000000 --- a/product_docs/docs/pgd/5/cli/command_ref/pgd_show-clockskew.mdx +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: show-clockskew -deepToC: true ---- - -Shows the status of clock skew between each BDR node pair. - -### Synopsis - -Shows the status of clock skew between each BDR node pair in the cluster. - -Please note that the current implementation of clock skew may return an -inaccurate skew value if the cluster is under high load while running this -command or has large number of nodes in it. - -| Symbol | Meaning | -|--------|------------------------------| -| `*` | ok | -| `~` | warning (skew > 2 seconds) | -| `!` | critical ( skew > 5 seconds) | -| `x` | down / unreachable | -| `?` | unknown | -| `-` | n/a | - - -```sh -pgd show-clockskew [flags] -``` - -### Options - -No specific command options. See [global options](../command_ref#global-options) for global options. - -### Examples - -#### Show clock skew with a node down - -In this example, there is a 3 node cluster, bdr-a1 and bdr-c1 are up, bdr-b1 is down. - -```bash -$ pgd show-clockskew -__OUTPUT__ -Node bdr-a1 bdr-b1 bdr-c1 Current Time ----- ------ ------ ------ ------------ -bdr-a1 * ? * 2022-03-30 07:02:21.334472 -bdr-b1 x * x x -bdr-c1 * ? * 2022-03-30 07:02:21.186809 -``` - -#### Show clock skew with all nodes working correctly - -In this example, there is a 3 node cluster with all nodes are up and all clocks are in sync. - -```bash -$ pgd show-clockskew -__OUTPUT__ -Node bdr-a1 bdr-b1 bdr-c1 Current Time ----- ------ ------ ------ ------------ -bdr-a1 * * * 2022-03-30 07:04:54.147017 -bdr-b1 * * * 2022-03-30 07:04:54.340543 -bdr-c1 * * * 2022-03-30 07:04:53.90451 -``` - diff --git a/product_docs/docs/pgd/5/cli/command_ref/pgd_show-events.mdx b/product_docs/docs/pgd/5/cli/command_ref/pgd_show-events.mdx deleted file mode 100644 index 29f743411b3..00000000000 --- a/product_docs/docs/pgd/5/cli/command_ref/pgd_show-events.mdx +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: show-events -deepToC: true ---- - -Shows events such as background worker errors and node membership changes. - -### Synopsis - -Shows events such as background worker errors and node membership changes. -Output is sorted by Time column in descending order. 
Message column is -truncated after a few lines. To view complete message use json output format -(`-o json`). - -For more details on each node state, see show-nodes command help -(`pgd show-nodes -h`). - -```sh -pgd show-events [flags] -``` - -### Node States -| State | Description | -|--------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| NONE | Node state is unset when the worker starts, expected to be set quickly to the current known state. | -| CREATED | bdr.create_node() has been executed, but the node isn't a member of any EDB Postgres Distributed cluster yet. | -| JOIN_START | bdr.join_node_group() begins to join the local node to an existing EDB Postgres Distributed cluster. | -| JOINING | The node join has started and is currently at the initial sync phase, creating the schema and data on the node. | -| CATCHUP | Initial sync phase is complete; now the join is at the last step of retrieving and applying transactions that were performed on the upstream peer node since the join started. | -| STANDBY | Node join has finished, but not yet started to broadcast changes. All joins spend some time in this state, but if defined as a Logical Standby, the node will continue in this state. | -| PROMOTE | Node was a logical standby and we just called bdr.promote_node to move the node state to ACTIVE. These two PROMOTE states have to be coherent to the fact, that only one node can be with a state higher than STANDBY but lower than ACTIVE. | -| PROMOTING | Promotion from logical standby to full BDR node is in progress. | -| ACTIVE | The node is a full BDR node and is currently ACTIVE. This is the most common node status. | -| PART_START | Node was ACTIVE or STANDBY and we just called bdr.part_node to remove the node from the EDB Postgres Distributed cluster. | -| PARTING | Node disconnects from other nodes and plays no further part in consensus or replication. | -| PART_CATCHUP | Non-parting nodes synchronize any missing data from the recently parted node. | -| PARTED | Node parting operation is now complete on all nodes. | - - -Only one node at a time can be in either of the states PROMOTE or PROMOTING. -STANDBY indicates that the node is in a read-only state. - -### Options - -| Flag | Description | -| --- | --- | -| `-n, --lines` | show top n lines | - -See [global options](../command_ref#global-options) for global options. - -### Examples - -#### Showing top 10 events - -In this example, we show top 10 events on a three node cluster. 
- -```bash -$ pgd show-events --lines 10 -__OUTPUT__ -Time Observer Node Subject Node Source Type Subtype Message ----- ------------- ------------ ------ ---- ------- ------- -2023-03-23 05:38:25.243257+00 witness-a1 witness-a1 consensus RAFT STATE_CHANGE RAFT_LEADER -2023-03-23 05:38:25.23815+00 witness-a1 witness-a1 consensus RAFT STATE_CHANGE RAFT_CANDIDATE -2023-03-23 05:38:21.197974+00 bdr-a1 bdr-a1 consensus RAFT STATE_CHANGE RAFT_FOLLOWER -2023-03-23 05:38:21.197107+00 witness-a1 witness-a1 consensus RAFT STATE_CHANGE RAFT_FOLLOWER -2023-03-23 05:38:21.169781+00 bdr-a2 bdr-a2 consensus RAFT STATE_CHANGE RAFT_FOLLOWER -2023-03-23 05:38:17.949669+00 witness-a1 bdr-a1 consensus NODE STATE_CHANGE ACTIVE -2023-03-23 05:38:17.949544+00 bdr-a1 bdr-a1 consensus NODE STATE_CHANGE ACTIVE -2023-03-23 05:38:17.946857+00 bdr-a2 bdr-a1 consensus NODE STATE_CHANGE ACTIVE -2023-03-23 05:38:17.91628+00 bdr-a1 bdr-a2 receiver WORKER ERROR pglogical worker received fast finish request, exiting -2023-03-23 05:38:17.915236+00 witness-a1 bdr-a1 consensus NODE STATE_CHANGE PROMOTING -``` diff --git a/product_docs/docs/pgd/5/cli/command_ref/pgd_show-groups.mdx b/product_docs/docs/pgd/5/cli/command_ref/pgd_show-groups.mdx deleted file mode 100644 index 8970aeda75c..00000000000 --- a/product_docs/docs/pgd/5/cli/command_ref/pgd_show-groups.mdx +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: show-groups -deepToC: true ---- - -Shows all groups in the EDB Postgres Distributed cluster and their summary. - -### Synopsis - -Shows all groups in the EDB Postgres Distributed cluster and their summary, including type, parent group, location, Raft and Routing status, Raft leader, and write leader. - -In some cases when Raft isn't working properly or the group Raft leader isn't present, this command might show stale or incorrect write leader for that group. - - -```sh -pgd show-groups [flags] -``` - -### Options - -No specific command options. See [global options](../command_ref#global-options) for global options. - -### Examples - - -#### Show all groups in the cluster - -In this example, there is a 4 group cluster, 3 data groups and one subscriber-only group. `bdrgroup` is the global group. `group_a`, `group_b` and `group_c` are data groups. `group_so` is the subscriber-only group. - -Note: -1. For write leader election both Raft and Routing options should be true for that group. -2. Raft is always true for global group. - -```bash -$ pgd show-groups -__OUTPUT__ -Group Group ID Type Parent Group Location Raft Routing Raft Leader Write Leader ------ -------- ---- ------------ -------- ---- ------- ----------- ------------ -bdrgroup 1360502012 global world true false bdr-a2 -group_a 3618712053 data bdrgroup a true true bdr-a2 bdr-a1 -group_b 402614658 data bdrgroup b true true bdr-b1 bdr-b1 -group_c 2808307099 data bdrgroup c false false -group_so 2123208041 subscriber-only bdrgroup c false false -``` - - diff --git a/product_docs/docs/pgd/5/cli/command_ref/pgd_show-nodes.mdx b/product_docs/docs/pgd/5/cli/command_ref/pgd_show-nodes.mdx deleted file mode 100644 index c4a4f7d08f2..00000000000 --- a/product_docs/docs/pgd/5/cli/command_ref/pgd_show-nodes.mdx +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: show-nodes -deepToC: true ---- - -Shows all nodes in the EDB Postgres Distributed cluster and their summary. - -### Synopsis - -Shows all nodes in the EDB Postgres Distributed cluster and their summary, -including name, node id, group, and current/target state. 
- -#### Node States -| State | Description | -|--------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| NONE | Node state is unset when the worker starts, expected to be set quickly to the current known state. | -| CREATED | bdr.create_node() has been executed, but the node isn't a member of any EDB Postgres Distributed cluster yet. | -| JOIN_START | bdr.join_node_group() begins to join the local node to an existing EDB Postgres Distributed cluster. | -| JOINING | The node join has started and is currently at the initial sync phase, creating the schema and data on the node. | -| CATCHUP | Initial sync phase is complete; now the join is at the last step of retrieving and applying transactions that were performed on the upstream peer node since the join started. | -| STANDBY | Node join has finished, but not yet started to broadcast changes. All joins spend some time in this state, but if defined as a Logical Standby, the node will continue in this state. | -| PROMOTE | Node was a logical standby and we just called bdr.promote_node to move the node state to ACTIVE. These two PROMOTE states have to be coherent to the fact, that only one node can be with a state higher than STANDBY but lower than ACTIVE. | -| PROMOTING | Promotion from logical standby to full BDR node is in progress. | -| ACTIVE | The node is a full BDR node and is currently ACTIVE. This is the most common node status. | -| PART_START | Node was ACTIVE or STANDBY and we just called bdr.part_node to remove the node from the EDB Postgres Distributed cluster. | -| PARTING | Node disconnects from other nodes and plays no further part in consensus or replication. | -| PART_CATCHUP | Non-parting nodes synchronize any missing data from the recently parted node. | -| PARTED | Node parting operation is now complete on all nodes. | - - -Only one node at a time can be in either of the states PROMOTE or PROMOTING. -STANDBY, in the Current State or Target State columns, indicates that the node is or will be in a read-only state. - -```sh -pgd show-nodes [flags] -``` - -### Options - -No specific command options. See [global options](../command_ref#global-options) for global options. - -### Examples - -#### Show all nodes in the cluster with a node down - -In this example, there is a multi-node cluster with a data node down. - -```bash -$ pgd show-nodes -__OUTPUT__ -Node Node ID Group Type Current State Target State Status Seq ID ----- ------- ----- ---- ------------- ------------ ------ ------ -bdr-a1 3136956818 group_a data ACTIVE ACTIVE Up 1 -bdr-a2 2133699692 group_a data ACTIVE ACTIVE Unreachable 2 -witness-a 3889635963 group_a witness ACTIVE ACTIVE Up 3 -``` - -#### Show all nodes in the cluster with different node types - -In this example, there is a multi-node cluster with logical standby, witness and subscriber-only nodes. -Note that, unlike logical standby nodes, the subscriber-only nodes are fully joined node to the cluster. 
- -```bash -$ pgd show-nodes -__OUTPUT__ -Node Node ID Group Type Current State Target State Status Seq ID ----- ------- ----- ---- ------------- ------------ ------ ------ -bdr-a1 3136956818 group_a data ACTIVE ACTIVE Up 6 -bdr-a2 2133699692 group_a data ACTIVE ACTIVE Up 3 -logical-standby-a1 1140256918 group_a standby STANDBY STANDBY Up 9 -witness-a 3889635963 group_a witness ACTIVE ACTIVE Up 7 -bdr-b1 2380210996 group_b data ACTIVE ACTIVE Up 1 -bdr-b2 2244996162 group_b data ACTIVE ACTIVE Up 2 -logical-standby-b1 3541792022 group_b standby STANDBY STANDBY Up 10 -witness-b 661050297 group_b witness ACTIVE ACTIVE Up 5 -witness-c 1954444188 group_c witness ACTIVE ACTIVE Up 4 -subscriber-only-c1 2448841809 group_so subscriber-only ACTIVE ACTIVE Up 8 -``` - diff --git a/product_docs/docs/pgd/5/cli/command_ref/pgd_show-proxies.mdx b/product_docs/docs/pgd/5/cli/command_ref/pgd_show-proxies.mdx deleted file mode 100644 index ca0c458fdd4..00000000000 --- a/product_docs/docs/pgd/5/cli/command_ref/pgd_show-proxies.mdx +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: show-proxies -deepToC: true ---- - -Shows all proxies in the EDB Postgres Distributed cluster and their summary. - -### Synopsis - -Shows all proxies in the EDB Postgres Distributed cluster and their summary. - -We recommend giving all the proxies attached to the same group the same proxy option values. - -```sh -pgd show-proxies [flags] -``` - -### Options - -No specific command options. See [global options](../command_ref#global-options) for global options. - -### Examples - -#### Show all proxies in the cluster - -In this example, there is a multi-group cluster, with 2 proxies attached to each data group. - -```bash -$ pgd show-proxies -__OUTPUT__ -Proxy Group Listen Addrs Listen Port Read Listen Addrs Read Listen Port ------ ----- ------------ ----------- ----------------- ---------------- -proxy-a1 group_a [0.0.0.0] 6432 [0.0.0.0] 6433 -proxy-a2 group_a [0.0.0.0] 6432 [0.0.0.0] 6433 -proxy-b1 group_b [0.0.0.0] 6432 [0.0.0.0] 6433 -proxy-b2 group_b [0.0.0.0] 6432 [0.0.0.0] 6433 -``` - diff --git a/product_docs/docs/pgd/5/cli/command_ref/pgd_show-raft.mdx b/product_docs/docs/pgd/5/cli/command_ref/pgd_show-raft.mdx deleted file mode 100644 index 0a0839b7924..00000000000 --- a/product_docs/docs/pgd/5/cli/command_ref/pgd_show-raft.mdx +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: show-raft -deepToC: true ---- - -Shows BDR Raft (consensus protocol) details. - -### Synopsis - -Shows BDR Raft (consensus protocol) details such as Raft instance id, Raft state (leader, follower), and Raft term. If Raft is enabled at subgroup level, then that subgroup's Raft instance is also shown. - -In some cases, such as network partition, output might vary based on the node to which the CLI is connected. - -```sh -pgd show-raft [flags] -``` - -### Options - -No specific command options. See [global options](../command_ref#global-options) for global options. - -### Examples - -#### Show Raft details - -In this example, there is a multi-group cluster with subgroup Raft and with witness, logical standby, subscriber-only nodes. -Note that logical standby and subscriber-only nodes don't have Raft voting rights, unlike data or witness nodes. 
- -``` -$ pgd show-raft -__OUTPUT__ -Instance Group Node Raft State Raft Term Commit Index Nodes Voting Nodes Protocol Version --------- ----- ---- ---------- --------- ------------ ----- ------------ ---------------- -1 bdrgroup bdr-b1 RAFT_LEADER 0 383 10 7 5000 -1 bdrgroup bdr-a1 RAFT_FOLLOWER 0 383 10 7 5000 -1 bdrgroup bdr-a2 RAFT_FOLLOWER 0 383 10 7 5000 -1 bdrgroup bdr-b2 RAFT_FOLLOWER 0 383 10 7 5000 -1 bdrgroup logical-standby-a1 RAFT_FOLLOWER 0 383 10 7 5000 -1 bdrgroup logical-standby-b1 RAFT_FOLLOWER 0 383 10 7 5000 -1 bdrgroup subscriber-only-c1 RAFT_FOLLOWER 0 383 10 7 5000 -1 bdrgroup witness-a RAFT_FOLLOWER 0 383 10 7 5000 -1 bdrgroup witness-b RAFT_FOLLOWER 0 383 10 7 5000 -1 bdrgroup witness-c RAFT_FOLLOWER 0 383 10 7 5000 -2 group_a witness-a RAFT_LEADER 1 2 4 3 0 -2 group_a bdr-a1 RAFT_FOLLOWER 1 2 4 3 0 -2 group_a bdr-a2 RAFT_FOLLOWER 1 2 4 3 0 -2 group_a logical-standby-a1 RAFT_FOLLOWER 1 2 4 3 0 -3 group_b witness-b RAFT_LEADER 1 2 4 3 0 -3 group_b bdr-b1 RAFT_FOLLOWER 1 2 4 3 0 -3 group_b bdr-b2 RAFT_FOLLOWER 1 2 4 3 0 -3 group_b logical-standby-b1 RAFT_FOLLOWER 1 2 4 3 0 -``` - diff --git a/product_docs/docs/pgd/5/cli/command_ref/pgd_show-replslots.mdx b/product_docs/docs/pgd/5/cli/command_ref/pgd_show-replslots.mdx deleted file mode 100644 index 57f717e4edc..00000000000 --- a/product_docs/docs/pgd/5/cli/command_ref/pgd_show-replslots.mdx +++ /dev/null @@ -1,144 +0,0 @@ ---- -title: show-replslots -deepToC: true ---- - -Shows the status of BDR replication slots. - -### Synopsis - -Shows the status of BDR replication slots. Output with the verbose flag gives details such as is slot active, replication state (disconnected, streaming, catchup), and approximate lag. - -| Symbol | Meaning | -|--------|---------------------------------------------------------------| -| `*` | ok | -| `~` | warning (lag > 10M) | -| `!` | critical (lag > 100M OR slot is 'inactive' OR 'disconnected') | -| `x` | down / unreachable | -| `-` | n/a | - -In matrix view, sometimes byte lag is shown in parentheses. It is maxOf(WriteLag, FlushLag, ReplayLag, SentLag). - -```sh -pgd show-replslots [flags] -``` - -### Options - -| Flag | Description | -| --------- | ----------- | -| -v, --verbose | verbose output | - -See [global options](../command_ref#global-options) for global options. - -### Examples - -#### Show replication slots with a node down - -In this example, there is a 3 node cluster, bdr-a1 and bdr-c1 are up, bdr-b1 is down. 
- -```bash -$ pgd show-replslots -__OUTPUT__ -Node bdr-a1 bdr-b1 bdr-c1 ----- ------ ------ ------ -bdr-a1 * !(6.6G) * -bdr-b1 x * x -bdr-c1 * !(6.9G) * -``` - -Or in Verbose mode: - -```bash -$ pgd show-replslots --verbose -__OUTPUT__ -Origin Node Target Node Status (active/state) Write Lag (bytes/duration) Flush Lag (bytes/duration) Replay Lag (bytes/duration) Sent Lag (bytes) ------------ ----------- --------------------- -------------------------- -------------------------- --------------------------- ---------------- -bdr-a1 bdr-b1 f / disconnected 6.6G / 8 days 02:58:36.243723 6.6G / 8 days 02:58:36.243723 6.6G / 8 days 02:58:36.243723 6.6G -bdr-a1 bdr-c1 t / streaming 0B / 00:00:00 0B / 00:00:00 0B / 00:00:00 0B -bdr-c1 bdr-a1 t / streaming 0B / 00:00:00.000812 0B / 00:00:00.000812 0B / 00:00:00.000812 0B -bdr-c1 bdr-b1 f / disconnected 6.9G / 8 days 02:58:36.004415 6.9G / 8 days 02:58:36.004415 6.9G / 8 days 02:58:36.004415 6.9G -``` - -#### Show replication slots with a recently restarted node - -In this example, there is a 3 node cluster, bdr-b1 was down and it has just been restarted. - -```bash -$ pgd show-replslots -__OUTPUT__ -Node bdr-a1 bdr-b1 bdr-c1 ----- ------ ------ ------ -bdr-a1 * !(6.9G) * -bdr-b1 * * * -bdr-c1 * !(5.8G) * -``` - -Or in Verbose mode: - -```bash -$ pgd show-replslots --verbose -__OUTPUT__ -Origin Node Target Node Status (active/state) Write Lag (bytes/duration) Flush Lag (bytes/duration) Replay Lag (bytes/duration) Sent Lag (bytes) ------------ ----------- --------------------- -------------------------- -------------------------- --------------------------- ---------------- -bdr-a1 bdr-b1 t / catchup 6.9G / 00:00:00.000778 6.9G / 00:00:00.000778 6.9G / 00:00:00.000778 6.9G -bdr-a1 bdr-c1 t / streaming 0B / 00:00:00.104121 0B / 00:00:00.104133 0B / 00:00:00.104133 0B -bdr-b1 bdr-a1 t / streaming 0B / 00:00:00 0B / 00:00:00 0B / 00:00:00 0B -bdr-b1 bdr-c1 t / streaming 0B / 00:00:00 0B / 00:00:00 0B / 00:00:00 0B -bdr-c1 bdr-a1 t / streaming 6.8K / 00:00:00 6.8K / 00:00:00 6.8K / 00:00:00 6.8K -bdr-c1 bdr-b1 t / catchup 5.5G / 00:00:00.008257 5.5G / 00:00:00.008257 5.5G / 00:00:00.008257 5.5G -``` - -#### Show replication slots with all nodes working correctly - -In this example, there is a 3 node cluster with all nodes are up and in 'streaming' state. - -```bash -$ pgd show-replslots -__OUTPUT__ -Node bdr-a1 bdr-b1 bdr-c1 ----- ------ ------ ------ -bdr-a1 * * * -bdr-b1 * * * -bdr-c1 * * * -``` - -Or in Verbose mode: - -```bash -$ pgd show-replslots --verbose -__OUTPUT__ -Origin Node Target Node Status (active/state) Write Lag (bytes/duration) Flush Lag (bytes/duration) Replay Lag (bytes/duration) Sent Lag (bytes) ------------ ----------- --------------------- -------------------------- -------------------------- --------------------------- ---------------- -bdr-a1 bdr-b1 t / streaming 0B / 00:00:00 0B / 00:00:00 0B / 00:00:00 0B -bdr-a1 bdr-c1 t / streaming 0B / 00:00:00 0B / 00:00:00 0B / 00:00:00 0B -bdr-b1 bdr-a1 t / streaming 0B / 00:00:00 0B / 00:00:00 0B / 00:00:00 0B -bdr-b1 bdr-c1 t / streaming 0B / 00:00:00 0B / 00:00:00 0B / 00:00:00 0B -bdr-c1 bdr-a1 t / streaming 0B / 00:00:00 528B / 00:00:00 528B / 00:00:00 0B -bdr-c1 bdr-b1 t / streaming 528B / 00:00:00 528B / 00:00:00 528B / 00:00:00 0B -``` - -#### Show replication slots in a multi-node cluster - -In this example, there is a 4 node cluster, with a witness node, a subscriber-only node, and two logical standbys. bdr-a1 and bdr-b1 are up and in 'streaming' state. 
bdr-a1 is replicating to logical-standby-a1 and bdr-b1 is replicating to logical-standby-b1. bdr-a1 is also replicating to subscriber-only-c1. - -Note: -1. Data for a logical standby is only sent by one source node. No other nodes receive replication changes from the logical standby. -2. Subscriber-only node subscribes to replication changes from other nodes in the cluster, but no other nodes receive replication changes from it - -```bash -$ pgd show-replslots -__OUTPUT__ -Node bdr-a1 bdr-b1 logical-standby-a1 logical-standby-b1 subscriber-only-c1 witness-c1 ----- ------ ------ ------------------ ------------------ ------------------ ---------- -bdr-a1 * * * - * * -bdr-b1 * * - * * * -logical-standby-a1 - - * - - - -logical-standby-b1 - - - * - - -subscriber-only-c1 - - - - * - -witness-c1 * * - - * * -``` - - - - diff --git a/product_docs/docs/pgd/5/cli/command_ref/pgd_show-subscriptions.mdx b/product_docs/docs/pgd/5/cli/command_ref/pgd_show-subscriptions.mdx deleted file mode 100644 index 93ab93ed1bb..00000000000 --- a/product_docs/docs/pgd/5/cli/command_ref/pgd_show-subscriptions.mdx +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: show-subscriptions -deepToC: true ---- - -Shows BDR subscription (incoming replication) details. - -### Synopsis - -Shows BDR subscription (incoming replication) details such as origin/target node, timestamp of the last replayed transaction, and lag between now and the timestamp of the last replayed transaction. - -```sh -pgd show-subscriptions [flags] -``` - -### Options - -No specific command options. See [global options](../command_ref#global-options) for global options. - -### Examples - -#### Show subscriptions with a node down - -In this example, there is a 3 node cluster, bdr-a1 and bdr-c1 are up, bdr-b1 is down. - -```bash -$ pgd show-subscriptions -__OUTPUT__ -Origin Node Target Node Last Transaction Replayed At Lag Duration (seconds) ------------ ----------- ---------------------------- ---------------------- -bdr-a1 bdr-c1 2022-04-23 13:13:40.854433+00 0.514275 -bdr-b1 bdr-a1 -bdr-b1 bdr-c1 -bdr-c1 bdr-a1 2022-04-23 13:13:40.852233+00 0.335464 -``` - -#### Show subscriptions with a recently restarted node - -In this example, there is a 3 node cluster, bdr-b1 was down and it has just been restarted. - -```bash -$ pgd show-subscriptions -__OUTPUT__ -Origin Node Target Node Last Transaction Replayed At Lag Duration (seconds) ------------ ----------- ---------------------------- ---------------------- -bdr-a1 bdr-b1 2022-04-23 13:14:45.669254+00 0.001686 -bdr-a1 bdr-c1 2022-04-23 13:14:46.157913+00 -0.002009 -bdr-b1 bdr-a1 -bdr-b1 bdr-c1 -bdr-c1 bdr-a1 2022-04-23 13:14:45.698472+00 0.259521 -bdr-c1 bdr-b1 2022-04-23 13:14:45.667979+00 0.002961 -``` - -#### Show subscriptions with all nodes working correctly - -In this example, there is a 3 node cluster, all nodes are up and in 'streaming' state. - -```bash -$ pgd show-subscriptions -__OUTPUT__ -Origin Node Target Node Last Transaction Replayed At Lag Duration (seconds) ------------ ----------- ---------------------------- ---------------------- -bdr-a1 bdr-b1 2022-04-23 13:15:39.732375+00 0.034462 -bdr-a1 bdr-c1 2022-04-23 13:15:40.179618+00 0.002647 -bdr-b1 bdr-a1 2022-04-23 13:15:39.719994+00 0.305814 -bdr-b1 bdr-c1 2022-04-23 13:15:40.180886+00 0.001379 -bdr-c1 bdr-a1 2022-04-23 13:15:39.714397+00 0.311411 -bdr-c1 bdr-b1 2022-04-23 13:15:39.714397+00 0.052440 -``` - -#### Show subscriptions in a multi-node cluster - -In this example, there is a 4 node cluster. 
-bdr-a1 and bdr-b1 are the origin nodes for logical-standby-a1 and logical-standby-b1 respectively. -bdr-a1 and bdr-b1 are the origin nodes for subscriber-only-c1. -bdr-a1 and bdr-b1 are the origin nodes for witness-c1. - -Note: Logical standby and subscriber-only nodes receive changes but do not -send changes made locally to other nodes - -```bash -$ pgd show-subscriptions -__OUTPUT__ -Origin Node Target Node Last Transaction Replayed At Lag Duration (seconds) ------------ ----------- ---------------------------- ---------------------- -bdr-a1 bdr-b1 2022-04-23 13:40:49.106411+00 0.853665 -bdr-a1 logical-standby-a1 2022-04-23 13:40:50.72036+00 0.138430 -bdr-a1 logical-standby-b1 -bdr-a1 subscriber-only-c1 2022-04-23 13:40:50.72036+00 0.016226 -bdr-a1 witness-c1 2022-04-23 13:40:50.470142+00 0.001514 -bdr-b1 bdr-a1 2022-04-23 13:40:49.10174+00 1.095422 -bdr-b1 logical-standby-a1 -bdr-b1 logical-standby-b1 2022-04-23 13:40:50.713666+00 0.271213 -bdr-b1 subscriber-only-c1 2022-04-23 13:40:50.713666+00 0.022920 -bdr-b1 witness-c1 2022-04-23 13:40:50.471789+00 -0.000133 -witness-c1 bdr-a1 2022-04-23 13:40:49.107706+00 1.089456 -witness-c1 bdr-b1 2022-04-23 13:40:49.107706+00 0.852370 -witness-c1 logical-standby-a1 -witness-c1 logical-standby-b1 -witness-c1 subscriber-only-c1 2022-04-23 13:40:50.719844+00 0.016742 -``` diff --git a/product_docs/docs/pgd/5/cli/command_ref/pgd_show-version.mdx b/product_docs/docs/pgd/5/cli/command_ref/pgd_show-version.mdx deleted file mode 100644 index ede1174ef8c..00000000000 --- a/product_docs/docs/pgd/5/cli/command_ref/pgd_show-version.mdx +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: show-version -deepToC: true ---- - -Shows the version of BDR and Postgres installed on each node. - -### Synopsis - -Shows the version of BDR and Postgres installed on each node in the cluster. - -```sh -pgd show-version [flags] -``` - -### Options - -No specific command options. See [global options](../command_ref#global-options) for global options. - -### Examples - -#### Show version with a node down - -In this example, there is a 3 node cluster, bdr-a1 and bdr-c1 are up, bdr-b1 is down. - -```bash -$ pgd show-version -__OUTPUT__ -Node BDR Version Postgres Version ----- ----------- ---------------- -bdr-c1 4.1.0 14.2 (EDB Postgres Extended Server 14.2.0) (Debian 2:14.2.0edbpge-1.buster+1) -bdr-a1 4.1.0 14.2 (EDB Postgres Extended Server 14.2.0) (Debian 2:14.2.0edbpge-1.buster+1) -bdr-b1 -``` - -#### Show version with all nodes up - -In this example, there is a 3 node cluster, all nodes are up. - -```bash -$ pgd show-version -__OUTPUT__ -Node BDR Version Postgres Version ----- ----------- ---------------- -bdr-c1 4.1.0 14.2 (EDB Postgres Extended Server 14.2.0) (Debian 2:14.2.0edbpge-1.buster+1) -bdr-a1 4.1.0 14.2 (EDB Postgres Extended Server 14.2.0) (Debian 2:14.2.0edbpge-1.buster+1) -bdr-b1 4.1.0 14.2 (EDB Postgres Extended Server 14.2.0) (Debian 2:14.2.0edbpge-1.buster+1) -``` - - diff --git a/product_docs/docs/pgd/5/cli/command_ref/pgd_switchover.mdx b/product_docs/docs/pgd/5/cli/command_ref/pgd_switchover.mdx deleted file mode 100644 index 65782234e31..00000000000 --- a/product_docs/docs/pgd/5/cli/command_ref/pgd_switchover.mdx +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: switchover -deepToC: true ---- - -Switches over to new write leader. - -### Synopsis - -Switches over to new write leader. Use switchover method `fast` for immediate switchover. Use `strict` to wait until lag is less than `route_writer_max_lag` on the given target node. 
- -If switchover fails due to timeout or any other issue, BDR might elect a write leader that's different from the given target node. - -```sh -pgd switchover [flags] -``` - -### Options - -| Flag                     | Value | Description | -|----------------|----------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `--group-name` | string | Group name | -| `--method` | string | Switchover method (strict, fast)
strict - waits until lag on the given node is less than route_writer_max_lag
fast - immediate switchover, route_writer_max_lag is ignored (default "strict") | -| `--node-name` | string | Node name | -| `--timeout` | interval | Timeout period when switchover method is strict (default 10s) | - -See [global options](../command_ref#global-options) for global options. - - -### Examples - -#### Using defaults - -Running the command with only required arguments. The default method is `strict` and default timeout is `10s`. - -```bash -$ pgd switchover --group-name group_a --node-name bdr-a1 -__OUTPUT__ -switchover is complete -``` - -#### Using optional arguments - -Running the command with optional arguments. - -```bash -$ pgd switchover --group-name group_a --node-name bdr-a1 --method strict --timeout 15s -__OUTPUT__ -switchover is complete -``` - -#### Immediate switchover - -Running the command with `fast` method for immediate switchover. - -```bash -$ pgd switchover --group-name group_a --node-name bdr-a1 --method fast -__OUTPUT__ -switchover is complete -``` - diff --git a/product_docs/docs/pgd/5/cli/command_ref/pgd_verify-cluster.mdx b/product_docs/docs/pgd/5/cli/command_ref/pgd_verify-cluster.mdx deleted file mode 100644 index 39a1c0784a3..00000000000 --- a/product_docs/docs/pgd/5/cli/command_ref/pgd_verify-cluster.mdx +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: verify-cluster -deepToC: true ---- - -Verifies whether the cluster follows the rules as per the AlwaysOn architecture. - -### Synopsis - -Verifies whether the cluster follows the rules as per the AlwaysOn architecture. - -```sh -pgd verify-cluster [flags] -``` - -### Options - -No specific command options. See [global options](../command_ref#global-options) for global options. - -### Examples - -#### Verifying a cluster with an unrecommended architecture - -In this example, we verify the cluster with an unrecommended architecture. - -```bash -$ pgd verify-cluster -__OUTPUT__ -Check Status Groups ------ ------ ------ -There is always at least 1 Global Group and 1 Data Group Ok -There are at least 2 data nodes in a Data Group (except for the witness-only group) Critical group_b -There is at most 1 witness node in a Data Group Warning group_a -Witness-only group does not have any child groups Ok -There is at max 1 witness-only group iff there is even number of local Data Groups Warning bdrgroup -There are at least 2 proxies configured per Data Group if routing is enabled Warning group_a, group_b -``` - -#### Verifying a cluster with recommended architecture - -In this example, we verify the cluster with a recommended architecture. - -```bash -$ pgd verify-cluster -__OUTPUT__ -Check Status Groups ------ ------ ------ -There is always at least 1 Global Group and 1 Data Group Ok -There are at least 2 data nodes in a Data Group (except for the witness-only group) Ok -There is at most 1 witness node in a Data Group Ok -Witness-only group does not have any child groups Ok -There is at max 1 witness-only group iff there is even number of local Data Groups Ok -There are at least 2 proxies configured per Data Group if routing is enabled Ok -``` diff --git a/product_docs/docs/pgd/5/cli/command_ref/pgd_verify-settings.mdx b/product_docs/docs/pgd/5/cli/command_ref/pgd_verify-settings.mdx deleted file mode 100644 index 9bec41a3016..00000000000 --- a/product_docs/docs/pgd/5/cli/command_ref/pgd_verify-settings.mdx +++ /dev/null @@ -1,203 +0,0 @@ ---- -title: verify-settings -deepToC: true ---- - -Verifies the EDB Postgres Distributed cluster settings. - -### Synopsis - -Verifies the EDB Postgres Distributed cluster settings. 
- - -```sh -pgd verify-settings [flags] -``` - -### Options - -No specific command options. See [global options](../command_ref#global-options) for global options. - -### Examples - -#### Verifying the cluster settings - -``` -$ pgd verify-settings -__OUTPUT__ -# bdr.accept_connections -Node Status Pending Restart Value Message ----- ------ --------------- ----- ------- -bdr-a1 Critical false off must be set to on -bdr-a2 Ok false on -bdr-b1 Ok false on -bdr-b2 Ok false on -logical-standby-a1 Ok false on -logical-standby-b1 Ok false on -subscriber-only-c1 Ok false on -witness-a Ok false on -witness-b Ok false on -witness-c Ok false on -Warning: value must be same on all primary nodes - - -# bdr.ddl_locking -Ok: all node values are ok - - -# bdr.ddl_replication -Node Status Pending Restart Value Message ----- ------ --------------- ----- ------- -bdr-a1 Warning false 0 must be set to on -bdr-a2 Ok false on -bdr-b1 Ok false on -bdr-b2 Ok false on -logical-standby-a1 Ok false on -logical-standby-b1 Ok false on -subscriber-only-c1 Ok false on -witness-a Ok false on -witness-b Ok false on -witness-c Ok false on -Warning: value must be same on all primary nodes - - -# bdr.max_writers_per_subscription -Ok: all node values are ok - - -# bdr.raft_group_max_connections -Ok: all node values are ok - - -# bdr.replay_progress_frequency -Node Status Pending Restart Value Message ----- ------ --------------- ----- ------- -bdr-a1 Warning false 61000 must be <= 60000 -bdr-a2 Ok false 60000 -bdr-b1 Ok false 60000 -bdr-b2 Ok false 60000 -logical-standby-a1 Ok false 60000 -logical-standby-b1 Ok false 60000 -subscriber-only-c1 Ok false 60000 -witness-a Ok false 60000 -witness-b Ok false 60000 -witness-c Ok false 60000 -Warning: value must be same on all primary nodes - - -# bdr.role_replication -Node Status Pending Restart Value Message ----- ------ --------------- ----- ------- -bdr-a1 Warning false off must be set to on -bdr-a2 Ok false on -bdr-b1 Ok false on -bdr-b2 Ok false on -logical-standby-a1 Ok false on -logical-standby-b1 Ok false on -subscriber-only-c1 Ok false on -witness-a Ok false on -witness-b Ok false on -witness-c Ok false on -Warning: value must be same on all primary nodes - - -# bdr.standby_slot_names -Node Status Pending Restart Value Message ----- ------ --------------- ----- ------- -bdr-a1 Warning false bdr_bdrdb_ja… must contain valid logical slots of peer data nodes only -bdr-a2 Warning false bdr_bdrdb_ja… must contain valid logical slots of peer data nodes only -bdr-b1 Warning false must contain valid logical slots of peer data nodes only -bdr-b2 Warning false must contain valid logical slots of peer data nodes only -logical-standby-a1 Ok false -logical-standby-b1 Ok false -subscriber-only-c1 Ok false -witness-a Ok false -witness-b Ok false -witness-c Ok false - - -# bdr.standby_slots_min_confirmed -Node Status Pending Restart Value Message ----- ------ --------------- ----- ------- -bdr-a1 Warning false -1 must be >= 1 -bdr-a2 Warning false -1 must be >= 1 -bdr-b1 Warning false -1 must be >= 1 -bdr-b2 Warning false -1 must be >= 1 -logical-standby-a1 Ok false -1 -logical-standby-b1 Ok false -1 -subscriber-only-c1 Ok false -1 -witness-a Ok false -1 -witness-b Ok false -1 -witness-c Ok false -1 - - -# bdr.start_workers -Ok: all node values are ok - - -# bdr.xact_replication -Ok: all node values are ok - - -# max_prepared_transactions -Node Status Pending Restart Value Message ----- ------ --------------- ----- ------- -bdr-a1 Warning false 16 must be >= 250 -bdr-a2 Warning false 16 
must be >= 250 -bdr-b1 Warning false 16 must be >= 250 -bdr-b2 Warning false 16 must be >= 250 -logical-standby-a1 Warning false 16 must be >= 250 -logical-standby-b1 Warning false 16 must be >= 250 -subscriber-only-c1 Warning false 16 must be >= 250 -witness-a Warning false 16 must be >= 250 -witness-b Warning false 16 must be >= 250 -witness-c Warning false 16 must be >= 250 - - -# max_replication_slots -Node Status Pending Restart Value Message ----- ------ --------------- ----- ------- -bdr-a1 Critical false 8 must be >= 10 -bdr-a2 Ok false 12 -bdr-b1 Ok false 12 -bdr-b2 Ok false 12 -logical-standby-a1 Ok false 12 -logical-standby-b1 Ok false 12 -subscriber-only-c1 Ok false 12 -witness-a Ok false 12 -witness-b Ok false 12 -witness-c Ok false 12 -Warning: value must be same on all primary nodes - - -# max_wal_senders -Ok: all node values are ok - - -# max_worker_processes -Ok: all node values are ok - - -# shared_preload_libraries -Node Status Pending Restart Value Message ----- ------ --------------- ----- ------- -bdr-a1 Warning false pg_stat_stat… must contain bdr as first entry -bdr-a2 Warning false pg_stat_stat… must contain bdr as first entry -bdr-b1 Warning false pg_stat_stat… must contain bdr as first entry -bdr-b2 Warning false pg_stat_stat… must contain bdr as first entry -logical-standby-a1 Warning false pg_stat_stat… must contain bdr as first entry -logical-standby-b1 Warning false pg_stat_stat… must contain bdr as first entry -subscriber-only-c1 Warning false pg_stat_stat… must contain bdr as first entry -witness-a Warning false pg_stat_stat… must contain bdr as first entry -witness-b Warning false pg_stat_stat… must contain bdr as first entry -witness-c Warning false pg_stat_stat… must contain bdr as first entry - - -# track_commit_timestamp -Ok: all node values are ok - - -# wal_level -Ok: all node values are ok -``` - diff --git a/product_docs/docs/pgd/beforeworkdiff.diff b/product_docs/docs/pgd/beforeworkdiff.diff deleted file mode 100644 index 8e5478f7192..00000000000 --- a/product_docs/docs/pgd/beforeworkdiff.diff +++ /dev/null @@ -1,4163 +0,0 @@ -Files 5/deploy-config/deploy-kubernetes/index.mdx and 5.6/deploy-config/deploy-kubernetes/index.mdx differ -Files 5/deploy-config/deploy-manual/deploying/01-provisioning-hosts.mdx and 5.6/deploy-config/deploy-manual/deploying/01-provisioning-hosts.mdx differ -Files 5/deploy-config/deploy-manual/deploying/02-install-postgres.mdx and 5.6/deploy-config/deploy-manual/deploying/02-install-postgres.mdx differ -Files 5/deploy-config/deploy-manual/deploying/03-configuring-repositories.mdx and 5.6/deploy-config/deploy-manual/deploying/03-configuring-repositories.mdx differ -Files 5/deploy-config/deploy-manual/deploying/04-installing-software.mdx and 5.6/deploy-config/deploy-manual/deploying/04-installing-software.mdx differ -Files 5/deploy-config/deploy-manual/deploying/05-creating-cluster.mdx and 5.6/deploy-config/deploy-manual/deploying/05-creating-cluster.mdx differ -Files 5/deploy-config/deploy-manual/deploying/06-check-cluster.mdx and 5.6/deploy-config/deploy-manual/deploying/06-check-cluster.mdx differ -Files 5/deploy-config/deploy-manual/deploying/07-configure-proxies.mdx and 5.6/deploy-config/deploy-manual/deploying/07-configure-proxies.mdx differ -Files 5/deploy-config/deploy-manual/deploying/08-using-pgd-cli.mdx and 5.6/deploy-config/deploy-manual/deploying/08-using-pgd-cli.mdx differ -Files 5/deploy-config/deploy-manual/deploying/index.mdx and 5.6/deploy-config/deploy-manual/deploying/index.mdx differ -Files 
5/deploy-config/deploy-tpa/deploying/01-configuring.mdx and 5.6/deploy-config/deploy-tpa/deploying/01-configuring.mdx differ -Files 5/deploy-config/deploy-tpa/deploying/02-deploying.mdx and 5.6/deploy-config/deploy-tpa/deploying/02-deploying.mdx differ -Files 5/deploy-config/deploy-tpa/deploying/index.mdx and 5.6/deploy-config/deploy-tpa/deploying/index.mdx differ -Files 5/deploy-config/deploy-tpa/index.mdx and 5.6/deploy-config/deploy-tpa/index.mdx differ -Files 5/deploy-config/index.mdx and 5.6/deploy-config/index.mdx differ -Only in 5: durability -Files 5/index.mdx and 5.6/index.mdx differ -Files 5/known_issues.mdx and 5.6/known_issues.mdx differ -Files 5/monitoring/sql.mdx and 5.6/monitoring/sql.mdx differ -Files 5/node_management/creating_and_joining.mdx and 5.6/node_management/creating_and_joining.mdx differ -Only in 5.6/node_management: creating_nodes.mdx -Only in 5/node_management: decoding_worker.mdx -Files 5/node_management/groups_and_subgroups.mdx and 5.6/node_management/groups_and_subgroups.mdx differ -Files 5/node_management/heterogeneous_clusters.mdx and 5.6/node_management/heterogeneous_clusters.mdx differ -Files 5/node_management/index.mdx and 5.6/node_management/index.mdx differ -Only in 5/node_management: logical_standby_nodes.mdx -Files 5/node_management/maintainance_with_proxies.mdx and 5.6/node_management/maintainance_with_proxies.mdx differ -Files 5/node_management/node_recovery.mdx and 5.6/node_management/node_recovery.mdx differ -Only in 5/node_management: node_types.mdx -Files 5/node_management/removing_nodes_and_groups.mdx and 5.6/node_management/removing_nodes_and_groups.mdx differ -Files 5/node_management/replication_slots.mdx and 5.6/node_management/replication_slots.mdx differ -Only in 5/node_management: subscriber_only.mdx -Files 5/node_management/viewing_topology.mdx and 5.6/node_management/viewing_topology.mdx differ -Only in 5/node_management: witness_nodes.mdx -Only in 5.6: nodes -Only in 5.6/overview: architecture-and-performance.mdx -Only in 5.6/overview: basic-architecture.mdx -Only in 5.6/overview: compared.mdx -Files 5/overview/index.mdx and 5.6/overview/index.mdx differ -Files 5/parallelapply.mdx and 5.6/parallelapply.mdx differ -Files 5/planning/architectures.mdx and 5.6/planning/architectures.mdx differ -Files 5/planning/choosing_server.mdx and 5.6/planning/choosing_server.mdx differ -Files 5/planning/deployments.mdx and 5.6/planning/deployments.mdx differ -Files 5/planning/limitations.mdx and 5.6/planning/limitations.mdx differ -Files 5/planning/other_considerations.mdx and 5.6/planning/other_considerations.mdx differ -Files 5/postgres-configuration.mdx and 5.6/postgres-configuration.mdx differ -Files 5/quickstart/connecting_applications.mdx and 5.6/quickstart/connecting_applications.mdx differ -Files 5/quickstart/further_explore_conflicts.mdx and 5.6/quickstart/further_explore_conflicts.mdx differ -Files 5/quickstart/quick_start_aws.mdx and 5.6/quickstart/quick_start_aws.mdx differ -Files 5/quickstart/quick_start_cloud.mdx and 5.6/quickstart/quick_start_cloud.mdx differ -Files 5/quickstart/quick_start_docker.mdx and 5.6/quickstart/quick_start_docker.mdx differ -Files 5/quickstart/quick_start_linux.mdx and 5.6/quickstart/quick_start_linux.mdx differ -Files 5/reference/autopartition.mdx and 5.6/reference/autopartition.mdx differ -Files 5/reference/catalogs-internal.mdx and 5.6/reference/catalogs-internal.mdx differ -Files 5/reference/catalogs-visible.mdx and 5.6/reference/catalogs-visible.mdx differ -Files 5/reference/commit-scopes.mdx and 
5.6/reference/commit-scopes.mdx differ -Files 5/reference/conflict_functions.mdx and 5.6/reference/conflict_functions.mdx differ -Files 5/reference/conflicts.mdx and 5.6/reference/conflicts.mdx differ -Files 5/reference/functions-internal.mdx and 5.6/reference/functions-internal.mdx differ -Files 5/reference/functions.mdx and 5.6/reference/functions.mdx differ -Files 5/reference/index.json and 5.6/reference/index.json differ -Files 5/reference/index.mdx and 5.6/reference/index.mdx differ -Files 5/reference/nodes-management-interfaces.mdx and 5.6/reference/nodes-management-interfaces.mdx differ -Files 5/reference/nodes.mdx and 5.6/reference/nodes.mdx differ -Files 5/reference/pgd-settings.mdx and 5.6/reference/pgd-settings.mdx differ -Files 5/reference/sequences.mdx and 5.6/reference/sequences.mdx differ -Files 5/reference/streamtriggers/rowfunctions.mdx and 5.6/reference/streamtriggers/rowfunctions.mdx differ -Only in 5.6/rel_notes: .DS_Store -Files 5/rel_notes/index.mdx and 5.6/rel_notes/index.mdx differ -Only in 5/rel_notes: pgd_5.0.0_rel_notes.mdx -Only in 5/rel_notes: pgd_5.0.1_rel_notes.mdx -Only in 5/rel_notes: pgd_5.1.0_rel_notes.mdx -Only in 5/rel_notes: pgd_5.2.0_rel_notes.mdx -Only in 5/rel_notes: pgd_5.3.0_rel_notes.mdx -Only in 5/rel_notes: pgd_5.4.0_rel_notes.mdx -Only in 5/rel_notes: pgd_5.4.1_rel_notes.mdx -Only in 5/rel_notes: pgd_5.5.0_rel_notes.mdx -Only in 5/rel_notes: pgd_5.5.1_rel_notes.mdx -Only in 5.6/rel_notes: pgd_5.6.0_rel_notes.mdx -Only in 5.6/rel_notes: pgd_5.6.1_rel_notes.mdx -Only in 5.6/rel_notes: src -Files 5/repsets.mdx and 5.6/repsets.mdx differ -Files 5/routing/administering.mdx and 5.6/routing/administering.mdx differ -Files 5/routing/configuration.mdx and 5.6/routing/configuration.mdx differ -Files 5/routing/index.mdx and 5.6/routing/index.mdx differ -Files 5/routing/monitoring.mdx and 5.6/routing/monitoring.mdx differ -Files 5/routing/proxy.mdx and 5.6/routing/proxy.mdx differ -Files 5/scaling.mdx and 5.6/scaling.mdx differ -Files 5/security/pgd-predefined-roles.mdx and 5.6/security/pgd-predefined-roles.mdx differ -Files 5/security/role-management.mdx and 5.6/security/role-management.mdx differ -Files 5/security/roles.mdx and 5.6/security/roles.mdx differ -Files 5/sequences.mdx and 5.6/sequences.mdx differ -Files 5/terminology.mdx and 5.6/terminology.mdx differ -Files 5/testingandtuning.mdx and 5.6/testingandtuning.mdx differ -Files 5/transaction-streaming.mdx and 5.6/transaction-streaming.mdx differ -Files 5/twophase.mdx and 5.6/twophase.mdx differ -Files 5/upgrades/compatibility.mdx and 5.6/upgrades/compatibility.mdx differ -Files 5/upgrades/manual_overview.mdx and 5.6/upgrades/manual_overview.mdx differ -Files 5/upgrades/upgrade_paths.mdx and 5.6/upgrades/upgrade_paths.mdx differ -Files 5/upgrades/upgrading_major_rolling.mdx and 5.6/upgrades/upgrading_major_rolling.mdx differ - ------------------------ - -Only in 5.6: commit-scopes -Only in 5.6: conflict-management -Only in 5: consistency -diff -r 5/ddl/ddl-command-handling.mdx 5.6/ddl/ddl-command-handling.mdx -121c121 -< | CREATE TABLE | [Details](#bdr_ddl_allowed_CreateStmt) | Y | DDL | ---- -> | CREATE TABLE | Y | Y | DDL | -231c231 -< ## Command Notes ---- -> ## Command notes -256,265c256,257 -< - `ADD CONSTRAINT ... EXCLUDE` — Exclusion constraints are not -< supported. Exclusion constraints are a way to ensure that no two rows in a -< table have overlapping ranges. This is useful in a synchronous system where -< you can ensure that the constraint is enforced on all nodes at the same -< time. 
But in an asynchronous system, you can't ensure that the constraint is -< enforced on all nodes at the same time. This can lead to data inconsistency -< and changes that can't be replayed; therefore, exclusion constraints aren't -< supported. -< - `ALTER TABLE ... SET WITH[OUT] OIDS` — Isn't supported for the same reasons -< as in `CREATE TABLE`. ---- -> You can override this behavior using `bdr.permit_unsafe_commands` if you're sure the command is -> safe. -267a260,261 -> You can override this behavior using `bdr.permit_unsafe_commands` if you're sure the command is -> safe. -282a277,278 -> This can be overriden using `bdr.permit_unsafe_commands` if user is sure the command is -> safe. -510,516d505 -< -<
-< -< ### CREATE TABLE -< -< Generally `CREATE TABLE` is supported, but `CREATE TABLE WITH OIDS` isn't -< allowed on a PGD node. -diff -r 5/ddl/ddl-locking.mdx 5.6/ddl/ddl-locking.mdx -76c76 -< Specify locking behavior with the [`bdr.ddl_locking`](/pgd/5/reference/pgd-settings#bdrddl_locking) parameter, as ---- -> Specify locking behavior with the [`bdr.ddl_locking`](/pgd/latest/reference/pgd-settings#bdrddl_locking) parameter, as -diff -r 5/ddl/ddl-managing-with-pgd-replication.mdx 5.6/ddl/ddl-managing-with-pgd-replication.mdx -35c35 -< We recommend using the [`bdr.run_on_all_nodes()`](/pgd/5/reference/functions#bdrrun_on_all_nodes) technique with `CREATE ---- -> We recommend using the [`bdr.run_on_all_nodes()`](/pgd/latest/reference/functions#bdrrun_on_all_nodes) technique with `CREATE -63c63 -< timeout settings. [`bdr.global_lock_timeout`](/pgd/5/reference/pgd-settings#bdrglobal_lock_timeout) limits how long the wait for ---- -> timeout settings. [`bdr.global_lock_timeout`](/pgd/latest/reference/pgd-settings#bdrglobal_lock_timeout) limits how long the wait for -65,66c65,66 -< [`bdr.global_lock_statement_timeout`](/pgd/5/reference/pgd-settings#bdrglobal_lock_statement_timeout) limits the runtime length of any statement -< in transaction that holds global locks, and [`bdr.global_lock_idle_timeout`](/pgd/5/reference/pgd-settings#bdrglobal_lock_idle_timeout) sets ---- -> [`bdr.global_lock_statement_timeout`](/pgd/latest/reference/pgd-settings#bdrglobal_lock_statement_timeout) limits the runtime length of any statement -> in transaction that holds global locks, and [`bdr.global_lock_idle_timeout`](/pgd/latest/reference/pgd-settings#bdrglobal_lock_idle_timeout) sets -87c87 -< the bdr_superuser to update the [`bdr.ddl_locking`](/pgd/5/reference/pgd-settings#bdrddl_locking) value. ---- -> the bdr_superuser to update the [`bdr.ddl_locking`](/pgd/latest/reference/pgd-settings#bdrddl_locking) value. -105c105 -< by turning [`bdr.ddl_replication`](/pgd/5/reference/pgd-settings#bdrddl_replication) off. ---- -> by turning [`bdr.ddl_replication`](/pgd/latest/reference/pgd-settings#bdrddl_replication) off. 
-diff -r 5/ddl/ddl-overview.mdx 5.6/ddl/ddl-overview.mdx -74c74 -< When using the [`bdr.replicate_ddl_command`](/pgd/5/reference/functions#bdrreplicate_ddl_command), you can set this ---- -> When using the [`bdr.replicate_ddl_command`](/pgd/latest/reference/functions#bdrreplicate_ddl_command), you can set this -76c76 -< [`bdr.ddl_locking`](/pgd/5/reference/pgd-settings#bdrddl_locking) setting only for the DDL commands passed to that ---- -> [`bdr.ddl_locking`](/pgd/latest/reference/pgd-settings#bdrddl_locking) setting only for the DDL commands passed to that -diff -r 5/ddl/ddl-pgd-functions-like-ddl.mdx 5.6/ddl/ddl-pgd-functions-like-ddl.mdx -13,19c13,19 -< - [`bdr.create_replication_set`](/pgd/5/reference/repsets-management#bdrcreate_replication_set) -< - [`bdr.alter_replication_set`](/pgd/5/reference/repsets-management#bdralter_replication_set) -< - [`bdr.drop_replication_set`](/pgd/5/reference/repsets-management#bdrdrop_replication_set) -< - [`bdr.replication_set_add_table`](/pgd/5/reference/repsets-membership#bdrreplication_set_add_table) -< - [`bdr.replication_set_remove_table`](/pgd/5/reference/repsets-membership#bdrreplication_set_remove_table) -< - [`bdr.replication_set_add_ddl_filter`](/pgd/5/reference/repsets-ddl-filtering#bdrreplication_set_add_ddl_filter) -< - [`bdr.replication_set_remove_ddl_filter`](/pgd/5/reference/repsets-ddl-filtering#bdrreplication_set_remove_ddl_filter) ---- -> - [`bdr.create_replication_set`](/pgd/latest/reference/repsets-management#bdrcreate_replication_set) -> - [`bdr.alter_replication_set`](/pgd/latest/reference/repsets-management#bdralter_replication_set) -> - [`bdr.drop_replication_set`](/pgd/latest/reference/repsets-management#bdrdrop_replication_set) -> - [`bdr.replication_set_add_table`](/pgd/latest/reference/repsets-membership#bdrreplication_set_add_table) -> - [`bdr.replication_set_remove_table`](/pgd/latest/reference/repsets-membership#bdrreplication_set_remove_table) -> - [`bdr.replication_set_add_ddl_filter`](/pgd/latest/reference/repsets-ddl-filtering#bdrreplication_set_add_ddl_filter) -> - [`bdr.replication_set_remove_ddl_filter`](/pgd/latest/reference/repsets-ddl-filtering#bdrreplication_set_remove_ddl_filter) -29c29 -< - [`bdr.alter_sequence_set_kind`](/pgd/5/reference/sequences#bdralter_sequence_set_kind) ---- -> - [`bdr.alter_sequence_set_kind`](/pgd/latest/reference/sequences#bdralter_sequence_set_kind) -33,35c33,35 -< - [`bdr.create_conflict_trigger`](/pgd/5/reference/streamtriggers/interfaces#bdrcreate_conflict_trigger) -< - [`bdr.create_transform_trigger`](/pgd/5/reference/streamtriggers/interfaces#bdrcreate_transform_trigger) -< - [`bdr.drop_trigger`](/pgd/5/reference/streamtriggers/interfaces#bdrdrop_trigger) ---- -> - [`bdr.create_conflict_trigger`](/pgd/latest/reference/streamtriggers/interfaces#bdrcreate_conflict_trigger) -> - [`bdr.create_transform_trigger`](/pgd/latest/reference/streamtriggers/interfaces#bdrcreate_transform_trigger) -> - [`bdr.drop_trigger`](/pgd/latest/reference/streamtriggers/interfaces#bdrdrop_trigger) -diff -r 5/ddl/ddl-replication-options.mdx 5.6/ddl/ddl-replication-options.mdx -6c6 -< The [`bdr.ddl_replication`](/pgd/5/reference/pgd-settings#bdrddl_replication) parameter specifies replication behavior. ---- -> The [`bdr.ddl_replication`](/pgd/latest/reference/pgd-settings#bdrddl_replication) parameter specifies replication behavior. -15c15 -< function [`bdr.replicate_ddl_command()`](/pgd/5/reference/functions#bdrreplicate_ddl_command). 
This function can be helpful if you ---- -> function [`bdr.replicate_ddl_command()`](/pgd/latest/reference/functions#bdrreplicate_ddl_command). This function can be helpful if you -29c29 -< execute it manually on each node using the [`bdr.ddl_replication`](/pgd/5/reference/pgd-settings#bdrddl_replication) configuration ---- -> execute it manually on each node using the [`bdr.ddl_replication`](/pgd/latest/reference/pgd-settings#bdrddl_replication) configuration -diff -r 5/ddl/ddl-role-manipulation.mdx 5.6/ddl/ddl-role-manipulation.mdx -14c14 -< PGD replicates role manipulation statements if [`bdr.role_replication`](/pgd/5/reference/pgd-settings#bdrrole_replication) is ---- -> PGD replicates role manipulation statements if [`bdr.role_replication`](/pgd/latest/reference/pgd-settings#bdrrole_replication) is -diff -r 5/ddl/ddl-workarounds.mdx 5.6/ddl/ddl-workarounds.mdx -133c133 -< the [`bdr.wait_slot_confirm_lsn()`](/pgd/5/reference/functions#bdrwait_slot_confirm_lsn) function. ---- -> the [`bdr.wait_slot_confirm_lsn()`](/pgd/latest/reference/functions#bdrwait_slot_confirm_lsn) function. -Only in 5.6: decoding_worker.mdx -Only in 5/deploy-config: deploy-biganimal -Only in 5.6/deploy-config: deploy-cloudservice -diff -r 5/deploy-config/deploy-kubernetes/index.mdx 5.6/deploy-config/deploy-kubernetes/index.mdx -5,6c5,6 -< - /pgd/5/install-admin/admin-kubernetes/ #generated for pgd deploy-config-planning reorg -< - /pgd/5/admin-kubernetes/ #generated for pgd deploy-config-planning reorg ---- -> - /pgd/latest/install-admin/admin-kubernetes/ #generated for pgd deploy-config-planning reorg -> - /pgd/latest/admin-kubernetes/ #generated for pgd deploy-config-planning reorg -9c9 -< EDB Postgres Distributed for Kubernetes is a Kubernetes operator designed, developed, and supported by EDB. It covers the full lifecycle of highly available Postgres database clusters with a multi-master architecture, using PGD replication. It's based on the open source CloudNativePG operator and provides additional value, such as compatibility with Oracle using EDB Postgres Advanced Server, Transparent Data Encryption (TDE) using EDB Postgres Extended or Advanced Server, and additional supported platforms including IBM Power and OpenShift. ---- -> EDB CloudNativePG Global Cluster is a Kubernetes operator designed, developed, and supported by EDB. It covers the full lifecycle of highly available Postgres database clusters with a multi-master architecture, using PGD replication. It's based on the open source CloudNativePG operator and provides additional value, such as compatibility with Oracle using EDB Postgres Advanced Server, Transparent Data Encryption (TDE) using EDB Postgres Extended or Advanced Server, and additional supported platforms including IBM Power and OpenShift. -11c11 -< This section covers how to deploy and configure EDB Postgres Distributed using the Kubernetes operator. ---- -> This section covers how to deploy and configure EDB Postgres Distributed using the EDB CloudNativePG Global Cluster operator. -13c13 -< * [Quick start](/postgres_distributed_for_kubernetes/latest/quickstart) in the PGD for Kubernetes documentation works through the steps needed to: ---- -> * [Quick start](/postgres_distributed_for_kubernetes/latest/quickstart) in the EDB CloudNativePG Global Cluster documentation works through the steps needed to: -15c15 -< * Install Helm and the Helm chart for PGD for Kubernetes. ---- -> * Install Helm and the Helm chart for EDB CloudNativePG Global Cluster. 
-19,20c19 -< -< * [Installation and upgrade](/postgres_distributed_for_kubernetes/latest/installation_upgrade) provides detailed instructions for installing and upgrading PGD for Kubernetes. ---- -> * [Installation and upgrade](/postgres_distributed_for_kubernetes/latest/installation_upgrade) provides detailed instructions for installing and upgrading EDB CloudNativePG Global Cluster. -diff -r 5/deploy-config/deploy-manual/deploying/01-provisioning-hosts.mdx 5.6/deploy-config/deploy-manual/deploying/01-provisioning-hosts.mdx -6,7c6,7 -< - /pgd/5/install-admin/admin-manual/installing/01-provisioning-hosts/ #generated for pgd deploy-config-planning reorg -< - /pgd/5/admin-manual/installing/01-provisioning-hosts/ #generated for pgd deploy-config-planning reorg ---- -> - /pgd/latest/install-admin/admin-manual/installing/01-provisioning-hosts/ #generated for pgd deploy-config-planning reorg -> - /pgd/latest/admin-manual/installing/01-provisioning-hosts/ #generated for pgd deploy-config-planning reorg -diff -r 5/deploy-config/deploy-manual/deploying/02-install-postgres.mdx 5.6/deploy-config/deploy-manual/deploying/02-install-postgres.mdx -6,7c6,7 -< - /pgd/5/install-admin/admin-manual/installing/02-install-postgres/ #generated for pgd deploy-config-planning reorg -< - /pgd/5/admin-manual/installing/02-install-postgres/ #generated for pgd deploy-config-planning reorg ---- -> - /pgd/latest/install-admin/admin-manual/installing/02-install-postgres/ #generated for pgd deploy-config-planning reorg -> - /pgd/latest/admin-manual/installing/02-install-postgres/ #generated for pgd deploy-config-planning reorg -diff -r 5/deploy-config/deploy-manual/deploying/03-configuring-repositories.mdx 5.6/deploy-config/deploy-manual/deploying/03-configuring-repositories.mdx -6,7c6,7 -< - /pgd/5/install-admin/admin-manual/installing/03-configuring-repositories/ #generated for pgd deploy-config-planning reorg -< - /pgd/5/admin-manual/installing/03-configuring-repositories/ #generated for pgd deploy-config-planning reorg ---- -> - /pgd/latest/install-admin/admin-manual/installing/03-configuring-repositories/ #generated for pgd deploy-config-planning reorg -> - /pgd/latest/admin-manual/installing/03-configuring-repositories/ #generated for pgd deploy-config-planning reorg -14c14 -< Perform the following operations on each host. For the purposes of this exercise, each host is a standard data node, but the procedure would be the same for other [node types](../../../node_management/node_types), such as witness or subscriber-only nodes. ---- -> Perform the following operations on each host. For the purposes of this exercise, each host is a standard data node, but the procedure would be the same for other [node types](/pgd/latest/nodes/overview), such as witness or subscriber-only nodes. 
-diff -r 5/deploy-config/deploy-manual/deploying/04-installing-software.mdx 5.6/deploy-config/deploy-manual/deploying/04-installing-software.mdx -6,7c6,7 -< - /pgd/5/install-admin/admin-manual/installing/04-installing-software/ #generated for pgd deploy-config-planning reorg -< - /pgd/5/admin-manual/installing/04-installing-software/ #generated for pgd deploy-config-planning reorg ---- -> - /pgd/latest/install-admin/admin-manual/installing/04-installing-software/ #generated for pgd deploy-config-planning reorg -> - /pgd/latest/admin-manual/installing/04-installing-software/ #generated for pgd deploy-config-planning reorg -diff -r 5/deploy-config/deploy-manual/deploying/05-creating-cluster.mdx 5.6/deploy-config/deploy-manual/deploying/05-creating-cluster.mdx -6,7c6,7 -< - /pgd/5/install-admin/admin-manual/installing/05-creating-cluster/ #generated for pgd deploy-config-planning reorg -< - /pgd/5/admin-manual/installing/05-creating-cluster/ #generated for pgd deploy-config-planning reorg ---- -> - /pgd/latest/install-admin/admin-manual/installing/05-creating-cluster/ #generated for pgd deploy-config-planning reorg -> - /pgd/latest/admin-manual/installing/05-creating-cluster/ #generated for pgd deploy-config-planning reorg -84c84 -< Call the [`bdr.create_node`](/pgd/5/reference/nodes-management-interfaces#bdrcreate_node) function to create a node, passing it the node name and a connection string that other nodes can use to connect to it. ---- -> Call the [`bdr.create_node`](/pgd/latest/reference/nodes-management-interfaces#bdrcreate_node) function to create a node, passing it the node name and a connection string that other nodes can use to connect to it. -92c92 -< Call the [`bdr.create_node_group`](/pgd/5/reference/nodes-management-interfaces#bdrcreate_node_group) function to create a top-level group for your PGD cluster. Passing a single string parameter creates the top-level group with that name. This example creates a top-level group named `pgd`. ---- -> Call the [`bdr.create_node_group`](/pgd/latest/reference/nodes-management-interfaces#bdrcreate_node_group) function to create a top-level group for your PGD cluster. Passing a single string parameter creates the top-level group with that name. This example creates a top-level group named `pgd`. -104c104 -< Call the [`bdr.create_node_group`](/pgd/5/reference/nodes-management-interfaces#bdrcreate_node_group) function again to create a subgroup of the top-level group. ---- -> Call the [`bdr.create_node_group`](/pgd/latest/reference/nodes-management-interfaces#bdrcreate_node_group) function again to create a subgroup of the top-level group. -124c124 -< Call the [`bdr.create_node`](/pgd/5/reference/nodes-management-interfaces#bdrcreate_node) function to create this node, passing it the node name and a connection string that other nodes can use to connect to it. ---- -> Call the [`bdr.create_node`](/pgd/latest/reference/nodes-management-interfaces#bdrcreate_node) function to create this node, passing it the node name and a connection string that other nodes can use to connect to it. -132c132 -< Using [`bdr.join_node_group`](/pgd/5/reference/nodes-management-interfaces#bdrjoin_node_group), you can ask node-two to join node-one's `dc1` group. The function takes as a first parameter the connection string of a node already in the group and the group name as a second parameter. ---- -> Using [`bdr.join_node_group`](/pgd/latest/reference/nodes-management-interfaces#bdrjoin_node_group), you can ask node-two to join node-one's `dc1` group. 
The function takes as a first parameter the connection string of a node already in the group and the group name as a second parameter. -149c149 -< Call the [`bdr.create_node`](/pgd/5/reference/nodes-management-interfaces#bdrcreate_node) function to create this node, passing it the node name and a connection string that other nodes can use to connect to it. ---- -> Call the [`bdr.create_node`](/pgd/latest/reference/nodes-management-interfaces#bdrcreate_node) function to create this node, passing it the node name and a connection string that other nodes can use to connect to it. -157c157 -< Using [`bdr.join_node_group`](/pgd/5/reference/nodes-management-interfaces#bdrjoin_node_group), you can ask node-three to join node-one's `dc1` group. The function takes as a first parameter the connection string of a node already in the group and the group name as a second parameter. ---- -> Using [`bdr.join_node_group`](/pgd/latest/reference/nodes-management-interfaces#bdrjoin_node_group), you can ask node-three to join node-one's `dc1` group. The function takes as a first parameter the connection string of a node already in the group and the group name as a second parameter. -diff -r 5/deploy-config/deploy-manual/deploying/06-check-cluster.mdx 5.6/deploy-config/deploy-manual/deploying/06-check-cluster.mdx -6,7c6,7 -< - /pgd/5/install-admin/admin-manual/installing/06-check-cluster/ #generated for pgd deploy-config-planning reorg -< - /pgd/5/admin-manual/installing/06-check-cluster/ #generated for pgd deploy-config-planning reorg ---- -> - /pgd/latest/install-admin/admin-manual/installing/06-check-cluster/ #generated for pgd deploy-config-planning reorg -> - /pgd/latest/admin-manual/installing/06-check-cluster/ #generated for pgd deploy-config-planning reorg -diff -r 5/deploy-config/deploy-manual/deploying/07-configure-proxies.mdx 5.6/deploy-config/deploy-manual/deploying/07-configure-proxies.mdx -6,7c6,7 -< - /pgd/5/install-admin/admin-manual/installing/07-configure-proxies/ #generated for pgd deploy-config-planning reorg -< - /pgd/5/admin-manual/installing/07-configure-proxies/ #generated for pgd deploy-config-planning reorg ---- -> - /pgd/latest/install-admin/admin-manual/installing/07-configure-proxies/ #generated for pgd deploy-config-planning reorg -> - /pgd/latest/admin-manual/installing/07-configure-proxies/ #generated for pgd deploy-config-planning reorg -24,26c24,26 -< * Logging in and setting the `enable_raft` and `enable_proxy_routing` node group options to `true` for the subgroup. Use [`bdr.alter_node_group_option`](/pgd/5/reference/nodes-management-interfaces#bdralter_node_group_option), passing the subgroup name, option name, and new value as parameters. -< * Create as many uniquely named proxies as you plan to deploy using [`bdr.create_proxy`](/pgd/5/reference/routing#bdrcreate_proxy) and passing the new proxy name and the subgroup to attach it to. The [`bdr.create_proxy`](/pgd/5/reference/routing#bdrcreate_proxy) does not create a proxy, but creates a space for a proxy to register itself with the cluster. The space contains configuration values which can be modified later. Initially it is configured with default proxy options such as setting the `listen_address` to `0.0.0.0`. -< * Configure proxy routes to each node by setting route_dsn for each node in the subgroup. The route_dsn is the connection string that the proxy should use to connect to that node. 
Use [`bdr.alter_node_option`](/pgd/5/reference/nodes-management-interfaces#bdralter_node_option) to set the route_dsn for each node in the subgroup. ---- -> * Logging in and setting the `enable_raft` and `enable_proxy_routing` node group options to `true` for the subgroup. Use [`bdr.alter_node_group_option`](/pgd/latest/reference/nodes-management-interfaces#bdralter_node_group_option), passing the subgroup name, option name, and new value as parameters. -> * Create as many uniquely named proxies as you plan to deploy using [`bdr.create_proxy`](/pgd/latest/reference/routing#bdrcreate_proxy) and passing the new proxy name and the subgroup to attach it to. The [`bdr.create_proxy`](/pgd/latest/reference/routing#bdrcreate_proxy) does not create a proxy, but creates a space for a proxy to register itself with the cluster. The space contains configuration values which can be modified later. Initially it is configured with default proxy options such as setting the `listen_address` to `0.0.0.0`. -> * Configure proxy routes to each node by setting route_dsn for each node in the subgroup. The route_dsn is the connection string that the proxy should use to connect to that node. Use [`bdr.alter_node_option`](/pgd/latest/reference/nodes-management-interfaces#bdralter_node_option) to set the route_dsn for each node in the subgroup. -56c56 -< You can use the [`bdr.node_group_summary`](/pgd/5/reference/catalogs-visible#bdrnode_group_summary) view to check the status of options previously set with `bdr.alter_node_group_option()`: ---- -> You can use the [`bdr.node_group_summary`](/pgd/latest/reference/catalogs-visible#bdrnode_group_summary) view to check the status of options previously set with `bdr.alter_node_group_option()`: -83c83 -< You can use the [`bdr.proxy_config_summary`](/pgd/5/reference/catalogs-internal#bdrproxy_config_summary) view to check that the proxies were created: ---- -> You can use the [`bdr.proxy_config_summary`](/pgd/latest/reference/catalogs-internal#bdrproxy_config_summary) view to check that the proxies were created: -diff -r 5/deploy-config/deploy-manual/deploying/08-using-pgd-cli.mdx 5.6/deploy-config/deploy-manual/deploying/08-using-pgd-cli.mdx -6,7c6,7 -< - /pgd/5/install-admin/admin-manual/installing/08-using-pgd-cli/ #generated for pgd deploy-config-planning reorg -< - /pgd/5/admin-manual/installing/08-using-pgd-cli/ #generated for pgd deploy-config-planning reorg ---- -> - /pgd/latest/install-admin/admin-manual/installing/08-using-pgd-cli/ #generated for pgd deploy-config-planning reorg -> - /pgd/latest/admin-manual/installing/08-using-pgd-cli/ #generated for pgd deploy-config-planning reorg -diff -r 5/deploy-config/deploy-manual/deploying/index.mdx 5.6/deploy-config/deploy-manual/deploying/index.mdx -14,15c14,15 -< - /pgd/5/install-admin/admin-manual/installing/ #generated for pgd deploy-config-planning reorg -< - /pgd/5/admin-manual/installing/ #generated for pgd deploy-config-planning reorg ---- -> - /pgd/latest/install-admin/admin-manual/installing/ #generated for pgd deploy-config-planning reorg -> - /pgd/latest/admin-manual/installing/ #generated for pgd deploy-config-planning reorg -diff -r 5/deploy-config/deploy-tpa/deploying/01-configuring.mdx 5.6/deploy-config/deploy-tpa/deploying/01-configuring.mdx -5,6c5,6 -< - /pgd/5/install-admin/admin-tpa/installing/01-configuring/ #generated for pgd deploy-config-planning reorg -< - /pgd/5/admin-tpa/installing/01-configuring/ #generated for pgd deploy-config-planning reorg ---- -> - 
/pgd/latest/install-admin/admin-tpa/installing/01-configuring/ #generated for pgd deploy-config-planning reorg -> - /pgd/latest/admin-tpa/installing/01-configuring/ #generated for pgd deploy-config-planning reorg -diff -r 5/deploy-config/deploy-tpa/deploying/02-deploying.mdx 5.6/deploy-config/deploy-tpa/deploying/02-deploying.mdx -5,6c5,6 -< - /pgd/5/install-admin/admin-tpa/installing/02-deploying/ #generated for pgd deploy-config-planning reorg -< - /pgd/5/admin-tpa/installing/02-deploying/ #generated for pgd deploy-config-planning reorg ---- -> - /pgd/latest/install-admin/admin-tpa/installing/02-deploying/ #generated for pgd deploy-config-planning reorg -> - /pgd/latest/admin-tpa/installing/02-deploying/ #generated for pgd deploy-config-planning reorg -diff -r 5/deploy-config/deploy-tpa/deploying/index.mdx 5.6/deploy-config/deploy-tpa/deploying/index.mdx -7,9c7,9 -< - /pgd/5/tpa/ -< - /pgd/5/deployments/tpaexec/using_tpaexec/ -< - /pgd/5/tpa/using_tpa/ ---- -> - /pgd/latest/tpa/ -> - /pgd/latest/deployments/tpaexec/using_tpaexec/ -> - /pgd/latest/tpa/using_tpa/ -14,15c14,15 -< - /pgd/5/install-admin/admin-tpa/installing/ #generated for pgd deploy-config-planning reorg -< - /pgd/5/admin-tpa/installing/ #generated for pgd deploy-config-planning reorg ---- -> - /pgd/latest/install-admin/admin-tpa/installing/ #generated for pgd deploy-config-planning reorg -> - /pgd/latest/admin-tpa/installing/ #generated for pgd deploy-config-planning reorg -25c25 -< If you want to experiment with a local deployment as quickly as possible, you can [deploy an EDB Postgres Distributed example cluster on Docker](/pgd/5/quickstart/quick_start_docker) to configure, provision, and deploy a PGD 5 Always-on cluster on Docker. ---- -> If you want to experiment with a local deployment as quickly as possible, you can [deploy an EDB Postgres Distributed example cluster on Docker](/pgd/latest/quickstart/quick_start_docker) to configure, provision, and deploy a PGD 5 Always-on cluster on Docker. -27c27 -< If deploying to the cloud is your aim, you can [deploy an EDB Postgres Distributed example cluster on AWS](/pgd/5/quickstart/quick_start_aws) to get a PGD 5 cluster on your own Amazon account. ---- -> If deploying to the cloud is your aim, you can [deploy an EDB Postgres Distributed example cluster on AWS](/pgd/latest/quickstart/quick_start_aws) to get a PGD 5 cluster on your own Amazon account. -29c29 -< If you want to run on your own Linux systems or VMs, you can also use TPA to [deploy EDB Postgres Distributed directly to your own Linux hosts](/pgd/5/quickstart/quick_start_linux). ---- -> If you want to run on your own Linux systems or VMs, you can also use TPA to [deploy EDB Postgres Distributed directly to your own Linux hosts](/pgd/latest/quickstart/quick_start_linux). -diff -r 5/deploy-config/deploy-tpa/index.mdx 5.6/deploy-config/deploy-tpa/index.mdx -5,6c5,6 -< - /pgd/5/install-admin/admin-tpa/ #generated for pgd deploy-config-planning reorg -< - /pgd/5/admin-tpa/ #generated for pgd deploy-config-planning reorg ---- -> - /pgd/latest/install-admin/admin-tpa/ #generated for pgd deploy-config-planning reorg -> - /pgd/latest/admin-tpa/ #generated for pgd deploy-config-planning reorg -diff -r 5/deploy-config/index.mdx 5.6/deploy-config/index.mdx -9c9 -< - deploy-cloudservice ---- -> - deploy-biganimal -17a18 -> -19a21 -> -22c24,25 -< * [EDB BigAnimal](deploy-biganimal) describes how to deploy and configure EDB Postgres Distributed on the EDB BigAnimal service. 
---- -> -> * [EDB Postgres AI Cloud Service](deploy-cloudservice) describes how to deploy and configure EDB Postgres Distributed on the EDB Postgres AI Cloud Service. -Only in 5: durability -diff -r 5/index.mdx 5.6/index.mdx -3a4,7 -> redirects: -> - /pgd/5/compatibility_matrix -> - /pgd/latest/bdr -> - /edb-postgres-ai/migration-etl/pgd/ -6a11 -> - compatibility -31,32c36,38 -< - durability -< - consistency ---- -> - nodes -> - commit-scopes -> - conflict-management -37a44 -> - decoding_worker -42d48 -< - compatibility -66d71 -< -69,70c74 -< [Group Commit](durability/group-commit), [CAMO](durability/camo), or -< [Eager Replication](durability/group-commit#eager-conflict-resolution). ---- -> [Synchronous Commit](/pgd/latest/commit-scopes/synchronous_commit/), [Group Commit](commit-scopes/group-commit) (optionally with [Eager Conflict Resolution](/pgd/latest/commit-scopes/group-commit/#eager-conflict-resolution)), or [CAMO](commit-scopes/camo). -74,85c78 -< EDB Postgres Distributed 5 is compatible with the package versions shown in the table. -< -< Package | Versions -< -------- | ------------ -< Community PostgreSQL | 12-16 -< EDB Postgres Extended Server | 12-16 -< EDB Postgres Advanced Server | 12-16 -< -< !!! Note Postgres 16 support -< Postgres 16 support is available only in EDB Postgres Distributed 5.3 and later. -< !!! -< ---- -> EDB Postgres Distributed 5 is compatible with PostgreSQL, EDB Postgres Extended, and EDB Postgres Advanced versions 12-17. See [Compatibility](compatibility) for more details, including information about compatibility with different operating systems and architectures. -87a81 -> --- -diff -r 5/known_issues.mdx 5.6/known_issues.mdx -11,15c11,23 -< - If the resolver for the `update_origin_change` conflict is set to `skip`, `synchronous_commit=remote_apply` is used, and concurrent updates of the same row are repeatedly applied on two different nodes, then one of the update statements might hang due -< to a deadlock with the PGD writer. -< As mentioned in [Conflicts](consistency/conflicts/), `skip` isn't the default resolver for the `update_origin_change` conflict, and this combination isn't intended to be used in production. -< It discards one of the two conflicting updates based on the order of arrival on that node, which is likely to cause a divergent cluster. -< In the rare situation that you do choose to use the `skip` conflict resolver, note the issue with the use of the `remote_apply` mode. ---- -> - If the resolver for the `update_origin_change` conflict -> is set to `skip`, `synchronous_commit=remote_apply` is used, and -> concurrent updates of the same row are repeatedly applied on two -> different nodes, then one of the update statements might hang due -> to a deadlock with the PGD writer. As mentioned in -> [Conflicts](conflict-management/conflicts/), `skip` isn't the default -> resolver for the `update_origin_change` conflict, and this -> combination isn't intended to be used in production. It discards -> one of the two conflicting updates based on the order of arrival -> on that node, which is likely to cause a divergent cluster. -> In the rare situation that you do choose to use the `skip` -> conflict resolver, note the issue with the use of the -> `remote_apply` mode. -29c37 -< - Group Commit can't be combined with [CAMO](durability/camo/). ---- -> - Group Commit can't be combined with [CAMO](commit-scopes/camo/). -33c41 -< - Parallel Apply isn't currently supported in combination with Group Commit. 
Make sure to disable it when using Group Commit by either (a) Setting `num_writers` to 1 for the node group using [`bdr.alter_node_group_option`](/pgd/5/reference/nodes-management-interfaces/#bdralter_node_group_option) or (b) using the GUC [`bdr.writers_per_subscription`](/pgd/5/reference/pgd-settings#bdrwriters_per_subscription). See [Configuration of generic replication](/pgd/5/reference/pgd-settings#generic-replication). ---- -> - Parallel Apply isn't currently supported in combination with Group Commit. Make sure to disable it when using Group Commit by either (a) Setting `num_writers` to 1 for the node group using [`bdr.alter_node_group_option`](/pgd/latest/reference/nodes-management-interfaces/#bdralter_node_group_option) or (b) using the GUC [`bdr.writers_per_subscription`](/pgd/latest/reference/pgd-settings#bdrwriters_per_subscription). See [Configuration of generic replication](/pgd/latest/reference/pgd-settings#generic-replication). -42,43c50 -< - When using [`bdr.add_commit_scope`](/pgd/5/reference/functions#bdradd_commit_scope), if a new commit scope is added that has the same name as a commit scope on any group, then the commit scope silently overwrites the commit scope but retains the original group the scope was associated with (if any). -< To modify a commit scope safely, use [`bdr.alter_commit_scope`](/pgd/5/reference/functions#bdralter_commit_scope). ---- -> To modify a commit scope safely, use [`bdr.alter_commit_scope`](/pgd/latest/reference/functions#bdralter_commit_scope). -45,46c52,56 -< - Tables configured with `REPLICA IDENTITY FULL` and include `box`, `polygon` or `json` types in their columns are using PGD aren't able to be replicated. -< You can mitigate this issue by configuring a primary key for `REPLICA IDENTITY` to use or, for `json` columns only, using the `jsonb` type instead. ---- -> - DDL run in serializable transactions can face the error: `ERROR: could not serialize access due to read/write dependencies among transactions`. A workaround is to run the DDL outside serializable transactions. -> -> - The EBD Postgres Advanced Server 17 data type [`BFILE`](/epas/latest/reference/sql_reference/02_data_types/03a_bfiles/) is not currently supported. This is due to `BFILE` being a file reference that is stored in the database, and the file itself is stored outside the database and not replicated. -> -> - EDB Postgres Advanced Server's native autopartioning is not supported in PGD. See [Restrictions on EDB Postgres Advanced Server-native automatic partitioning](scaling#restrictions-on-edb-postgres-advanced-server-native-automatic-partitioning) for more information. -diff -r 5/monitoring/sql.mdx 5.6/monitoring/sql.mdx -77c77 -< Also, the table [`bdr.node_catchup_info`](/pgd/5/reference/catalogs-visible/#bdrnode_catchup_info) gives information ---- -> Also, the table [`bdr.node_catchup_info`](/pgd/latest/reference/catalogs-visible/#bdrnode_catchup_info) gives information -92a93,109 -> ## Monitoring the manager worker -> -> The manager worker is responsible for many background tasks, including the managing of all the other workers. As such it is important to know what it's doing, especially in cases where it might seem stuck. -> -> Accordingly, the [`bdr.stat_worker`](/pgd/latest/reference/catalogs-visible/#bdrstat_worker) view provides per worker statistics for PGD workers, including manager workers. With respect to ensuring manager workers do not get stuck, the current task they are executing would be reported in their `query` field prefixed by "pgd manager:". 
-> -> The `worker_backend_state` field for manager workers also reports whether the manager is idle or busy. -> -> ## Monitoring Routing -> -> Routing is a critical part of PGD for ensuring a seemless application experience and conflict avoidance. Routing changes should happen quickly, including the detections of failures. At the same time we want to have as few disruptions as possible. We also want to ensure good load balancing for use-cases where it's supported. -> -> Monitoring all of these is important for noticing issues, debugging issues, as well as informing more optimal configurations. Accoringly, there are two main views for monitoring statistics to do with routing: -> -> - [`bdr.stat_routing_state`](/pgd/latest/reference/catalogs-visible/#bdrstat_routing_state) for monitoring the state of the connection routing with PGD Proxy uses to route the connections. -> - [`bdr.stat_routing_candidate_state`](/pgd/latest/reference/catalogs-visible/#bdrstat_routing_candidate_state) for information about routing candidate nodes from the point of view of the Raft leader (the view is empty on other nodes). -> -97,98c114,115 -< - [`bdr.node_slots`](/pgd/5/reference/catalogs-visible/#bdrnode_slots) for monitoring outgoing replication -< - [`bdr.subscription_summary`](/pgd/5/reference/catalogs-visible/#bdrsubscription_summary) for monitoring incoming replication ---- -> - [`bdr.node_slots`](/pgd/latest/reference/catalogs-visible/#bdrnode_slots) for monitoring outgoing replication -> - [`bdr.subscription_summary`](/pgd/latest/reference/catalogs-visible/#bdrsubscription_summary) for monitoring incoming replication -114c131 -< - [`bdr.node_replication_rates`](/pgd/5/reference/catalogs-visible/#bdrnode_replication_rates) for monitoring outgoing replication ---- -> - [`bdr.node_replication_rates`](/pgd/latest/reference/catalogs-visible/#bdrnode_replication_rates) for monitoring outgoing replication -116c133 -< The [`bdr.node_replication_rates`](/pgd/5/reference/catalogs-visible/#bdrnode_replication_rates) view gives an overall picture of the outgoing ---- -> The [`bdr.node_replication_rates`](/pgd/latest/reference/catalogs-visible/#bdrnode_replication_rates) view gives an overall picture of the outgoing -149c166 -< local node data. The other fields are also available from the [`bdr.node_slots`](/pgd/5/reference/catalogs-visible/#bdrnode_slots) ---- -> local node data. The other fields are also available from the [`bdr.node_slots`](/pgd/latest/reference/catalogs-visible/#bdrnode_slots) -152c169 -< Administrators can query [`bdr.node_slots`](/pgd/5/reference/catalogs-visible/#bdrnode_slots) for outgoing replication from the ---- -> Administrators can query [`bdr.node_slots`](/pgd/latest/reference/catalogs-visible/#bdrnode_slots) for outgoing replication from the -247c264 -< You can monitor incoming replication (also called subscriptions) by querying ---- -> You can monitor incoming replication (also called subscriptions) at a high level by querying -268a286,287 -> You can further monitor subscriptions by monitoring subscription summary statistics through [`bdr.stat_subscription`](/pgd/latest/reference/catalogs-visible/#bdrstat_subscription), and by monitoring the subscription replication receivers and subscription replication writers, using [`bdr.stat_receiver`](/pgd/latest/reference/catalogs-visible/#bdrstat_receiver) and [`bdr.stat_writer`](/pgd/latest/reference/catalogs-visible/#bdrstat_writer), respectively. 
-> -271c290 -< If the [decoding worker](../node_management/decoding_worker/) is enabled, you can monitor information about the ---- -> If the [decoding worker](../decoding_worker/) is enabled, you can monitor information about the -273c292 -< using the function [`bdr.wal_sender_stats()`](/pgd/5/reference/functions/#bdrwal_sender_stats). For example: ---- -> using the function [`bdr.wal_sender_stats()`](/pgd/latest/reference/functions/#bdrwal_sender_stats). For example: -287c306 -< serving a [logical standby](../node_management/logical_standby_nodes/). ---- -> serving a [logical standby](../nodes/logical_standby_nodes/). -290c309 -< [`bdr.get_decoding_worker_stat()`](/pgd/5/reference/functions/#bdrget_decoding_worker_stat). For example: ---- -> [`bdr.get_decoding_worker_stat()`](/pgd/latest/reference/functions/#bdrget_decoding_worker_stat). For example: -310c329 -< when a writer process is executing DDL. ---- -> when a writer process is executing DDL, or for when a manager worker is active (in which case the entry in the `query` column will be prefixed with "`pgd manager:`"). -342a362,371 -> ## Monitoring commit scopes -> -> Commit scopes are our durability and consistency configuration framework. As such, they affect the performance of transactions, so it is important to get statistics on them. Moreover, because in failure scenarios transactions might appear to be stuck due to the commit scope configuration, we need insight into what commit scope is being used, what it's waiting on, and so on. -> -> Accordingly, these two views show relevant statistics about commit scopes: -> -> - [bdr.stat_commit_scope](/pgd/latest/reference/catalogs-visible/#bdrstat_commit_scope) for cumulative statistics for each commit scope. -> -> - [bdr.stat_commit_scope_state](/pgd/latest/reference/catalogs-visible/#bdrstat_commit_scope_state) for information about the current use of commit scopes by backend processes. -> -358c387 -< Global locks held on the local node are visible in the [`bdr.global_locks`](/pgd/5/reference/catalogs-visible/#bdrglobal_locks) view. ---- -> Global locks held on the local node are visible in the [`bdr.global_locks`](/pgd/latest/reference/catalogs-visible/#bdrglobal_locks) view. -380c409 -< See [Catalogs](/pgd/5/reference/catalogs-visible/) for details on all fields, including lock ---- -> See [Catalogs](/pgd/latest/reference/catalogs-visible/) for details on all fields, including lock -385c414 -< Replication [conflicts](../consistency/conflicts) can arise when multiple nodes make ---- -> Replication [conflicts](../conflict-management/conflicts) can arise when multiple nodes make -395c424 -< For monitoring purposes, use `bdr.conflict_history_summary`, which doesn't ---- -> For monitoring purposes, use [`bdr.conflict_history_summary`](/pgd/latest/reference/catalogs-visible#bdrconflict_history_summary), which doesn't -411,412c440,441 -< Two monitoring views exist: `bdr.stat_subscription` for subscription statistics -< and `bdr.stat_relation` for relation statistics. These views both provide: ---- -> Two monitoring views exist: [`bdr.stat_subscription`](/pgd/latest/reference/catalogs-visible#bdrstat_subscription) for subscription statistics -> and [`bdr.stat_relation`](/pgd/latest/reference/catalogs-visible#bdrstat_relation) for relation statistics. 
These views both provide: -421c450 -< For relations only, `bdr.stat_relation` also includes: ---- -> For relations only, [`bdr.stat_relation`](/pgd/latest/reference/catalogs-visible#bdrstat_relation) also includes: -426c455 -< For subscriptions only, `bdr.stat_subscription` includes: ---- -> For subscriptions only, [`bdr.stat_subscription`](/pgd/latest/reference/catalogs-visible#bdrstat_subscription) includes: -432c461 -< `bdr.track_subscription_apply` and `bdr.track_relation_apply`, ---- -> [`bdr.track_subscription_apply`](/pgd/latest/reference/pgd-settings#bdrtrack_subscription_apply) and [`bdr.track_relation_apply`](/pgd/latest/reference/pgd-settings#bdrtrack_relation_apply), -454,455c483 -< You can reset the stats counters for these views to zero using the functions -< `bdr.reset_subscription_stats` and `bdr.reset_relation_stats`. ---- -> You can reset the stats counters for these views to zero using the functions [`bdr.reset_subscription_stats`](/pgd/latest/reference/functions-internal#bdrreset_subscription_stats) and [`bdr.reset_relation_stats`](/pgd/latest/reference/functions-internal#bdrreset_relation_stats). -456a485,486 -> PGD also monitors statistics regarding subscription replication receivers and subscription replication writers for each subscription, using [`bdr.stat_receiver`](/pgd/latest/reference/catalogs-visible/#bdrstat_receiver) and [`bdr.stat_writer`](/pgd/latest/reference/catalogs-visible/#bdrstat_writer), respectively. -> -497,498c527,528 -< The view `bdr.group_versions_details` uses the function -< `bdr.run_on_all_nodes()` to retrieve Postgres and BDR extension versions from all ---- -> The view [`bdr.group_versions_details`](/pgd/latest/reference/catalogs-visible#bdrgroup_versions_details) uses the function -> [`bdr.run_on_all_nodes()`](/pgd/latest/reference/functions#bdrrun_on_all_nodes) to retrieve Postgres and BDR extension versions from all -523c553 -< `bdr.monitor_group_versions()`, which uses PGD version ---- -> [`bdr.monitor_group_versions()`](/pgd/latest/reference/functions#bdrmonitor_group_versions), which uses PGD version -550,551c580,581 -< The view `bdr.group_raft_details` uses the functions -< `bdr.run_on_all_nodes()` and `bdr.get_raft_status()` to retrieve Raft ---- -> The view [`bdr.group_raft_details`](/pgd/latest/reference/catalogs-visible#bdrgroup_raft_details) uses the functions -> [`bdr.run_on_all_nodes()`](/pgd/latest/reference/functions#bdrrun_on_all_nodes) and [`bdr.get_raft_status()`](/pgd/latest/reference/functions#bdrget_raft_status) to retrieve Raft -618,619c648,649 -< `bdr.monitor_group_raft()`, which uses Raft consensus status -< information returned from the view `bdr.group_raft_details` ---- -> [`bdr.monitor_group_raft()`](/pgd/latest/reference/functions#bdrmonitor_group_raft), which uses Raft consensus status -> information returned from the view [`bdr.group_raft_details`](/pgd/latest/reference/catalogs-visible#bdrgroup_raft_details) -626c656 -< myroup | OK | Raft Consensus is working correctly ---- -> mygroup | OK | Raft Consensus is working correctly -628a659,660 -> Two further views that can give a finer-grained look at the state of Raft consensus are [`bdr.stat_raft_state`](/pgd/latest/reference/catalogs-visible/#bdrstat_raft_state), which provides the state of the Raft consensus on the local node, and [`bdr.stat_raft_followers_state`](/pgd/latest/reference/catalogs-visible/#bdrstat_raft_followers_state), which provides a view when on the Raft leader (it is empty on other nodes) regarding the state of the followers of that Raft 
leader. -> -652c684 -< `bdr.local_group_slot_name()`. ---- -> [`bdr.local_group_slot_name()`](/pgd/latest/reference/functions#bdrlocal_group_slot_name). -669,670c701,702 -< The function `bdr.monitor_local_replslots()` provides a summary of whether all -< PGD node replication slots are working as expected. For example: ---- -> The function [`bdr.monitor_local_replslots()`](/pgd/latest/reference/functions#bdrmonitor_local_replslots) provides a summary of whether all -> PGD node replication slots are working as expected. This summary is also available on subscriber-only nodes that are operating as subscriber-only group leaders in a PGD cluster when [optimized topology](../nodes/subscriber_only/optimizing-so) is enabled. For example: -681,685c713,719 -< - `UNKNOWN`: `This node is not part of any BDR group` -< - `OK`: `All BDR replication slots are working correctly` -< - `OK`: `This node is part of a subscriber-only group` -< - `CRITICAL`: `There is at least 1 BDR replication slot which is inactive` -< - `CRITICAL`: `There is at least 1 BDR replication slot which is missing` ---- -> | Status | Message | -> |----------|------------------------------------------------------------| -> | UNKNOWN | This node is not part of any BDR group | -> | OK | All BDR replication slots are working correctly | -> | OK | This node is part of a subscriber-only group | -> | CRITICAL | There is at least 1 BDR replication slot which is inactive | -> | CRITICAL | There is at least 1 BDR replication slot which is missing | -686a721 -> -691,692c726,727 -< PGD's [Commit Scopes](../durability/commit-scopes) feature offers a range of synchronous transaction commit scopes that allow you to balance durability, consistency, and performance for your particular queries. -< You can monitor these transactions by examining the [`bdr.stat_activity`](/pgd/5/reference/catalogs-visible#bdrstat_activity) catalog. The processes report different `wait_event` states as a transaction is committed. This monitoring only covers transactions in progress and doesn't provide historical timing information. ---- -> PGD's [Commit Scopes](../commit-scopes/commit-scopes) feature offers a range of synchronous transaction commit scopes that allow you to balance durability, consistency, and performance for your particular queries. -> You can monitor these transactions by examining the [`bdr.stat_activity`](/pgd/latest/reference/catalogs-visible#bdrstat_activity) catalog. The processes report different `wait_event` states as a transaction is committed. This monitoring only covers transactions in progress and doesn't provide historical timing information. 
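A short sketch of the checks described in the hunks above, assuming only the objects already named there (`bdr.monitor_local_replslots()` and `bdr.stat_activity`, which exposes the same columns as `pg_stat_activity`):

```sql
-- Summary status/message for all local PGD replication slots.
SELECT * FROM bdr.monitor_local_replslots();

-- Wait states of in-progress transactions, including commit-scope waits;
-- this only covers transactions in flight, not historical timings.
SELECT pid, wait_event_type, wait_event, query
FROM bdr.stat_activity
WHERE wait_event IS NOT NULL;
```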
-diff -r 5/node_management/creating_and_joining.mdx 5.6/node_management/creating_and_joining.mdx -21c21 -< [`bdr.create_node_group()`](/pgd/5/reference/nodes-management-interfaces#bdrcreate_node_group) ---- -> [`bdr.create_node_group()`](/pgd/latest/reference/nodes-management-interfaces#bdrcreate_node_group) -29c29 -< [`bdr.join_node_group()`](/pgd/5/reference/nodes-management-interfaces#bdrjoin_node_group) ---- -> [`bdr.join_node_group()`](/pgd/latest/reference/nodes-management-interfaces#bdrjoin_node_group) -33c33 -< [bdr_init_physical](/pgd/5/reference/nodes/#bdr_init_physical) to create a ---- -> [bdr_init_physical](/pgd/latest/reference/nodes/#bdr_init_physical) to create a -65c65 -< [`bdr.join_node_group`](/pgd/5/reference/nodes-management-interfaces#bdrjoin_node_group) ---- -> [`bdr.join_node_group`](/pgd/latest/reference/nodes-management-interfaces#bdrjoin_node_group) -76c76 -< [`bdr.join_node_group`](/pgd/5/reference/nodes-management-interfaces#bdrjoin_node_group) ---- -> [`bdr.join_node_group`](/pgd/latest/reference/nodes-management-interfaces#bdrjoin_node_group) -102c102 -< because of cache-lookup failures. Since [`bdr.join_node_group`](/pgd/5/reference/nodes-management-interfaces#bdrjoin_node_group) uses pg_dump ---- -> because of cache-lookup failures. Since [`bdr.join_node_group`](/pgd/latest/reference/nodes-management-interfaces#bdrjoin_node_group) uses pg_dump -Only in 5.6/node_management: creating_nodes.mdx -Only in 5/node_management: decoding_worker.mdx -diff -r 5/node_management/groups_and_subgroups.mdx 5.6/node_management/groups_and_subgroups.mdx -20c20 -< example, a [subscriber-only](subscriber_only) sub-group will make all the nodes ---- -> example, a [subscriber-only](../nodes/subscriber_only) sub-group will make all the nodes -diff -r 5/node_management/heterogeneous_clusters.mdx 5.6/node_management/heterogeneous_clusters.mdx -25c25 -< physical backup taken with [`bdr_init_physical`](/pgd/5/reference/nodes#bdr_init_physical), and the node must join ---- -> physical backup taken with [`bdr_init_physical`](/pgd/latest/reference/nodes#bdr_init_physical), and the node must join -diff -r 5/node_management/index.mdx 5.6/node_management/index.mdx -6c6 -< - node_types ---- -> - creating_nodes -9,12d8 -< - witness_nodes -< - logical_standby_nodes -< - physical_standby_nodes -< - subscriber_only -17d12 -< - decoding_worker -20,21d14 -< redirects: -< - /pgd/5/nodes/ -33,34c26,27 -< * [Node types](node_types) covers the kinds of node that can exist in PGD -< clusters. ---- -> * [Creating nodes](creating_nodes) covers the steps needed to create a new node -> in a PGD cluster. -42,51d34 -< * [Witness nodes](witness_nodes) looks at a special class of PGD node, dedicated -< to establishing consensus in a group. -< -< * [Logical standby nodes](logical_standby_nodes) shows how to efficiently keep a -< node on standby synchronized and ready to step in as a primary in the case of -< failure. -< -< * [Subscriber-only nodes and groups](subscriber_only) looks at how subscriber-only nodes work and -< how they're configured. -< -66,68d48 -< * [Decoding worker](decoding_worker) covers a feature of PGD that allows groups -< of nodes to reduce CPU overhead due to replication. -< -72,74c52,54 -< * [Node recovery](node_recovery) details the steps needed to bring a node back -< into service after a failure or scheduled downtime and the impact it has on -< the cluster as it returns. 
---- -> * [Node recovery](node_recovery) details the steps needed to bring a node back -> into service after a failure or scheduled downtime and the impact it has on -> the cluster as it returns. -Only in 5/node_management: logical_standby_nodes.mdx -diff -r 5/node_management/maintainance_with_proxies.mdx 5.6/node_management/maintainance_with_proxies.mdx -42c42 -< For more details, see the [`bdr.node`](/pgd/5/reference/catalogs-visible#bdrnode) table. ---- -> For more details, see the [`bdr.node`](/pgd/latest/reference/catalogs-visible#bdrnode) table. -52c52 -< For more details, see the [`bdr.node_summary`](/pgd/5/reference/catalogs-visible#bdrnode_summary) table. ---- -> For more details, see the [`bdr.node_summary`](/pgd/latest/reference/catalogs-visible#bdrnode_summary) table. -diff -r 5/node_management/node_recovery.mdx 5.6/node_management/node_recovery.mdx -10c10 -< When a node starts up, each connection begins showing up in [`bdr.node_slots`](/pgd/5/reference/catalogs-visible#bdrnode_slots) with ---- -> When a node starts up, each connection begins showing up in [`bdr.node_slots`](/pgd/latest/reference/catalogs-visible#bdrnode_slots) with -43c43 -< (see [Origin conflict detection](../consistency/conflicts)). ---- -> (see [Origin conflict detection](../conflict-management/conflicts)). -Only in 5/node_management: node_types.mdx -diff -r 5/node_management/removing_nodes_and_groups.mdx 5.6/node_management/removing_nodes_and_groups.mdx -13c13 -< Node removal, also called *parting*, is done using the [`bdr.part_node()`](/pgd/5/reference/nodes-management-interfaces#bdrpart_node) ---- -> Node removal, also called *parting*, is done using the [`bdr.part_node()`](/pgd/latest/reference/nodes-management-interfaces#bdrpart_node) -15c15 -< to remove a node. You can call the [`bdr.part_node()`](/pgd/5/reference/nodes-management-interfaces#bdrpart_node) function from any active ---- -> to remove a node. You can call the [`bdr.part_node()`](/pgd/latest/reference/nodes-management-interfaces#bdrpart_node) function from any active -29c29 -< node by using the function [`bdr.drop_node()`](/pgd/5/reference/functions-internal#bdrdrop_node). ---- -> node by using the function [`bdr.drop_node()`](/pgd/latest/reference/functions-internal#bdrdrop_node). -diff -r 5/node_management/replication_slots.mdx 5.6/node_management/replication_slots.mdx -45c45 -< The group slot name is given by the function [`bdr.local_group_slot_name()`](/pgd/5/reference/functions#bdrlocal_group_slot_name). ---- -> The group slot name is given by the function [`bdr.local_group_slot_name()`](/pgd/latest/reference/functions#bdrlocal_group_slot_name). -Only in 5/node_management: subscriber_only.mdx -diff -r 5/node_management/viewing_topology.mdx 5.6/node_management/viewing_topology.mdx -29c29 -< [`bdr.local_node_summary`](/pgd/5/reference/catalogs-visible#bdrlocal_node_summary). ---- -> [`bdr.local_node_summary`](/pgd/latest/reference/catalogs-visible#bdrlocal_node_summary). -88c88 -< from the [`bdr.node_summary`](/pgd/5/reference/catalogs-visible#bdrnode_summary)` view. For example: ---- -> from the [`bdr.node_summary`](/pgd/latest/reference/catalogs-visible#bdrnode_summary)` view. 
For example: -Only in 5/node_management: witness_nodes.mdx -Only in 5.6: nodes -Only in 5.6/overview: architecture-and-performance.mdx -Only in 5.6/overview: basic-architecture.mdx -Only in 5.6/overview: compared.mdx -diff -r 5/overview/index.mdx 5.6/overview/index.mdx -4c4 -< deepToC: true ---- -> indexCards: simple -6a7,10 -> navigation: -> - basic-architecture -> - architecture-and-performance -> - compared -11,123c15,17 -<
-<
-<
-< Read about why PostgreSQL is better when it’s distributed with EDB Postgres Distributed in Distributed PostgreSQL:The Key to Always On Database Availability -<
-<
-<
-< -< -<
-<
-< -< PGD provides loosely coupled, multimaster logical replication using a mesh topology. This means that you can write to any server and the changes are sent directly, row-by-row, to all the other servers that are part of the same PGD group. -< -< By default, PGD uses asynchronous replication, applying changes on the peer nodes only after the local commit. Multiple synchronous replication options are also available. -< -< ## Basic architecture -< -< ### Multiple groups -< -< A PGD node is a member of at least one *node group*. In the most basic architecture, there's a single node group for the whole PGD cluster. -< -< ### Multiple masters -< -< Each node (database) participating in a PGD group both receives changes from other members and can be written to directly by the user. -< -< This is distinct from hot or warm standby, where only one master server accepts writes and all the other nodes are standbys that replicate either from the master or from another standby. -< -< You don't have to write to all the masters all of the time. A frequent configuration directs writes mostly to just one master called the [write leader](../terminology/#write-leader). -< -< ### Asynchronous, by default -< -< Changes made on one PGD node aren't replicated to other nodes until they're committed locally. As a result, the data isn't exactly the same on all nodes at any given time. Some nodes have data that hasn't yet arrived at other nodes. PostgreSQL's block-based replication solutions default to asynchronous replication as well. In PGD, there are multiple masters and, as a result, multiple data streams. So data on different nodes might differ even when `synchronous_commit` and `synchronous_standby_names` are used. -< -< ### Mesh topology -< -< PGD is structured around a mesh network where every node connects to every other node, and all nodes exchange data directly with each other. There's no forwarding of data in PGD except in special circumstances, such as adding and removing nodes. Data can arrive from outside the EDB Postgres Distributed cluster or be sent onward using native PostgreSQL logical replication. -< -< ### Logical replication -< -< Logical replication is a method of replicating data rows and their changes based on their replication identity (usually a primary key). We use the term logical in contrast to physical replication, which uses exact block addresses and byte-by-byte replication. Index changes aren't replicated, thereby avoiding write amplification and reducing bandwidth. -< -< Logical replication starts by copying a snapshot of the data from the source node. Once that's done, later commits are sent to other nodes as they occur in real time. Changes are replicated without executing SQL again, so the exact data written is replicated quickly and accurately. -< -< Nodes apply data in the order in which commits were made on the source node, ensuring transactional consistency is guaranteed for the changes from any single node. Changes from different nodes are applied independently of other nodes to ensure the rapid replication of changes. -< -< Replicated data is sent in binary form when it's safe to do so. -< -< -< ### Connection management -< -< [Connection management](../routing) leverages consensus-driven quorum to determine the correct connection endpoint in a semi-exclusive manner to prevent unintended multi-node writes from an application. This approach reduces the potential for data conflicts. 
The node selected as the correct connection endpoint at any point in time is referred to as the [write leader](../terminology/#write-leader). -< -< [PGD Proxy](../routing/proxy) is the tool for application connection management provided as part of EDB Postgres Distributed. -< -< ### High availability -< -< Each master node can be protected by one or more standby nodes, so any node that goes down can be quickly replaced and continue. Each standby node is a logical standby node. -< (Postgres physical standbys aren't supported by PGD.) -< -< Replication continues between currently connected nodes even if one or more nodes are currently unavailable. When the node recovers, replication can restart from where it left off without missing any changes. -< -< Nodes can run different release levels, negotiating the required protocols to communicate. As a result, EDB Postgres Distributed clusters can use rolling upgrades, even for [major versions](../upgrades/upgrading_major_rolling/) of database software. -< -< DDL is replicated across nodes by default. DDL execution can be user controlled to allow rolling application upgrades, if desired. -< -< ## Architectural options and performance -< -< ### Always-on architectures -< -< A number of different architectures can be configured, each of which has different performance and scalability characteristics. -< -< The group is the basic building block consisting of 2+ nodes (servers). In a group, each node is in a different availability zone, with a dedicated router and backup, giving immediate switchover and high availability. Each group has a dedicated replication set defined on it. If the group loses a node, you can easily repair or replace it by copying an existing node from the group. -< -< The Always-on architectures are built from either one group in a single location or two groups in two separate locations. Each group provides high availability. When two groups are leveraged in remote locations, they together also provide disaster recovery (DR). -< -< Tables are created across both groups, so any change goes to all nodes, not just to nodes in the local group. -< -< One node in each group is selected as the group's write leader. Proxies then direct application writes and queries to the write leader. The other nodes are replicas of the write leader. If, at any point, the write leader is seen to be unavailable, the remaining nodes in the group select a new write leader from the group the proxies direct traffic to that node. Scalability isn't the goal of this architecture. -< -< Since writes are mainly to only one node, the possibility of contention between nodes is reduced to almost zero. As a result, performance impact is much reduced. -< -< Secondary applications might execute against the shadow nodes, although these are reduced or interrupted if the main application begins using that node. -< -< In the future, one node will be elected as the main replicator to other groups, limiting CPU overhead of replication as the cluster grows and minimizing the bandwidth to other groups. -< -< ### Supported Postgres database servers -< -< PGD is compatible with [PostgreSQL](https://www.postgresql.org/), [EDB Postgres Extended Server](/pge/latest), and [EDB Postgres Advanced Server](/epas/latest) and is deployed as a standard Postgres extension named BDR. See [Compatibility](../#compatibility) for details about supported version combinations. -< -< Some key PGD features depend on certain core capabilities being available in the target Postgres database server. 
Therefore, PGD users must also adopt the Postgres database server distribution that's best suited to their business needs. For example, if having the PGD feature Commit At Most Once (CAMO) is mission critical to your use case, don't adopt the community PostgreSQL distribution. It doesn't have the core capability required to handle CAMO. See the full feature matrix compatibility in [Choosing a Postgres distribution](../planning/choosing_server/). -< -< PGD offers close-to-native Postgres compatibility. However, some access patterns don't necessarily work as well in multi-node setup as they do on a single instance. There are also some limitations in what you can safely replicate in a multi-node setting. [Application usage](../appusage) goes into detail about how PGD behaves from an application development perspective. -< -< ### Characteristics affecting performance -< -< By default, PGD keeps one copy of each table on each node in the group, and any changes propagate to all nodes in the group. -< -< Since copies of data are everywhere, SELECTs need only ever access the local node. On a read-only cluster, performance on any one node isn't affected by the number of nodes and is immune to replication conflicts on other nodes caused by long-running SELECT queries. Thus, adding nodes increases linearly the total possible SELECT throughput. -< -< If an INSERT, UPDATE, and DELETE (DML) is performed locally, then the changes propagate to all nodes in the group. The overhead of DML apply is less than the original execution. So if you run a pure write workload on multiple nodes concurrently, a multi-node cluster can handle more TPS than a single node. -< -< Conflict handling has a cost that acts to reduce the throughput. The throughput then depends on how much contention the application displays in practice. Applications with very low contention perform better than a single node. Applications with high contention can perform worse than a single node. These results are consistent with any multimaster technology and aren't particular to PGD. -< -< Synchronous replication options can send changes concurrently to multiple nodes so that the replication lag is minimized. Adding more nodes means using more CPU for replication, so peak TPS reduces slightly as each node is added. -< -< If the workload tries to use all CPU resources, then this resource constrains replication, which can then affect the replication lag. -< -< In summary, adding more master nodes to a PGD group doesn't result in significant write throughput increase when most tables are replicated because all the writes are replayed on all nodes. Because PGD writes are in general more effective than writes coming from Postgres clients by way of SQL, you can increase performance. Read throughput generally scales linearly with the number of nodes. 
---- -> * [Basic architecture](basic-architecture) -> * [Architectural options and performance](architecture-and-performance) -> * [Comparison with other replication solutions](compared) -diff -r 5/parallelapply.mdx 5.6/parallelapply.mdx -16c16 -< [`bdr.max_writers_per_subscription`](/pgd/5/reference/pgd-settings#bdrmax_writers_per_subscription) ---- -> [`bdr.max_writers_per_subscription`](/pgd/latest/reference/pgd-settings#bdrmax_writers_per_subscription) -18c18 -< [`bdr.writers_per_subscription`](/pgd/5/reference/pgd-settings#bdrwriters_per_subscription) ---- -> [`bdr.writers_per_subscription`](/pgd/latest/reference/pgd-settings#bdrwriters_per_subscription) -29c29 -< Changing [`bdr.max_writers_per_subscription`](/pgd/5/reference/pgd-settings#bdrmax_writers_per_subscription) ---- -> Changing [`bdr.max_writers_per_subscription`](/pgd/latest/reference/pgd-settings#bdrmax_writers_per_subscription) -33c33 -< [`bdr.writers_per_subscription`](/pgd/5/reference/pgd-settings#bdrwriters_per_subscription) ---- -> [`bdr.writers_per_subscription`](/pgd/latest/reference/pgd-settings#bdrwriters_per_subscription) -37c37 -< [`bdr.alter_subscription_disable`](/pgd/5/reference/nodes-management-interfaces#bdralter_subscription_disable). ---- -> [`bdr.alter_subscription_disable`](/pgd/latest/reference/nodes-management-interfaces#bdralter_subscription_disable). -40c40 -< [`bdr.alter_subscription_enable`](/pgd/5/reference/nodes-management-interfaces#bdralter_subscription_enable). ---- -> [`bdr.alter_subscription_enable`](/pgd/latest/reference/nodes-management-interfaces#bdralter_subscription_enable). -64c64 -< [`bdr.stat_subscription`](/pgd/5/reference/catalogs-visible#bdrstat_subscription). ---- -> [`bdr.stat_subscription`](/pgd/latest/reference/catalogs-visible#bdrstat_subscription). -80c80 -< To disable Parallel Apply, set [`bdr.writers_per_subscription`](/pgd/5/reference/pgd-settings#bdrwriters_per_subscription) to `1`. ---- -> To disable Parallel Apply, set [`bdr.writers_per_subscription`](/pgd/latest/reference/pgd-settings#bdrwriters_per_subscription) to `1`. 
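The parallel apply hunks above describe disabling a subscription before changing writer settings and re-enabling it afterward. A rough sketch, assuming a hypothetical subscription name `sub1` and that `bdr.writers_per_subscription` is changed through your normal configuration mechanism:

```sql
-- Stop the subscription before changing its writer configuration.
SELECT bdr.alter_subscription_disable('sub1');

-- ... set bdr.writers_per_subscription (for example, to 1 to disable
-- parallel apply) in postgresql.conf or equivalent, then reload ...

-- Restart the subscription so it picks up the new writer count.
SELECT bdr.alter_subscription_enable('sub1');
```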
-diff -r 5/planning/architectures.mdx 5.6/planning/architectures.mdx -4,8c4,8 -< - /pgd/5/architectures/bronze/ -< - /pgd/5/architectures/gold/ -< - /pgd/5/architectures/platinum/ -< - /pgd/5/architectures/silver/ -< - /pgd/5/architectures/ ---- -> - /pgd/latest/architectures/bronze/ -> - /pgd/latest/architectures/gold/ -> - /pgd/latest/architectures/platinum/ -> - /pgd/latest/architectures/silver/ -> - /pgd/latest/architectures/ -diff -r 5/planning/choosing_server.mdx 5.6/planning/choosing_server.mdx -4c4 -< - /pgd/5/choosing_server/ ---- -> - /pgd/latest/choosing_server/ -11,38c11,38 -< | Feature | PostgreSQL | EDB Postgres Extended | EDB Postgres Advanced | -< |-------------------------------------------------|------------|-----------------------|-----------------------| -< | [Rolling application and database upgrades](/pgd/5/upgrades/) | Y | Y | Y | -< | [Row-level last-update wins conflict resolution](/pgd/5/consistency/conflicts/) | Y | Y | Y | -< | [DDL replication](/pgd/5/ddl/) | Y | Y | Y | -< | [Granular DDL Locking](/pgd/5/ddl/ddl-locking/) | Y | Y | Y | -< | [Streaming of large transactions](/pgd/5/transaction-streaming/) | v14+ | v13+ | v14+ | -< | [Distributed sequences](/pgd/5/sequences/#pgd-global-sequences) | Y | Y | Y | -< | [Subscribe-only nodes](/pgd/5/node_management/subscriber_only/) | Y | Y | Y | -< | [Monitoring](/pgd/5/monitoring/) | Y | Y | Y | -< | [OpenTelemetry support](/pgd/5/monitoring/otel/) | Y | Y | Y | -< | [Parallel apply](/pgd/5/parallelapply) | Y | Y | Y | -< | [Conflict-free replicated data types (CRDTs)](/pgd/5/consistency/crdt/) | Y | Y | Y | -< | [Column-level conflict resolution](/pgd/5/consistency/column-level-conflicts/) | Y | Y | Y | -< | [Transform triggers](/pgd/5/striggers/#transform-triggers) | Y | Y | Y | -< | [Conflict triggers](/pgd/5/striggers/#conflict-triggers) | Y | Y | Y | -< | [Asynchronous replication](/pgd/5/durability/) | Y | Y | Y | -< | [Legacy synchronous replication](/pgd/5/durability/legacy-sync) | Y | Y | Y | -< | [Group Commit](/pgd/5/durability/group-commit/) | N | Y | 14+ | -< | [Commit At Most Once (CAMO)](/pgd/5/durability/camo/) | N | Y | 14+ | -< | [Eager Conflict Resolution](/pgd/5/durability/group-commit#eager-conflict-resolution) | N | Y | 14+ | -< | [Lag Control](/pgd/5/durability/lag-control/) | N | Y | 14+ | -< | [Decoding Worker](/pgd/5/node_management/decoding_worker) | N | 13+ | 14+ | -< | [Lag tracker](/pgd/5/monitoring/sql/#monitoring-outgoing-replication) | N | Y | 14+ | -< | [Missing partition conflict](../reference/conflicts/#target_table_note) | N | Y | 14+ | -< | [No need for UPDATE Trigger on tables with TOAST](../consistency/conflicts/02_types_of_conflict/#toast-support-details) | N | Y | 14+ | -< | [Automatically hold back FREEZE](../consistency/conflicts/03_conflict_detection/#origin-conflict-detection) | N | Y | 14+ | -< | [Transparent Data Encryption](/tde/latest/) | N | 15+ | 15+ | ---- -> | Feature | PostgreSQL | EDB Postgres Extended | EDB Postgres Advanced | -> | ----------------------------------------------------------------------------------------------------------------------- | ---------- | --------------------- | --------------------- | -> | [Rolling application and database upgrades](/pgd/latest/upgrades/) | Y | Y | Y | -> | [Row-level last-update wins conflict resolution](/pgd/latest/conflict-management/conflicts/) | Y | Y | Y | -> | [DDL replication](/pgd/latest/ddl/) | Y | Y | Y | -> | [Granular DDL Locking](/pgd/latest/ddl/ddl-locking/) | Y | Y | Y | -> | [Streaming of large 
transactions](/pgd/latest/transaction-streaming/) | v14+ | v13+ | v14+ | -> | [Distributed sequences](/pgd/latest/sequences/#pgd-global-sequences) | Y | Y | Y | -> | [Subscriber-only nodes](/pgd/latest/nodes/subscriber_only/) | Y | Y | Y | -> | [Monitoring](/pgd/latest/monitoring/) | Y | Y | Y | -> | [OpenTelemetry support](/pgd/latest/monitoring/otel/) | Y | Y | Y | -> | [Parallel apply](/pgd/latest/parallelapply) | Y | Y | Y | -> | [Conflict-free replicated data types (CRDTs)](/pgd/latest/conflict-management/crdt/) | Y | Y | Y | -> | [Column-level conflict resolution](/pgd/latest/conflict-management/column-level-conflicts/) | Y | Y | Y | -> | [Transform triggers](/pgd/latest/striggers/#transform-triggers) | Y | Y | Y | -> | [Conflict triggers](/pgd/latest/striggers/#conflict-triggers) | Y | Y | Y | -> | [Asynchronous replication](/pgd/latest/commit-scopes/) | Y | Y | Y | -> | [Legacy synchronous replication](/pgd/latest/commit-scopes/legacy-sync/) | Y | Y | Y | -> | [Group Commit](/pgd/latest/commit-scopes/group-commit/) | N | Y | 14+ | -> | [Commit At Most Once (CAMO)](/pgd/latest/commit-scopes/camo/) | N | Y | 14+ | -> | [Eager Conflict Resolution](/pgd/latest/commit-scopes/group-commit/#eager-conflict-resolution) | N | Y | 14+ | -> | [Lag Control](/pgd/latest/commit-scopes/lag-control/) | N | Y | 14+ | -> | [Decoding Worker](/pgd/latest/decoding_worker) | N | 13+ | 14+ | -> | [Lag tracker](/pgd/latest/monitoring/sql/#monitoring-outgoing-replication) | N | Y | 14+ | -> | [Missing partition conflict](../reference/conflicts/#target_table_note) | N | Y | 14+ | -> | [No need for UPDATE Trigger on tables with TOAST](../conflict-management/conflicts/02_types_of_conflict/#toast-support-details) | N | Y | 14+ | -> | [Automatically hold back FREEZE](../conflict-management/conflicts/03_conflict_detection/#origin-conflict-detection) | N | Y | 14+ | -> | [Transparent Data Encryption](/tde/latest/) | N | 15+ | 15+ | -diff -r 5/planning/deployments.mdx 5.6/planning/deployments.mdx -5c5 -< - /pgd/5/deployments ---- -> - /pgd/latest/deployments -diff -r 5/planning/limitations.mdx 5.6/planning/limitations.mdx -4c4 -< - /pgd/5/limitations ---- -> - /pgd/latest/limitations -10c10 -< ## Nodes ---- -> ## Nodes -12,16c12,16 -< - PGD can run hundreds of nodes, assuming adequate hardware and network. However, -< for mesh-based deployments, we generally don’t recommend running more than 48 -< nodes in one cluster. If you need extra read scalability beyond the 48-node -< limit, you can add subscriber-only nodes without adding connections to the -< mesh network. ---- -> - PGD can run hundreds of nodes, assuming adequate hardware and network. However, -> for mesh-based deployments, we generally don’t recommend running more than 48 -> nodes in one cluster. If you need extra read scalability beyond the 48-node -> limit, you can add subscriber-only nodes without adding connections to the -> mesh network. -18,23c18,23 -< - The minimum recommended number of nodes in a group is three to provide fault -< tolerance for PGD's consensus mechanism. With just two nodes, consensus would -< fail if one of the nodes were unresponsive. Consensus is required for some PGD -< operations, such as distributed sequence generation. For more information about -< the consensus mechanism used by EDB Postgres Distributed, see [Architectural -< details](architectures/#architecture-details). ---- -> - The minimum recommended number of nodes in a group is three to provide fault -> tolerance for PGD's consensus mechanism. 
With just two nodes, consensus would -> fail if one of the nodes were unresponsive. Consensus is required for some PGD -> operations, such as distributed sequence generation. For more information about -> the consensus mechanism used by EDB Postgres Distributed, see [Architectural -> details](architectures/#architecture-details). -25d24 -< -41,43c40,42 -< - If PGD configuration changes are needed, you must execute administrative commands -< for each database. Doing so increases the risk for potential -< inconsistencies and errors. ---- -> - If PGD configuration changes are needed, you must execute administrative commands -> for each database. Doing so increases the risk for potential -> inconsistencies and errors. -45c44 -< - You must monitor each database separately, adding overhead. ---- -> - You must monitor each database separately, adding overhead. -47,48c46,47 -< - TPAexec assumes one database. Additional coding is needed by customers or by the EDB Professional Services team -< in a post-deploy hook to set up replication for more databases. ---- -> - TPAexec assumes one database. Additional coding is needed by customers or by the EDB Professional Services team -> in a post-deploy hook to set up replication for more databases. -50,51c49,50 -< - PGD Proxy works at the Postgres instance level, not at the database level, -< meaning the leader node is the same for all databases. ---- -> - PGD Proxy works at the Postgres instance level, not at the database level, -> meaning the leader node is the same for all databases. -53,57c52,56 -< - Each additional database increases the resource requirements on the server. -< Each one needs its own set of worker processes maintaining replication, for example, -< logical workers, WAL senders, and WAL receivers. Each one also needs its own -< set of connections to other instances in the replication cluster. These needs might -< severely impact performance of all databases. ---- -> - Each additional database increases the resource requirements on the server. -> Each one needs its own set of worker processes maintaining replication, for example, -> logical workers, WAL senders, and WAL receivers. Each one also needs its own -> set of connections to other instances in the replication cluster. These needs might -> severely impact performance of all databases. -59,62c58,61 -< - Synchronous replication methods, for example, CAMO and Group Commit, won’t work as -< expected. Since the Postgres WAL is shared between the databases, a -< synchronous commit confirmation can come from any database, not necessarily in -< the right order of commits. ---- -> - Synchronous replication methods, for example, CAMO and Group Commit, won’t work as -> expected. Since the Postgres WAL is shared between the databases, a -> synchronous commit confirmation can come from any database, not necessarily in -> the right order of commits. -64c63 -< - CLI and OTEL integration (new with v5) assumes one database. ---- -> - CLI and OTEL integration (new with v5) assumes one database. -69c68 -< These limitations are a product of the interactions between Group Commit and CAMO, and how they interact with PGD features such as the [WAL decoder](../node_management/decoding_worker/) and [transaction streaming](../transaction-streaming/). ---- -> These limitations are a product of the interactions between Group Commit and CAMO, and how they interact with PGD features such as the [WAL decoder](../decoding_worker/) and [transaction streaming](../transaction-streaming/). 
-75c74 -< See [Durability limitations](/pgd/5/durability/limitations) for a full ---- -> See [Durability limitations](/pgd/latest/commit-scopes/limitations/) for a full -80c79 -< PGD was developed to [enable rolling upgrades of PGD](/pgd/5/upgrades) by allowing mixed versions of PGD to operate during the upgrade process. ---- -> PGD was developed to [enable rolling upgrades of PGD](/pgd/latest/upgrades) by allowing mixed versions of PGD to operate during the upgrade process. -89d87 -< -diff -r 5/planning/other_considerations.mdx 5.6/planning/other_considerations.mdx -4c4 -< - /pgd/5/other_considerations ---- -> - /pgd/latest/other_considerations -11c11 -< Read about [Conflicts](/pgd/5/consistency/conflicts/) to understand the implications of the asynchronous operation mode in terms of data consistency. ---- -> Read about [Conflicts](/pgd/latest/conflict-management/conflicts/) to understand the implications of the asynchronous operation mode in terms of data consistency. -35c35 -< Clock synchronization isn't critical to performance, as it is with some other solutions. Clock skew can affect origin conflict detection, though EDB Postgres Distributed provides controls to report and manage any skew that exists. EDB Postgres Distributed also provides row-version conflict detection, as described in [Conflict detection](/pgd/5/consistency/conflicts). ---- -> Clock synchronization isn't critical to performance, as it is with some other solutions. Clock skew can affect origin conflict detection, though EDB Postgres Distributed provides controls to report and manage any skew that exists. EDB Postgres Distributed also provides row-version conflict detection, as described in [Conflict detection](/pgd/latest/conflict-management/conflicts/). -diff -r 5/postgres-configuration.mdx 5.6/postgres-configuration.mdx -46c46 -< reconnected. See [CAMO failure scenarios](durability/camo/#failure-scenarios) for ---- -> reconnected. See [CAMO failure scenarios](commit-scopes/camo#failure-scenarios) for -59c59 -< When the [decoding worker](node_management/decoding_worker/) is enabled, this ---- -> When the [decoding worker](decoding_worker) is enabled, this -65c65 -< parameters. See [Durability and performance options](durability) for details and ---- -> parameters. See [Commit scopes](commit-scopes) for details and -diff -r 5/quickstart/connecting_applications.mdx 5.6/quickstart/connecting_applications.mdx -46c46 -< Your Docker quick start cluster is by default accessible on the IP addresses 172.17.0.2 (kaboom), 172.17.0.3 (kaftan), 172.17.04 (kaolin), and 172.17.0.5 (kapok). Docker generates these addresses. ---- -> Your Docker quick start cluster is by default accessible on the IP addresses 10.33.111.18 (kaboom), 10.33.111.19 (kaftan), 10.33.111.20 (kaolin), and 10.33.111.21 (kapok). TPA generates these addresses. -diff -r 5/quickstart/further_explore_conflicts.mdx 5.6/quickstart/further_explore_conflicts.mdx -13c13 -< ---- -> is -135c135 -< A row in the conflict history now notes a conflict in the table where the `insert_exists`. It also notes that the resolution for this conflict is that the newer record, based on the timing of the commit, is retained. This conflict is called an INSERT/INSERT conflict. You can read more about this type of conflict in [INSERT/INSERT conflicts](../consistency/conflicts/02_types_of_conflict/#insertinsert-conflicts). ---- -> A row in the conflict history now notes a conflict in the table where the `insert_exists`. 
It also notes that the resolution for this conflict is that the newer record, based on the timing of the commit, is retained. This conflict is called an INSERT/INSERT conflict. You can read more about this type of conflict in [INSERT/INSERT conflicts](../conflict-management/conflicts/02_types_of_conflict/#insertinsert-conflicts). -164c164 -< An additional row in the conflict history shows an `update_origin_change` conflict occurred and that the resolution was `apply_remote`. This resolution means that the remote change was applied, updating the record. This conflict is called an UPDATE/UPDATE conflict and is explained in more detail in [UPDATE/UPDATE conflicts](../consistency/conflicts/02_types_of_conflict/#updateupdate-conflicts). ---- -> An additional row in the conflict history shows an `update_origin_change` conflict occurred and that the resolution was `apply_remote`. This resolution means that the remote change was applied, updating the record. This conflict is called an UPDATE/UPDATE conflict and is explained in more detail in [UPDATE/UPDATE conflicts](../conflict-management/conflicts/02_types_of_conflict/#updateupdate-conflicts). -172c172 -< You're now equipped to explore all the possible conflict scenarios and resolutions that can occur. For full details of how conflicts are managed, see [Conflicts](../consistency/conflicts/). While ideally you should avoid conflicts, it's important to know that, when they do happen, they're recorded and managed by Postgres Distributed's integrated and configurable conflict resolver. ---- -> You're now equipped to explore all the possible conflict scenarios and resolutions that can occur. For full details of how conflicts are managed, see [Conflict management](../conflict-management/). While ideally you should avoid conflicts, it's important to know that, when they do happen, they're recorded and managed by Postgres Distributed's integrated and configurable conflict resolver. -diff -r 5/quickstart/quick_start_aws.mdx 5.6/quickstart/quick_start_aws.mdx -7,9c7,9 -< - /pgd/5/deployments/tpaexec/quick_start/ -< - /pgd/5/tpa/quick_start/ -< - /pgd/5/quick_start_aws/ ---- -> - /pgd/latest/deployments/tpaexec/quick_start/ -> - /pgd/latest/tpa/quick_start/ -> - /pgd/latest/quick_start_aws/ -diff -r 5/quickstart/quick_start_cloud.mdx 5.6/quickstart/quick_start_cloud.mdx -7c7 -< - /pgd/5/quick_start_cloud/ ---- -> - /pgd/latest/quick_start_cloud/ -diff -r 5/quickstart/quick_start_docker.mdx 5.6/quickstart/quick_start_docker.mdx -7c7 -< - /pgd/5/quick_start_docker/ ---- -> - /pgd/latest/quick_start_docker/ -diff -r 5/quickstart/quick_start_linux.mdx 5.6/quickstart/quick_start_linux.mdx -7c7 -< - /pgd/5/quick_start_bare/ ---- -> - /pgd/latest/quick_start_bare/ -diff -r 5/reference/autopartition.mdx 5.6/reference/autopartition.mdx -25,26c25,27 -< managed_locally boolean DEFAULT false, -< enabled boolean DEFAULT on); ---- -> managed_locally boolean DEFAULT true, -> enabled boolean DEFAULT on, -> analytics_offload_period); -43c44 -< - `managed_locally` — If true, then the partitions are managed locally. ---- -> - `managed_locally` — Whether partitions are managed locally. Setting this to `false` is not recommended. -44a46 -> - `analytics_offload_period` — Provides support for partition offloading. Reserved for future use. 
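The `bdr.autopartition` hunk above changes the `managed_locally` default to true and adds the reserved `analytics_offload_period` argument. A minimal sketch of a call that relies on the new defaults (the `measurement` table and the `'1 month'` increment are hypothetical):

```sql
-- Monthly range partitions for a hypothetical time-series table.
-- managed_locally is omitted (it now defaults to true);
-- analytics_offload_period is reserved for future use and left unset.
SELECT bdr.autopartition('measurement', '1 month');
```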
-diff -r 5/reference/catalogs-internal.mdx 5.6/reference/catalogs-internal.mdx -74c74 -< * Whenever a worker has errored out (see [bdr.workers](/pgd/5/reference/catalogs-visible/#bdrworkers) ---- -> * Whenever a worker has errored out (see [bdr.workers](/pgd/latest/reference/catalogs-visible/#bdrworkers) -94a95,100 -> ### `bdr.local_leader_change` -> -> This is a local cache of the recent portion of leader change history. It has the same fields as [`bdr.leader`](/pgd/5.6/reference/catalogs-visible#bdrleader), except that it is an ordered set of (node_group_id, leader_kind, generation) instead of a map tracking merely the current version. -> -> -> -117a124 -> | node_name | text | The name of this node | -162,167c169,180 -< | Name | Type | Description | -< |--------------------|-------|-----------------------------| -< | node_group_id | oid | Node group ID | -< | write_node_id | oid | Current write node | -< | prev_write_node_id | oid | Previous write node | -< | read_node_ids | oid[] | List of read-only nodes IDs | ---- -> | Name | Type | Description | -> |----------------------|-------------|------------------------------------------------------------------------------------------| -> | node_group_id | oid | Node group ID. | -> | write_node_id | oid | Current write node. | -> | prev_write_node_id | oid | Previous write node. | -> | read_node_ids | oid[] | List of read-only node IDs. | -> | record_version | bigint | Record version. Incremented by 1 on every material change to the routing record. | -> | record_ts | timestamptz | Timestamp of last update to record_version. | -> | write_leader_version | bigint | Write leader version. Copied from record_version every time write_node_id is changed. | -> | write_leader_ts | timestamptz | Write leader timestamp. Copied from record_ts every time write_node_id is changed. | -> | read_nodes_version | bigint | Read nodes version. Copied from record_version every time read_node_ids list is changed. | -> | read_nodes_ts | timestamptz | Read nodes timestamp. Copied from record_ts every time read_node_ids list is changed.
| -204,207c217,220 -< | Name | Type | Description | -< |-----------------------------|----------|------------------------------------------------------------------------------| -< | proxy_name | name | Name of the proxy | -< | node_group_id | oid | Node group ID that this proxy uses | ---- -> | Name | Type | Description | -> |-----------------------------|----------|----------------------------------------------------------------------------------| -> | proxy_name | name | Name of the proxy | -> | node_group_id | oid | Node group ID that this proxy uses | -209,215c222,228 -< | max_client_conn | int | Number of maximum read-write client connections that the proxy accepts | -< | max_server_conn | int | Number of maximum read-write connections that the server accepts | -< | server_conn_timeout | interval | Timeout for the read-write server connections | -< | server_conn_keepalive | interval | Interval between the server connection keep-alive | -< | fallback_group_timeout | interval | Timeout needed for the fallback | -< | fallback_group_ids | oid[] | List of group IDs to use for the fallback | -< | listen_addrs | text[] | Listen address | ---- -> | max_client_conn | int | Number of maximum read-write client connections that the proxy accepts | -> | max_server_conn | int | Number of maximum read-write connections that the server accepts | -> | server_conn_timeout | interval | Timeout for the read-write server connections | -> | server_conn_keepalive | interval | Interval between the server connection keep-alive | -> | fallback_group_timeout | interval | Timeout needed for the fallback | -> | fallback_group_ids | oid[] | List of group IDs to use for the fallback | -> | listen_addrs | text[] | Listen address | -217,222c230,235 -< | read_max_client_conn | int | Number of maximum read-only client connections that the proxy accepts | -< | read_max_server_conn | int | Number of maximum read-only connections that the server accepts | -< | read_server_conn_timeout | interval | Timeout for the server read-only connections | -< | read_server_conn_keepalive | interval | Interval between the server read-only connection keep-alive | -< | read_listen_addrs | text[] | Listen address for read-only connections | -< | read_consensus_grace_period | interval | Duration for which proxy continues to route even upon loss of consensus | ---- -> | read_max_client_conn | int | Number of maximum read-only client connections that the proxy accepts | -> | read_max_server_conn | int | Number of maximum read-only connections that the server accepts | -> | read_server_conn_timeout | interval | Timeout for the server read-only connections | -> | read_server_conn_keepalive | interval | Interval between the server read-only connection keep-alive | -> | read_listen_addrs | text[] | Listen address for read-only connections | -> | read_consensus_grace_period | interval | Duration for which proxy continues to route even upon loss of consensus | -231,234c244,247 -< | Name | Type | Description | -< |---------------------------------|----------|-------------------------------------------------------------------------------| -< | proxy_name | name | Name of the proxy | -< | node_group_name | name | Node group name that this proxy uses | ---- -> | Name | Type | Description | -> |---------------------------------|----------|-----------------------------------------------------------------------------------| -> | proxy_name | name | Name of the proxy | -> | node_group_name | name | Node group name that this proxy uses | -236,244c249,257 
-< | max_client_conn | int | Number of maximum read-write client connections that the proxy accepts | -< | max_server_conn | int | Number of maximum read-write connections that the server accepts | -< | server_conn_timeout | interval | Timeout for the read-write server connections | -< | server_conn_keepalive | interval | Interval between the server connection keep-alive | -< | node_group_enable_proxy_routing | boolean | Does the group the proxy is in enable proxy routing? | -< | node_group_location | name | The group's location value | -< | fallback_group_timeout | interval | Timeout needed for the fallback | -< | fallback_group_ids | oid[] | List of group IDs to use for the fallback | -< | listen_addrs | text[] | Listen address | ---- -> | max_client_conn | int | Number of maximum read-write client connections that the proxy accepts | -> | max_server_conn | int | Number of maximum read-write connections that the server accepts | -> | server_conn_timeout | interval | Timeout for the read-write server connections | -> | server_conn_keepalive | interval | Interval between the server connection keep-alive | -> | node_group_enable_proxy_routing | boolean | Does the group the proxy is in enable proxy routing? | -> | node_group_location | name | The group's location value | -> | fallback_group_timeout | interval | Timeout needed for the fallback | -> | fallback_group_ids | oid[] | List of group IDs to use for the fallback | -> | listen_addrs | text[] | Listen address | -246,251c259,264 -< | read_max_client_conn | int | Number of maximum read-only client connections that the proxy accepts | -< | read_max_server_conn | int | Number of maximum read-only connections that the server accepts | -< | read_server_conn_timeout | interval | Timeout for the server read-only connections | -< | read_server_conn_keepalive | interval | Interval between the server read-only connection keep-alive | -< | read_listen_addrs | text[] | Listen address for read-only connections | -< | read_consensus_grace_period | interval | Duration for which proxy continues to route even upon loss of consensus | ---- -> | read_max_client_conn | int | Number of maximum read-only client connections that the proxy accepts | -> | read_max_server_conn | int | Number of maximum read-only connections that the server accepts | -> | read_server_conn_timeout | interval | Timeout for the server read-only connections | -> | read_server_conn_keepalive | interval | Interval between the server read-only connection keep-alive | -> | read_listen_addrs | text[] | Listen address for read-only connections | -> | read_consensus_grace_period | interval | Duration for which proxy continues to route even upon loss of consensus | -260,262c273,275 -< | Name | Type | Description | -< | ------- | ---- | ----------------------------------------------------------- | -< | seqid | oid | Internal OID of the sequence | ---- -> | Name | Type | Description | -> |---------|------|-----------------------------------------------------------------------------| -> | seqid | oid | Internal OID of the sequence | -diff -r 5/reference/catalogs-visible.mdx 5.6/reference/catalogs-visible.mdx -52c52 -< For details, see [Logging conflicts to a table](../consistency/conflicts). ---- -> For details, see [Logging conflicts to a table](../conflict-management/conflicts). -379a380,399 -> ### `bdr.leader` -> -> Tracks leader nodes across subgroups in the cluster. Shows the status of all write leaders and subscriber-only group leaders (when optimized topology is enabled) in the cluster. 
-> -> #### `bdr.leader` columns -> -> | Name | Type | Description | -> | ---------------- | ---- | ------------------------------ | -> | node_group_id | oid | ID of the node group. | -> | leader_node_id | oid | ID of the leader node. | -> | generation | int | Generation of the leader node. Leader_kind sets semantics. | -> | leader_kind | "char" | Kind of the leader node. | -> -> Leader_kind values can be: -> -> | Value | Description | -> |-------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -> | W | Write leader, as per proxy routing. In this case leader is maintained by subgroup Raft instance.
`generation` corresponds to `write_leader_version` of respective `bdr.node_group_routing_info` record. | -> | S | Subscriber-only group leader. This designated member of a SO group subscribes to upstream data nodes and is tasked with publishing upstream changes to remaining SO group members. Leader is maintained by top-level Raft instance.
`generation` is updated sequentially upon leader change. | -855c875 -< This contains the same information as `pg_stat_activity`, except `wait_event` ---- -> This contains the same information as [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW), except `wait_event` -857a878,986 -> ### `bdr.stat_commit_scope` -> -> A view containing statistics for each commit scope. -> -> #### `bdr.stat_commit_scope` columns -> -> | Column | Type | Description | -> | ------------------- | ------------------------ | ------------------------------------------------------------------------------------------------------------------------------- | -> | commit_scope_name | name | Name of the commit scope -> | group_name | name | Name of the group for which the commit scope is defined -> | ncalls | bigint | The number of times the commit scope was used -> | ncommits | bigint | The number of successful commits made with the commit scope -> | naborts | bigint | The number of times a commit using the commit scope was eventually aborted -> | total_commit_time | double precision | Total time spent committing using the commit scope, in milliseconds -> | min_commit_time | double precision | Minimum time spent committing using the commit scope, in milliseconds -> | max_commit_time | double precision | Maximum time spent committing using the commit scope, in milliseconds -> | mean_commit_time | double precision | Mean time spent committing using the commit scope, in milliseconds -> | stats_reset | timestamp with time zone | Time at which all statistics in the view were last reset -> -> ### `bdr.stat_commit_scope_state` -> -> A view of information about the current use of commit scopes by backends. -> -> #### `bdr.stat_commit_scope_state` columns -> -> | Column | Type | Description | -> | ------------------- | ------------------------ | ------------------------------------------------------------------------------------------------------------------------------- | -> | pid | integer | Process ID of the backend -> | commit_scope_name | name | Name of the commit scope being used -> | group_name | name | Name of the group for which the commit scope is defined -> | waiting_op_num | integer | Index of the first operation in the commit scope that is not satisfied yet -> | waiting_prepare_confirmations| integer | The number of PREPARE confirmations that are still needed by the operation -> | waiting_commit_confirmations| integer | The number of COMMIT confirmations that are still needed by the operation -> | waiting_lsn_confirmations| integer | The number of LSN confirmations that are still needed by the operation -> -> ### `bdr.stat_raft_followers_state` -> -> A view of the state of the Raft leader's followers, available on the Raft leader node (empty on other nodes).
-> -> #### `bdr.stat_raft_followers_state` columns -> -> | Column | Type | Description | -> | ------------------- | ------------------------ | ------------------------------------------------------------------------------------------------------------------------------- | -> | group_name | name | The group this information is for (each group can have a separate consensus configured) -> | node_name | name | Name of the follower node -> | sent_commit_index | bigint | Latest Raft index sent to the follower node -> | match_index | bigint | Raft index we expect to match the next response from the follower node -> | last_message_time | timestamp with time zone | Last message (any, including requests) seen from the follower node -> | last_heartbeat_send_time| timestamp with time zone| Last time the leader sent a heartbeat to the follower node -> | last_heartbeat_response_time| timestamp with time zone | Last time the leader has seen a heartbeat response from the follower node -> | approx_clock_drift_ms| bigint | Approximate clock drift seen by the leader against the follower node in milliseconds -> -> ### `bdr.stat_raft_state` -> -> A view describing the state of the Raft consensus on the local node. -> -> #### `bdr.stat_raft_state` columns -> -> | Column | Type | Description | -> | ------------------- | ------------------------ | ------------------------------------------------------------------------------------------------------------------------------- | -> | group_name | name | The group this information is for (each group can have a separate consensus configured) -> | raft_stat | text | State of the local node in the Raft ('LEADER', 'CANDIDATE', 'FOLLOWER', 'STOPPED') -> | leader_name | name | Name of the Raft leader, if any -> | voted_for_name | name | The node the local node voted for as leader in the last vote -> | is_voting | boolean | Whether the local node is a voting participant in the Raft consensus -> | heartbeat_timeout_ms| bigint | The heartbeat timeout on the local node -> | heartbeat_elapsed_ms| bigint | The number of milliseconds that have elapsed since the local node has seen a heartbeat from the leader -> | current_term | bigint | The current Raft term the local node is at -> | commit_index | bigint | The current Raft commit index the local node is at -> | apply_index | bigint | The Raft commit index the local node applied to catalogs -> | last_log_term | bigint | Last Raft term in the request log -> | last_log_index | bigint | Last Raft index in the request log -> | oldest_log_index | bigint | Oldest Raft index still in the request log -> | newest_prunable_log_index| bigint | Newest Raft index that can be safely removed from the request log -> | snapshot_term | bigint | Raft term of the last snapshot -> | snapshot_index | bigint | Raft index of the last snapshot -> | nnodes | integer | Number of nodes in the Raft consensus (should normally be the same as the number of nodes in the group) -> | nvoting_nodes | integer | Number of voting nodes in the Raft consensus -> -> ### `bdr.stat_receiver` -> -> A view containing information about the replication subscription receiver processes.
-> -> #### `bdr.stat_receiver` columns -> -> | Column | Type | Description | -> | ------------------- | ------------------------ | ------------------------------------------------------------------------------------------------------------------------------- | -> | worker_role | text | Role of the BDR worker (always 'receiver') -> | worker_state | text | State of receiver worker (can be 'running', 'down', or 'disabled') -> | worker_pid | integer | Process id of the receiver worker -> | sub_name | name | Name of the subscription the receiver belongs to -> | sub_slot_name | name | Replication slot name used by the receiver -> | source_name | name | Source node for this receiver (the one it connects to), this is normally the same as the origin node, but is different for forward mode subscriptions -> | origin_name | name | The origin node for this receiver (the one it receives forwarded changes from), this is normally the same as the source node, but is different for forward mode subscriptions -> | subscription_mode | char | Mode of the subscription, see [`bdr.subscription_summary`](/pgd/latest/reference/catalogs-visible/#bdrsubscription_summary) for more details -> | sub_replication_sets| text[] | Replication sets this receiver is subscribed to -> | sub_apply_delay | interval | Apply delay interval -> | receive_lsn | pg_lsn | LSN of the last change received so far -> | receive_commit_lsn | pg_lsn | LSN of the last commit received so far -> | xact_apply_lsn | pg_lsn | Last applied transaction LSN -> | xact_flush_lsn | pg_lsn | Last flushed transaction LSN -> | xact_apply_timestamp| timestamp with time zone | Last applied transaction (commit) timestamp -> | worker_start | timestamp with time zone | Time at which the receiver started -> | worker_xact_start | timestamp with time zome | Time at which the receiver started local db transaction (if it is currently processing a local transaction), usually NULL, see `xact_start` in [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) for more details -> | worker_backend_state_change | timestamp with time zone | Backend state change timestamp, see `state_change` in [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) for more details -> | worker_backend_state| text | Current backend state, see `state` in [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) for more details -> | wait_event_type | text | Type of wait event the receiver is currently waiting on (if any), see `wait_event_type` in [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) for more details -> | wait_event | text | Exact event the receiver is currently waiting on (if any, see `wait_event` in [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) for more details) -> -877c1006 -< | total_time | double precision | Total time spent processing replication for the relation | ---- -> | total_time | double precision | Total time spent processing replication for the relation, in milliseconds | -890a1020 -> ### `bdr.stat_routing_candidate_state` -891a1022,1049 -> A view of information about the routing candidate nodes on the Raft leader (empty on other nodes). 
-> -> #### `bdr.stat_routing_candidate_state` columns -> -> | Column | Type | Description | -> | ------------------- | ------------------------ | ------------------------------------------------------------------------------------------------------------------------------- | -> | node_group_name | name | The group this information is for (each group can have a separate routing proxy) -> | node_name | name | Candidate node name -> | node_route_fence | boolean | The node is fenced (when true it cannot become leader or read-only connection target) -> | node_route_reads | boolean | The node is being considered as a read-only connection target -> | node_route_writes | boolean | The node is being considered as a write lead candidate. -> | last_message_time | timestamp with time zone | The time of the last Raft message (any, including requests) seen by this node (used to check liveness of node) -> -> ### `bdr.stat_routing_state` -> -> A view of the state of the connection routing which PGD Proxy uses to route the connections. -> -> #### `bdr.stat_routing_state` columns -> -> | Column | Type | Description | -> | -------------------- | ------------------------ | ------------------------------------------------------------------------------------------------------------------- | -> | node_group_name | name | The group this is information for (each group can have a separate routing proxy) -> | write_lead_name | name | Name of the write lead node -> | previous_write_lead_name| name | Name of the previous write lead node -> | read_names | name[] | Array of nodes to which read-only connections are routed -> | write_candidate_names| name[] | Nodes that match all criteria needed to become write lead in case of failover -> | read_candidate_names | name[] | Nodes that match all criteria needed to become read-only connection targets in case of failover -> -950a1109,1177 -> -> ### `bdr.stat_worker` -> -> A view containing summary information and per worker statistics for PGD manager workers. 
-> -> #### `bdr.stat_worker` columns -> -> | Column | Type | Description | -> | -------------------- | ------------------------ | ------------------------------------------------------------------------------------------------------------------- | -> | worker_role | text | Role of the BDR worker -> | worker_pid | integer | Process id of the worker -> | sub_name | name | Name of the subscription the worker is related to, if any -> | worker_start | timestamp with time zone | Time at which the worker started -> | worker_xact_start | timestamp with time zone | Time at which the worker started the local db transaction, see `xact_start` in [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) for more details -> | worker_xid | xid | Transaction id of the worker, see `backend_xid` in [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) for more details -> | worker_xmin | xid | Oldest transaction id needed by the worker, see `backend_xmin` in [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) for more details -> | worker_backend_state_change| timestamp with time zone| Backend state change timestamp see `state_change` in [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) for more details -> | worker_backend_state | text | Current backend state see `state` in [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) for more details -> | wait_event_type | text | The type of wait event the worker is currently waitiing on, if any (see `wait_event_type` in [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) for more details) -> | wait_event | text | The exact event the worker is waiting on, if any (see `wait_event` in [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) for more details) -> | blocked_by_pids | integer[] | List of PIDs blocking the worker, if any -> | query | text | Query currently being run by the worker -> | worker_query_start | timestamp with time zone | Timestamp at which the current query run by the worker started -> -> ### `bdr.stat_writer` -> -> A view containing summary information and statistics for each subscription replication writer. There can be multiple writers for each subscription. 
-> -> #### `bdr.stat_writer` columns -> -> | Column | Type | Description | -> | -------------------- | ------------------------ | ------------------------------------------------------------------------------------------------------------------- | -> | worker_role | text | Role of the BDR worker (always 'writer') -> | worker_state | text | State of the worker (can be 'running', 'down', or 'disabled') -> | worker_pid | integer | Process id of the writer -> | sub_name | name | Name of the subscription the writer belongs to -> | writer_nr | integer | Writer index in the writer group for the same subscription -> | nxacts | bigint | The number of transactions the writer has processed since start -> | ncommits | bigint | The number of commits the writer processed since start -> | naborts | bigint | The number of aborts the writer processed since start -> | commit_queue_position| integer | Position in the commit queue, when serializing transactions against other writers in the same writer group -> | xact_source_xid | xid | Transaction id of the currently processed transaction on the source node -> | xact_source_commit_lsn| pg_lsn | LSN of the currently processed transaction on the source node -> | xact_nchanges | bigint | The number of changes in the currently processed transaction that have been written (updated every 1000 changes) -> | xact_origin_node_name| name | Origin node of the currently processed transaction -> | xact_origin_lsn | pg_lsn | Origin LSN of the currently processed transaction -> | xact_origin_timestamp| timestamp with time zone | Origin commit timestamp of the currently processed transaction -> | streaming_allowed | boolean | The writer can receive direct stream for large transactions -> | is_streaming | boolean | The writer is currently receiving a direct stream of a large transaction -> | nstream_file | bigint | The number of stream files the writer has processed -> | nstream_writer | bigint | The number of directly streamed transactions the writer has processed -> | worker_start | timestamp with time zone | The time at which the writer started -> | worker_xact_start | timestamp with time zone | The time at which the writer start the local db transaction (see xact_start in [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) for more details) -> | worker_xid | xid | Transaction id of the worker (see `backend_xid` in [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) for more details) -> | worker_xmin | xid | Oldest transaction id needed by the worker (see `backend_xmin` in [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) for more details) -> | worker_backend_state_change| timestamp with time zone| Backend state change timestamp (see `state_change` in [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) for more details) -> | worker_backend_state | text | Current backend state (see `state` in [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) for more details) -> | wait_event_type | text | The type of wait event the writer is currently waiting on, if any (see `event_type` in [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) for more details) -> | wait_event | text | The exact event the 
writer is waiting on, if any (see `wait_event` in [`pg_stat_activity`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW) for more details) -> | blocked_by_pids | integer[] | List of PIDs blocking the writer, if any -> | query | text | Query currently being run by the writer (normally only set for DDL) -> | worker_query_start | timestamp with time zone | Timestamp at which the current query run by the worker started -> | command_progress_cmdtag| text | For commands with progress tracking, identifies the command current processed by the writer (can be one of 'CREATE INDEX', 'CREATE INDEX CONCURRENTLY', 'REINDEX', 'REINDEX CONCURRENTLY', 'CLUSTER', and 'VACUUM FULL') -> | command_progress_relation| text | For commands with progress tracking, identifies therelation which the command is working on -> | command_progress_phase| text | For commands with progress tracking, name of the current phase the command is in, refer to [Progress Reporting](https://www.postgresql.org/docs/current/progress-reporting.html) in the Postgres documentation for details -> | command_progress_count| integer | For commands with progress tracking, the number of phases this command has gone through -> | command_progress_phase_nr | integer | For commands with progress tracking, the number of the phase of `command_progress_count` -> | command_progress_phase_tuples_total| real | For commands with progress tracking, the number of rows the current phase of the command has to process (if the phase is process rows) -> | command_progress_tuples_done| bigint | For commands with progress tracking, the number of rows the current phase of the command has already processed (if the phase is process rows) -diff -r 5/reference/commit-scopes.mdx 5.6/reference/commit-scopes.mdx -10c10 -< Commit scopes are rules that determine how transaction commits and conflicts are handled within a PGD system. You can read more about them in [Durability](/pgd/5/durability). ---- -> Commit scopes are rules that determine how transaction commits and conflicts are handled within a PGD system. You can read more about them in [Commit Scopes](/pgd/latest/commit-scopes/). -14,16c14,16 -< - [`bdr.add_commit_scope`](/pgd/5/reference/functions#bdradd_commit_scope) -< - [`bdr.alter_commit_scope`](/pgd/5/reference/functions#bdralter_commit_scope) -< - [`bdr.remove_commit_scope`](/pgd/5/reference/functions#bdrremove_commit_scope) ---- -> - [`bdr.create_commit_scope`](/pgd/latest/reference/functions#bdrcreate_commit_scope) -> - [`bdr.alter_commit_scope`](/pgd/latest/reference/functions#bdralter_commit_scope) -> - [`bdr.drop_commit_scope`](/pgd/latest/reference/functions#bdrdrop_commit_scope) -28a29,32 -> commit_scope_target: -> { (node_group [, ...]) -> | ORIGIN_GROUP } -> -30,32c34,36 -< { ANY num [NOT] (node_group [, ...]) -< | MAJORITY [NOT] (node_group [, ...]) -< | ALL [NOT] (node_group [, ...]) } ---- -> { ANY num [NOT] commit_scope_target -> | MAJORITY [NOT] commit_scope_target -> | ALL [NOT] commit_scope_target } -38,39c42,43 -< { GROUP COMMIT [ ( group_commit_parameter = value [, ...] ) ] [ ABORT ON ( abort_on_parameter = value ) ] -< | CAMO [ DEGRADE ON ( degrade_on_parameter = value ) TO ASYNC ] ---- -> { GROUP COMMIT [ ( group_commit_parameter = value [, ... ] ) ] [ ABORT ON ( abort_on_parameter = value ) ] [ DEGRADE ON (degrade_on_parameter = value [, ... ] ) TO commit_scope_degrade_operation ] -> | CAMO [ DEGRADE ON ( degrade_on_parameter = value [, ... 
] ) TO ASYNC ] -41c45,49 -< | SYNCHRONOUS_COMMIT } ---- -> | SYNCHRONOUS COMMIT [ DEGRADE ON (degrade_on_parameter = value ) TO commit_scope_degrade_operation ] } -> -> commit_scope_degrade_operation: -> commit_scope_group confirmation_level commit_scope_kind -> -44c52,54 -< Where `node_group` is the name of a PGD data node group. ---- -> Where `node_group` is the name of a PGD data node group. -> -> ### commit_scope_degrade_operation -45a56 -> The `commit_scope_degrade_operation` is either the same commit scope kind as the overall rule being defined, with a less restrictive commit scope group, or is asynchronous (`ASYNC`). -46a58,82 -> For instance, [you can degrade](/pgd/latest/commit-scopes/degrading/) from an `ALL SYNCHRONOUS COMMIT` to a `MAJORITY SYNCHRONOUS COMMIT`, or a `MAJORITY SYNCHRONOUS COMMIT` to an `ANY 3 SYNCHRONOUS COMMIT`, or even an `ANY 3 SYNCHRONOUS COMMIT` to an `ANY 2 SYNCHRONOUS COMMIT`. You can also degrade from `SYNCHRONOUS COMMIT` to `ASYNC`. However, you cannot degrade from `SYNCHRONOUS COMMIT` to `GROUP COMMIT` or the other way around, regardless of the commit scope groups involved. -> -> It is also possible to combine rules using `AND`, each with their own degradation clause: -> -> ``` -> ALL ORIGIN_GROUP SYNCHRONOUS COMMIT DEGRADE ON (timeout = 10s) TO MAJORITY ORIGIN_GROUP SYNCHRONOUS COMMIT AND ANY 1 NOT ORIGIN_GROUP SYNCHRONOUS COMMIT DEGRADE ON (timeout = 20s) TO ASYNC -> ``` -> -> ## Commit scope targets -> -> ### ORIGIN_GROUP -> -> Instead of targeting a specific group, you can also use `ORIGIN_GROUP`, which dynamically refers to the bottommost group from which a transaction originates. Therefore, if you have a top level group, `top_group`, and two subgroups as children, `left_dc` and `right_dc`, then adding a commit scope like: -> -> ```sql -> SELECT bdr.create_commit_scope( -> commit_scope_name := 'example_scope', -> origin_node_group := 'top_level_group', -> rule := 'MAJORITY ORIGIN_GROUP SYNCHRONOUS COMMIT', -> wait_for_ready := true -> ); -> ``` -> -> would mean that for transactions originating on a node in `left_dc`, a majority of the nodes of `left_dc` would need to confirm the transaction synchronously before the transaction is committed. Moreover, the same rule would also mean that for transactions originating from a node in `right_dc`, a majority of nodes from `right_dc` are required to confirm the transaction synchronously before it is committed. This avoids the need to add two separate rules, one for `left_dc` and one for `right_dc`, to the commit scope. -> -110,113d145 -< * [Group Commit](#group-commit) -< * [CAMO (Commit At Most Once)](#camo) -< * [Lag Control](#lag-control) -< * [Synchronous Commit](#synchronous_commit) -115c147,154 -< !!! Note Parameter values ---- -> - [Synchronous Commit](#synchronous-commit) -> - [Group Commit](#group-commit) -> - [CAMO (Commit At Most Once)](#camo) -> - [Lag Control](#lag-control) -> -> -> !!!Note Parameter values -> -118a158,176 -> ## SYNCHRONOUS COMMIT -> -> ``` -> SYNCHRONOUS COMMIT [ DEGRADE ON (degrade_on_parameter = value ) TO commit_scope_degrade_operation ] -> ``` -> -> ### DEGRADE ON parameters -> -> | Parameter | Type | Default | Description | -> | ------------------ | -------- | ------- | ------------------------------------------------------------------------------------------------------------ | -> | `timeout` | interval | 0 | Timeout in milliseconds (accepts other units) after which operation degrades. (0 means not set.)
| -> | `require_write_lead` | Boolean | False | Specifies whether the node must be a write lead to be able to switch to degraded operation. | -> -> These set the conditions under which the commit scope rule will degrade to a less restrictive mode of operation. -> -> ### commit_scope_degrade_operation -> -> The `commit_scope_degrade_operation` must be `SYNCHRONOUS COMMIT` with a less restrictive commit scope group—or must be asynchronous (`ASYNC`). -> -121c179 -< Allows commits to be confirmed by a consensus of nodes and controls conflict resolution settings. ---- -> Allows commits to be confirmed by a consensus of nodes, controls conflict resolution settings, and, like [`SYNCHRONOUS COMMIT`](#synchronous-commit), has optional rule-degradation parameters. -124c182 -< GROUP COMMIT [ ( group_commit_parameter = value [, ...] ) ] [ ABORT ON ( abort_on_parameter = value ) ] ---- -> GROUP COMMIT [ ( group_commit_parameter = value [, ...] ) ] [ ABORT ON ( abort_on_parameter = value ) ] [ DEGRADE ON (degrade_on_parameter = value ) TO commit_scope_degrade_operation ] -128,132d185 -< Parameter | Type | Default | Description -< --- | --- | --- | --- -< `transaction_tracking` | Boolean | Off/False | Specifies whether to track status of transaction. See [transaction_tracking settings](#transaction_tracking-settings). -< `conflict_resolution` | enum | async | Specifies how to handle conflicts. (`async`\|`eager` ). See [conflict_resolution settings](#conflict_resolution-settings). -< `commit_decision` | enum | group | Specifies how the COMMIT decision is made. (`group`\|`partner`\|`raft`). See [commit_decision settings](#commit_decision-settings). -133a187,191 -> | Parameter | Type | Default | Description | -> | ---------------------- | ------- | --------- | ----------------------------------------------------------------------------------------------------------------------------------- | -> | `transaction_tracking` | Boolean | Off/False | Specifies whether to track status of transaction. See [transaction_tracking settings](#transaction_tracking-settings). | -> | `conflict_resolution` | enum | async | Specifies how to handle conflicts. (`async`\|`eager` ). See [conflict_resolution settings](#conflict_resolution-settings). | -> | `commit_decision` | enum | group | Specifies how the COMMIT decision is made. (`group`\|`partner`\|`raft`). See [commit_decision settings](#commit_decision-settings). | -137,140c195,198 -< Parameter | Type | Default | Description -< --- | --- | --- | --- -< `timeout` | interval | 0 | Timeout in milliseconds (accepts other units). (0 means not set.) -< `require_write_lead` | bool | false | CAMO only. If set, then for a transaction to switch to local (async) mode, a consensus request is required. | ---- -> | Parameter | Type | Default | Description | -> | -------------------- | -------- | ------- | ----------------------------------------------------------------------------------------------------------- | -> | `timeout` | interval | 0 | Timeout in milliseconds (accepts other units). (0 means not set.) | -> | `require_write_lead` | Boolean | False | CAMO only. If set, then for a transaction to switch to local (async) mode, a consensus request is required.
| -141a200,206 -> ### DEGRADE ON parameters -> -> | Parameter | Type | Default | Description | -> | ------------------ | -------- | ------- | ------------------------------------------------------------------------------------------------------------ | -> | `timeout` | interval | 0 | Timeout in milliseconds (accepts other units) after which operation degrades. (0 means not set.) | -> | `require_write_lead` | Boolean | False | Specifies whether the node must be a write lead to be able to switch to degraded operation. | -> -145,146d209 -< * Look up commit decisions when a writer is processing a PREPARE message. -< * When recovering from an interruption, look up the transactions prepared before the interruption. When found, it then looks up the commit scope of the transaction and any corresponding RAFT commit decision. Suppose the node is the origin of the transaction and doesn't have a RAFT commit decision, and `transaction_tracking` is on in the commit scope. In that case, it periodically looks for a RAFT commit decision for this unresolved transaction until it's committed or aborted. -147a211,213 -> - Look up commit decisions when a writer is processing a PREPARE message. -> - When recovering from an interruption, look up the transactions prepared before the interruption. When found, it then looks up the commit scope of the transaction and any corresponding RAFT commit decision. Suppose the node is the origin of the transaction and doesn't have a RAFT commit decision, and `transaction_tracking` is on in the commit scope. In that case, it periodically looks for a RAFT commit decision for this unresolved transaction until it's committed or aborted. -> -157a224 -> See ["Conflict resolution" in Group Commit](../commit-scopes/group-commit/#conflict-resolution). -159,160d225 -< See ["Conflict resolution" in Group Commit](../durability/group-commit/#conflict-resolution). -< -162c227 -< ---- -> -171c236 -< See ["Commit decisions" in Group Commit](../durability/group-commit/#commit-decisions). ---- -> See ["Commit decisions" in Group Commit](../commit-scopes/group-commit/#commit-decisions). -172a238,242 -> ### commit_scope_degrade_operation settings -> -> The `commit_scope_degrade_operation` must be `GROUP_COMMIT` with a less restrictive commit scope group—or must be asynchronous (`ASYNC`). -> -> -177c247 -< See ["CAMO" in Durability](../durability/camo) for more details. ---- -> See ["CAMO" in Durability](../commit-scopes/camo) for more details. -183c253 -< ### Degrade On parameters ---- -> ### DEGRADE ON parameters -187,190c257,260 -< Parameter | Type | Default | Description -< --- | --- | --- | ---- -< timeout | interval | 0 | Timeout in milliseconds (accepts other units) after which operation becomes asynchronous. (0 means not set.) -< require_write_lead | Boolean | False | Specifies whether the node must be a write lead to be able to switch to asynchronous mode. ---- -> | Parameter | Type | Default | Description | -> | ------------------ | -------- | ------- | ------------------------------------------------------------------------------------------------------------ | -> | `timeout` | interval | 0 | Timeout in milliseconds (accepts other units) after which operation becomes asynchronous. (0 means not set.) | -> | `require_write_lead` | Boolean | False | Specifies whether the node must be a write lead to be able to switch to asynchronous mode. | -192d261 -< -197c266 -< See ["Lag Control" in Durability](../durability/lag-control) for more details. 
---- -> See ["Lag Control" in Durability](../commit-scopes/lag-control) for more details. -205,209c274,278 -< Parameter | Type | Default | Description -< --- | --- | --- | --- -< `max_lag_size` | int | 0 | The maximum lag in kB that a given node can have in the replication connection to another node. When the lag exceeds this maximum scaled by `max_commit_delay`, lag control adjusts the commit delay. -< `max_lag_time` | interval | 0 | The maximum replication lag in milliseconds that the given origin can have with regard to a replication connection to a given downstream node. -< `max_commit_delay` | interval | 0 | Configures the maximum delay each commit can take, in fractional milliseconds. If set to 0, it disables Lag Control. After each commit delay adjustment (for example, if the replication is lagging more than `max_lag_size` or `max_lag_time`), the commit delay is recalculated with the weight of the `bdr.lag_control_commit_delay_adjust` GUC. The `max_commit_delay` is a ceiling for the commit delay. ---- -> | Parameter | Type | Default | Description | -> | ------------------ | -------- | ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -> | `max_lag_size` | int | 0 | The maximum lag in kB that a given node can have in the replication connection to another node. When the lag exceeds this maximum scaled by `max_commit_delay`, lag control adjusts the commit delay. | -> | `max_lag_time` | interval | 0 | The maximum replication lag in milliseconds that the given origin can have with regard to a replication connection to a given downstream node. | -> | `max_commit_delay` | interval | 0 | Configures the maximum delay each commit can take, in fractional milliseconds. If set to 0, it disables Lag Control. After each commit delay adjustment (for example, if the replication is lagging more than `max_lag_size` or `max_lag_time`), the commit delay is recalculated with the weight of the `bdr.lag_control_commit_delay_adjust` GUC. The `max_commit_delay` is a ceiling for the commit delay. | -211,212c280,281 -< * If `max_lag_size` and `max_lag_time` are set to 0, the LAG CONTROL is disabled. -< * If `max_commit_delay` is not set or set to 0, the LAG CONTROL is disabled. ---- -> - If `max_lag_size` and `max_lag_time` are set to 0, the LAG CONTROL is disabled. -> - If `max_commit_delay` is not set or set to 0, the LAG CONTROL is disabled. -232,239d300 -< + `prev_apply_rate` was the previously configured `apply_rate`, before -< recalculating the new rate. -< + `apply_rate_weight` is the value of the GUC `bdr.lag_tracker_apply_rate_weight`. -< + `apply_ptr_diff` is the difference between the current `apply_ptr` and -< the `apply_ptr` at the point in time when the apply rate was last -< computed. -< + `diff_secs` is the delta in seconds from the last time the apply rate -< was calculated. -240a302,309 -> - `prev_apply_rate` was the previously configured `apply_rate`, before -> recalculating the new rate. -> - `apply_rate_weight` is the value of the GUC `bdr.lag_tracker_apply_rate_weight`. -> - `apply_ptr_diff` is the difference between the current `apply_ptr` and -> the `apply_ptr` at the point in time when the apply rate was last -> computed. 
-> - `diff_secs` is the delta in seconds from the last time the apply rate -> was calculated. -242,250d310 -< -< ## SYNCHRONOUS_COMMIT -< -< -< ``` -< SYNCHRONOUS_COMMIT -< ``` -< -< The `SYNCHRONOUS_COMMIT` commit scope kind has no parameters. It's effectively configured only by the commit scope group and commit scope visibility. -diff -r 5/reference/conflict_functions.mdx 5.6/reference/conflict_functions.mdx -26,29c26,29 -< - `row_origin` — Origin of the previous change made on the tuple (see [Origin conflict detection](../consistency/conflicts/03_conflict_detection/#origin-conflict-detection)). This is the only method supported that doesn't require an extra column in the table. -< - `row_version` — Row version column (see [Row version conflict detection](../consistency/conflicts/03_conflict_detection/#row-version-conflict-detection)). -< - `column_commit_timestamp` — Per-column commit timestamps (described in [CLCD](../consistency/column-level-conflicts)). -< - `column_modify_timestamp` — Per-column modification timestamp (described in [CLCD](../consistency/column-level-conflicts)). ---- -> - `row_origin` — Origin of the previous change made on the tuple (see [Origin conflict detection](../conflict-management/conflicts/03_conflict_detection/#origin-conflict-detection)). This is the only method supported that doesn't require an extra column in the table. -> - `row_version` — Row version column (see [Row version conflict detection](../conflict-management/conflicts/03_conflict_detection/#row-version-conflict-detection)). -> - `column_commit_timestamp` — Per-column commit timestamps (described in [CLCD](../conflict-management/column-level-conflicts)). -> - `column_modify_timestamp` — Per-column modification timestamp (described in [CLCD](../conflict-management/column-level-conflicts)). -33c33 -< For more information about the difference between `column_commit_timestamp` and `column_modify_timestamp` conflict detection methods, see [Current versus commit timestamp](../consistency/column-level-conflicts/03_timestamps). ---- -> For more information about the difference between `column_commit_timestamp` and `column_modify_timestamp` conflict detection methods, see [Current versus commit timestamp](../conflict-management/column-level-conflicts/03_timestamps). -diff -r 5/reference/conflicts.mdx 5.6/reference/conflicts.mdx -16c16 -< | `update_differing` | An incoming update's key row differs from a local row. This can happen only when using [row version conflict detection](../consistency/conflicts/03_conflict_detection/#row-version-conflict-detection). | ---- -> | `update_differing` | An incoming update's key row differs from a local row. This can happen only when using [row version conflict detection](../conflict-management/conflicts/03_conflict_detection/#row-version-conflict-detection). | -21,22c21,22 -< | `multiple_unique_conflicts` | The incoming row conflicts with multiple UNIQUE constraints/indexes in the target table. | -< | `delete_recently_updated` | An incoming delete with an older commit timestamp than the most recent update of the row on the current node or when using [row version conflict detection](../consistency/conflicts/03_conflict_detection/#row-version-conflict-detection). | ---- -> | `multiple_unique_conflicts`| An incoming row conflicts with multiple rows per UNIQUE/EXCLUDE indexes of the target table. 
| -> | `delete_recently_updated` | An incoming delete with an older commit timestamp than the most recent update of the row on the current node or when using [row version conflict detection](../conflict-management/conflicts/03_conflict_detection/#row-version-conflict-detection). | -diff -r 5/reference/functions-internal.mdx 5.6/reference/functions-internal.mdx -180c180 -< Use [`bdr.part_node`](/pgd/5/reference/nodes-management-interfaces#bdrpart_node) to remove a node from a PGD group. That function sets the node to `PARTED` state and enables reuse of the node name. ---- -> Use [`bdr.part_node`](/pgd/latest/reference/nodes-management-interfaces#bdrpart_node) to remove a node from a PGD group. That function sets the node to `PARTED` state and enables reuse of the node name. -495c495 -< Internal function underlying view `bdr.stat_activity`. Do not use directly. Use the [`bdr.stat_activity`](/pgd/5/reference/catalogs-visible#bdrstat_activity) view instead. ---- -> Internal function underlying view `bdr.stat_activity`. Do not use directly. Use the [`bdr.stat_activity`](/pgd/latest/reference/catalogs-visible#bdrstat_activity) view instead. -499c499 -< Internal helper function used when generating view `bdr.worker_tasks`. Do not use directly. Use the [`bdr.worker_tasks`](/pgd/5/reference/catalogs-visible#bdrworker_tasks) view instead. ---- -> Internal helper function used when generating view `bdr.worker_tasks`. Do not use directly. Use the [`bdr.worker_tasks`](/pgd/latest/reference/catalogs-visible#bdrworker_tasks) view instead. -503c503 -< Internal function used when generating view `bdr.node_replication_rates`. Do not use directly. Use the [`bdr.node_replication_rates`](/pgd/5/reference/catalogs-visible#bdrnode_replication_rates) view instead. ---- -> Internal function used when generating view `bdr.node_replication_rates`. Do not use directly. Use the [`bdr.node_replication_rates`](/pgd/latest/reference/catalogs-visible#bdrnode_replication_rates) view instead. -507c507 -< Internal function used when generating view `bdr.group_raft_details`. Do not use directly. Use the [`bdr.group_raft_details`](/pgd/5/reference/catalogs-visible#bdrgroup_raft_details) view instead. ---- -> Internal function used when generating view `bdr.group_raft_details`. Do not use directly. Use the [`bdr.group_raft_details`](/pgd/latest/reference/catalogs-visible#bdrgroup_raft_details) view instead. -511c511 -< Internal function used when generating view `bdr.group_camo_details`. Do not use directly. Use the [`bdr.group_camo_details`](/pgd/5/reference/catalogs-visible#bdrgroup_camo_details) view instead. ---- -> Internal function used when generating view `bdr.group_camo_details`. Do not use directly. Use the [`bdr.group_camo_details`](/pgd/latest/reference/catalogs-visible#bdrgroup_camo_details) view instead. -515c515 -< Internal function used when generating view `bdr.group_raft_details`. Do not use directly. Use the [`bdr.group_raft_details`](/pgd/5/reference/catalogs-visible#bdrgroup_raft_details) view instead. ---- -> Internal function used when generating view `bdr.group_raft_details`. Do not use directly. Use the [`bdr.group_raft_details`](/pgd/latest/reference/catalogs-visible#bdrgroup_raft_details) view instead. -519c519 -< Internal function used when generating view `bdr.group_replslots_details`. Do not use directly. Use the [`bdr.group_replslots_details`](/pgd/5/reference/catalogs-visible#bdrgroup_replslots_details) view instead. 
---- -> Internal function used when generating view `bdr.group_replslots_details`. Do not use directly. Use the [`bdr.group_replslots_details`](/pgd/latest/reference/catalogs-visible#bdrgroup_replslots_details) view instead. -523c523 -< Internal function used when generating view `bdr.group_subscription_summary`. Do not use directly. Use the [`bdr.group_subscription_summary`](/pgd/5/reference/catalogs-visible#bdrgroup_subscription_summary) view instead. ---- -> Internal function used when generating view `bdr.group_subscription_summary`. Do not use directly. Use the [`bdr.group_subscription_summary`](/pgd/latest/reference/catalogs-visible#bdrgroup_subscription_summary) view instead. -527c527 -< Internal function used when generating view `bdr.group_versions_details`. Do not use directly. Use the [`bdr.group_versions_details`](/pgd/5/reference/catalogs-visible#bdrgroup_versions_details) view instead. ---- -> Internal function used when generating view `bdr.group_versions_details`. Do not use directly. Use the [`bdr.group_versions_details`](/pgd/latest/reference/catalogs-visible#bdrgroup_versions_details) view instead. -531c531 -< Internal function used when generating view `bdr.group_raft_details`. Do not use directly. Use the [`bdr.group_raft_details`](/pgd/5/reference/catalogs-visible#bdrgroup_raft_details) view instead. -\ No newline at end of file ---- -> Internal function used when generating view `bdr.group_raft_details`. Do not use directly. Use the [`bdr.group_raft_details`](/pgd/latest/reference/catalogs-visible#bdrgroup_raft_details) view instead. -\ No newline at end of file -diff -r 5/reference/functions.mdx 5.6/reference/functions.mdx -59c59 -< It's also used with [Connection pools and proxies](../durability/camo#connection-pools-and-proxies). ---- -> It's also used with [Connection pools and proxies](../commit-scopes/camo#connection-pools-and-proxies). -73c73 -< using `PQparameterStatus` or equivalent. See [Application use](../durability/camo#application-use) ---- -> using `PQparameterStatus` or equivalent. See [Application use](../commit-scopes/camo#application-use) -271,272c271 -< Allows you to wait until the last write on this session was replayed -< to one or all nodes. ---- -> Allows you to wait until the last write on this session was replayed to one or all nodes. -274,275c273,274 -< Waits until a slot passes a certain LSN. If no position is supplied, the -< current write position is used on the local node. ---- -> Waits until a slot passes a certain LSN. -> If no position is supplied, the current write position is used on the local node. -284a284,286 -> If you are using [Optimized Topology](../nodes/subscriber_only/optimizing-so), we recommend using [`bdr.wait_node_confirm_lsn`](/pgd/5.6/reference/functions#bdrwait_node_confirm_lsn) instead. -> ) -> -290a293,296 -> #### Notes -> -> Requires `bdr_application` privileges to use. -> -293,295c299,302 -< - `slot_name` — Name of replication slot or, if NULL, all PGD slots (only). -< - `target_lsn` — LSN to wait for or, if NULL, use the current write LSN on the -< local node. ---- -> | Parameter | Description | -> |--------------|-----------------------------------------------------------------------------| -> | `slot_name` | Name of the replication slot to wait for. If NULL, waits for all PGD slots. | -> | `target_lsn` | LSN to wait for. If NULL, uses the current write LSN on the local node. | -296a304,338 -> ### `bdr.wait_node_confirm_lsn` -> -> Wait until a node passes a certain LSN. 
-> -> This function allows you to wait until the last write on this session was replayed to one or all nodes. -> -> Upon being called, the function waits for a node to pass a certain LSN. -> If no LSN is supplied, the current wal_flush_lsn (using the `pg_current_wal_flush_lsn()` function) position is used on the local node. -> Supplying a node name parameter tells the function to wait for that node to pass the LSN. -> If no node name is supplied (by passing NULL), the function waits until all the nodes pass the LSN, -> -> We recommend using this function if you are using [Optimized Topology](../nodes/subscriber_only/optimizing-so) instead of [`bdr.wait_slot_confirm_lsn`](/pgd/5.6/reference/functions#bdrwait_slot_confirm_lsn). -> -> This is because in an Optimized Topology, not all nodes have replication slots, so the function `bdr.wait_slot_confirm_lsn` might not work as expected. `bdr.wait_node_confirm_lsn` is designed to work with nodes that don't have replication slots, using alternative strategies to determine the progress of a node. -> -> If a node is currently down, isn't updating, or simply can't be connected to, the wait will continue indefinitely. To avoid this condition, set the statement_timeout to the maximum amount of time you are prepared to wait. -> -> #### Synopsis -> -> ```sql -> bdr.wait_node_confirm_lsn(node_name text DEFAULT NULL, target_lsn pg_lsn DEFAULT NULL) -> ``` -> -> #### Parameters -> -> | Parameter | Description | -> |--------------|-----------------------------------------------------------------------------| -> | `node_name` | Name of the node to wait for. If NULL, waits for all nodes. | -> | `target_lsn` | LSN to wait for. If NULL, uses the current wal_flush_lsn on the local node. | -> -> #### Notes -> -> Requires `bdr_application` privileges to use. -> -> -330,335c372,375 -< - `peer_node_name` — The name of the peer node from which incoming -< transactions are expected to be queued and to wait -< for. If NULL, waits for all peer node's apply queue to be consumed. -< - `target_lsn` — The LSN in the replication stream from the peer node -< to wait for, usually learned by way of `bdr.last_committed_lsn` from the -< peer node. ---- -> | Parameter | Description | -> | --------- | ----------- | -> | `peer_node_name` | The name of the peer node from which incoming transactions are expected to be queued and to wait for. If NULL, waits for all peer node's apply queue to be consumed. | -> | `target_lsn` | The LSN in the replication stream from the peer node to wait for, usually learned by way of `bdr.last_committed_lsn` from the peer node. | -355,360c395,398 -< - `node_name` — The name of the node that's the source of the -< replication stream whose LSN is being retrieved. -< - `committed` —; The default (true) makes this function take into -< account only commits of transactions received rather than the last -< LSN overall. This includes actions that have no effect on the subscriber -< node. ---- -> | Parameter | Description | -> | --------- | ----------- | -> | `node_name` | The name of the node that's the source of the replication stream whose LSN is being retrieved. | -> | `committed` | The default (true) makes this function take into account only commits of transactions received rather than the last LSN overall. This includes actions that have no effect on the subscriber node. | -375,376c413,415 -< - `node_name` — the name of the node that's the source of the -< replication stream whose LSN is being retrieved. 
---- -> | Parameter | Description | -> | --------- | ----------- | -> | `node_name` | The name of the node that's the source of the replication stream whose LSN is being retrieved. | -393,396c432,437 -< - `ddl_cmd` — DDL command to execute. -< - `replication_sets` — An array of replication set names to apply the `ddlcommand` to. If NULL (or the function is only passed the `ddlcommand`), this is set to the active PGD groups's default replication set. -< - `ddl_locking` — A string that sets the [`bdr.ddl_locking`](/pgd/5/reference/pgd-settings#bdrddl_locking) value while replicating. Defaults to the GUC value for `bdr.ddl_locking` on the local system that's running `replicate_ddl_command`. -< - `execute_locally` — A Boolean that determines whether the DDL command executes locally. Defaults to true. ---- -> | Parameter | Description | -> | --------- | ----------- | -> | `ddl_cmd` | DDL command to execute. | -> | `replication_sets` | An array of replication set names to apply the `ddlcommand` to. If NULL (or the function is passed only the `ddlcommand`), this parameter is set to the active PGD groups's default replication set. | -> | `ddl_locking` | A string that sets the [`bdr.ddl_locking`](/pgd/latest/reference/pgd-settings#bdrddl_locking) value while replicating. Defaults to the GUC value for `bdr.ddl_locking` on the local system that's running `replicate_ddl_command`. | -> | `execute_locally` | A Boolean that determines whether the DDL command executes locally. Defaults to true. | -421c462,464 -< - `query` — Arbitrary query to execute. ---- -> | Parameter | Description | -> |-----------|-----------------------------| -> | `query` | Arbitrary query to execute. | -511,512c554,557 -< - `node_names` — Text ARRAY of node names where query is executed. -< - `query` — Arbitrary query to execute. ---- -> | Parameter | Description | -> |--------------|-------------------------------------------------------| -> | `node_names` | Text ARRAY of node names where the query is executed. | -> | `query` | Arbitrary query to execute. | -550,551c595,598 -< - `node_group_name` — Name of node group where query is executed. -< - `query` — Arbitrary query to execute. ---- -> | Parameter | Description | -> |-----------|-------------------------------------------------------| -> | `node_group_name` | Name of the node group where the query is executed. | -> | `query` | Arbitrary query to execute. | -586c633,635 -< - `relation` — Name or oid of the relation to lock. ---- -> | Parameter | Description | -> |------------|--------------------------------------| -> | `relation` | Name or oid of the relation to lock. | -613,614c662,666 -< - `origin_node_id` — Node id of the node where the transaction -< originated. ---- -> | Parameter | Description | -> |------------------|----------------------------------------------------------------------------------------------------------| -> | `origin_node_id` | Node id of the node where the transaction originated. | -> | `origin_topxid` | XID of the transaction. | -> | `allnodes` | If `true`, wait for the transaction to progress on all nodes. Otherwise, wait only for the current node. | -616,620d667 -< - `origin_topxid` — XID of the transaction. -< -< - `allnodes` — If `true` then wait for the transaction to progress on -< all nodes. Otherwise wait only for the current node. -< -674,675c721,724 -< - `node_name` — Name of the node to change kind. -< - `node_kind` — Kind of the node, which can be one of: `data`, `standby`, `witness`, or `subscriber-only`. 
---- -> | Parameter | Description | -> |------------|--------------------------------------| -> | `node_name` | Name of the node to change kind. | -> | `node_kind` | Kind of the node. | -832,833c881,884 -< - `key1` — First part of the composite key. -< - `key2` — second part of the composite key. ---- -> | Parameter | Description | -> |-----------|-------------| -> | `key1` | First part of the composite key. | -> | `key2` | Second part of the composite key. | -849c900,902 -< - `key` — The object on which an advisory lock is acquired. ---- -> | Parameter | Description | -> |-----------|---------------------------------------------------| -> | `key` | The object on which an advisory lock is acquired. | -850a904 -> -859,860c913,916 -< - `key1` — First part of the composite key. -< - `key2` — Second part of the composite key. ---- -> | Parameter | Description | -> |-----------|-----------------------------------| -> | `key1` | First part of the composite key. | -> | `key2` | Second part of the composite key. | -897c953,955 -< - `node_group_name` — The node group name to check. ---- -> | Parameter | Description | -> |-------------------|-------------------------------| -> | `node_group_name` | The node group name to check. | -899d956 -< -913a971,972 -> This function also provides status information on subscriber-only nodes that are operating as subscriber-only group leaders in a PGD cluster when [optimized topology](../nodes/subscriber_only/optimizing-so) is enabled. -> -922,923c981 -< This function returns a record with fields `status` and `message`, -< as explained in [Monitoring replication slots](../monitoring/sql/#monitoring-replication-slots). ---- -> This function returns a record with fields `status` and `message`. -924a983,992 -> | Status | Message | -> |----------|------------------------------------------------------------| -> | UNKNOWN | This node is not part of any BDR group | -> | OK | All BDR replication slots are working correctly | -> | OK | This node is part of a subscriber-only group | -> | CRITICAL | There is at least 1 BDR replication slot which is inactive | -> | CRITICAL | There is at least 1 BDR replication slot which is missing | -> -> Further explaination is available in [Monitoring replication slots](../monitoring/sql/#monitoring-replication-slots). -> -927c995 -< If the [decoding worker](../node_management/decoding_worker/) is enabled, this ---- -> If the [decoding worker](../decoding_worker/) is enabled, this -939c1007,1012 -< - `pid` — PID of the WAL sender (corresponds to the `pid` column of `pg_stat_replication`). ---- -> | Column name | Description | -> |---------------------|-------------------------------------------------------------------------------------| -> | `pid` | PID of the WAL sender. (Corresponds to the `pid` column of `pg_stat_replication`). | -> | `is_using_lcr` | Whether the WAL sender is sending LCR files. | -> | `decoder_slot_name` | Name of the decoder replication slot. | -> | `lcr_file_name` | Name of the current LCR file. | -941d1013 -< - `is_using_lcr` — Whether the WAL sender is sending LCR files. The next columns are `NULL` if `is_using_lcr` is `FALSE`. -943,947d1014 -< - `decoder_slot_name` — The name of the decoder replication slot. -< -< - `lcr_file_name` — The name of the current LCR file. 
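For example, a minimal monitoring sketch using the functions covered above might look like the following. It assumes only that it runs on a data node with the BDR extension installed; the output values depend on your cluster.

```sql
-- Minimal sketch: overall replication slot health.
-- Returns a status ('OK', 'CRITICAL', or 'UNKNOWN') and an explanatory message.
SELECT status, message FROM bdr.monitor_local_replslots();

-- Per-WAL-sender detail, including whether each sender is serving LCR files
-- produced by the decoding worker.
SELECT pid, is_using_lcr, decoder_slot_name, lcr_file_name
FROM bdr.wal_sender_stats();
```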
-< -< -950c1017 -< If the [decoding worker](../node_management/decoding_worker/) is enabled, this function ---- -> If the [decoding worker](../decoding_worker/) is enabled, this function -963c1030,1035 -< - `pid` — The PID of the decoding worker (corresponds to the column `active_pid` in `pg_replication_slots`). ---- -> | Column name | Description | -> |-------------|-------------| -> | `pid` | The PID of the decoding worker. (Corresponds to the column `active_pid` in `pg_replication_slots`.) | -> | `decoded_upto_lsn` | LSN up to which the decoding worker read transactional logs. | -> | `waiting` | Whether the decoding worker is waiting for new WAL. | -> | `waiting_for_lsn` | The LSN of the next expected WAL. | -965,970d1036 -< - `decoded_upto_lsn` — LSN up to which the decoding worker read transactional logs. -< -< - `waiting` — Whether the decoding worker is waiting for new WAL. -< -< - `waiting_for_lsn` — The LSN of the next expected WAL. -< -977c1043 -< If [Lag Control](../durability/lag-control#configuration) is enabled, this function ---- -> If [Lag Control](../commit-scopes/lag-control#configuration) is enabled, this function -989c1055,1068 -< - `commit_scope_id` — OID of the commit scope (see [`bdr.commit_scopes`](/pgd/5/reference/catalogs-visible#bdrcommit_scopes))). ---- -> | Column name | Description | -> |----------------------------|---------------------------------------------------------------------------------------------------------------------------| -> | `commit_scope_id` | OID of the commit scope (see [`bdr.commit_scopes`](/pgd/latest/reference/catalogs-visible#bdrcommit_scopes)). | -> | `sessions` | Number of sessions referencing the lag control entry. | -> | `current_commit_delay` | Current runtime commit delay, in fractional milliseconds. | -> | `maximum_commit_delay` | Configured maximum commit delay, in fractional milliseconds. | -> | `commit_delay_adjust` | Change to runtime commit delay possible during a sample interval, in fractional milliseconds. | -> | `current_conforming_nodes` | Current runtime number of nodes conforming to lag measures. | -> | `minimum_conforming_nodes` | Configured minimum number of nodes required to conform to lag measures, below which a commit delay adjustment is applied. | -> | `lag_bytes_threshold` | Lag size at which a commit delay is applied, in kilobytes. | -> | `maximum_lag_bytes` | Configured maximum lag size, in kilobytes. | -> | `lag_time_threshold` | Lag time at which a commit delay is applied, in milliseconds. | -> | `maximum_lag_time` | Configured maximum lag time, in milliseconds. | -> | `sample_interval` | Configured minimum time between lag samples and possible commit delay adjustments, in milliseconds. | -991d1069 -< - `sessions` — Number of sessions referencing the lag control entry. -993,1015d1070 -< - `current_commit_delay` — Current runtime commit delay, in fractional milliseconds. -< -< - `maximum_commit_delay` — Configured maximum commit delay, in fractional milliseconds. -< -< - `commit_delay_adjust` — Change to runtime commit delay possible during -< a sample interval, in fractional milliseconds. -< -< - `curent_conforming_nodes` — Current runtime number of nodes conforming to lag measures. -< -< - `minimum_conforming_nodes` — Configured minimum number of nodes required to -< conform to lag measures, below which a commit delay adjustment is applied. -< -< - `lag_bytes_threshold` — Lag size at which a commit delay is applied, in kilobytes. -< -< - `maximum_lag_bytes` — Configured maximum lag size, in kilobytes. 
-< -< - `lag_time_threshold` — Lag time at which a commit delay is applied, in milliseconds. -< -< - `maximum_lag_time` — Configured maximum lag time, in milliseconds. -< -< - `sample_interval` — Configured minimum time between lag samples and possible -< commit delay adjustments, in milliseconds. -< -1018c1073 -< CAMO requires that a client actively participates in the committing of a transaction by following the transactions progress. The functions listed here are used for that purpose and explained in [CAMO](../durability/camo). ---- -> CAMO requires that a client actively participates in the committing of a transaction by following the transactions progress. The functions listed here are used for that purpose and explained in [CAMO](../commit-scopes/camo). -1081c1136 -< ### bdr.logical_transaction_status ---- -> ### `bdr.logical_transaction_status` -1095,1097c1150,1154 -< - `node_id` — The node id of the PGD node the transaction originates from, usually retrieved by the client before `COMMIT` from the [PQ parameter](https://www.postgresql.org/docs/current/libpq-status.html#LIBPQ-PQPARAMETERSTATUS) `bdr.local_node_id`. -< - `xid` — The transaction id on the origin node, usually retrieved by the client before `COMMIT` from the [PQ parameter](https://www.postgresql.org/docs/current/libpq-status.html#LIBPQ-PQPARAMETERSTATUS) `transaction_id`. -< - `require_camo_partner` — Defaults to true and enables configuration checks. Set to false to disable these checks and query the status of a transaction that wasn't a CAMO transaction. ---- -> | Parameter | Description | -> |------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -> | `node_id` | The node id of the PGD node the transaction originates from, usually retrieved by the client before `COMMIT` from the [PQ parameter](https://www.postgresql.org/docs/current/libpq-status.html#LIBPQ-PQPARAMETERSTATUS) `bdr.local_node_id`. | -> | `xid` | The transaction id on the origin node, usually retrieved by the client before `COMMIT` from the [PQ parameter](https://www.postgresql.org/docs/current/libpq-status.html#LIBPQ-PQPARAMETERSTATUS) `transaction_id`. | -> | `require_camo_partner` | Defaults to true and enables configuration checks. Set to false to disable these checks and query the status of a transaction that wasn't a CAMO transaction. | -1120c1177 -< `bdr.add_commit_scope` creates a rule for the given commit scope name and origin node group. If the rule is the same for all nodes in the EDB Postgres Distributed cluster, invoking this function once for the top-level node group is enough to fully define the commit scope. ---- -> **Deprecated**. Use [`bdr.create_commit_scope`](/pgd/5.6/reference/functions#bdrcreate_commit_scope) instead. Previously, this function was used to add a commit scope to a node group. It's now deprecated and will emit a warning until it is removed in a future release, at which point it will raise an error. -1121a1179,1182 -> ### `bdr.create_commit_scope` -> -> `bdr.create_commit_scope` creates a rule for the given commit scope name and origin node group. If the rule is the same for all nodes in the EDB Postgres Distributed cluster, invoking this function once for the top-level node group is enough to fully define the commit scope. 
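As a concrete illustration of the description above (the synopsis follows), here is a sketch of a `bdr.create_commit_scope` call. It assumes a top-level node group named `top_level_group`; substitute your own cluster's top-level group name, and adjust the rule text, which follows the commit scope syntax documented earlier in this file.

```sql
-- Minimal sketch: define a commit scope on an assumed group named 'top_level_group'.
-- The rule requires a majority of the group to confirm synchronously and
-- degrades to asynchronous commit after a 10 second timeout.
SELECT bdr.create_commit_scope(
    commit_scope_name := 'majority_sync_scope',
    origin_node_group := 'top_level_group',
    rule := 'MAJORITY (top_level_group) SYNCHRONOUS COMMIT DEGRADE ON (timeout = 10s) TO ASYNC',
    wait_for_ready := true
);
```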
-> -1127c1188 -< bdr.add_commit_scope( ---- -> bdr.create_commit_scope( -1133a1195,1198 -> #### Note -> -> `bdr.create_commit_scope` replaces the deprecated [`bdr.add_commit_scope`](/pgd/5.6/reference/functions#bdradd_commit_scope) function. Unlike `add_commit_scope`, it does not silently overwrite existing commit scopes when the same name is used. Instead, an error is reported. -> -1147c1212 -< #### Note ---- -> ### `bdr.drop_commit_scope` -1149,1156d1213 -< When using `bdr.add_commit_scope`, if a new commit scope is added that has the -< same name as a commit scope on any group, then the commit scope silently -< overwrites the commit scope but retains the original group the scope was -< associated with (if any). To modify a commit scope safely, use -< [`bdr.alter_commit_scope`](#bdralter_commit_scope). -< -< ### `bdr.remove_commit_scope` -< -1162c1219 -< bdr.remove_commit_scope( ---- -> bdr.drop_commit_scope( -1168c1225,1229 -< Removing a commit scope that's still used as default by a node group isn't allowed. ---- -> Dropping a commit scope that's still used as default by a node group isn't allowed. -> -> ### `bdr.remove_commit_scope` -> -> **Deprecated**. Use [`bdr.drop_commit_scope`](/pgd/5.6/reference/functions#bdrdrop_commit_scope) instead. Previously, this function was used to remove a commit scope from a node group. It's now deprecated and will emit a warning until it is removed in a future release, at which point it will raise an error. -diff -r 5/reference/index.json 5.6/reference/index.json -2,351c2,371 -< "bdrcamo_decision_journal": "/pgd/latest/reference/catalogs-visible#bdrcamo_decision_journal", -< "bdrcommit_scopes": "/pgd/latest/reference/catalogs-visible#bdrcommit_scopes", -< "bdrconflict_history": "/pgd/latest/reference/catalogs-visible#bdrconflict_history", -< "bdrconflict_history_summary": "/pgd/latest/reference/catalogs-visible#bdrconflict_history_summary", -< "bdrconsensus_kv_data": "/pgd/latest/reference/catalogs-visible#bdrconsensus_kv_data", -< "bdrcrdt_handlers": "/pgd/latest/reference/catalogs-visible#bdrcrdt_handlers", -< "bdrddl_replication": "/pgd/latest/reference/pgd-settings#bdrddl_replication", -< "bdrdepend": "/pgd/latest/reference/catalogs-visible#bdrdepend", -< "bdrglobal_consensus_journal": "/pgd/latest/reference/catalogs-visible#bdrglobal_consensus_journal", -< "bdrglobal_consensus_journal_details": "/pgd/latest/reference/catalogs-visible#bdrglobal_consensus_journal_details", -< "bdrglobal_consensus_response_journal": "/pgd/latest/reference/catalogs-visible#bdrglobal_consensus_response_journal", -< "bdrglobal_lock": "/pgd/latest/reference/catalogs-visible#bdrglobal_lock", -< "bdrglobal_locks": "/pgd/latest/reference/catalogs-visible#bdrglobal_locks", -< "bdrgroup_camo_details": "/pgd/latest/reference/catalogs-visible#bdrgroup_camo_details", -< "bdrgroup_raft_details": "/pgd/latest/reference/catalogs-visible#bdrgroup_raft_details", -< "bdrgroup_replslots_details": "/pgd/latest/reference/catalogs-visible#bdrgroup_replslots_details", -< "bdrgroup_subscription_summary": "/pgd/latest/reference/catalogs-visible#bdrgroup_subscription_summary", -< "bdrgroup_versions_details": "/pgd/latest/reference/catalogs-visible#bdrgroup_versions_details", -< "bdrlocal_consensus_snapshot": "/pgd/latest/reference/catalogs-visible#bdrlocal_consensus_snapshot", -< "bdrlocal_consensus_state": "/pgd/latest/reference/catalogs-visible#bdrlocal_consensus_state", -< "bdrlocal_node": "/pgd/latest/reference/catalogs-visible#bdrlocal_node", -< "bdrlocal_node_summary": 
"/pgd/latest/reference/catalogs-visible#bdrlocal_node_summary", -< "bdrlocal_sync_status": "/pgd/latest/reference/catalogs-visible#bdrlocal_sync_status", -< "bdrnode": "/pgd/latest/reference/catalogs-visible#bdrnode", -< "bdrnode_catchup_info": "/pgd/latest/reference/catalogs-visible#bdrnode_catchup_info", -< "bdrnode_catchup_info_details": "/pgd/latest/reference/catalogs-visible#bdrnode_catchup_info_details", -< "bdrnode_conflict_resolvers": "/pgd/latest/reference/catalogs-visible#bdrnode_conflict_resolvers", -< "bdrnode_group": "/pgd/latest/reference/catalogs-visible#bdrnode_group", -< "bdrnode_group_replication_sets": "/pgd/latest/reference/catalogs-visible#bdrnode_group_replication_sets", -< "bdrnode_group_summary": "/pgd/latest/reference/catalogs-visible#bdrnode_group_summary", -< "bdrnode_local_info": "/pgd/latest/reference/catalogs-visible#bdrnode_local_info", -< "bdrnode_log_config": "/pgd/latest/reference/catalogs-visible#bdrnode_log_config", -< "bdrnode_peer_progress": "/pgd/latest/reference/catalogs-visible#bdrnode_peer_progress", -< "bdrnode_replication_rates": "/pgd/latest/reference/catalogs-visible#bdrnode_replication_rates", -< "bdrnode_slots": "/pgd/latest/reference/catalogs-visible#bdrnode_slots", -< "bdrnode_summary": "/pgd/latest/reference/catalogs-visible#bdrnode_summary", -< "bdrqueue": "/pgd/latest/reference/catalogs-visible#bdrqueue", -< "bdrreplication_set": "/pgd/latest/reference/catalogs-visible#bdrreplication_set", -< "bdrreplication_set_table": "/pgd/latest/reference/catalogs-visible#bdrreplication_set_table", -< "bdrreplication_set_ddl": "/pgd/latest/reference/catalogs-visible#bdrreplication_set_ddl", -< "bdrreplication_sets": "/pgd/latest/reference/catalogs-visible#bdrreplication_sets", -< "bdrschema_changes": "/pgd/latest/reference/catalogs-visible#bdrschema_changes", -< "bdrsequence_alloc": "/pgd/latest/reference/catalogs-visible#bdrsequence_alloc", -< "bdrsequences": "/pgd/latest/reference/catalogs-visible#bdrsequences", -< "bdrstat_activity": "/pgd/latest/reference/catalogs-visible#bdrstat_activity", -< "bdrstat_relation": "/pgd/latest/reference/catalogs-visible#bdrstat_relation", -< "bdrstat_subscription": "/pgd/latest/reference/catalogs-visible#bdrstat_subscription", -< "bdrsubscription": "/pgd/latest/reference/catalogs-visible#bdrsubscription", -< "bdrsubscription_summary": "/pgd/latest/reference/catalogs-visible#bdrsubscription_summary", -< "bdrtables": "/pgd/latest/reference/catalogs-visible#bdrtables", -< "bdrtaskmgr_work_queue": "/pgd/latest/reference/catalogs-visible#bdrtaskmgr_work_queue", -< "bdrtaskmgr_workitem_status": "/pgd/latest/reference/catalogs-visible#bdrtaskmgr_workitem_status", -< "bdrtaskmgr_local_work_queue": "/pgd/latest/reference/catalogs-visible#bdrtaskmgr_local_work_queue", -< "bdrtaskmgr_local_workitem_status": "/pgd/latest/reference/catalogs-visible#bdrtaskmgr_local_workitem_status", -< "bdrtrigger": "/pgd/latest/reference/catalogs-visible#bdrtrigger", -< "bdrtriggers": "/pgd/latest/reference/catalogs-visible#bdrtriggers", -< "bdrworkers": "/pgd/latest/reference/catalogs-visible#bdrworkers", -< "bdrwriters": "/pgd/latest/reference/catalogs-visible#bdrwriters", -< "bdrworker_tasks": "/pgd/latest/reference/catalogs-visible#bdrworker_tasks", -< "bdrbdr_version": "/pgd/latest/reference/functions#bdrbdr_version", -< "bdrbdr_version_num": "/pgd/latest/reference/functions#bdrbdr_version_num", -< "bdrget_relation_stats": "/pgd/latest/reference/functions#bdrget_relation_stats", -< "bdrget_subscription_stats": 
"/pgd/latest/reference/functions#bdrget_subscription_stats", -< "bdrlocal_node_id": "/pgd/latest/reference/functions#bdrlocal_node_id", -< "bdrlast_committed_lsn": "/pgd/latest/reference/functions#bdrlast_committed_lsn", -< "transaction_id": "/pgd/latest/reference/functions#transaction_id", -< "bdris_node_connected": "/pgd/latest/reference/functions#bdris_node_connected", -< "bdris_node_ready": "/pgd/latest/reference/functions#bdris_node_ready", -< "bdrconsensus_disable": "/pgd/latest/reference/functions#bdrconsensus_disable", -< "bdrconsensus_enable": "/pgd/latest/reference/functions#bdrconsensus_enable", -< "bdrconsensus_proto_version": "/pgd/latest/reference/functions#bdrconsensus_proto_version", -< "bdrconsensus_snapshot_export": "/pgd/latest/reference/functions#bdrconsensus_snapshot_export", -< "bdrconsensus_snapshot_import": "/pgd/latest/reference/functions#bdrconsensus_snapshot_import", -< "bdrconsensus_snapshot_verify": "/pgd/latest/reference/functions#bdrconsensus_snapshot_verify", -< "bdrget_consensus_status": "/pgd/latest/reference/functions#bdrget_consensus_status", -< "bdrget_raft_status": "/pgd/latest/reference/functions#bdrget_raft_status", -< "bdrraft_leadership_transfer": "/pgd/latest/reference/functions#bdrraft_leadership_transfer", -< "bdrwait_slot_confirm_lsn": "/pgd/latest/reference/functions#bdrwait_slot_confirm_lsn", -< "bdrwait_for_apply_queue": "/pgd/latest/reference/functions#bdrwait_for_apply_queue", -< "bdrget_node_sub_receive_lsn": "/pgd/latest/reference/functions#bdrget_node_sub_receive_lsn", -< "bdrget_node_sub_apply_lsn": "/pgd/latest/reference/functions#bdrget_node_sub_apply_lsn", -< "bdrreplicate_ddl_command": "/pgd/latest/reference/functions#bdrreplicate_ddl_command", -< "bdrrun_on_all_nodes": "/pgd/latest/reference/functions#bdrrun_on_all_nodes", -< "bdrrun_on_nodes": "/pgd/latest/reference/functions#bdrrun_on_nodes", -< "bdrrun_on_group": "/pgd/latest/reference/functions#bdrrun_on_group", -< "bdrglobal_lock_table": "/pgd/latest/reference/functions#bdrglobal_lock_table", -< "bdrwait_for_xid_progress": "/pgd/latest/reference/functions#bdrwait_for_xid_progress", -< "bdrlocal_group_slot_name": "/pgd/latest/reference/functions#bdrlocal_group_slot_name", -< "bdrnode_group_type": "/pgd/latest/reference/functions#bdrnode_group_type", -< "bdralter_node_kind": "/pgd/latest/reference/functions#bdralter_node_kind", -< "bdralter_subscription_skip_changes_upto": "/pgd/latest/reference/functions#bdralter_subscription_skip_changes_upto", -< "bdrglobal_advisory_lock": "/pgd/latest/reference/functions#bdrglobal_advisory_lock", -< "bdrglobal_advisory_unlock": "/pgd/latest/reference/functions#bdrglobal_advisory_unlock", -< "bdrmonitor_group_versions": "/pgd/latest/reference/functions#bdrmonitor_group_versions", -< "bdrmonitor_group_raft": "/pgd/latest/reference/functions#bdrmonitor_group_raft", -< "bdrmonitor_local_replslots": "/pgd/latest/reference/functions#bdrmonitor_local_replslots", -< "bdrwal_sender_stats": "/pgd/latest/reference/functions#bdrwal_sender_stats", -< "bdrget_decoding_worker_stat": "/pgd/latest/reference/functions#bdrget_decoding_worker_stat", -< "bdrlag_control": "/pgd/latest/reference/functions#bdrlag_control", -< "bdris_camo_partner_connected": "/pgd/latest/reference/functions#bdris_camo_partner_connected", -< "bdris_camo_partner_ready": "/pgd/latest/reference/functions#bdris_camo_partner_ready", -< "bdrget_configured_camo_partner": "/pgd/latest/reference/functions#bdrget_configured_camo_partner", -< "bdrwait_for_camo_partner_queue": 
"/pgd/latest/reference/functions#bdrwait_for_camo_partner_queue", -< "bdrcamo_transactions_resolved": "/pgd/latest/reference/functions#bdrcamo_transactions_resolved", -< "bdrlogical_transaction_status": "/pgd/latest/reference/functions#bdrlogical_transaction_status", -< "bdradd_commit_scope": "/pgd/latest/reference/functions#bdradd_commit_scope", -< "bdralter_commit_scope": "/pgd/latest/reference/functions#bdralter_commit_scope", -< "bdrremove_commit_scope": "/pgd/latest/reference/functions#bdrremove_commit_scope", -< "bdrdefault_conflict_detection": "/pgd/latest/reference/pgd-settings#bdrdefault_conflict_detection", -< "bdrdefault_sequence_kind": "/pgd/latest/reference/pgd-settings#bdrdefault_sequence_kind", -< "bdrdefault_replica_identity": "/pgd/latest/reference/pgd-settings#bdrdefault_replica_identity", -< "bdrrole_replication": "/pgd/latest/reference/pgd-settings#bdrrole_replication", -< "bdrddl_locking": "/pgd/latest/reference/pgd-settings#bdrddl_locking", -< "bdrtruncate_locking": "/pgd/latest/reference/pgd-settings#bdrtruncate_locking", -< "bdrglobal_lock_max_locks": "/pgd/latest/reference/pgd-settings#bdrglobal_lock_max_locks", -< "bdrglobal_lock_timeout": "/pgd/latest/reference/pgd-settings#bdrglobal_lock_timeout", -< "bdrglobal_lock_statement_timeout": "/pgd/latest/reference/pgd-settings#bdrglobal_lock_statement_timeout", -< "bdrglobal_lock_idle_timeout": "/pgd/latest/reference/pgd-settings#bdrglobal_lock_idle_timeout", -< "bdrlock_table_locking": "/pgd/latest/reference/pgd-settings#bdrlock_table_locking", -< "bdrpredictive_checks": "/pgd/latest/reference/pgd-settings#bdrpredictive_checks", -< "bdrreplay_progress_frequency": "/pgd/latest/reference/pgd-settings#bdrreplay_progress_frequency", -< "bdrstandby_slot_names": "/pgd/latest/reference/pgd-settings#bdrstandby_slot_names", -< "bdrwriters_per_subscription": "/pgd/latest/reference/pgd-settings#bdrwriters_per_subscription", -< "bdrmax_writers_per_subscription": "/pgd/latest/reference/pgd-settings#bdrmax_writers_per_subscription", -< "bdrxact_replication": "/pgd/latest/reference/pgd-settings#bdrxact_replication", -< "bdrpermit_unsafe_commands": "/pgd/latest/reference/pgd-settings#bdrpermit_unsafe_commands", -< "bdrbatch_inserts": "/pgd/latest/reference/pgd-settings#bdrbatch_inserts", -< "bdrmaximum_clock_skew": "/pgd/latest/reference/pgd-settings#bdrmaximum_clock_skew", -< "bdrmaximum_clock_skew_action": "/pgd/latest/reference/pgd-settings#bdrmaximum_clock_skew_action", -< "bdraccept_connections": "/pgd/latest/reference/pgd-settings#bdraccept_connections", -< "bdrstandby_slots_min_confirmed": "/pgd/latest/reference/pgd-settings#bdrstandby_slots_min_confirmed", -< "bdrwriter_input_queue_size": "/pgd/latest/reference/pgd-settings#bdrwriter_input_queue_size", -< "bdrwriter_output_queue_size": "/pgd/latest/reference/pgd-settings#bdrwriter_output_queue_size", -< "bdrmin_worker_backoff_delay": "/pgd/latest/reference/pgd-settings#bdrmin_worker_backoff_delay", -< "bdrcrdt_raw_value": "/pgd/latest/reference/pgd-settings#bdrcrdt_raw_value", -< "bdrcommit_scope": "/pgd/latest/reference/pgd-settings#bdrcommit_scope", -< "bdrcamo_local_mode_delay": "/pgd/latest/reference/pgd-settings#bdrcamo_local_mode_delay", -< "bdrcamo_enable_client_warnings": "/pgd/latest/reference/pgd-settings#bdrcamo_enable_client_warnings", -< "bdrdefault_streaming_mode": "/pgd/latest/reference/pgd-settings#bdrdefault_streaming_mode", -< "bdrlag_control_max_commit_delay": "/pgd/latest/reference/pgd-settings#bdrlag_control_max_commit_delay", -< 
"bdrlag_control_max_lag_size": "/pgd/latest/reference/pgd-settings#bdrlag_control_max_lag_size", -< "bdrlag_control_max_lag_time": "/pgd/latest/reference/pgd-settings#bdrlag_control_max_lag_time", -< "bdrlag_control_min_conforming_nodes": "/pgd/latest/reference/pgd-settings#bdrlag_control_min_conforming_nodes", -< "bdrlag_control_commit_delay_adjust": "/pgd/latest/reference/pgd-settings#bdrlag_control_commit_delay_adjust", -< "bdrlag_control_sample_interval": "/pgd/latest/reference/pgd-settings#bdrlag_control_sample_interval", -< "bdrlag_control_commit_delay_start": "/pgd/latest/reference/pgd-settings#bdrlag_control_commit_delay_start", -< "bdrtimestamp_snapshot_keep": "/pgd/latest/reference/pgd-settings#bdrtimestamp_snapshot_keep", -< "bdrdebug_level": "/pgd/latest/reference/pgd-settings#bdrdebug_level", -< "bdrtrace_level": "/pgd/latest/reference/pgd-settings#bdrtrace_level", -< "bdrtrack_subscription_apply": "/pgd/latest/reference/pgd-settings#bdrtrack_subscription_apply", -< "bdrtrack_relation_apply": "/pgd/latest/reference/pgd-settings#bdrtrack_relation_apply", -< "bdrtrack_apply_lock_timing": "/pgd/latest/reference/pgd-settings#bdrtrack_apply_lock_timing", -< "bdrenable_wal_decoder": "/pgd/latest/reference/pgd-settings#bdrenable_wal_decoder", -< "bdrreceive_lcr": "/pgd/latest/reference/pgd-settings#bdrreceive_lcr", -< "bdrlcr_cleanup_interval": "/pgd/latest/reference/pgd-settings#bdrlcr_cleanup_interval", -< "bdrglobal_connection_timeout": "/pgd/latest/reference/pgd-settings#bdrglobal_connection_timeout", -< "bdrglobal_keepalives": "/pgd/latest/reference/pgd-settings#bdrglobal_keepalives", -< "bdrglobal_keepalives_idle": "/pgd/latest/reference/pgd-settings#bdrglobal_keepalives_idle", -< "bdrglobal_keepalives_interval": "/pgd/latest/reference/pgd-settings#bdrglobal_keepalives_interval", -< "bdrglobal_keepalives_count": "/pgd/latest/reference/pgd-settings#bdrglobal_keepalives_count", -< "bdrglobal_tcp_user_timeout": "/pgd/latest/reference/pgd-settings#bdrglobal_tcp_user_timeout", -< "bdrraft_global_election_timeout": "/pgd/latest/reference/pgd-settings#bdrraft_global_election_timeout", -< "bdrraft_group_election_timeout": "/pgd/latest/reference/pgd-settings#bdrraft_group_election_timeout", -< "bdrraft_response_timeout": "/pgd/latest/reference/pgd-settings#bdrraft_response_timeout", -< "bdrraft_keep_min_entries": "/pgd/latest/reference/pgd-settings#bdrraft_keep_min_entries", -< "bdrraft_log_min_apply_duration": "/pgd/latest/reference/pgd-settings#bdrraft_log_min_apply_duration", -< "bdrraft_log_min_message_duration": "/pgd/latest/reference/pgd-settings#bdrraft_log_min_message_duration", -< "bdrraft_group_max_connections": "/pgd/latest/reference/pgd-settings#bdrraft_group_max_connections", -< "bdrbackwards_compatibility": "/pgd/latest/reference/pgd-settings#bdrbackwards_compatibility", -< "bdrtrack_replication_estimates": "/pgd/latest/reference/pgd-settings#bdrtrack_replication_estimates", -< "bdrlag_tracker_apply_rate_weight": "/pgd/latest/reference/pgd-settings#bdrlag_tracker_apply_rate_weight", -< "bdrenable_auto_sync_reconcile": "/pgd/latest/reference/pgd-settings#bdrenable_auto_sync_reconcile", -< "list-of-node-states": "/pgd/latest/reference/nodes#list-of-node-states", -< "node-management-commands": "/pgd/latest/reference/nodes#node-management-commands", -< "bdr_init_physical": "/pgd/latest/reference/nodes#bdr_init_physical", -< "bdralter_node_group_option": "/pgd/latest/reference/nodes-management-interfaces#bdralter_node_group_option", -< "bdralter_node_interface": 
"/pgd/latest/reference/nodes-management-interfaces#bdralter_node_interface", -< "bdralter_node_option": "/pgd/latest/reference/nodes-management-interfaces#bdralter_node_option", -< "bdralter_subscription_enable": "/pgd/latest/reference/nodes-management-interfaces#bdralter_subscription_enable", -< "bdralter_subscription_disable": "/pgd/latest/reference/nodes-management-interfaces#bdralter_subscription_disable", -< "bdrcreate_node": "/pgd/latest/reference/nodes-management-interfaces#bdrcreate_node", -< "bdrcreate_node_group": "/pgd/latest/reference/nodes-management-interfaces#bdrcreate_node_group", -< "bdrjoin_node_group": "/pgd/latest/reference/nodes-management-interfaces#bdrjoin_node_group", -< "bdrpart_node": "/pgd/latest/reference/nodes-management-interfaces#bdrpart_node", -< "bdrpromote_node": "/pgd/latest/reference/nodes-management-interfaces#bdrpromote_node", -< "bdrswitch_node_group": "/pgd/latest/reference/nodes-management-interfaces#bdrswitch_node_group", -< "bdrwait_for_join_completion": "/pgd/latest/reference/nodes-management-interfaces#bdrwait_for_join_completion", -< "bdralter_node_group_config": "/pgd/latest/reference/nodes-management-interfaces#bdralter_node_group_config", -< "bdrdrop_node_group": "/pgd/latest/reference/nodes-management-interfaces#bdrdrop_node_group", -< "bdrcreate_proxy": "/pgd/latest/reference/routing#bdrcreate_proxy", -< "bdralter_proxy_option": "/pgd/latest/reference/routing#bdralter_proxy_option", -< "bdrdrop_proxy": "/pgd/latest/reference/routing#bdrdrop_proxy", -< "bdrrouting_leadership_transfer": "/pgd/latest/reference/routing#bdrrouting_leadership_transfer", -< "cs.commit-scope-syntax": "/pgd/latest/reference/commit-scopes#commit-scope-syntax", -< "cs.commit-scope-groups": "/pgd/latest/reference/commit-scopes#commit-scope-groups", -< "cs.any": "/pgd/latest/reference/commit-scopes#any", -< "cs.any-not": "/pgd/latest/reference/commit-scopes#any-not", -< "cs.majority": "/pgd/latest/reference/commit-scopes#majority", -< "cs.majority-not": "/pgd/latest/reference/commit-scopes#majority-not", -< "cs.all": "/pgd/latest/reference/commit-scopes#all", -< "cs.all-not": "/pgd/latest/reference/commit-scopes#all-not", -< "cs.confirmation-level": "/pgd/latest/reference/commit-scopes#confirmation-level", -< "cs.on-received": "/pgd/latest/reference/commit-scopes#on-received", -< "cs.on-replicated": "/pgd/latest/reference/commit-scopes#on-replicated", -< "cs.on-durable": "/pgd/latest/reference/commit-scopes#on-durable", -< "cs.on-visible": "/pgd/latest/reference/commit-scopes#on-visible", -< "cs.commit-scope-kinds": "/pgd/latest/reference/commit-scopes#commit-scope-kinds", -< "cs.group-commit": "/pgd/latest/reference/commit-scopes#group-commit", -< "cs.group-commit-parameters": "/pgd/latest/reference/commit-scopes#group-commit-parameters", -< "cs.abort-on-parameters": "/pgd/latest/reference/commit-scopes#abort-on-parameters", -< "cs.transaction_tracking-settings": "/pgd/latest/reference/commit-scopes#transaction_tracking-settings", -< "cs.conflict_resolution-settings": "/pgd/latest/reference/commit-scopes#conflict_resolution-settings", -< "cs.commit_decision-settings": "/pgd/latest/reference/commit-scopes#commit_decision-settings", -< "cs.camo": "/pgd/latest/reference/commit-scopes#camo", -< "cs.degrade-on-parameters": "/pgd/latest/reference/commit-scopes#degrade-on-parameters", -< "cs.lag-control": "/pgd/latest/reference/commit-scopes#lag-control", -< "cs.lag-control-parameters": "/pgd/latest/reference/commit-scopes#lag-control-parameters", -< "cs.synchronous_commit": 
"/pgd/latest/reference/commit-scopes#synchronous_commit", -< "conflict-detection": "/pgd/latest/reference/conflicts#conflict-detection", -< "list-of-conflict-types": "/pgd/latest/reference/conflicts#list-of-conflict-types", -< "conflict-resolution": "/pgd/latest/reference/conflicts#conflict-resolution", -< "list-of-conflict-resolvers": "/pgd/latest/reference/conflicts#list-of-conflict-resolvers", -< "default-conflict-resolvers": "/pgd/latest/reference/conflicts#default-conflict-resolvers", -< "list-of-conflict-resolutions": "/pgd/latest/reference/conflicts#list-of-conflict-resolutions", -< "conflict-logging": "/pgd/latest/reference/conflicts#conflict-logging", -< "bdralter_table_conflict_detection": "/pgd/latest/reference/conflict_functions#bdralter_table_conflict_detection", -< "bdralter_node_set_conflict_resolver": "/pgd/latest/reference/conflict_functions#bdralter_node_set_conflict_resolver", -< "bdralter_node_set_log_config": "/pgd/latest/reference/conflict_functions#bdralter_node_set_log_config", -< "bdrcreate_replication_set": "/pgd/latest/reference/repsets-management#bdrcreate_replication_set", -< "bdralter_replication_set": "/pgd/latest/reference/repsets-management#bdralter_replication_set", -< "bdrdrop_replication_set": "/pgd/latest/reference/repsets-management#bdrdrop_replication_set", -< "bdralter_node_replication_sets": "/pgd/latest/reference/repsets-management#bdralter_node_replication_sets", -< "bdrreplication_set_add_table": "/pgd/latest/reference/repsets-membership#bdrreplication_set_add_table", -< "bdrreplication_set_remove_table": "/pgd/latest/reference/repsets-membership#bdrreplication_set_remove_table", -< "bdrreplication_set_add_ddl_filter": "/pgd/latest/reference/repsets-ddl-filtering#bdrreplication_set_add_ddl_filter", -< "bdrreplication_set_remove_ddl_filter": "/pgd/latest/reference/repsets-ddl-filtering#bdrreplication_set_remove_ddl_filter", -< "pgd_bench": "/pgd/latest/reference/testingandtuning#pgd_bench", -< "bdralter_sequence_set_kind": "/pgd/latest/reference/sequences#bdralter_sequence_set_kind", -< "bdrextract_timestamp_from_snowflakeid": "/pgd/latest/reference/sequences#bdrextract_timestamp_from_snowflakeid", -< "bdrextract_nodeid_from_snowflakeid": "/pgd/latest/reference/sequences#bdrextract_nodeid_from_snowflakeid", -< "bdrextract_localseqid_from_snowflakeid": "/pgd/latest/reference/sequences#bdrextract_localseqid_from_snowflakeid", -< "bdrtimestamp_to_snowflakeid": "/pgd/latest/reference/sequences#bdrtimestamp_to_snowflakeid", -< "bdrextract_timestamp_from_timeshard": "/pgd/latest/reference/sequences#bdrextract_timestamp_from_timeshard", -< "bdrextract_nodeid_from_timeshard": "/pgd/latest/reference/sequences#bdrextract_nodeid_from_timeshard", -< "bdrextract_localseqid_from_timeshard": "/pgd/latest/reference/sequences#bdrextract_localseqid_from_timeshard", -< "bdrtimestamp_to_timeshard": "/pgd/latest/reference/sequences#bdrtimestamp_to_timeshard", -< "bdrgen_ksuuid_v2": "/pgd/latest/reference/sequences#bdrgen_ksuuid_v2", -< "bdrksuuid_v2_cmp": "/pgd/latest/reference/sequences#bdrksuuid_v2_cmp", -< "bdrextract_timestamp_from_ksuuid_v2": "/pgd/latest/reference/sequences#bdrextract_timestamp_from_ksuuid_v2", -< "bdrgen_ksuuid": "/pgd/latest/reference/sequences#bdrgen_ksuuid", -< "bdruuid_v1_cmp": "/pgd/latest/reference/sequences#bdruuid_v1_cmp", -< "bdrextract_timestamp_from_ksuuid": "/pgd/latest/reference/sequences#bdrextract_timestamp_from_ksuuid", -< "bdrautopartition": "/pgd/latest/reference/autopartition#bdrautopartition", -< "bdrdrop_autopartition": 
"/pgd/latest/reference/autopartition#bdrdrop_autopartition", -< "bdrautopartition_wait_for_partitions": "/pgd/latest/reference/autopartition#bdrautopartition_wait_for_partitions", -< "bdrautopartition_wait_for_partitions_on_all_nodes": "/pgd/latest/reference/autopartition#bdrautopartition_wait_for_partitions_on_all_nodes", -< "bdrautopartition_find_partition": "/pgd/latest/reference/autopartition#bdrautopartition_find_partition", -< "bdrautopartition_enable": "/pgd/latest/reference/autopartition#bdrautopartition_enable", -< "bdrautopartition_disable": "/pgd/latest/reference/autopartition#bdrautopartition_disable", -< "internal-functions": "/pgd/latest/reference/autopartition#internal-functions", -< "bdrautopartition_create_partition": "/pgd/latest/reference/autopartition#bdrautopartition_create_partition", -< "bdrautopartition_drop_partition": "/pgd/latest/reference/autopartition#bdrautopartition_drop_partition", -< "bdrcreate_conflict_trigger": "/pgd/latest/reference/streamtriggers/interfaces#bdrcreate_conflict_trigger", -< "bdrcreate_transform_trigger": "/pgd/latest/reference/streamtriggers/interfaces#bdrcreate_transform_trigger", -< "bdrdrop_trigger": "/pgd/latest/reference/streamtriggers/interfaces#bdrdrop_trigger", -< "bdrtrigger_get_row": "/pgd/latest/reference/streamtriggers/rowfunctions#bdrtrigger_get_row", -< "bdrtrigger_get_committs": "/pgd/latest/reference/streamtriggers/rowfunctions#bdrtrigger_get_committs", -< "bdrtrigger_get_xid": "/pgd/latest/reference/streamtriggers/rowfunctions#bdrtrigger_get_xid", -< "bdrtrigger_get_type": "/pgd/latest/reference/streamtriggers/rowfunctions#bdrtrigger_get_type", -< "bdrtrigger_get_conflict_type": "/pgd/latest/reference/streamtriggers/rowfunctions#bdrtrigger_get_conflict_type", -< "bdrtrigger_get_origin_node_id": "/pgd/latest/reference/streamtriggers/rowfunctions#bdrtrigger_get_origin_node_id", -< "bdrri_fkey_on_del_trigger": "/pgd/latest/reference/streamtriggers/rowfunctions#bdrri_fkey_on_del_trigger", -< "tg_name": "/pgd/latest/reference/streamtriggers/rowvariables#tg_name", -< "tg_when": "/pgd/latest/reference/streamtriggers/rowvariables#tg_when", -< "tg_level": "/pgd/latest/reference/streamtriggers/rowvariables#tg_level", -< "tg_op": "/pgd/latest/reference/streamtriggers/rowvariables#tg_op", -< "tg_relid": "/pgd/latest/reference/streamtriggers/rowvariables#tg_relid", -< "tg_table_name": "/pgd/latest/reference/streamtriggers/rowvariables#tg_table_name", -< "tg_table_schema": "/pgd/latest/reference/streamtriggers/rowvariables#tg_table_schema", -< "tg_nargs": "/pgd/latest/reference/streamtriggers/rowvariables#tg_nargs", -< "tg_argv": "/pgd/latest/reference/streamtriggers/rowvariables#tg_argv", -< "bdrautopartition_partitions": "/pgd/latest/reference/catalogs-internal#bdrautopartition_partitions", -< "bdrautopartition_rules": "/pgd/latest/reference/catalogs-internal#bdrautopartition_rules", -< "bdrddl_epoch": "/pgd/latest/reference/catalogs-internal#bdrddl_epoch", -< "bdrevent_history": "/pgd/latest/reference/catalogs-internal#bdrevent_history", -< "bdrevent_summary": "/pgd/latest/reference/catalogs-internal#bdrevent_summary", -< "bdrnode_config": "/pgd/latest/reference/catalogs-internal#bdrnode_config", -< "bdrnode_config_summary": "/pgd/latest/reference/catalogs-internal#bdrnode_config_summary", -< "bdrnode_group_config": "/pgd/latest/reference/catalogs-internal#bdrnode_group_config", -< "bdrnode_group_routing_config_summary": "/pgd/latest/reference/catalogs-internal#bdrnode_group_routing_config_summary", -< "bdrnode_group_routing_info": 
"/pgd/latest/reference/catalogs-internal#bdrnode_group_routing_info", -< "bdrnode_group_routing_summary": "/pgd/latest/reference/catalogs-internal#bdrnode_group_routing_summary", -< "bdrnode_routing_config_summary": "/pgd/latest/reference/catalogs-internal#bdrnode_routing_config_summary", -< "bdrproxy_config": "/pgd/latest/reference/catalogs-internal#bdrproxy_config", -< "bdrproxy_config_summary": "/pgd/latest/reference/catalogs-internal#bdrproxy_config_summary", -< "bdrsequence_kind": "/pgd/latest/reference/catalogs-internal#bdrsequence_kind", -< "bdrbdr_get_commit_decisions": "/pgd/latest/reference/functions-internal#bdrbdr_get_commit_decisions", -< "bdrbdr_track_commit_decision": "/pgd/latest/reference/functions-internal#bdrbdr_track_commit_decision", -< "bdrconsensus_kv_fetch": "/pgd/latest/reference/functions-internal#bdrconsensus_kv_fetch", -< "bdrconsensus_kv_store": "/pgd/latest/reference/functions-internal#bdrconsensus_kv_store", -< "bdrdecode_message_payload": "/pgd/latest/reference/functions-internal#bdrdecode_message_payload", -< "bdrdecode_message_response_payload": "/pgd/latest/reference/functions-internal#bdrdecode_message_response_payload", -< "bdrdifference_fix_origin_create": "/pgd/latest/reference/functions-internal#bdrdifference_fix_origin_create", -< "bdrdifference_fix_session_reset": "/pgd/latest/reference/functions-internal#bdrdifference_fix_session_reset", -< "bdrdifference_fix_session_setup": "/pgd/latest/reference/functions-internal#bdrdifference_fix_session_setup", -< "bdrdifference_fix_xact_set_avoid_conflict": "/pgd/latest/reference/functions-internal#bdrdifference_fix_xact_set_avoid_conflict", -< "bdrdrop_node": "/pgd/latest/reference/functions-internal#bdrdrop_node", -< "bdrget_global_locks": "/pgd/latest/reference/functions-internal#bdrget_global_locks", -< "bdrget_node_conflict_resolvers": "/pgd/latest/reference/functions-internal#bdrget_node_conflict_resolvers", -< "bdrget_slot_flush_timestamp": "/pgd/latest/reference/functions-internal#bdrget_slot_flush_timestamp", -< "bdrinternal_alter_sequence_set_kind": "/pgd/latest/reference/functions-internal#bdrinternal_alter_sequence_set_kind", -< "bdrinternal_replication_set_add_table": "/pgd/latest/reference/functions-internal#bdrinternal_replication_set_add_table", -< "bdrinternal_replication_set_remove_table": "/pgd/latest/reference/functions-internal#bdrinternal_replication_set_remove_table", -< "bdrinternal_submit_join_request": "/pgd/latest/reference/functions-internal#bdrinternal_submit_join_request", -< "bdrisolation_test_session_is_blocked": "/pgd/latest/reference/functions-internal#bdrisolation_test_session_is_blocked", -< "bdrlocal_node_info": "/pgd/latest/reference/functions-internal#bdrlocal_node_info", -< "bdrmsgb_connect": "/pgd/latest/reference/functions-internal#bdrmsgb_connect", -< "bdrmsgb_deliver_message": "/pgd/latest/reference/functions-internal#bdrmsgb_deliver_message", -< "bdrnode_catchup_state_name": "/pgd/latest/reference/functions-internal#bdrnode_catchup_state_name", -< "bdrnode_kind_name": "/pgd/latest/reference/functions-internal#bdrnode_kind_name", -< "bdrpeer_state_name": "/pgd/latest/reference/functions-internal#bdrpeer_state_name", -< "bdrpg_xact_origin": "/pgd/latest/reference/functions-internal#bdrpg_xact_origin", -< "bdrrequest_replay_progress_update": "/pgd/latest/reference/functions-internal#bdrrequest_replay_progress_update", -< "bdrreset_relation_stats": "/pgd/latest/reference/functions-internal#bdrreset_relation_stats", -< "bdrreset_subscription_stats": 
"/pgd/latest/reference/functions-internal#bdrreset_subscription_stats", -< "bdrresynchronize_table_from_node": "/pgd/latest/reference/functions-internal#bdrresynchronize_table_from_node", -< "bdrseq_currval": "/pgd/latest/reference/functions-internal#bdrseq_currval", -< "bdrseq_lastval": "/pgd/latest/reference/functions-internal#bdrseq_lastval", -< "bdrseq_nextval": "/pgd/latest/reference/functions-internal#bdrseq_nextval", -< "bdrshow_subscription_status": "/pgd/latest/reference/functions-internal#bdrshow_subscription_status", -< "bdrshow_workers": "/pgd/latest/reference/functions-internal#bdrshow_workers", -< "bdrshow_writers": "/pgd/latest/reference/functions-internal#bdrshow_writers", -< "bdrtaskmgr_set_leader": "/pgd/latest/reference/functions-internal#bdrtaskmgr_set_leader", -< "bdrtaskmgr_get_last_completed_workitem": "/pgd/latest/reference/functions-internal#bdrtaskmgr_get_last_completed_workitem", -< "bdrtaskmgr_work_queue_check_status": "/pgd/latest/reference/functions-internal#bdrtaskmgr_work_queue_check_status", -< "bdrpglogical_proto_version_ranges": "/pgd/latest/reference/functions-internal#bdrpglogical_proto_version_ranges", -< "bdrget_min_required_replication_slots": "/pgd/latest/reference/functions-internal#bdrget_min_required_replication_slots", -< "bdrget_min_required_worker_processes": "/pgd/latest/reference/functions-internal#bdrget_min_required_worker_processes", -< "bdrstat_get_activity": "/pgd/latest/reference/functions-internal#bdrstat_get_activity", -< "bdrworker_role_id_name": "/pgd/latest/reference/functions-internal#bdrworker_role_id_name", -< "bdrlag_history": "/pgd/latest/reference/functions-internal#bdrlag_history", -< "bdrget_raft_instance_by_nodegroup": "/pgd/latest/reference/functions-internal#bdrget_raft_instance_by_nodegroup", -< "bdrmonitor_camo_on_all_nodes": "/pgd/latest/reference/functions-internal#bdrmonitor_camo_on_all_nodes", -< "bdrmonitor_raft_details_on_all_nodes": "/pgd/latest/reference/functions-internal#bdrmonitor_raft_details_on_all_nodes", -< "bdrmonitor_replslots_details_on_all_nodes": "/pgd/latest/reference/functions-internal#bdrmonitor_replslots_details_on_all_nodes", -< "bdrmonitor_subscription_details_on_all_nodes": "/pgd/latest/reference/functions-internal#bdrmonitor_subscription_details_on_all_nodes", -< "bdrmonitor_version_details_on_all_nodes": "/pgd/latest/reference/functions-internal#bdrmonitor_version_details_on_all_nodes", -< "bdrnode_group_member_info": "/pgd/latest/reference/functions-internal#bdrnode_group_member_info", -< "bdrcolumn_timestamps_create": "/pgd/latest/reference/clcd#bdrcolumn_timestamps_create" -< } ---- -> "bdrcamo_decision_journal": "/pgd/5.6/reference/catalogs-visible#bdrcamo_decision_journal", -> "bdrcommit_scopes": "/pgd/5.6/reference/catalogs-visible#bdrcommit_scopes", -> "bdrconflict_history": "/pgd/5.6/reference/catalogs-visible#bdrconflict_history", -> "bdrconflict_history_summary": "/pgd/5.6/reference/catalogs-visible#bdrconflict_history_summary", -> "bdrconsensus_kv_data": "/pgd/5.6/reference/catalogs-visible#bdrconsensus_kv_data", -> "bdrcrdt_handlers": "/pgd/5.6/reference/catalogs-visible#bdrcrdt_handlers", -> "bdrddl_replication": "/pgd/5.6/reference/pgd-settings#bdrddl_replication", -> "bdrdepend": "/pgd/5.6/reference/catalogs-visible#bdrdepend", -> "bdrglobal_consensus_journal": "/pgd/5.6/reference/catalogs-visible#bdrglobal_consensus_journal", -> "bdrglobal_consensus_journal_details": "/pgd/5.6/reference/catalogs-visible#bdrglobal_consensus_journal_details", -> 
"bdrglobal_consensus_response_journal": "/pgd/5.6/reference/catalogs-visible#bdrglobal_consensus_response_journal", -> "bdrglobal_lock": "/pgd/5.6/reference/catalogs-visible#bdrglobal_lock", -> "bdrglobal_locks": "/pgd/5.6/reference/catalogs-visible#bdrglobal_locks", -> "bdrgroup_camo_details": "/pgd/5.6/reference/catalogs-visible#bdrgroup_camo_details", -> "bdrgroup_raft_details": "/pgd/5.6/reference/catalogs-visible#bdrgroup_raft_details", -> "bdrgroup_replslots_details": "/pgd/5.6/reference/catalogs-visible#bdrgroup_replslots_details", -> "bdrgroup_subscription_summary": "/pgd/5.6/reference/catalogs-visible#bdrgroup_subscription_summary", -> "bdrgroup_versions_details": "/pgd/5.6/reference/catalogs-visible#bdrgroup_versions_details", -> "bdrleader": "/pgd/5.6/reference/catalogs-visible#bdrleader", -> "bdrlocal_consensus_snapshot": "/pgd/5.6/reference/catalogs-visible#bdrlocal_consensus_snapshot", -> "bdrlocal_consensus_state": "/pgd/5.6/reference/catalogs-visible#bdrlocal_consensus_state", -> "bdrlocal_node": "/pgd/5.6/reference/catalogs-visible#bdrlocal_node", -> "bdrlocal_node_summary": "/pgd/5.6/reference/catalogs-visible#bdrlocal_node_summary", -> "bdrlocal_sync_status": "/pgd/5.6/reference/catalogs-visible#bdrlocal_sync_status", -> "bdrnode": "/pgd/5.6/reference/catalogs-visible#bdrnode", -> "bdrnode_catchup_info": "/pgd/5.6/reference/catalogs-visible#bdrnode_catchup_info", -> "bdrnode_catchup_info_details": "/pgd/5.6/reference/catalogs-visible#bdrnode_catchup_info_details", -> "bdrnode_conflict_resolvers": "/pgd/5.6/reference/catalogs-visible#bdrnode_conflict_resolvers", -> "bdrnode_group": "/pgd/5.6/reference/catalogs-visible#bdrnode_group", -> "bdrnode_group_replication_sets": "/pgd/5.6/reference/catalogs-visible#bdrnode_group_replication_sets", -> "bdrnode_group_summary": "/pgd/5.6/reference/catalogs-visible#bdrnode_group_summary", -> "bdrnode_local_info": "/pgd/5.6/reference/catalogs-visible#bdrnode_local_info", -> "bdrnode_log_config": "/pgd/5.6/reference/catalogs-visible#bdrnode_log_config", -> "bdrnode_peer_progress": "/pgd/5.6/reference/catalogs-visible#bdrnode_peer_progress", -> "bdrnode_replication_rates": "/pgd/5.6/reference/catalogs-visible#bdrnode_replication_rates", -> "bdrnode_slots": "/pgd/5.6/reference/catalogs-visible#bdrnode_slots", -> "bdrnode_summary": "/pgd/5.6/reference/catalogs-visible#bdrnode_summary", -> "bdrqueue": "/pgd/5.6/reference/catalogs-visible#bdrqueue", -> "bdrreplication_set": "/pgd/5.6/reference/catalogs-visible#bdrreplication_set", -> "bdrreplication_set_table": "/pgd/5.6/reference/catalogs-visible#bdrreplication_set_table", -> "bdrreplication_set_ddl": "/pgd/5.6/reference/catalogs-visible#bdrreplication_set_ddl", -> "bdrreplication_sets": "/pgd/5.6/reference/catalogs-visible#bdrreplication_sets", -> "bdrschema_changes": "/pgd/5.6/reference/catalogs-visible#bdrschema_changes", -> "bdrsequence_alloc": "/pgd/5.6/reference/catalogs-visible#bdrsequence_alloc", -> "bdrsequences": "/pgd/5.6/reference/catalogs-visible#bdrsequences", -> "bdrstat_activity": "/pgd/5.6/reference/catalogs-visible#bdrstat_activity", -> "bdrstat_commit_scope": "/pgd/5.6/reference/catalogs-visible#bdrstat_commit_scope", -> "bdrstat_commit_scope_state": "/pgd/5.6/reference/catalogs-visible#bdrstat_commit_scope_state", -> "bdrstat_raft_followers_state": "/pgd/5.6/reference/catalogs-visible#bdrstat_raft_followers_state", -> "bdrstat_raft_state": "/pgd/5.6/reference/catalogs-visible#bdrstat_raft_state", -> "bdrstat_receiver": 
"/pgd/5.6/reference/catalogs-visible#bdrstat_receiver", -> "bdrstat_relation": "/pgd/5.6/reference/catalogs-visible#bdrstat_relation", -> "bdrstat_routing_candidate_state": "/pgd/5.6/reference/catalogs-visible#bdrstat_routing_candidate_state", -> "bdrstat_routing_state": "/pgd/5.6/reference/catalogs-visible#bdrstat_routing_state", -> "bdrstat_subscription": "/pgd/5.6/reference/catalogs-visible#bdrstat_subscription", -> "bdrstat_worker": "/pgd/5.6/reference/catalogs-visible#bdrstat_worker", -> "bdrstat_writer": "/pgd/5.6/reference/catalogs-visible#bdrstat_writer", -> "bdrsubscription": "/pgd/5.6/reference/catalogs-visible#bdrsubscription", -> "bdrsubscription_summary": "/pgd/5.6/reference/catalogs-visible#bdrsubscription_summary", -> "bdrtables": "/pgd/5.6/reference/catalogs-visible#bdrtables", -> "bdrtaskmgr_work_queue": "/pgd/5.6/reference/catalogs-visible#bdrtaskmgr_work_queue", -> "bdrtaskmgr_workitem_status": "/pgd/5.6/reference/catalogs-visible#bdrtaskmgr_workitem_status", -> "bdrtaskmgr_local_work_queue": "/pgd/5.6/reference/catalogs-visible#bdrtaskmgr_local_work_queue", -> "bdrtaskmgr_local_workitem_status": "/pgd/5.6/reference/catalogs-visible#bdrtaskmgr_local_workitem_status", -> "bdrtrigger": "/pgd/5.6/reference/catalogs-visible#bdrtrigger", -> "bdrtriggers": "/pgd/5.6/reference/catalogs-visible#bdrtriggers", -> "bdrworkers": "/pgd/5.6/reference/catalogs-visible#bdrworkers", -> "bdrwriters": "/pgd/5.6/reference/catalogs-visible#bdrwriters", -> "bdrworker_tasks": "/pgd/5.6/reference/catalogs-visible#bdrworker_tasks", -> "bdrbdr_version": "/pgd/5.6/reference/functions#bdrbdr_version", -> "bdrbdr_version_num": "/pgd/5.6/reference/functions#bdrbdr_version_num", -> "bdrget_relation_stats": "/pgd/5.6/reference/functions#bdrget_relation_stats", -> "bdrget_subscription_stats": "/pgd/5.6/reference/functions#bdrget_subscription_stats", -> "bdrlocal_node_id": "/pgd/5.6/reference/functions#bdrlocal_node_id", -> "bdrlast_committed_lsn": "/pgd/5.6/reference/functions#bdrlast_committed_lsn", -> "transaction_id": "/pgd/5.6/reference/functions#transaction_id", -> "bdris_node_connected": "/pgd/5.6/reference/functions#bdris_node_connected", -> "bdris_node_ready": "/pgd/5.6/reference/functions#bdris_node_ready", -> "bdrconsensus_disable": "/pgd/5.6/reference/functions#bdrconsensus_disable", -> "bdrconsensus_enable": "/pgd/5.6/reference/functions#bdrconsensus_enable", -> "bdrconsensus_proto_version": "/pgd/5.6/reference/functions#bdrconsensus_proto_version", -> "bdrconsensus_snapshot_export": "/pgd/5.6/reference/functions#bdrconsensus_snapshot_export", -> "bdrconsensus_snapshot_import": "/pgd/5.6/reference/functions#bdrconsensus_snapshot_import", -> "bdrconsensus_snapshot_verify": "/pgd/5.6/reference/functions#bdrconsensus_snapshot_verify", -> "bdrget_consensus_status": "/pgd/5.6/reference/functions#bdrget_consensus_status", -> "bdrget_raft_status": "/pgd/5.6/reference/functions#bdrget_raft_status", -> "bdrraft_leadership_transfer": "/pgd/5.6/reference/functions#bdrraft_leadership_transfer", -> "bdrwait_slot_confirm_lsn": "/pgd/5.6/reference/functions#bdrwait_slot_confirm_lsn", -> "bdrwait_node_confirm_lsn": "/pgd/5.6/reference/functions#bdrwait_node_confirm_lsn", -> "bdrwait_for_apply_queue": "/pgd/5.6/reference/functions#bdrwait_for_apply_queue", -> "bdrget_node_sub_receive_lsn": "/pgd/5.6/reference/functions#bdrget_node_sub_receive_lsn", -> "bdrget_node_sub_apply_lsn": "/pgd/5.6/reference/functions#bdrget_node_sub_apply_lsn", -> "bdrreplicate_ddl_command": 
"/pgd/5.6/reference/functions#bdrreplicate_ddl_command", -> "bdrrun_on_all_nodes": "/pgd/5.6/reference/functions#bdrrun_on_all_nodes", -> "bdrrun_on_nodes": "/pgd/5.6/reference/functions#bdrrun_on_nodes", -> "bdrrun_on_group": "/pgd/5.6/reference/functions#bdrrun_on_group", -> "bdrglobal_lock_table": "/pgd/5.6/reference/functions#bdrglobal_lock_table", -> "bdrwait_for_xid_progress": "/pgd/5.6/reference/functions#bdrwait_for_xid_progress", -> "bdrlocal_group_slot_name": "/pgd/5.6/reference/functions#bdrlocal_group_slot_name", -> "bdrnode_group_type": "/pgd/5.6/reference/functions#bdrnode_group_type", -> "bdralter_node_kind": "/pgd/5.6/reference/functions#bdralter_node_kind", -> "bdralter_subscription_skip_changes_upto": "/pgd/5.6/reference/functions#bdralter_subscription_skip_changes_upto", -> "bdrglobal_advisory_lock": "/pgd/5.6/reference/functions#bdrglobal_advisory_lock", -> "bdrglobal_advisory_unlock": "/pgd/5.6/reference/functions#bdrglobal_advisory_unlock", -> "bdrmonitor_group_versions": "/pgd/5.6/reference/functions#bdrmonitor_group_versions", -> "bdrmonitor_group_raft": "/pgd/5.6/reference/functions#bdrmonitor_group_raft", -> "bdrmonitor_local_replslots": "/pgd/5.6/reference/functions#bdrmonitor_local_replslots", -> "bdrwal_sender_stats": "/pgd/5.6/reference/functions#bdrwal_sender_stats", -> "bdrget_decoding_worker_stat": "/pgd/5.6/reference/functions#bdrget_decoding_worker_stat", -> "bdrlag_control": "/pgd/5.6/reference/functions#bdrlag_control", -> "bdris_camo_partner_connected": "/pgd/5.6/reference/functions#bdris_camo_partner_connected", -> "bdris_camo_partner_ready": "/pgd/5.6/reference/functions#bdris_camo_partner_ready", -> "bdrget_configured_camo_partner": "/pgd/5.6/reference/functions#bdrget_configured_camo_partner", -> "bdrwait_for_camo_partner_queue": "/pgd/5.6/reference/functions#bdrwait_for_camo_partner_queue", -> "bdrcamo_transactions_resolved": "/pgd/5.6/reference/functions#bdrcamo_transactions_resolved", -> "bdrlogical_transaction_status": "/pgd/5.6/reference/functions#bdrlogical_transaction_status", -> "bdradd_commit_scope": "/pgd/5.6/reference/functions#bdradd_commit_scope", -> "bdrcreate_commit_scope": "/pgd/5.6/reference/functions#bdrcreate_commit_scope", -> "bdralter_commit_scope": "/pgd/5.6/reference/functions#bdralter_commit_scope", -> "bdrdrop_commit_scope": "/pgd/5.6/reference/functions#bdrdrop_commit_scope", -> "bdrremove_commit_scope": "/pgd/5.6/reference/functions#bdrremove_commit_scope", -> "bdrdefault_conflict_detection": "/pgd/5.6/reference/pgd-settings#bdrdefault_conflict_detection", -> "bdrdefault_sequence_kind": "/pgd/5.6/reference/pgd-settings#bdrdefault_sequence_kind", -> "bdrdefault_replica_identity": "/pgd/5.6/reference/pgd-settings#bdrdefault_replica_identity", -> "bdrrole_replication": "/pgd/5.6/reference/pgd-settings#bdrrole_replication", -> "bdrddl_locking": "/pgd/5.6/reference/pgd-settings#bdrddl_locking", -> "bdrtruncate_locking": "/pgd/5.6/reference/pgd-settings#bdrtruncate_locking", -> "bdrglobal_lock_max_locks": "/pgd/5.6/reference/pgd-settings#bdrglobal_lock_max_locks", -> "bdrglobal_lock_timeout": "/pgd/5.6/reference/pgd-settings#bdrglobal_lock_timeout", -> "bdrglobal_lock_statement_timeout": "/pgd/5.6/reference/pgd-settings#bdrglobal_lock_statement_timeout", -> "bdrglobal_lock_idle_timeout": "/pgd/5.6/reference/pgd-settings#bdrglobal_lock_idle_timeout", -> "bdrlock_table_locking": "/pgd/5.6/reference/pgd-settings#bdrlock_table_locking", -> "bdrpredictive_checks": "/pgd/5.6/reference/pgd-settings#bdrpredictive_checks", -> 
"bdrreplay_progress_frequency": "/pgd/5.6/reference/pgd-settings#bdrreplay_progress_frequency", -> "bdrstandby_slot_names": "/pgd/5.6/reference/pgd-settings#bdrstandby_slot_names", -> "bdrwriters_per_subscription": "/pgd/5.6/reference/pgd-settings#bdrwriters_per_subscription", -> "bdrmax_writers_per_subscription": "/pgd/5.6/reference/pgd-settings#bdrmax_writers_per_subscription", -> "bdrxact_replication": "/pgd/5.6/reference/pgd-settings#bdrxact_replication", -> "bdrpermit_unsafe_commands": "/pgd/5.6/reference/pgd-settings#bdrpermit_unsafe_commands", -> "bdrbatch_inserts": "/pgd/5.6/reference/pgd-settings#bdrbatch_inserts", -> "bdrmaximum_clock_skew": "/pgd/5.6/reference/pgd-settings#bdrmaximum_clock_skew", -> "bdrmaximum_clock_skew_action": "/pgd/5.6/reference/pgd-settings#bdrmaximum_clock_skew_action", -> "bdraccept_connections": "/pgd/5.6/reference/pgd-settings#bdraccept_connections", -> "bdrstandby_slots_min_confirmed": "/pgd/5.6/reference/pgd-settings#bdrstandby_slots_min_confirmed", -> "bdrwriter_input_queue_size": "/pgd/5.6/reference/pgd-settings#bdrwriter_input_queue_size", -> "bdrwriter_output_queue_size": "/pgd/5.6/reference/pgd-settings#bdrwriter_output_queue_size", -> "bdrmin_worker_backoff_delay": "/pgd/5.6/reference/pgd-settings#bdrmin_worker_backoff_delay", -> "bdrcrdt_raw_value": "/pgd/5.6/reference/pgd-settings#bdrcrdt_raw_value", -> "bdrcommit_scope": "/pgd/5.6/reference/pgd-settings#bdrcommit_scope", -> "bdrcamo_local_mode_delay": "/pgd/5.6/reference/pgd-settings#bdrcamo_local_mode_delay", -> "bdrcamo_enable_client_warnings": "/pgd/5.6/reference/pgd-settings#bdrcamo_enable_client_warnings", -> "bdrdefault_streaming_mode": "/pgd/5.6/reference/pgd-settings#bdrdefault_streaming_mode", -> "bdrlag_control_max_commit_delay": "/pgd/5.6/reference/pgd-settings#bdrlag_control_max_commit_delay", -> "bdrlag_control_max_lag_size": "/pgd/5.6/reference/pgd-settings#bdrlag_control_max_lag_size", -> "bdrlag_control_max_lag_time": "/pgd/5.6/reference/pgd-settings#bdrlag_control_max_lag_time", -> "bdrlag_control_min_conforming_nodes": "/pgd/5.6/reference/pgd-settings#bdrlag_control_min_conforming_nodes", -> "bdrlag_control_commit_delay_adjust": "/pgd/5.6/reference/pgd-settings#bdrlag_control_commit_delay_adjust", -> "bdrlag_control_sample_interval": "/pgd/5.6/reference/pgd-settings#bdrlag_control_sample_interval", -> "bdrlag_control_commit_delay_start": "/pgd/5.6/reference/pgd-settings#bdrlag_control_commit_delay_start", -> "bdrtimestamp_snapshot_keep": "/pgd/5.6/reference/pgd-settings#bdrtimestamp_snapshot_keep", -> "bdrdebug_level": "/pgd/5.6/reference/pgd-settings#bdrdebug_level", -> "bdrtrace_level": "/pgd/5.6/reference/pgd-settings#bdrtrace_level", -> "bdrtrack_subscription_apply": "/pgd/5.6/reference/pgd-settings#bdrtrack_subscription_apply", -> "bdrtrack_relation_apply": "/pgd/5.6/reference/pgd-settings#bdrtrack_relation_apply", -> "bdrtrack_apply_lock_timing": "/pgd/5.6/reference/pgd-settings#bdrtrack_apply_lock_timing", -> "bdrenable_wal_decoder": "/pgd/5.6/reference/pgd-settings#bdrenable_wal_decoder", -> "bdrreceive_lcr": "/pgd/5.6/reference/pgd-settings#bdrreceive_lcr", -> "bdrlcr_cleanup_interval": "/pgd/5.6/reference/pgd-settings#bdrlcr_cleanup_interval", -> "bdrglobal_connection_timeout": "/pgd/5.6/reference/pgd-settings#bdrglobal_connection_timeout", -> "bdrglobal_keepalives": "/pgd/5.6/reference/pgd-settings#bdrglobal_keepalives", -> "bdrglobal_keepalives_idle": "/pgd/5.6/reference/pgd-settings#bdrglobal_keepalives_idle", -> "bdrglobal_keepalives_interval": 
"/pgd/5.6/reference/pgd-settings#bdrglobal_keepalives_interval", -> "bdrglobal_keepalives_count": "/pgd/5.6/reference/pgd-settings#bdrglobal_keepalives_count", -> "bdrglobal_tcp_user_timeout": "/pgd/5.6/reference/pgd-settings#bdrglobal_tcp_user_timeout", -> "bdrforce_full_mesh": "/pgd/5.6/reference/pgd-settings#bdrforce_full_mesh", -> "bdrraft_global_election_timeout": "/pgd/5.6/reference/pgd-settings#bdrraft_global_election_timeout", -> "bdrraft_group_election_timeout": "/pgd/5.6/reference/pgd-settings#bdrraft_group_election_timeout", -> "bdrraft_response_timeout": "/pgd/5.6/reference/pgd-settings#bdrraft_response_timeout", -> "bdrraft_keep_min_entries": "/pgd/5.6/reference/pgd-settings#bdrraft_keep_min_entries", -> "bdrraft_log_min_apply_duration": "/pgd/5.6/reference/pgd-settings#bdrraft_log_min_apply_duration", -> "bdrraft_log_min_message_duration": "/pgd/5.6/reference/pgd-settings#bdrraft_log_min_message_duration", -> "bdrraft_group_max_connections": "/pgd/5.6/reference/pgd-settings#bdrraft_group_max_connections", -> "bdrbackwards_compatibility": "/pgd/5.6/reference/pgd-settings#bdrbackwards_compatibility", -> "bdrtrack_replication_estimates": "/pgd/5.6/reference/pgd-settings#bdrtrack_replication_estimates", -> "bdrlag_tracker_apply_rate_weight": "/pgd/5.6/reference/pgd-settings#bdrlag_tracker_apply_rate_weight", -> "bdrenable_auto_sync_reconcile": "/pgd/5.6/reference/pgd-settings#bdrenable_auto_sync_reconcile", -> "list-of-node-states": "/pgd/5.6/reference/nodes#list-of-node-states", -> "node-management-commands": "/pgd/5.6/reference/nodes#node-management-commands", -> "bdr_init_physical": "/pgd/5.6/reference/nodes#bdr_init_physical", -> "bdr_config": "/pgd/5.6/reference/nodes#bdr_config", -> "bdralter_node_group_option": "/pgd/5.6/reference/nodes-management-interfaces#bdralter_node_group_option", -> "bdralter_node_interface": "/pgd/5.6/reference/nodes-management-interfaces#bdralter_node_interface", -> "bdralter_node_option": "/pgd/5.6/reference/nodes-management-interfaces#bdralter_node_option", -> "bdralter_subscription_enable": "/pgd/5.6/reference/nodes-management-interfaces#bdralter_subscription_enable", -> "bdralter_subscription_disable": "/pgd/5.6/reference/nodes-management-interfaces#bdralter_subscription_disable", -> "bdrcreate_node": "/pgd/5.6/reference/nodes-management-interfaces#bdrcreate_node", -> "bdrcreate_node_group": "/pgd/5.6/reference/nodes-management-interfaces#bdrcreate_node_group", -> "bdrdrop_node_group": "/pgd/5.6/reference/nodes-management-interfaces#bdrdrop_node_group", -> "bdrjoin_node_group": "/pgd/5.6/reference/nodes-management-interfaces#bdrjoin_node_group", -> "bdrpart_node": "/pgd/5.6/reference/nodes-management-interfaces#bdrpart_node", -> "bdrpromote_node": "/pgd/5.6/reference/nodes-management-interfaces#bdrpromote_node", -> "bdrswitch_node_group": "/pgd/5.6/reference/nodes-management-interfaces#bdrswitch_node_group", -> "bdrwait_for_join_completion": "/pgd/5.6/reference/nodes-management-interfaces#bdrwait_for_join_completion", -> "bdralter_node_group_config": "/pgd/5.6/reference/nodes-management-interfaces#bdralter_node_group_config", -> "bdrcreate_proxy": "/pgd/5.6/reference/routing#bdrcreate_proxy", -> "bdralter_proxy_option": "/pgd/5.6/reference/routing#bdralter_proxy_option", -> "bdrdrop_proxy": "/pgd/5.6/reference/routing#bdrdrop_proxy", -> "bdrrouting_leadership_transfer": "/pgd/5.6/reference/routing#bdrrouting_leadership_transfer", -> "cs.commit-scope-syntax": "/pgd/5.6/reference/commit-scopes#commit-scope-syntax", -> 
"cs.commit_scope_degrade_operation": "/pgd/5.6/reference/commit-scopes#commit_scope_degrade_operation", -> "cs.commit-scope-targets": "/pgd/5.6/reference/commit-scopes#commit-scope-targets", -> "cs.origin_group": "/pgd/5.6/reference/commit-scopes#origin_group", -> "cs.commit-scope-groups": "/pgd/5.6/reference/commit-scopes#commit-scope-groups", -> "cs.any": "/pgd/5.6/reference/commit-scopes#any", -> "cs.any-not": "/pgd/5.6/reference/commit-scopes#any-not", -> "cs.majority": "/pgd/5.6/reference/commit-scopes#majority", -> "cs.majority-not": "/pgd/5.6/reference/commit-scopes#majority-not", -> "cs.all": "/pgd/5.6/reference/commit-scopes#all", -> "cs.all-not": "/pgd/5.6/reference/commit-scopes#all-not", -> "cs.confirmation-level": "/pgd/5.6/reference/commit-scopes#confirmation-level", -> "cs.on-received": "/pgd/5.6/reference/commit-scopes#on-received", -> "cs.on-replicated": "/pgd/5.6/reference/commit-scopes#on-replicated", -> "cs.on-durable": "/pgd/5.6/reference/commit-scopes#on-durable", -> "cs.on-visible": "/pgd/5.6/reference/commit-scopes#on-visible", -> "cs.commit-scope-kinds": "/pgd/5.6/reference/commit-scopes#commit-scope-kinds", -> "cs.synchronous-commit": "/pgd/5.6/reference/commit-scopes#synchronous-commit", -> "cs.degrade-on-parameters": "/pgd/5.6/reference/commit-scopes#degrade-on-parameters", -> "cs.group-commit": "/pgd/5.6/reference/commit-scopes#group-commit", -> "cs.group-commit-parameters": "/pgd/5.6/reference/commit-scopes#group-commit-parameters", -> "cs.abort-on-parameters": "/pgd/5.6/reference/commit-scopes#abort-on-parameters", -> "cs.transaction_tracking-settings": "/pgd/5.6/reference/commit-scopes#transaction_tracking-settings", -> "cs.conflict_resolution-settings": "/pgd/5.6/reference/commit-scopes#conflict_resolution-settings", -> "cs.commit_decision-settings": "/pgd/5.6/reference/commit-scopes#commit_decision-settings", -> "cs.commit_scope_degrade_operation-settings": "/pgd/5.6/reference/commit-scopes#commit_scope_degrade_operation-settings", -> "cs.camo": "/pgd/5.6/reference/commit-scopes#camo", -> "cs.lag-control": "/pgd/5.6/reference/commit-scopes#lag-control", -> "cs.lag-control-parameters": "/pgd/5.6/reference/commit-scopes#lag-control-parameters", -> "conflict-detection": "/pgd/5.6/reference/conflicts#conflict-detection", -> "list-of-conflict-types": "/pgd/5.6/reference/conflicts#list-of-conflict-types", -> "conflict-resolution": "/pgd/5.6/reference/conflicts#conflict-resolution", -> "list-of-conflict-resolvers": "/pgd/5.6/reference/conflicts#list-of-conflict-resolvers", -> "default-conflict-resolvers": "/pgd/5.6/reference/conflicts#default-conflict-resolvers", -> "list-of-conflict-resolutions": "/pgd/5.6/reference/conflicts#list-of-conflict-resolutions", -> "conflict-logging": "/pgd/5.6/reference/conflicts#conflict-logging", -> "bdralter_table_conflict_detection": "/pgd/5.6/reference/conflict_functions#bdralter_table_conflict_detection", -> "bdralter_node_set_conflict_resolver": "/pgd/5.6/reference/conflict_functions#bdralter_node_set_conflict_resolver", -> "bdralter_node_set_log_config": "/pgd/5.6/reference/conflict_functions#bdralter_node_set_log_config", -> "bdrcreate_replication_set": "/pgd/5.6/reference/repsets-management#bdrcreate_replication_set", -> "bdralter_replication_set": "/pgd/5.6/reference/repsets-management#bdralter_replication_set", -> "bdrdrop_replication_set": "/pgd/5.6/reference/repsets-management#bdrdrop_replication_set", -> "bdralter_node_replication_sets": "/pgd/5.6/reference/repsets-management#bdralter_node_replication_sets", -> 
"bdrreplication_set_add_table": "/pgd/5.6/reference/repsets-membership#bdrreplication_set_add_table", -> "bdrreplication_set_remove_table": "/pgd/5.6/reference/repsets-membership#bdrreplication_set_remove_table", -> "bdrreplication_set_add_ddl_filter": "/pgd/5.6/reference/repsets-ddl-filtering#bdrreplication_set_add_ddl_filter", -> "bdrreplication_set_remove_ddl_filter": "/pgd/5.6/reference/repsets-ddl-filtering#bdrreplication_set_remove_ddl_filter", -> "pgd_bench": "/pgd/5.6/reference/testingandtuning#pgd_bench", -> "bdralter_sequence_set_kind": "/pgd/5.6/reference/sequences#bdralter_sequence_set_kind", -> "bdrextract_timestamp_from_snowflakeid": "/pgd/5.6/reference/sequences#bdrextract_timestamp_from_snowflakeid", -> "bdrextract_nodeid_from_snowflakeid": "/pgd/5.6/reference/sequences#bdrextract_nodeid_from_snowflakeid", -> "bdrextract_localseqid_from_snowflakeid": "/pgd/5.6/reference/sequences#bdrextract_localseqid_from_snowflakeid", -> "bdrtimestamp_to_snowflakeid": "/pgd/5.6/reference/sequences#bdrtimestamp_to_snowflakeid", -> "bdrextract_timestamp_from_timeshard": "/pgd/5.6/reference/sequences#bdrextract_timestamp_from_timeshard", -> "bdrextract_nodeid_from_timeshard": "/pgd/5.6/reference/sequences#bdrextract_nodeid_from_timeshard", -> "bdrextract_localseqid_from_timeshard": "/pgd/5.6/reference/sequences#bdrextract_localseqid_from_timeshard", -> "bdrtimestamp_to_timeshard": "/pgd/5.6/reference/sequences#bdrtimestamp_to_timeshard", -> "bdrgen_ksuuid_v2": "/pgd/5.6/reference/sequences#bdrgen_ksuuid_v2", -> "bdrksuuid_v2_cmp": "/pgd/5.6/reference/sequences#bdrksuuid_v2_cmp", -> "bdrextract_timestamp_from_ksuuid_v2": "/pgd/5.6/reference/sequences#bdrextract_timestamp_from_ksuuid_v2", -> "bdrgen_ksuuid": "/pgd/5.6/reference/sequences#bdrgen_ksuuid", -> "bdruuid_v1_cmp": "/pgd/5.6/reference/sequences#bdruuid_v1_cmp", -> "bdrextract_timestamp_from_ksuuid": "/pgd/5.6/reference/sequences#bdrextract_timestamp_from_ksuuid", -> "bdrautopartition": "/pgd/5.6/reference/autopartition#bdrautopartition", -> "bdrdrop_autopartition": "/pgd/5.6/reference/autopartition#bdrdrop_autopartition", -> "bdrautopartition_wait_for_partitions": "/pgd/5.6/reference/autopartition#bdrautopartition_wait_for_partitions", -> "bdrautopartition_wait_for_partitions_on_all_nodes": "/pgd/5.6/reference/autopartition#bdrautopartition_wait_for_partitions_on_all_nodes", -> "bdrautopartition_find_partition": "/pgd/5.6/reference/autopartition#bdrautopartition_find_partition", -> "bdrautopartition_enable": "/pgd/5.6/reference/autopartition#bdrautopartition_enable", -> "bdrautopartition_disable": "/pgd/5.6/reference/autopartition#bdrautopartition_disable", -> "internal-functions": "/pgd/5.6/reference/autopartition#internal-functions", -> "bdrautopartition_create_partition": "/pgd/5.6/reference/autopartition#bdrautopartition_create_partition", -> "bdrautopartition_drop_partition": "/pgd/5.6/reference/autopartition#bdrautopartition_drop_partition", -> "bdrcreate_conflict_trigger": "/pgd/5.6/reference/streamtriggers/interfaces#bdrcreate_conflict_trigger", -> "bdrcreate_transform_trigger": "/pgd/5.6/reference/streamtriggers/interfaces#bdrcreate_transform_trigger", -> "bdrdrop_trigger": "/pgd/5.6/reference/streamtriggers/interfaces#bdrdrop_trigger", -> "bdrtrigger_get_row": "/pgd/5.6/reference/streamtriggers/rowfunctions#bdrtrigger_get_row", -> "bdrtrigger_get_committs": "/pgd/5.6/reference/streamtriggers/rowfunctions#bdrtrigger_get_committs", -> "bdrtrigger_get_xid": "/pgd/5.6/reference/streamtriggers/rowfunctions#bdrtrigger_get_xid", -> 
"bdrtrigger_get_type": "/pgd/5.6/reference/streamtriggers/rowfunctions#bdrtrigger_get_type", -> "bdrtrigger_get_conflict_type": "/pgd/5.6/reference/streamtriggers/rowfunctions#bdrtrigger_get_conflict_type", -> "bdrtrigger_get_origin_node_id": "/pgd/5.6/reference/streamtriggers/rowfunctions#bdrtrigger_get_origin_node_id", -> "bdrri_fkey_on_del_trigger": "/pgd/5.6/reference/streamtriggers/rowfunctions#bdrri_fkey_on_del_trigger", -> "tg_name": "/pgd/5.6/reference/streamtriggers/rowvariables#tg_name", -> "tg_when": "/pgd/5.6/reference/streamtriggers/rowvariables#tg_when", -> "tg_level": "/pgd/5.6/reference/streamtriggers/rowvariables#tg_level", -> "tg_op": "/pgd/5.6/reference/streamtriggers/rowvariables#tg_op", -> "tg_relid": "/pgd/5.6/reference/streamtriggers/rowvariables#tg_relid", -> "tg_table_name": "/pgd/5.6/reference/streamtriggers/rowvariables#tg_table_name", -> "tg_table_schema": "/pgd/5.6/reference/streamtriggers/rowvariables#tg_table_schema", -> "tg_nargs": "/pgd/5.6/reference/streamtriggers/rowvariables#tg_nargs", -> "tg_argv": "/pgd/5.6/reference/streamtriggers/rowvariables#tg_argv", -> "bdrautopartition_partitions": "/pgd/5.6/reference/catalogs-internal#bdrautopartition_partitions", -> "bdrautopartition_rules": "/pgd/5.6/reference/catalogs-internal#bdrautopartition_rules", -> "bdrddl_epoch": "/pgd/5.6/reference/catalogs-internal#bdrddl_epoch", -> "bdrevent_history": "/pgd/5.6/reference/catalogs-internal#bdrevent_history", -> "bdrevent_summary": "/pgd/5.6/reference/catalogs-internal#bdrevent_summary", -> "bdrlocal_leader_change": "/pgd/5.6/reference/catalogs-internal#bdrlocal_leader_change", -> "bdrnode_config": "/pgd/5.6/reference/catalogs-internal#bdrnode_config", -> "bdrnode_config_summary": "/pgd/5.6/reference/catalogs-internal#bdrnode_config_summary", -> "bdrnode_group_config": "/pgd/5.6/reference/catalogs-internal#bdrnode_group_config", -> "bdrnode_group_routing_config_summary": "/pgd/5.6/reference/catalogs-internal#bdrnode_group_routing_config_summary", -> "bdrnode_group_routing_info": "/pgd/5.6/reference/catalogs-internal#bdrnode_group_routing_info", -> "bdrnode_group_routing_summary": "/pgd/5.6/reference/catalogs-internal#bdrnode_group_routing_summary", -> "bdrnode_routing_config_summary": "/pgd/5.6/reference/catalogs-internal#bdrnode_routing_config_summary", -> "bdrproxy_config": "/pgd/5.6/reference/catalogs-internal#bdrproxy_config", -> "bdrproxy_config_summary": "/pgd/5.6/reference/catalogs-internal#bdrproxy_config_summary", -> "bdrsequence_kind": "/pgd/5.6/reference/catalogs-internal#bdrsequence_kind", -> "bdrbdr_get_commit_decisions": "/pgd/5.6/reference/functions-internal#bdrbdr_get_commit_decisions", -> "bdrbdr_track_commit_decision": "/pgd/5.6/reference/functions-internal#bdrbdr_track_commit_decision", -> "bdrconsensus_kv_fetch": "/pgd/5.6/reference/functions-internal#bdrconsensus_kv_fetch", -> "bdrconsensus_kv_store": "/pgd/5.6/reference/functions-internal#bdrconsensus_kv_store", -> "bdrdecode_message_payload": "/pgd/5.6/reference/functions-internal#bdrdecode_message_payload", -> "bdrdecode_message_response_payload": "/pgd/5.6/reference/functions-internal#bdrdecode_message_response_payload", -> "bdrdifference_fix_origin_create": "/pgd/5.6/reference/functions-internal#bdrdifference_fix_origin_create", -> "bdrdifference_fix_session_reset": "/pgd/5.6/reference/functions-internal#bdrdifference_fix_session_reset", -> "bdrdifference_fix_session_setup": "/pgd/5.6/reference/functions-internal#bdrdifference_fix_session_setup", -> 
"bdrdifference_fix_xact_set_avoid_conflict": "/pgd/5.6/reference/functions-internal#bdrdifference_fix_xact_set_avoid_conflict", -> "bdrdrop_node": "/pgd/5.6/reference/functions-internal#bdrdrop_node", -> "bdrget_global_locks": "/pgd/5.6/reference/functions-internal#bdrget_global_locks", -> "bdrget_node_conflict_resolvers": "/pgd/5.6/reference/functions-internal#bdrget_node_conflict_resolvers", -> "bdrget_slot_flush_timestamp": "/pgd/5.6/reference/functions-internal#bdrget_slot_flush_timestamp", -> "bdrinternal_alter_sequence_set_kind": "/pgd/5.6/reference/functions-internal#bdrinternal_alter_sequence_set_kind", -> "bdrinternal_replication_set_add_table": "/pgd/5.6/reference/functions-internal#bdrinternal_replication_set_add_table", -> "bdrinternal_replication_set_remove_table": "/pgd/5.6/reference/functions-internal#bdrinternal_replication_set_remove_table", -> "bdrinternal_submit_join_request": "/pgd/5.6/reference/functions-internal#bdrinternal_submit_join_request", -> "bdrisolation_test_session_is_blocked": "/pgd/5.6/reference/functions-internal#bdrisolation_test_session_is_blocked", -> "bdrlocal_node_info": "/pgd/5.6/reference/functions-internal#bdrlocal_node_info", -> "bdrmsgb_connect": "/pgd/5.6/reference/functions-internal#bdrmsgb_connect", -> "bdrmsgb_deliver_message": "/pgd/5.6/reference/functions-internal#bdrmsgb_deliver_message", -> "bdrnode_catchup_state_name": "/pgd/5.6/reference/functions-internal#bdrnode_catchup_state_name", -> "bdrnode_kind_name": "/pgd/5.6/reference/functions-internal#bdrnode_kind_name", -> "bdrpeer_state_name": "/pgd/5.6/reference/functions-internal#bdrpeer_state_name", -> "bdrpg_xact_origin": "/pgd/5.6/reference/functions-internal#bdrpg_xact_origin", -> "bdrrequest_replay_progress_update": "/pgd/5.6/reference/functions-internal#bdrrequest_replay_progress_update", -> "bdrreset_relation_stats": "/pgd/5.6/reference/functions-internal#bdrreset_relation_stats", -> "bdrreset_subscription_stats": "/pgd/5.6/reference/functions-internal#bdrreset_subscription_stats", -> "bdrresynchronize_table_from_node": "/pgd/5.6/reference/functions-internal#bdrresynchronize_table_from_node", -> "bdrseq_currval": "/pgd/5.6/reference/functions-internal#bdrseq_currval", -> "bdrseq_lastval": "/pgd/5.6/reference/functions-internal#bdrseq_lastval", -> "bdrseq_nextval": "/pgd/5.6/reference/functions-internal#bdrseq_nextval", -> "bdrshow_subscription_status": "/pgd/5.6/reference/functions-internal#bdrshow_subscription_status", -> "bdrshow_workers": "/pgd/5.6/reference/functions-internal#bdrshow_workers", -> "bdrshow_writers": "/pgd/5.6/reference/functions-internal#bdrshow_writers", -> "bdrtaskmgr_set_leader": "/pgd/5.6/reference/functions-internal#bdrtaskmgr_set_leader", -> "bdrtaskmgr_get_last_completed_workitem": "/pgd/5.6/reference/functions-internal#bdrtaskmgr_get_last_completed_workitem", -> "bdrtaskmgr_work_queue_check_status": "/pgd/5.6/reference/functions-internal#bdrtaskmgr_work_queue_check_status", -> "bdrpglogical_proto_version_ranges": "/pgd/5.6/reference/functions-internal#bdrpglogical_proto_version_ranges", -> "bdrget_min_required_replication_slots": "/pgd/5.6/reference/functions-internal#bdrget_min_required_replication_slots", -> "bdrget_min_required_worker_processes": "/pgd/5.6/reference/functions-internal#bdrget_min_required_worker_processes", -> "bdrstat_get_activity": "/pgd/5.6/reference/functions-internal#bdrstat_get_activity", -> "bdrworker_role_id_name": "/pgd/5.6/reference/functions-internal#bdrworker_role_id_name", -> "bdrlag_history": 
"/pgd/5.6/reference/functions-internal#bdrlag_history", -> "bdrget_raft_instance_by_nodegroup": "/pgd/5.6/reference/functions-internal#bdrget_raft_instance_by_nodegroup", -> "bdrmonitor_camo_on_all_nodes": "/pgd/5.6/reference/functions-internal#bdrmonitor_camo_on_all_nodes", -> "bdrmonitor_raft_details_on_all_nodes": "/pgd/5.6/reference/functions-internal#bdrmonitor_raft_details_on_all_nodes", -> "bdrmonitor_replslots_details_on_all_nodes": "/pgd/5.6/reference/functions-internal#bdrmonitor_replslots_details_on_all_nodes", -> "bdrmonitor_subscription_details_on_all_nodes": "/pgd/5.6/reference/functions-internal#bdrmonitor_subscription_details_on_all_nodes", -> "bdrmonitor_version_details_on_all_nodes": "/pgd/5.6/reference/functions-internal#bdrmonitor_version_details_on_all_nodes", -> "bdrnode_group_member_info": "/pgd/5.6/reference/functions-internal#bdrnode_group_member_info", -> "bdrcolumn_timestamps_create": "/pgd/5.6/reference/clcd#bdrcolumn_timestamps_create" -> } -\ No newline at end of file -diff -r 5/reference/index.mdx 5.6/reference/index.mdx -54a55 -> * [`bdr.leader`](catalogs-visible#bdrleader) -81a83,87 -> * [`bdr.stat_commit_scope`](catalogs-visible#bdrstat_commit_scope) -> * [`bdr.stat_commit_scope_state`](catalogs-visible#bdrstat_commit_scope_state) -> * [`bdr.stat_raft_followers_state`](catalogs-visible#bdrstat_raft_followers_state) -> * [`bdr.stat_raft_state`](catalogs-visible#bdrstat_raft_state) -> * [`bdr.stat_receiver`](catalogs-visible#bdrstat_receiver) -82a89,90 -> * [`bdr.stat_routing_candidate_state`](catalogs-visible#bdrstat_routing_candidate_state) -> * [`bdr.stat_routing_state`](catalogs-visible#bdrstat_routing_state) -83a92,93 -> * [`bdr.stat_worker`](catalogs-visible#bdrstat_worker) -> * [`bdr.stat_writer`](catalogs-visible#bdrstat_writer) -122a133 -> * [`bdr.wait_node_confirm_lsn`](functions#bdrwait_node_confirm_lsn) -152c163 -< * [bdr.logical_transaction_status](functions#bdrlogical_transaction_status) ---- -> * [`bdr.logical_transaction_status`](functions#bdrlogical_transaction_status) -154a166 -> * [`bdr.create_commit_scope`](functions#bdrcreate_commit_scope) -155a168 -> * [`bdr.drop_commit_scope`](functions#bdrdrop_commit_scope) -229a243,244 -> ### [Topology settings](pgd-settings#topology-settings) -> * [`bdr.force_full_mesh`](pgd-settings#bdrforce_full_mesh) -249a265 -> * [`bdr_config`](nodes#bdr_config) -259a276 -> * [`bdr.drop_node_group`](nodes-management-interfaces#bdrdrop_node_group) -266d282 -< * [`bdr.drop_node_group`](nodes-management-interfaces#bdrdrop_node_group) -277a294,296 -> * [commit_scope_degrade_operation](commit-scopes#commit_scope_degrade_operation) -> * [Commit scope targets](commit-scopes#commit-scope-targets) -> * [ORIGIN_GROUP](commit-scopes#origin_group) -290a310,312 -> * [SYNCHRONOUS COMMIT](commit-scopes#synchronous-commit) -> * [DEGRADE ON parameters](commit-scopes#degrade-on-parameters) -> * [commit_scope_degrade_operation](commit-scopes#commit_scope_degrade_operation) -293a316 -> * [DEGRADE ON parameters](commit-scopes#degrade-on-parameters) -296a320 -> * [commit_scope_degrade_operation settings](commit-scopes#commit_scope_degrade_operation-settings) -298c322 -< * [Degrade On parameters](commit-scopes#degrade-on-parameters) ---- -> * [DEGRADE ON parameters](commit-scopes#degrade-on-parameters) -301d324 -< * [SYNCHRONOUS_COMMIT](commit-scopes#synchronous_commit) -409a433 -> * [`bdr.local_leader_change`](catalogs-internal#bdrlocal_leader_change) -diff -r 5/reference/nodes-management-interfaces.mdx 
5.6/reference/nodes-management-interfaces.mdx -11c11 -< This function modifies a PGD node group configuration. ---- -> Modifies a PGD node group configuration. -23,25c23,27 -< - `node_group_name` — Name of the group to change. -< - `config_key` — Key of the option in the node group to change. -< - `config_value` — New value to set for the given key. ---- -> | Name | Description | -> |-------------------|------------------------------------------------| -> | `node_group_name` | Name of the group to change. | -> | `config_key` | Key of the option in the node group to change. | -> | `config_value` | New value to set for the given key. | -27c29 -< `config_value` is parsed into the data type appropriate for the option. ---- -> `config_value` is parsed into the data type appropriate for the option. -29c31 -< The table shows the group options that can be changed using this function. Some parameters can be applied only to the top-level node group. For these parameters, the Groups column contains Top. Some parameters can be applied to the top-level node group and subgroups. For these parameters, the Groups column contains All. When a parameter can be applied only to subgroups, the Groups column contains Sub. ---- -> The table shows the group options that can be changed using this function. -31,44c33,46 -< | Name | Type | Groups | Description | -< | ---- | ---- | ------ | ----------- | -< | `apply_delay` | `interval` | All | How long nodes wait to apply incoming changes. This option is useful mainly to set up a special subgroup with delayed subscriber-only nodes. Don't set this on groups that contain data nodes or on the top-level group. Default is `0s`. | -< | `check_constraints` | `boolean` | Top | Whether the apply process checks the constraints when writing replicated data. We recommend keeping the default value or you risk data loss. Valid values are `on` or `off`. Default is `on`. | -< | `default_commit_scope` | `text` | All | The commit scope to use by default, initially the `local` commit scope. This option applies only to the top-level node group. You can use individual rules for different origin groups of the same commit scope. See [Origin groups](../durability/commit-scopes/#origin-groups) for more details. | -< | `enable_proxy_routing` | `boolean` | All | Where [`pgd-proxy`](../routing/proxy) through the group leader is enabled for given group. Valid values are `on` or `off`. Default is `off`. | -< | `enable_raft` | `boolean` | Sub | Whether group has its own Raft consensus. This option is necessary for setting `enable_proxy_routing` to `on`. This option is always `on` for the top-level group. Valid values are `on` or `off`. Default is `off` for subgroups. | -< | `enable_wal_decoder` | `boolean` | Top | Enables/disables the decoding worker process. You can't enable the decoding worker process if `streaming_mode` is already enabled. Valid values are `on` or `off`. Default is `off`. | -< | `location` | `text` | All | Information about group location. This option is purely metadata for monitoring. Default is `''` (empty string). | -< | `num_writers` | `integer` | Top | Number of parallel writers for the subscription backing this node group. Valid values are `-1` or a positive integer. `-1` means the value specified by the GUC [`bdr.writers_per_subscription`](pgd-settings#bdrwriters_per_subscription) is used. `-1` is the default. | -< | `route_reader_max_lag` | `integer` | All | Maximum lag in bytes for a node to be considered a viable read-only node. Currently reserved for future use. 
| -< | `route_writer_max_lag` | `integer` | All | Maximum lag in bytes of the new write candidate to be selected as write leader. If no candidate passes this, no writer is selected. Default is `-1`. | -< | `route_writer_wait_flush` | `boolean` | All | Whether to switch if PGD needs to wait for the flush. Currently reserved for future use. | -< | `streaming_mode` | `text` | Top | Enables/disables streaming of large transactions. When set to `off`, streaming is disabled. When set to any other value, large transactions are decoded while they're still in progress, and the changes are sent to the downstream. If the value is set to `file`, then the incoming changes of streaming transactions are stored in a file and applied only after the transaction is committed on upstream. If the value is set to `writer`, then the incoming changes are directly sent to one of the writers, if available.
If [parallel apply](../parallelapply) is disabled or no writer is free to handle streaming transactions, then the changes are written to a file and applied after the transaction is committed. If the value is set to `auto`, PGD tries to intelligently pick between `file` and `writer`, depending on the transaction property and available resources. You can't enable `streaming_mode` if the WAL decoder is already enabled. Default is `auto`.

For more details, see [Transaction streaming](../transaction-streaming). | ---- -> | Name | Type | Description | -> |---------------------------|------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -> | `apply_delay` | `interval` | How long nodes wait to apply incoming changes. This option is useful mainly to set up a special subgroup with delayed subscriber-only nodes. Don't set this on groups that contain data nodes or on the top-level group. Default is `0s`. | -> | `check_constraints` | `boolean` | Whether the apply process checks the constraints when writing replicated data. We recommend keeping the default value or you risk data loss. Valid values are `on` or `off`. Default is `on`. | -> | `default_commit_scope` | `text` | The commit scope to use by default, initially the `local` commit scope. This option applies only to the top-level node group. You can use individual rules for different origin groups of the same commit scope. See [Origin groups](../commit-scopes/origin_groups) for more details. | -> | `enable_proxy_routing` | `boolean` | Where [`pgd-proxy`](../routing/proxy) through the group leader is enabled for given group. Valid values are `on` or `off`. Default is `off`. | -> | `enable_raft` | `boolean` | Whether group has its own Raft consensus. This option is necessary for setting `enable_proxy_routing` to `on`. This option is always `on` for the top-level group. Valid values are `on` or `off`. Default is `off` for subgroups. | -> | `enable_wal_decoder` | `boolean` | Enables/disables the decoding worker process. You can't enable the decoding worker process if `streaming_mode` is already enabled. Valid values are `on` or `off`. Default is `off`. | -> | `location` | `text` | Information about group location. This option is purely metadata for monitoring. Default is `''` (empty string). | -> | `num_writers` | `integer` | Number of parallel writers for the subscription backing this node group. Valid values are `-1` or a positive integer. `-1` means the value specified by the GUC [`bdr.writers_per_subscription`](pgd-settings#bdrwriters_per_subscription) is used. `-1` is the default. | -> | `route_reader_max_lag` | `integer` | Maximum lag in bytes for a node to be considered a viable read-only node. Currently reserved for future use. | -> | `route_writer_max_lag` | `integer` | Maximum lag in bytes of the new write candidate to be selected as write leader. If no candidate passes this, no writer is selected. Default is `-1`. 
| -> | `route_writer_wait_flush` | `boolean` | Whether to switch if PGD needs to wait for the flush. Currently reserved for future use. | -> | `streaming_mode` | `text` | Enables/disables streaming of large transactions. When set to `off`, streaming is disabled. When set to any other value, large transactions are decoded while they're still in progress, and the changes are sent to the downstream. If the value is set to `file`, then the incoming changes of streaming transactions are stored in a file and applied only after the transaction is committed on upstream. If the value is set to `writer`, then the incoming changes are directly sent to one of the writers, if available.
If [parallel apply](../parallelapply) is disabled or no writer is free to handle streaming transactions, then the changes are written to a file and applied after the transaction is committed. If the value is set to `auto`, PGD tries to intelligently pick between `file` and `writer`, depending on the transaction property and available resources. You can't enable `streaming_mode` if the WAL decoder is already enabled. Default is `auto`.

For more details, see [Transaction streaming](../transaction-streaming). | -70c72 -< This function changes the connection string (`DSN`) of a specified node. ---- -> Changes the connection string (`DSN`) of a specified node. -80,81c82,85 -< - `node_name` — Name of an existing node to alter. -< - `interface_dsn` — New connection string for a node. ---- -> | Name | Description | -> |-----------------|------------------------------------| -> | `node_name` | Name of an existing node to alter. | -> | `interface_dsn` | New connection string for a node. | -96c100 -< This function modifies the PGD node routing configuration. ---- -> Modifies the PGD node routing configuration. -108,110c112,116 -< - `node_name` — Name of the node to change. -< - `config_key` — Key of the option in the node to change. -< - `config_value` — New value to set for the given key. ---- -> | Name | Description | -> |----------------|------------------------------------------------| -> | `node_name` | Name of the node to change. | -> | `config_key` | Key of the option in the node to change. | -> | `config_value` | New value to set for the given key. | -113,117d118 -< - `route_priority` — Relative routing priority of the node against other nodes in the same node group. Default is `'-1'`. -< - `route_fence` — Whether the node is fenced from routing. When true, the node can't receive connections from PGD Proxy. Default is `'f'` (false). -< - `route_writes` — Whether writes can be routed to this node, that is, whether the node can become write leader. Default is `'t'` (true) for data nodes and `'f'` (false) for other node types. -< - `route_reads` — Whether read-only connections can be routed to this node. Currently reserved for future use. Default is `'t'` (true) for data and subscriber-only nodes, `'f'` (false) for witness and standby nodes. -< - `route_dsn` — The dsn for the proxy to use to connect to this node. This option is optional. If not set, it defaults to the node's `node_dsn` value. -118a120,127 -> | Config Key | Description | -> |------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -> | `route_priority` | Relative routing priority of the node against other nodes in the same node group. Default is `'-1'`. | -> | `route_fence` | Whether the node is fenced from routing. When true, the node can't receive connections from PGD Proxy. Default is `'f'` (false). | -> | `route_writes` | Whether writes can be routed to this node, that is, whether the node can become write leader. Default is `'t'` (true) for data nodes and `'f'` (false) for other node types. | -> | `route_reads` | Whether read-only connections can be routed to this node. Currently reserved for future use. Default is `'t'` (true) for data and subscriber-only nodes, `'f'` (false) for witness and standby nodes. | -> | `route_dsn` | The dsn for the proxy to use to connect to this node. This option is optional. If not set, it defaults to the node's `node_dsn` value. | -> -121,122c130,131 -< This function enables either the specified subscription or all the subscriptions of the -< local PGD node. This is also known as resume subscription. ---- -> Enables either the specified subscription or all the subscriptions of the local PGD node. -> This is also known as resume subscription. -137,139c146,149 -< - `subscription_name` — Name of the subscription to enable. 
If NULL -< (the default), all subscriptions on the local node are enabled. -< - `immediate` — This parameter currently has no effect. ---- -> | Name | Description | -> |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -> | `subscription_name` | Name of the subscription to enable. If NULL (the default), all subscriptions on the local node are enabled. | -> | `immediate` | Used to force the action immediately, starting all the workers associated with the enabled subscription. When this option is `true`, you can't run this function inside of the transaction block. | -140a151 -> -151,154c162,165 -< This function disables either the specified subscription or all the -< subscriptions of the local PGD node. Optionally, it can also immediately stop -< all the workers associated with the disabled subscriptions. This is also known as pause -< subscription. No error is thrown if the subscription is already disabled. ---- -> Disables either the specified subscription or all the subscriptions of the local PGD node. -> Optionally, it can also immediately stop all the workers associated with the disabled subscriptions. -> This is also known as pause subscription. -> No error is thrown if the subscription is already disabled. -169,176c180,184 -< - `subscription_name` — Name of the subscription to disable. If NULL -< (the default), all subscriptions on the local node are disabled. -< - `immediate` — Used to force the action immediately, stopping -< all the workers associated with the disabled subscription. When this option is -< `true`, you can't run this function inside of the transaction block. -< - `fast` — This argument influences the behavior of `immediate`. -< If set to `true` (the default), it stops all the workers associated with the -< disabled subscription without waiting for them to finish current work. ---- -> | Name | Description | -> |---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -> | `subscription_name` | Name of the subscription to disable. If NULL (the default), all subscriptions on the local node are disabled. | -> | `immediate` | Used to force the action immediately, stopping all the workers associated with the disabled subscription. When this option is `true`, you can't run this function inside of the transaction block. | -> | `fast` | This argument influences the behavior of `immediate`. If set to `true` (the default), it stops all the workers associated with the disabled subscription without waiting for them to finish current work. | -192c200 -< This function creates a node. ---- -> Creates a node. -204,210c212,216 -< - `node_name` — Name of the new node. Only one node is allowed per -< database. Valid node names consist of lowercase letters, numbers, -< hyphens, and underscores. -< - `local_dsn` — Connection string to the node. -< - `node_kind` — One of `data` (the default), `standby`, `subscriber-only`, -< or `witness`. If you don't set this parameter, or if you provide `NULL`, -< the default `data` node kind is used. 
---- -> | Name | Description | -> |-------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -> | `node_name` | Name of the new node. Only one node is allowed per database. Valid node names consist of lowercase letters, numbers, hyphens, and underscores. | -> | `local_dsn` | Connection string to the node. | -> | `node_kind` | One of `data` (the default), `standby`, `subscriber-only`, or `witness`. If you don't set this parameter, or if you provide `NULL`, the default `data` node kind is used. | -226,228c232,234 -< This function creates a PGD node group. By default, the local node joins the -< group as the only member. You can add more nodes to the group with -< [`bdr.join_node_group()`](#bdrjoin_node_group). ---- -> Creates a PGD node group. -> By default, the local node joins the group as the only member. -> You can add more nodes to the group with [`bdr.join_node_group()`](#bdrjoin_node_group). -241,255c247,252 -< - `node_group_name` — Name of the new PGD group. As with the node -< name, valid group names must consist of only lowercase letters, numbers, -< and underscores. -< - `parent_group_name` — If a node subgroup is being created, this must be the -< name of the parent group. Provide `NULL` (the default) when creating the main -< node group for the cluster. -< - `join_node_group` — Determines whether the node -< joins the group being created. The default value is `true`. Providing `false` when -< creating a subgroup means the local node won't join the new group, for example, when -< creating an independent remote group. -< In this case, you must specify `parent_group_name`. -< - `node_group_type` — The valid values are `NULL` or `subscriber-only`. `NULL` (the default) is for creating a normal, general-purpose -< node group. `subscriber-only` is for creating [subscriber-only groups](../node_management/subscriber_only/) -< whose members receive changes only from the fully joined nodes in the cluster -< but that never send changes to other nodes. ---- -> | Name | Description | -> |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -> | `node_group_name` | Name of the new PGD group. As with the node name, valid group names consist of only lowercase letters, numbers, and underscores. | -> | `parent_group_name` | If a node subgroup is being created, this must be the name of the parent group. Provide `NULL` (the default) when creating the main node group for the cluster. | -> | `join_node_group` | Determines whether the node joins the group being created. The default value is `true`. Providing `false` when creating a subgroup means the local node won't join the new group, for example, when creating an independent remote group. In this case, you must specify `parent_group_name`. | -> | `node_group_type` | The valid values are `NULL` or `subscriber-only`. `NULL` (the default) is for creating a normal, general-purpose node group. `subscriber-only` is for creating [subscriber-only groups](../nodes/subscriber_only/) whose members receive changes only from the fully joined nodes in the cluster but that never send changes to other nodes. 
| -268a266,286 -> ## `bdr.drop_node_group` -> -> Drops an empty PGD node group. If there are any joined nodes in the group, the function will fail. -> -> ### Synopsis -> -> ```sql -> bdr.drop_node_group(node_group_name text) -> ``` -> -> ### Parameters -> -> | Name | Description | -> |-------------------|--------------------------------| -> | `node_group_name` | Name of the PGD group to drop. | -> -> ### Notes -> -> This function passes a request to the group consensus mechanism to drop the group. -> The function isn't transactional. The dropping process happens in the background, and you can't roll it back. -> -271c289 -< This function joins the local node to an already existing PGD group. ---- -> Joins the local node to an already existing PGD group. -287,302c305,311 -< - `join_target_dsn` — Specifies the connection string to an existing (source) node -< in the PGD group you want to add the local node to. -< - `node_group_name` — Optional name of the PGD group. Defaults to NULL, which -< tries to detect the group name from information present on the source -< node. -< - `wait_for_completion` — Wait for the join process to complete before -< returning. Defaults to `true`. -< - `synchronize_structure` — Set the kind of structure (schema) synchronization -< to do during the join. The default setting is `all`, which synchronizes -< the complete database structure, The other available setting is `none`, which doesn't -< synchronize any structure. However, it still synchronizes data (except for witness -< nodes, which by design don't synchronize data). -< - `pause_in_standby` — Optionally tells the join process to join only as a -< logical standby node, which can be later promoted to a full member. -< This option is deprecated and will be disabled or removed in future -< versions of PGD. ---- -> | Name | Description | -> |-------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -> | `join_target_dsn` | Specifies the connection string to an existing (source) node in the PGD group you want to add the local node to. | -> | `node_group_name` | Optional name of the PGD group. Defaults to NULL, which tries to detect the group name from information present on the source node. | -> | `wait_for_completion` | Wait for the join process to complete before returning. Defaults to `true`. | -> | `synchronize_structure` | Specifies whether to perform database structure (schema) synchronization during the join. `all`, the default setting, synchronizes the complete database structure. `none` does not synchronize any structure. However, data will still be synchronized, meaning the database structure must already be present on the joining node. Note that by design, neither schema nor data will ever be synchronized to witness nodes. | -> | `pause_in_standby` | Optionally tells the join process to join only as a logical standby node, which can be later promoted to a full member. This option is deprecated and will be disabled or removed in future versions of PGD. 
| -311c320 -< the log files and the [`bdr.event_summary`](/pgd/5/reference/catalogs-internal#bdrevent_summary) ---- -> the log files and the [`bdr.event_summary`](/pgd/latest/reference/catalogs-internal#bdrevent_summary) -361,368c370,374 -< - `node_name` — Name of an existing node to part. -< - `wait_for_completion` — If `true`, the function doesn't return until the -< node is fully parted from the cluster. Otherwise the function -< starts the parting procedure and returns immediately without waiting. -< Always set to `false` when executing on the local node or when using `force`. -< - `force` — Forces removal of the node on the local node. This sets the -< node state locally if consensus can't be reached or if the node-parting -< process is stuck. ---- -> | Name | Description | -> |-----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -> | `node_name` | Name of an existing node to part. | -> | `wait_for_completion` | If `true`, the function doesn't return until the node is fully parted from the cluster. Otherwise, the function starts the parting procedure and returns immediately without waiting. Always set to `false` when executing on the local node or when using `force`. | -> | `force` | Forces removal of the node on the local node. This sets the node state locally if consensus can't be reached or if the node-parting process is stuck. | -400c406 -< This function promotes a local logical standby node to a full member of the PGD group. ---- -> Promotes a local logical standby node to a full member of the PGD group. -425c431 -< This function switches the local node from its current subgroup to another subgroup in the same existing PGD node group. ---- -> Switches the local node from its current subgroup to another subgroup in the same existing PGD node group. -438,440c444,447 -< - `node_group_name` — Name of the PGD group or subgroup. -< - `wait_for_completion` — Wait for the switch process to complete before -< returning. Defaults to `true`. ---- -> | Name | Description | -> |-----------------------|-------------------------------------------------------------------------------| -> | `node_group_name` | Name of the PGD group or subgroup. | -> | `wait_for_completion` | Wait for the switch process to complete before returning. Defaults to `true`. | -442,446c449,450 -< If `wait_for_completion` is specified as `false`, -< this is an asynchronous call that returns as soon as the switching procedure starts. -< You can see progress of the switch in logs and the -< `bdr.event_summary` information view or by calling the -< `bdr.wait_for_join_completion()` function after `bdr.switch_node_group()` returns. ---- -> If `wait_for_completion` is set to `false`, this is an asynchronous call that returns as soon as the switching procedure starts. -> You can see progress of the switch in logs and the `bdr.event_summary` information view or by calling the `bdr.wait_for_join_completion()` function after `bdr.switch_node_group()` returns. -453,456c457 -< The function isn't transactional. The switching process happens in the -< background and you can't roll it back. The changes are visible only -< to the local transaction if `wait_for_completion` was set to `true` or by calling -< `bdr.wait_for_join_completion` later. ---- -> The function isn't transactional. 
The switching process happens in the background and you can't roll it back. The changes are visible only to the local transaction if `wait_for_completion` was set to `true` or by calling `bdr.wait_for_join_completion` later. -458c459,460 -< The local node changes membership from its current subgroup to another subgroup in the same PGD node group without needing to part the cluster. The node's kind must match that of existing nodes in the target subgroup. ---- -> The local node changes membership from its current subgroup to another subgroup in the same PGD node group without needing to part the cluster. -> The node's kind must match that of existing nodes in the target subgroup. -462c464 -< Restrictions: Currently, the function allows switching only between a subgroup and its PGD node group. To effect a move between subgroups you need to make two separate calls: 1) switch from subgroup to node group and, 2) switch from node group to other subgroup. ---- -> Restrictions: currently, the function allows switching only between a subgroup and its PGD node group. To effect a move between subgroups you need to make two separate calls: 1) switch from subgroup to node group and, 2) switch from node group to other subgroup. -476,477c478,480 -< - `verbose_progress` — Optionally prints information about individual steps -< taken during the join procedure. ---- -> | Name | Description | -> |--------------------|---------------------------------------------------------------------------------------| -> | `verbose_progress` | Optionally prints information about individual steps taken during the join procedure. | -488c491 -< This function changes the configuration parameters of an existing PGD group. ---- -> Changes the configuration parameters of an existing PGD group. -525c528 -< | `default_commit_scope` | The commit scope to use by default, initially the `local` commit scope. This parameter applies only to the top-level node group. You can use individual rules for different origin groups of the same commit scope. See [Origin groups](../durability/commit-scopes/#origin-groups) for more details. | ---- -> | `default_commit_scope` | The commit scope to use by default, initially the `local` commit scope. This parameter applies only to the top-level node group. You can use individual rules for different origin groups of the same commit scope. See [Origin groups](../commit-scopes/origin_groups) for more details. | -539,557d541 -< ## `bdr.drop_node_group` -< -< This function drops an empty PGD node group. If there are any joined nodes in the group, the function will fail. -< -< ### Synopsis -< -< ```sql -< bdr.drop_node_group(node_group_name text) -< ``` -< -< ### Parameters -< -< - `node_group_name` — Name of the PGD group to drop. -< -< -< ### Notes -< -< - This function passes a request to the group consensus mechanism to drop the group. -< - The function isn't transactional. The dropping process happens in the background, and you can't roll it back. -diff -r 5/reference/nodes.mdx 5.6/reference/nodes.mdx -4a5 -> deepToC: true -9,36c10,25 -< - `NONE`: Node state is unset when the worker starts, expected to be set quickly -< to the current known state. -< - `CREATED`: `bdr.create_node()` was executed, but the node isn't a -< member of any EDB Postgres Distributed cluster yet. -< - `JOIN_START`: `bdr.join_node_group()` begins to join the local node to an -< existing EDB Postgres Distributed cluster. 
-< - `JOINING`: The node join has started and is currently at the initial sync phase, -< creating the schema and data on the node. -< - `CATCHUP`: Initial sync phase is completed. Now the join is at the last step -< of retrieving and applying transactions that were performed on the upstream -< peer node since the join started. -< - `STANDBY`: Node join finished but hasn't yet started to broadcast changes. -< All joins spend some time in this state, but if defined as a logical standby, -< the node continues in this state. -< - `PROMOTE`: Node was a logical standby and `bdr.promote_node` was just called to -< move the node state to `ACTIVE`. These two `PROMOTE`states have to be coherent -< to the fact that only one node can be with a state higher than `STANDBY` but -< lower than `ACTIVE`. -< - `PROMOTING`: Promotion from logical standby to full PGD node is in progress. -< - `ACTIVE`: The node is a full PGD node and is currently `ACTIVE`. This is the -< most common node status. -< - `PART_START`: Node was `ACTIVE` or `STANDBY` and `bdr.part_node` was just called -< to remove the node from the EDB Postgres Distributed cluster. -< - `PARTING`: Node disconnects from other nodes and plays no further part in -< consensus or replication. -< - `PART_CATCHUP`: Nonparting nodes synchronize any missing data from the -< recently parted node. -< - `PARTED`: Node parting operation is now complete on all nodes. ---- -> | State | Description | -> |----------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -> | `NONE` | Node state is unset when the worker starts, expected to be set quickly to the current known state. | -> | `CREATED` | `bdr.create_node()` was executed, but the node isn't a member of any EDB Postgres Distributed cluster yet. | -> | `JOIN_START` | `bdr.join_node_group()` begins to join the local node to an existing EDB Postgres Distributed cluster. | -> | `JOINING` | The node join has started and is currently at the initial sync phase, creating the schema and data on the node. | -> | `CATCHUP` | Initial sync phase is completed. Now the join is at the last step of retrieving and applying transactions that were performed on the upstream peer node since the join started. | -> | `STANDBY` | Node join finished but hasn't yet started to broadcast changes. All joins spend some time in this state, but if defined as a logical standby, the node continues in this state. | -> | `PROMOTE` | Node was a logical standby and `bdr.promote_node` was just called to move the node state to `ACTIVE`. These two `PROMOTE` states have to be coherent to the fact that only one node can be with a state higher than `STANDBY` but lower than `ACTIVE`. | -> | `PROMOTING` | Promotion from logical standby to full PGD node is in progress. | -> | `ACTIVE` | The node is a full PGD node and is currently `ACTIVE`. This is the most common node status. | -> | `PART_START` | Node was `ACTIVE` or `STANDBY` and `bdr.part_node` was just called to remove the node from the EDB Postgres Distributed cluster. | -> | `PARTING` | Node disconnects from other nodes and plays no further part in consensus or replication. | -> | `PART_CATCHUP` | Nonparting nodes synchronize any missing data from the recently parted node. | -> | `PARTED` | Node parting operation is now complete on all nodes. 
| -> -124a114,170 -> -> ### `bdr_config` -> -> This command-line utility allows you to examine the configuration of a PGD installation. -> It is analogous to the `pg_config` utility that comes with PostgreSQL. You can use it to assist -> in troubleshooting and support. -> -> #### Synopsis -> -> ```shell -> bdr_config [OPTION] ... -> ``` -> -> #### Options -> -> | Option           | Description | -> | --- | --- | -> | `--all` | Show all the keys and values in the configuration. | -> | `--version` | Show only the BDR version related keys and values. This includes the full version of the BDR extension, the Postgres version and flavor it is running against, and the BDRPG and BDR plugin API versions. | -> | `--debug` | Show only the BDR debug keys and values, including build information and feature enablement. | -> -> #### Example -> -> ```shell -> $ /usr/lib/edb-as/16/bin/bdr_config --all -> __OUTPUT__ -> BDR_VERSION_COMPLETE=5.6.0 -> BDR_VERSION_NUM=50600 -> PG_VERSION=16.4.1 (Debian 16.4.1~~snapshot11329862135.2980.1.88fbec6-1.bookworm) -> PG_VERSION_NUM=160004 -> PG_FLAVOR=EPAS -> BDRPG_API_VERSION_NUM=202309131 -> BDR_PLUGIN_API_VERSION=7011 -> USE_ASSERT_CHECKING=false -> USE_VALGRIND=false -> EXT_ENABLE_DTRACE=false -> HAVE_LAG_CONTROL=true -> HAVE_ASSESS_UPDATE_RI_HOOK=false -> HAVE_BDRPG_PROBES=false -> HAVE_CAMO=true -> HAVE_DEADLOCK_DETECTOR_HOOK=true -> HAVE_HEAP_UPDATE_HOOK=true -> HAVE_LAG_TRACKER=true -> HAVE_LCR=true -> HAVE_LOG_TOAST_COLUMNS=false -> HAVE_MISC_HOOKS=true -> HAVE_MISSING_PARTITION_CONFLICT=true -> HAVE_MULTI_PITR=false -> HAVE_SELECTIVE_BASEBACKUP=false -> HAVE_SNAPSHOT_TIMESTAMP=false -> HAVE_STREAMING_XACTS=true -> HAVE_SYNC_COMMIT_HOOK=true -> HAVE_TWOPHASE_DATA_HOOKS=true -> HAVE_XLOG_FIND_NEXT_RECORD=true -> HAVE_DETACH_CONCURRENTLY=true -> HAVE_ANALYTICS=true -> ``` -diff -r 5/reference/pgd-settings.mdx 5.6/reference/pgd-settings.mdx -383c383 -< Sets the output format of [CRDT data types](../consistency/crdt). ---- -> Sets the output format of [CRDT data types](../conflict-management/crdt). -394c394 -< Sets the current (or default) [commit scope](../durability/commit-scopes) (default ---- -> Sets the current (or default) [commit scope](../commit-scopes/commit-scopes) (default -492c492 -< Tracks apply statistics for each subscription with [`bdr.stat_subscription`](/pgd/5/reference/catalogs-visible#bdrstat_subscription) (default is `on`). ---- -> Tracks apply statistics for each subscription with [`bdr.stat_subscription`](/pgd/latest/reference/catalogs-visible#bdrstat_subscription) (default is `on`). -496c496 -< Tracks apply statistics for each relation with [`bdr.stat_relation`](/pgd/5/reference/catalogs-visible#bdrstat_relation) (default is `off`). ---- -> Tracks apply statistics for each relation with [`bdr.stat_relation`](/pgd/latest/reference/catalogs-visible#bdrstat_relation) (default is `off`). -500c500 -< Tracks lock timing when tracking statistics for relations with [`bdr.stat_relation`](/pgd/5/reference/catalogs-visible#bdrstat_relation) (default is `off`). ---- -> Tracks lock timing when tracking statistics for relations with [`bdr.stat_relation`](/pgd/latest/reference/catalogs-visible#bdrstat_relation) (default is `off`). -506c506 -< Enables logical change record (LCR) sending on a single node with a [decoding worker](../node_management/decoding_worker/) (default is false). When set to true, a decoding worker process starts, and WAL senders send the LCRs it produces. 
If set back to false, any WAL senders using LCR are restarted and use the WAL directly. ---- -> Enables logical change record (LCR) sending on a single node with a [decoding worker](../decoding_worker/) (default is false). When set to true, a decoding worker process starts, and WAL senders send the LCRs it produces. If set back to false, any WAL senders using LCR are restarted and use the WAL directly. -528c528 -< Logical change record (LCR) file cleanup interval (default is 3 minutes). When the [decoding worker](../node_management/decoding_worker/) is enabled, the decoding worker stores LCR files as a buffer. These files are periodically cleaned, and this setting controls the interval between any two consecutive cleanups. Setting it to zero disables cleanup. ---- -> Logical change record (LCR) file cleanup interval (default is 3 minutes). When the [decoding worker](../decoding_worker/) is enabled, the decoding worker stores LCR files as a buffer. These files are periodically cleaned, and this setting controls the interval between any two consecutive cleanups. Setting it to zero disables cleanup. -583a584,589 -> ## Topology settings -> -> ### `bdr.force_full_mesh` -> -> Forces the full mesh topology (default is `on`). When set to `off`, PGD will attempt to use the optimized topology for subscriber-only groups. This setting is only effective when the requirements for the optimized topology are met. See [Optimizing subscriber-only groups](../nodes/subscriber_only/optimizing-so) for more information. -> -585c591 -< ---- -> -diff -r 5/reference/sequences.mdx 5.6/reference/sequences.mdx -28c28 -< bdr.alter_sequence_set_kind(seqoid regclass, seqkind text) ---- -> bdr.alter_sequence_set_kind(seqoid regclass, seqkind text, start bigint DEFAULT NULL) -36a37 -> - `start` — Allows specifying new starting point for galloc and local sequences. -diff -r 5/reference/streamtriggers/rowfunctions.mdx 5.6/reference/streamtriggers/rowfunctions.mdx -80c80 -< See [Conflict types](../../consistency/conflicts/02_types_of_conflict/) ---- -> See [Conflict types](../../conflict-management/conflicts/02_types_of_conflict/) -Only in 5.6/rel_notes: .DS_Store -diff -r 5/rel_notes/index.mdx 5.6/rel_notes/index.mdx -2,4c2,5 -< title: "EDB Postgres Distributed release notes" -< navTitle: "Release notes" -< description: "Release notes for EDB Postgres Distributed" ---- -> title: EDB Postgres Distributed 5.6+ release notes -> navTitle: Release notes -> description: Release notes for EDB Postgres Distributed 5.6 and later -> indexCards: none -6,14c7,8 -< - pgd_5.5.1_rel_notes -< - pgd_5.5.0_rel_notes -< - pgd_5.4.1_rel_notes -< - pgd_5.4.0_rel_notes -< - pgd_5.3.0_rel_notes -< - pgd_5.2.0_rel_notes -< - pgd_5.1.0_rel_notes -< - pgd_5.0.1_rel_notes -< - pgd_5.0.0_rel_notes ---- -> - pgd_5.6.1_rel_notes -> - pgd_5.6.0_rel_notes -17,21d10 -< The EDB Postgres Distributed documentation describes the latest version of EDB -< Postgres Distributed 5, including minor releases and patches. The release notes -< provide information on what was new in each release. For new functionality -< introduced in a minor or patch release, the content also indicates the release -< that introduced the feature. 
-23,33c12,18 -< | Release Date | EDB Postgres Distributed | BDR extension | PGD CLI | PGD Proxy | -< |--------------|------------------------------|---------------|---------|-----------| -< | 31 May 2024 | [5.5.1](pgd_5.5.1_rel_notes) | 5.5.1 | 5.5.0 | 5.5.0 | -< | 16 May 2024 | [5.5.0](pgd_5.5.0_rel_notes) | 5.5.0 | 5.5.0 | 5.5.0 | -< | 03 Apr 2024 | [5.4.1](pgd_5.4.1_rel_notes) | 5.4.1 | 5.4.0 | 5.4.0 | -< | 05 Mar 2024 | [5.4.0](pgd_5.4.0_rel_notes) | 5.4.0 | 5.4.0 | 5.4.0 | -< | 14 Nov 2023 | [5.3.0](pgd_5.3.0_rel_notes) | 5.3.0 | 5.3.0 | 5.3.0 | -< | 04 Aug 2023 | [5.2.0](pgd_5.2.0_rel_notes) | 5.2.0 | 5.2.0 | 5.2.0 | -< | 16 May 2023 | [5.1.0](pgd_5.1.0_rel_notes) | 5.1.0 | 5.1.0 | 5.1.0 | -< | 21 Mar 2023 | [5.0.1](pgd_5.0.1_rel_notes) | 5.0.0 | 5.0.1 | 5.0.1 | -< | 21 Feb 2023 | [5.0.0](pgd_5.0.0_rel_notes) | 5.0.0 | 5.0.0 | 5.0.0 | ---- -> The EDB Postgres Distributed documentation describes the latest version of EDB Postgres Distributed 5, including minor releases and patches. The release notes provide information on what was new in each release. For new functionality introduced in a minor or patch release, the content also indicates the release that introduced the feature. -> -> -> | Release Date | EDB Postgres Distributed | BDR extension | PGD CLI | PGD Proxy | -> |---|---|---|---|---| -> | 25 Nov 2024 | [5.6.1](./pgd_5.6.1_rel_notes) | 5.6.1 | 5.6.1 | 5.6.1 | -> | 15 Oct 2024 | [5.6.0](./pgd_5.6.0_rel_notes) | 5.6.0 | 5.6.0 | 5.6.0 | -Only in 5/rel_notes: pgd_5.0.0_rel_notes.mdx -Only in 5/rel_notes: pgd_5.0.1_rel_notes.mdx -Only in 5/rel_notes: pgd_5.1.0_rel_notes.mdx -Only in 5/rel_notes: pgd_5.2.0_rel_notes.mdx -Only in 5/rel_notes: pgd_5.3.0_rel_notes.mdx -Only in 5/rel_notes: pgd_5.4.0_rel_notes.mdx -Only in 5/rel_notes: pgd_5.4.1_rel_notes.mdx -Only in 5/rel_notes: pgd_5.5.0_rel_notes.mdx -Only in 5/rel_notes: pgd_5.5.1_rel_notes.mdx -Only in 5.6/rel_notes: pgd_5.6.0_rel_notes.mdx -Only in 5.6/rel_notes: pgd_5.6.1_rel_notes.mdx -Only in 5.6/rel_notes: src -diff -r 5/repsets.mdx 5.6/repsets.mdx -20c20 -< You can create replication sets using [`bdr.create_replication_set`](/pgd/5/reference/repsets-management#bdrcreate_replication_set), ---- -> You can create replication sets using [`bdr.create_replication_set`](/pgd/latest/reference/repsets-management#bdrcreate_replication_set), -36c36 -< administration function calls. Use [`bdr.replication_set_add_ddl_filter`](/pgd/5/reference/repsets-ddl-filtering#bdrreplication_set_add_ddl_filter) ---- -> administration function calls. Use [`bdr.replication_set_add_ddl_filter`](/pgd/latest/reference/repsets-ddl-filtering#bdrreplication_set_add_ddl_filter) -41c41 -< [`bdr.alter_node_replication_sets`](/pgd/5/reference/repsets-management#bdralter_node_replication_sets). ---- -> [`bdr.alter_node_replication_sets`](/pgd/latest/reference/repsets-management#bdralter_node_replication_sets). -149c149 -< You can control membership using [`bdr.replication_set_add_table`](/pgd/5/reference/repsets-membership#bdrreplication_set_add_table) and [`bdr.replication_set_remove_table`](/pgd/5/reference/repsets-membership#bdrreplication_set_remove_table). ---- -> You can control membership using [`bdr.replication_set_add_table`](/pgd/latest/reference/repsets-membership#bdrreplication_set_add_table) and [`bdr.replication_set_remove_table`](/pgd/latest/reference/repsets-membership#bdrreplication_set_remove_table). 
-248c248
-< You can use [`bdr.replication_set_add_ddl_filter`](/pgd/5/reference/repsets-ddl-filtering#bdrreplication_set_add_ddl_filter) and [`bdr.replication_set_remove_ddl_filter`](/pgd/5/reference/repsets-ddl-filtering#bdrreplication_set_remove_ddl_filter) to manipulate DDL filters.
----
-> You can use [`bdr.replication_set_add_ddl_filter`](/pgd/latest/reference/repsets-ddl-filtering#bdrreplication_set_add_ddl_filter) and [`bdr.replication_set_remove_ddl_filter`](/pgd/latest/reference/repsets-ddl-filtering#bdrreplication_set_remove_ddl_filter) to manipulate DDL filters.
-diff -r 5/routing/administering.mdx 5.6/routing/administering.mdx
-15c15,16
-< !!! Note
----
-> !!!Note
->
-24c25
-< Use the [`bdr.routing_leadership_transfer()`](/pgd/5/reference/routing#bdrrouting_leadership_transfer) function.
----
-> Use the [`bdr.routing_leadership_transfer()`](/pgd/latest/reference/routing#bdrrouting_leadership_transfer) function.
-30a32
->
-39c41
-< You can use the [`switchover`](/pgd/5/cli/command_ref/pgd_switchover/) command to perform a switchover operation.
----
-> You can use the [`switchover`](/pgd/latest/cli/command_ref/pgd_switchover/) command to perform a switchover operation.
-diff -r 5/routing/configuration.mdx 5.6/routing/configuration.mdx
-11c11
-< You can enable routing decisions by calling the [`bdr.alter_node_group_option()`](/pgd/5/reference/nodes-management-interfaces#bdralter_node_group_option) function.
----
-> You can enable routing decisions by calling the [`bdr.alter_node_group_option()`](/pgd/latest/reference/nodes-management-interfaces#bdralter_node_group_option) function.
-30c30
-< Set per-node configuration of routing using [`bdr.alter_node_option()`](/pgd/5/reference/nodes-management-interfaces#bdralter_node_option). The
----
-> Set per-node configuration of routing using [`bdr.alter_node_option()`](/pgd/latest/reference/nodes-management-interfaces#bdralter_node_option). The
-48c48
-< You can add a proxy configuration using [`bdr.create_proxy`](/pgd/5/reference/routing#bdrcreate_proxy).
----
-> You can add a proxy configuration using [`bdr.create_proxy`](/pgd/latest/reference/routing#bdrcreate_proxy).
-59c59
-< You can configure options for each proxy using the [`bdr.alter_proxy_option()`](/pgd/5/reference/routing#bdralter_proxy_option) function.
----
-> You can configure options for each proxy using the [`bdr.alter_proxy_option()`](/pgd/latest/reference/routing#bdralter_proxy_option) function.
-diff -r 5/routing/index.mdx 5.6/routing/index.mdx
-18c18
-< * [PGD Proxy overview](/pgd/5/routing/proxy) provides an overview of the PGD Proxy, its processes, and how it interacts with the EDB Postgres Distributed cluster.
----
-> * [PGD Proxy overview](/pgd/latest/routing/proxy) provides an overview of the PGD Proxy, its processes, and how it interacts with the EDB Postgres Distributed cluster.
-20c20
-< * [Installing the PGD Proxy service](/pgd/5/routing/installing_proxy) covers installation of the PGD Proxy service on a host.
----
-> * [Installing the PGD Proxy service](/pgd/latest/routing/installing_proxy) covers installation of the PGD Proxy service on a host.
-22c22
-< * [Configuring PGD Proxy](/pgd/5/routing/configuration) details the three levels (group, node, and proxy) of configuration on a cluster that control how the PGD Proxy service behaves.
----
-> * [Configuring PGD Proxy](/pgd/latest/routing/configuration) details the three levels (group, node, and proxy) of configuration on a cluster that control how the PGD Proxy service behaves.
-24c24
-< * [Administering PGD Proxy](/pgd/5/routing/administering) shows how to switch the write leader and manage the PGD Proxy.
----
-> * [Administering PGD Proxy](/pgd/latest/routing/administering) shows how to switch the write leader and manage the PGD Proxy.
-26c26
-< * [Monitoring PGD Proxy](/pgd/5/routing/monitoring) looks at how to monitor PGD Proxy through the cluster and at a service level.
----
-> * [Monitoring PGD Proxy](/pgd/latest/routing/monitoring) looks at how to monitor PGD Proxy through the cluster and at a service level.
-28c28
-< * [Read-only routing](/pgd/5/routing/readonly) explains how the read-only routing feature in PGD Proxy enables read scalability.
----
-> * [Read-only routing](/pgd/latest/routing/readonly) explains how the read-only routing feature in PGD Proxy enables read scalability.
-30c30
-< * [Raft](/pgd/5/routing/raft) provides an overview of the Raft consensus mechanism used to coordinate PGD Proxy.
----
-> * [Raft](/pgd/latest/routing/raft) provides an overview of the Raft consensus mechanism used to coordinate PGD Proxy.
-diff -r 5/routing/monitoring.mdx 5.6/routing/monitoring.mdx
-12c12
-< The current configuration of every group is visible in the [`bdr.node_group_routing_config_summary`](/pgd/5/reference/catalogs-internal#bdrnode_group_routing_config_summary) view.
----
-> The current configuration of every group is visible in the [`bdr.node_group_routing_config_summary`](/pgd/latest/reference/catalogs-internal#bdrnode_group_routing_config_summary) view.
-14c14
-< The [`bdr.node_routing_config_summary`](/pgd/5/reference/catalogs-internal#bdrnode_routing_config_summary) view shows current per-node routing configuration.
----
-> The [`bdr.node_routing_config_summary`](/pgd/latest/reference/catalogs-internal#bdrnode_routing_config_summary) view shows current per-node routing configuration.
-16c16
-< [`bdr.proxy_config_summary`](/pgd/5/reference/catalogs-internal#bdrproxy_config_summary) shows per-proxy configuration.
----
-> [`bdr.proxy_config_summary`](/pgd/latest/reference/catalogs-internal#bdrproxy_config_summary) shows per-proxy configuration.
-diff -r 5/routing/proxy.mdx 5.6/routing/proxy.mdx
-72c72
-< The endpoints given in the config file are used only at startup. After that, actual endpoints are taken from the PGD catalog's `route_dsn` field in [`bdr.node_routing_config_summary`](/pgd/5/reference/catalogs-internal#bdrnode_routing_config_summary).
----
-> The endpoints given in the config file are used only at startup. After that, actual endpoints are taken from the PGD catalog's `route_dsn` field in [`bdr.node_routing_config_summary`](/pgd/latest/reference/catalogs-internal#bdrnode_routing_config_summary).
-80c80
-< User-controlled, manual transfer of write leadership from the current write leader to a new target leader is called *switchover*. Switchover is triggered through the [PGD CLI switchover](../cli/command_ref/pgd_switchover) command. The command is submitted to PGD, which attempts to elect the given target node as the new write leader. Similar to failover, PGD Proxy closes existing client connections and redirects new client connections to the newly elected write leader. This is useful during server maintenance, for example, if the current write leader node needs to be stopped for maintenance like a server update or OS patch update.
----
-> User-controlled, manual transfer of write leadership from the current write leader to a new target leader is called *switchover*. Switchover is triggered through the [PGD CLI group set leader](../cli/command_ref/pgd_switchover) command. The command is submitted to PGD, which attempts to elect the given target node as the new write leader. Similar to failover, PGD Proxy closes existing client connections and redirects new client connections to the newly elected write leader. This is useful during server maintenance, for example, if the current write leader node needs to be stopped for maintenance like a server update or OS patch update.
-diff -r 5/scaling.mdx 5.6/scaling.mdx
-3c3
-< description: How to use autopartioning in PGD to split tables into several partitions.
----
-> description: How to use autopartitioning in PGD to split tables into several partitions.
-22c22
-< PGD AutoPartition uses the [`bdr.autopartition()`](/pgd/5/reference/autopartition#bdrautopartition)
----
-> PGD AutoPartition uses the [`bdr.autopartition()`](/pgd/latest/reference/autopartition#bdrautopartition)
-45c45
-< [`bdr.conflict_history`](/pgd/5/reference/catalogs-visible#bdrconflict_history)
----
-> [`bdr.conflict_history`](/pgd/latest/reference/catalogs-visible#bdrconflict_history)
-148c148
-< [`bdr.drop_autopartition()`](/pgd/5/reference/autopartition#bdrdrop_autopartition)
----
-> [`bdr.drop_autopartition()`](/pgd/latest/reference/autopartition#bdrdrop_autopartition)
-158c158
-< [`bdr.autopartition_wait_for_partitions()`](/pgd/5/reference/autopartition#bdrautopartition_wait_for_partitions)
----
-> [`bdr.autopartition_wait_for_partitions()`](/pgd/latest/reference/autopartition#bdrautopartition_wait_for_partitions)
-167c167
-< [`bdr.autopartition_wait_for_partitions_on_all_nodes()`](/pgd/5/reference/autopartition#bdrautopartition_wait_for_partitions_on_all_nodes)
----
-> [`bdr.autopartition_wait_for_partitions_on_all_nodes()`](/pgd/latest/reference/autopartition#bdrautopartition_wait_for_partitions_on_all_nodes)
-174c174
-< [`bdr.autopartition_find_partition()`](/pgd/5/reference/autopartition#bdrautopartition_find_partition)
----
-> [`bdr.autopartition_find_partition()`](/pgd/latest/reference/autopartition#bdrautopartition_find_partition)
-182c182
-< [`bdr.autopartition_enable()`](/pgd/5/reference/autopartition#bdrautopartition_enable)
----
-> [`bdr.autopartition_enable()`](/pgd/latest/reference/autopartition#bdrautopartition_enable)
-185c185
-< [`bdr.autopartition_disable()`](/pgd/5/reference/autopartition#bdrautopartition_disable)
----
-> [`bdr.autopartition_disable()`](/pgd/latest/reference/autopartition#bdrautopartition_disable)
-186a187,204
->
-> ## Restrictions on EDB Postgres Advanced Server-native automatic partitioning
->
-> EDB Postgres Advanced Server-native automatic partitioning is not supported in PGD.
->
-> If the PGD extension is active on an EDB Postgres Advanced Server database, DDL commands to configure
-> EDB Postgres Advanced Server automatic partitioning (`ALTER TABLE ... SET AUTOMATIC` and `ALTER TABLE ... SET INTERVAL`)
-> are rejected.
->
-> While it's possible to enable the PGD extension on an EDB Postgres Advanced Server database
-> containing tables configured to use EDB Postgres Advanced Server-native automatic partitioning, it
-> isn't possible to join more nodes using this node as a source node.
->
-> You can disable EDB Postgres Advanced Server-native automatic partitioning with one of the following
-> commands:
->
-> - `ALTER TABLE ... SET MANUAL` (for list partitioned tables)
-> - `ALTER TABLE ... SET INTERVAL ()` (for interval partitioned tables)
-diff -r 5/security/pgd-predefined-roles.mdx 5.6/security/pgd-predefined-roles.mdx
-18a19
->
-27,47c28,48
-< - [`bdr.autopartition_partitions`](/pgd/5/reference/catalogs-internal#bdrautopartition_partitions)
-< - [`bdr.autopartition_rules`](/pgd/5/reference/catalogs-internal#bdrautopartition_rules)
-< - [`bdr.ddl_epoch`](/pgd/5/reference/catalogs-internal#bdrddl_epoch)
-< - [`bdr.ddl_replication`](/pgd/5/reference/pgd-settings#bdrddl_replication)
-< - [`bdr.global_consensus_journal_details`](/pgd/5/reference/catalogs-visible#bdrglobal_consensus_journal_details)
-< - [`bdr.global_lock`](/pgd/5/reference/catalogs-visible#bdrglobal_lock)
-< - [`bdr.global_locks`](/pgd/5/reference/catalogs-visible#bdrglobal_locks)
-< - [`bdr.group_camo_details`](/pgd/5/reference/catalogs-visible#bdrgroup_camo_details)
-< - [`bdr.local_consensus_state`](/pgd/5/reference/catalogs-visible#bdrlocal_consensus_state)
-< - [`bdr.local_node_summary`](/pgd/5/reference/catalogs-visible#bdrlocal_node_summary)
-< - [`bdr.node`](/pgd/5/reference/catalogs-visible#bdrnode)
-< - [`bdr.node_catchup_info`](/pgd/5/reference/catalogs-visible#bdrnode_catchup_info)
-< - [`bdr.node_catchup_info_details`](/pgd/5/reference/catalogs-visible#bdrnode_catchup_info_details)
-< - [`bdr.node_conflict_resolvers`](/pgd/5/reference/catalogs-visible#bdrnode_conflict_resolvers)
-< - [`bdr.node_group`](/pgd/5/reference/catalogs-visible#bdrnode_group)
-< - [`bdr.node_local_info`](/pgd/5/reference/catalogs-visible#bdrnode_local_info)
-< - [`bdr.node_peer_progress`](/pgd/5/reference/catalogs-visible#bdrnode_peer_progress)
-< - [`bdr.node_replication_rates`](/pgd/5/reference/catalogs-visible#bdrnode_replication_rates)
-< - [`bdr.node_slots`](/pgd/5/reference/catalogs-visible#bdrnode_slots)
-< - [`bdr.node_summary`](/pgd/5/reference/catalogs-visible#bdrnode_summary)
-< - [`bdr.replication_sets`](/pgd/5/reference/catalogs-visible#bdrreplication_sets)
----
-> - [`bdr.autopartition_partitions`](/pgd/latest/reference/catalogs-internal#bdrautopartition_partitions)
-> - [`bdr.autopartition_rules`](/pgd/latest/reference/catalogs-internal#bdrautopartition_rules)
-> - [`bdr.ddl_epoch`](/pgd/latest/reference/catalogs-internal#bdrddl_epoch)
-> - [`bdr.ddl_replication`](/pgd/latest/reference/pgd-settings#bdrddl_replication)
-> - [`bdr.global_consensus_journal_details`](/pgd/latest/reference/catalogs-visible#bdrglobal_consensus_journal_details)
-> - [`bdr.global_lock`](/pgd/latest/reference/catalogs-visible#bdrglobal_lock)
-> - [`bdr.global_locks`](/pgd/latest/reference/catalogs-visible#bdrglobal_locks)
-> - [`bdr.group_camo_details`](/pgd/latest/reference/catalogs-visible#bdrgroup_camo_details)
-> - [`bdr.local_consensus_state`](/pgd/latest/reference/catalogs-visible#bdrlocal_consensus_state)
-> - [`bdr.local_node_summary`](/pgd/latest/reference/catalogs-visible#bdrlocal_node_summary)
-> - [`bdr.node`](/pgd/latest/reference/catalogs-visible#bdrnode)
-> - [`bdr.node_catchup_info`](/pgd/latest/reference/catalogs-visible#bdrnode_catchup_info)
-> - [`bdr.node_catchup_info_details`](/pgd/latest/reference/catalogs-visible#bdrnode_catchup_info_details)
-> - [`bdr.node_conflict_resolvers`](/pgd/latest/reference/catalogs-visible#bdrnode_conflict_resolvers)
-> - [`bdr.node_group`](/pgd/latest/reference/catalogs-visible#bdrnode_group)
-> - [`bdr.node_local_info`](/pgd/latest/reference/catalogs-visible#bdrnode_local_info)
-> - [`bdr.node_peer_progress`](/pgd/latest/reference/catalogs-visible#bdrnode_peer_progress)
-> - [`bdr.node_replication_rates`](/pgd/latest/reference/catalogs-visible#bdrnode_replication_rates)
-> - [`bdr.node_slots`](/pgd/latest/reference/catalogs-visible#bdrnode_slots)
-> - [`bdr.node_summary`](/pgd/latest/reference/catalogs-visible#bdrnode_summary)
-> - [`bdr.replication_sets`](/pgd/latest/reference/catalogs-visible#bdrreplication_sets)
-49,61c50,62
-< - [`bdr.sequences`](/pgd/5/reference/catalogs-visible#bdrsequences)
-< - [`bdr.stat_activity`](/pgd/5/reference/catalogs-visible#bdrstat_activity)
-< - [`bdr.stat_relation`](/pgd/5/reference/catalogs-visible#bdrstat_relation)
-< - [`bdr.stat_subscription`](/pgd/5/reference/catalogs-visible#bdrstat_subscription) _deprecated_
-< - [`bdr.state_journal_details`](/pgd/5/reference/catalogs-visible#)
-< - [`bdr.subscription`](/pgd/5/reference/catalogs-visible#bdrsubscription)
-< - [`bdr.subscription_summary`](/pgd/5/reference/catalogs-visible#bdrsubscription_summary)
-< - [`bdr.tables`](/pgd/5/reference/catalogs-visible#bdrtables)
-< - [`bdr.taskmgr_local_work_queue`](/pgd/5/reference/catalogs-visible#bdrtaskmgr_local_work_queue)
-< - [`bdr.taskmgr_work_queue`](/pgd/5/reference/catalogs-visible#bdrtaskmgr_work_queue)
-< - [`bdr.worker_errors`](/pgd/5/reference/catalogs-visible#) _deprecated_
-< - [`bdr.workers`](/pgd/5/reference/catalogs-visible#bdrworkers)
-< - [`bdr.writers`](/pgd/5/reference/catalogs-visible#bdrwriters)
----
-> - [`bdr.sequences`](/pgd/latest/reference/catalogs-visible#bdrsequences)
-> - [`bdr.stat_activity`](/pgd/latest/reference/catalogs-visible#bdrstat_activity)
-> - [`bdr.stat_relation`](/pgd/latest/reference/catalogs-visible#bdrstat_relation)
-> - [`bdr.stat_subscription`](/pgd/latest/reference/catalogs-visible#bdrstat_subscription) _deprecated_
-> - [`bdr.state_journal_details`](/pgd/latest/reference/catalogs-visible#)
-> - [`bdr.subscription`](/pgd/latest/reference/catalogs-visible#bdrsubscription)
-> - [`bdr.subscription_summary`](/pgd/latest/reference/catalogs-visible#bdrsubscription_summary)
-> - [`bdr.tables`](/pgd/latest/reference/catalogs-visible#bdrtables)
-> - [`bdr.taskmgr_local_work_queue`](/pgd/latest/reference/catalogs-visible#bdrtaskmgr_local_work_queue)
-> - [`bdr.taskmgr_work_queue`](/pgd/latest/reference/catalogs-visible#bdrtaskmgr_work_queue)
-> - [`bdr.worker_errors`](/pgd/latest/reference/catalogs-visible#) _deprecated_
-> - [`bdr.workers`](/pgd/latest/reference/catalogs-visible#bdrworkers)
-> - [`bdr.writers`](/pgd/latest/reference/catalogs-visible#bdrwriters)
-67,77c68,78
-< - [`bdr.bdr_version`](/pgd/5/reference/functions#bdrbdr_version)
-< - [`bdr.bdr_version_num`](/pgd/5/reference/functions#bdrbdr_version_num)
-< - [`bdr.decode_message_payload`](/pgd/5/reference/functions-internal#bdrdecode_message_payload)
-< - [`bdr.get_consensus_status`](/pgd/5/reference/functions#bdrget_consensus_status)
-< - [`bdr.get_decoding_worker_stat`](/pgd/5/reference/functions#bdrget_decoding_worker_stat)
-< - [`bdr.get_global_locks`](/pgd/5/reference/functions-internal#bdrget_global_locks)
-< - [`bdr.get_min_required_replication_slots`](/pgd/5/reference/functions-internal#bdrget_min_required_replication_slots)
-< - [`bdr.get_min_required_worker_processes`](/pgd/5/reference/functions-internal#bdrget_min_required_worker_processes)
-< - [`bdr.get_raft_status`](/pgd/5/reference/functions#bdrget_raft_status)
-< - [`bdr.get_relation_stats`](/pgd/5/reference/functions#bdrget_relation_stats)
-< - [`bdr.get_slot_flush_timestamp`](/pgd/5/reference/functions-internal#bdrget_slot_flush_timestamp)
----
-> - [`bdr.bdr_version`](/pgd/latest/reference/functions#bdrbdr_version)
-> - [`bdr.bdr_version_num`](/pgd/latest/reference/functions#bdrbdr_version_num)
-> - [`bdr.decode_message_payload`](/pgd/latest/reference/functions-internal#bdrdecode_message_payload)
-> - [`bdr.get_consensus_status`](/pgd/latest/reference/functions#bdrget_consensus_status)
-> - [`bdr.get_decoding_worker_stat`](/pgd/latest/reference/functions#bdrget_decoding_worker_stat)
-> - [`bdr.get_global_locks`](/pgd/latest/reference/functions-internal#bdrget_global_locks)
-> - [`bdr.get_min_required_replication_slots`](/pgd/latest/reference/functions-internal#bdrget_min_required_replication_slots)
-> - [`bdr.get_min_required_worker_processes`](/pgd/latest/reference/functions-internal#bdrget_min_required_worker_processes)
-> - [`bdr.get_raft_status`](/pgd/latest/reference/functions#bdrget_raft_status)
-> - [`bdr.get_relation_stats`](/pgd/latest/reference/functions#bdrget_relation_stats)
-> - [`bdr.get_slot_flush_timestamp`](/pgd/latest/reference/functions-internal#bdrget_slot_flush_timestamp)
-79,91c80,92
-< - [`bdr.get_subscription_stats`](/pgd/5/reference/functions#bdrget_subscription_stats)
-< - [`bdr.lag_control`](/pgd/5/reference/functions#bdrlag_control)
-< - [`bdr.lag_history`](/pgd/5/reference/functions-internal#bdrlag_history)
-< - [`bdr.node_catchup_state_name`](/pgd/5/reference/functions-internal#bdrnode_catchup_state_name)
-< - [`bdr.node_kind_name`](/pgd/5/reference/functions-internal#bdrnode_kind_name)
-< - [`bdr.peer_state_name`](/pgd/5/reference/functions-internal#bdrpeer_state_name)
-< - [`bdr.pglogical_proto_version_ranges`](/pgd/5/reference/functions-internal#bdrpglogical_proto_version_ranges)
-< - [`bdr.show_subscription_status`](/pgd/5/reference/functions-internal#bdrshow_subscription_status)
-< - [`bdr.show_workers`](/pgd/5/reference/functions-internal#bdrshow_workers)
-< - [`bdr.show_writers`](/pgd/5/reference/functions-internal#bdrshow_writers)
-< - [`bdr.stat_get_activity`](/pgd/5/reference/functions-internal#bdrstat_get_activity)
-< - [`bdr.wal_sender_stats`](/pgd/5/reference/functions#bdrwal_sender_stats)
-< - [`bdr.worker_role_id_name`](/pgd/5/reference/functions-internal#bdrworker_role_id_name)
----
-> - [`bdr.get_subscription_stats`](/pgd/latest/reference/functions#bdrget_subscription_stats)
-> - [`bdr.lag_control`](/pgd/latest/reference/functions#bdrlag_control)
-> - [`bdr.lag_history`](/pgd/latest/reference/functions-internal#bdrlag_history)
-> - [`bdr.node_catchup_state_name`](/pgd/latest/reference/functions-internal#bdrnode_catchup_state_name)
-> - [`bdr.node_kind_name`](/pgd/latest/reference/functions-internal#bdrnode_kind_name)
-> - [`bdr.peer_state_name`](/pgd/latest/reference/functions-internal#bdrpeer_state_name)
-> - [`bdr.pglogical_proto_version_ranges`](/pgd/latest/reference/functions-internal#bdrpglogical_proto_version_ranges)
-> - [`bdr.show_subscription_status`](/pgd/latest/reference/functions-internal#bdrshow_subscription_status)
-> - [`bdr.show_workers`](/pgd/latest/reference/functions-internal#bdrshow_workers)
-> - [`bdr.show_writers`](/pgd/latest/reference/functions-internal#bdrshow_writers)
-> - [`bdr.stat_get_activity`](/pgd/latest/reference/functions-internal#bdrstat_get_activity)
-> - [`bdr.wal_sender_stats`](/pgd/latest/reference/functions#bdrwal_sender_stats)
-> - [`bdr.worker_role_id_name`](/pgd/latest/reference/functions-internal#bdrworker_role_id_name)
-103,106c104,107
-< - [`bdr.group_raft_details`](/pgd/5/reference/catalogs-visible#bdrgroup_raft_details)
-< - [`bdr.group_replslots_details`](/pgd/5/reference/catalogs-visible#bdrgroup_replslots_details)
-< - [`bdr.group_subscription_summary`](/pgd/5/reference/catalogs-visible#bdrgroup_subscription_summary)
-< - [`bdr.group_versions_details`](/pgd/5/reference/catalogs-visible#bdrgroup_versions_details)
----
-> - [`bdr.group_raft_details`](/pgd/latest/reference/catalogs-visible#bdrgroup_raft_details)
-> - [`bdr.group_replslots_details`](/pgd/latest/reference/catalogs-visible#bdrgroup_replslots_details)
-> - [`bdr.group_subscription_summary`](/pgd/latest/reference/catalogs-visible#bdrgroup_subscription_summary)
-> - [`bdr.group_versions_details`](/pgd/latest/reference/catalogs-visible#bdrgroup_versions_details)
-111,120c112,121
-< - [`bdr.get_raft_instance_by_nodegroup`](/pgd/5/reference/functions-internal#bdrget_raft_instance_by_nodegroup)
-< - [`bdr.monitor_camo_on_all_nodes`](/pgd/5/reference/functions-internal#bdrmonitor_camo_on_all_nodes)
-< - [`bdr.monitor_group_raft`](/pgd/5/reference/functions#bdrmonitor_group_raft)
-< - [`bdr.monitor_group_versions`](/pgd/5/reference/functions#bdrmonitor_group_versions)
-< - [`bdr.monitor_local_replslots`](/pgd/5/reference/functions#bdrmonitor_local_replslots)
-< - [`bdr.monitor_raft_details_on_all_nodes`](/pgd/5/reference/functions-internal#bdrmonitor_raft_details_on_all_nodes)
-< - [`bdr.monitor_replslots_details_on_all_nodes`](/pgd/5/reference/functions-internal#bdrmonitor_replslots_details_on_all_nodes)
-< - [`bdr.monitor_subscription_details_on_all_nodes`](/pgd/5/reference/functions-internal#bdrmonitor_subscription_details_on_all_nodes)
-< - [`bdr.monitor_version_details_on_all_nodes`](/pgd/5/reference/functions-internal#bdrmonitor_version_details_on_all_nodes)
-< - [`bdr.node_group_member_info`](/pgd/5/reference/functions-internal#bdrnode_group_member_info)
----
-> - [`bdr.get_raft_instance_by_nodegroup`](/pgd/latest/reference/functions-internal#bdrget_raft_instance_by_nodegroup)
-> - [`bdr.monitor_camo_on_all_nodes`](/pgd/latest/reference/functions-internal#bdrmonitor_camo_on_all_nodes)
-> - [`bdr.monitor_group_raft`](/pgd/latest/reference/functions#bdrmonitor_group_raft)
-> - [`bdr.monitor_group_versions`](/pgd/latest/reference/functions#bdrmonitor_group_versions)
-> - [`bdr.monitor_local_replslots`](/pgd/latest/reference/functions#bdrmonitor_local_replslots)
-> - [`bdr.monitor_raft_details_on_all_nodes`](/pgd/latest/reference/functions-internal#bdrmonitor_raft_details_on_all_nodes)
-> - [`bdr.monitor_replslots_details_on_all_nodes`](/pgd/latest/reference/functions-internal#bdrmonitor_replslots_details_on_all_nodes)
-> - [`bdr.monitor_subscription_details_on_all_nodes`](/pgd/latest/reference/functions-internal#bdrmonitor_subscription_details_on_all_nodes)
-> - [`bdr.monitor_version_details_on_all_nodes`](/pgd/latest/reference/functions-internal#bdrmonitor_version_details_on_all_nodes)
-> - [`bdr.node_group_member_info`](/pgd/latest/reference/functions-internal#bdrnode_group_member_info)
-132,140c133,141
-< - [`bdr.alter_sequence_set_kind`](/pgd/5/reference/sequences#bdralter_sequence_set_kind)
-< - [`bdr.create_conflict_trigger`](/pgd/5/reference/streamtriggers/interfaces#bdrcreate_conflict_trigger)
-< - [`bdr.create_transform_trigger`](/pgd/5/reference/streamtriggers/interfaces#bdrcreate_transform_trigger)
-< - [`bdr.drop_trigger`](/pgd/5/reference/streamtriggers/interfaces#bdrdrop_trigger)
-< - [`bdr.get_configured_camo_partner`](/pgd/5/reference/functions#bdrget_configured_camo_partner)
-< - [`bdr.global_lock_table`](/pgd/5/reference/functions#bdrglobal_lock_table)
-< - [`bdr.is_camo_partner_connected`](/pgd/5/reference/functions#bdris_camo_partner_connected)
-< - [`bdr.is_camo_partner_ready`](/pgd/5/reference/functions#bdris_camo_partner_ready)
-< - [`bdr.logical_transaction_status`](/pgd/5/reference/functions#bdrlogical_transaction_status)
----
-> - [`bdr.alter_sequence_set_kind`](/pgd/latest/reference/sequences#bdralter_sequence_set_kind)
-> - [`bdr.create_conflict_trigger`](/pgd/latest/reference/streamtriggers/interfaces#bdrcreate_conflict_trigger)
-> - [`bdr.create_transform_trigger`](/pgd/latest/reference/streamtriggers/interfaces#bdrcreate_transform_trigger)
-> - [`bdr.drop_trigger`](/pgd/latest/reference/streamtriggers/interfaces#bdrdrop_trigger)
-> - [`bdr.get_configured_camo_partner`](/pgd/latest/reference/functions#bdrget_configured_camo_partner)
-> - [`bdr.global_lock_table`](/pgd/latest/reference/functions#bdrglobal_lock_table)
-> - [`bdr.is_camo_partner_connected`](/pgd/latest/reference/functions#bdris_camo_partner_connected)
-> - [`bdr.is_camo_partner_ready`](/pgd/latest/reference/functions#bdris_camo_partner_ready)
-> - [`bdr.logical_transaction_status`](/pgd/latest/reference/functions#bdrlogical_transaction_status)
-142,152c143,154
-< - [`bdr.seq_nextval`](/pgd/5/reference/functions-internal#bdrseq_nextval)
-< - [`bdr.seq_currval`](/pgd/5/reference/functions-internal#bdrseq_currval)
-< - [`bdr.seq_lastval`](/pgd/5/reference/functions-internal#bdrseq_lastval)
-< - [`bdr.trigger_get_committs`](/pgd/5/reference/streamtriggers/rowfunctions#bdrtrigger_get_committs)
-< - [`bdr.trigger_get_conflict_type`](/pgd/5/reference/streamtriggers/rowfunctions#bdrtrigger_get_conflict_type)
-< - [`bdr.trigger_get_origin_node_id`](/pgd/5/reference/streamtriggers/rowfunctions#bdrtrigger_get_origin_node_id)
-< - [`bdr.trigger_get_row`](/pgd/5/reference/streamtriggers/rowfunctions#bdrtrigger_get_row)
-< - [`bdr.trigger_get_type`](/pgd/5/reference/streamtriggers/rowfunctions#bdrtrigger_get_type)
-< - [`bdr.trigger_get_xid`](/pgd/5/reference/streamtriggers/rowfunctions#bdrtrigger_get_xid)
-< - [`bdr.wait_for_camo_partner_queue`](/pgd/5/reference/functions#bdrwait_for_camo_partner_queue)
-< - [`bdr.wait_slot_confirm_lsn`](/pgd/5/reference/functions#bdrwait_slot_confirm_lsn)
----
-> - [`bdr.seq_nextval`](/pgd/latest/reference/functions-internal#bdrseq_nextval)
-> - [`bdr.seq_currval`](/pgd/latest/reference/functions-internal#bdrseq_currval)
-> - [`bdr.seq_lastval`](/pgd/latest/reference/functions-internal#bdrseq_lastval)
-> - [`bdr.trigger_get_committs`](/pgd/latest/reference/streamtriggers/rowfunctions#bdrtrigger_get_committs)
-> - [`bdr.trigger_get_conflict_type`](/pgd/latest/reference/streamtriggers/rowfunctions#bdrtrigger_get_conflict_type)
-> - [`bdr.trigger_get_origin_node_id`](/pgd/latest/reference/streamtriggers/rowfunctions#bdrtrigger_get_origin_node_id)
-> - [`bdr.trigger_get_row`](/pgd/latest/reference/streamtriggers/rowfunctions#bdrtrigger_get_row)
-> - [`bdr.trigger_get_type`](/pgd/latest/reference/streamtriggers/rowfunctions#bdrtrigger_get_type)
-> - [`bdr.trigger_get_xid`](/pgd/latest/reference/streamtriggers/rowfunctions#bdrtrigger_get_xid)
-> - [`bdr.wait_for_camo_partner_queue`](/pgd/latest/reference/functions#bdrwait_for_camo_partner_queue)
-> - [`bdr.wait_slot_confirm_lsn`](/pgd/latest/reference/functions#bdrwait_slot_confirm_lsn)
-> - [`bdr.wait_node_confirm_lsn`](/pgd/latest/reference/functions#bdrwait_node_confirm_lsn)
-162c164
-< [`bdr.conflict_history`](/pgd/5/reference/catalogs-visible#bdrconflict_history)
----
-> [`bdr.conflict_history`](/pgd/latest/reference/catalogs-visible#bdrconflict_history)
-171c173
-< An explicit policy is set on [`bdr.conflict_history`](/pgd/5/reference/catalogs-visible#bdrconflict_history) that allows this role to read the `bdr.conflict_history` table.
----
-> An explicit policy is set on [`bdr.conflict_history`](/pgd/latest/reference/catalogs-visible#bdrconflict_history) that allows this role to read the `bdr.conflict_history` table.
-diff -r 5/security/role-management.mdx 5.6/security/role-management.mdx
-15c15
-< You can disable this automatic replication behavior by turning off the [`bdr.role_replication`](https://www.enterprisedb.com/docs/pgd/5/reference/pgd-settings/#bdrrole_replication) setting, but we don't recommend that.
----
-> You can disable this automatic replication behavior by turning off the [`bdr.role_replication`](https://www.enterprisedb.com/docs/pgd/latest/reference/pgd-settings/#bdrrole_replication) setting, but we don't recommend that.
-20c20
-< New PGD nodes that are added using [`bdr_init_physical`](https://www.enterprisedb.com/docs/pgd/5/reference/nodes/#bdr_init_physical) will automatically replicate the roles from other nodes of the PGD cluster.
----
-> New PGD nodes that are added using [`bdr_init_physical`](https://www.enterprisedb.com/docs/pgd/latest/reference/nodes/#bdr_init_physical) will automatically replicate the roles from other nodes of the PGD cluster.
-40c40
-< argument of [`bdr.create_node`](/pgd/5/reference/nodes-management-interfaces#bdrcreate_node) and the `join_target_dsn` of [`bdr.join_node_group`](/pgd/5/reference/nodes-management-interfaces#bdrjoin_node_group)
----
-> argument of [`bdr.create_node`](/pgd/latest/reference/nodes-management-interfaces#bdrcreate_node) and the `join_target_dsn` of [`bdr.join_node_group`](/pgd/latest/reference/nodes-management-interfaces#bdrjoin_node_group)
-diff -r 5/security/roles.mdx 5.6/security/roles.mdx
-15c15
-< | [**bdr_read_all_conflicts**](pgd-predefined-roles/#bdr_read_all_conflicts) | Can view all conflicts in [`bdr.conflict_history`](/pgd/5/reference/catalogs-visible#bdrconflict_history). |
----
-> | [**bdr_read_all_conflicts**](pgd-predefined-roles/#bdr_read_all_conflicts) | Can view all conflicts in [`bdr.conflict_history`](/pgd/latest/reference/catalogs-visible#bdrconflict_history). |
-28c28
-< [Logging conflicts to a table](/pgd/5/reference/conflict_functions#logging-conflicts-to-a-table).
----
-> [Logging conflicts to a table](/pgd/latest/reference/conflict_functions#logging-conflicts-to-a-table).
-30c30
-< You can monitor conflicts using the [`bdr.conflict_history_summary`](/pgd/5/reference/catalogs-visible#bdrconflict_history_summary) view.
----
-> You can monitor conflicts using the [`bdr.conflict_history_summary`](/pgd/latest/reference/catalogs-visible#bdrconflict_history_summary) view.
-diff -r 5/sequences.mdx 5.6/sequences.mdx
-69c69
-< PGD also provides the configuration variable [`bdr.default_sequence_kind`](/pgd/5/reference/pgd-settings/#bdrdefault_sequence_kind). This variable
----
-> PGD also provides the configuration variable [`bdr.default_sequence_kind`](/pgd/latest/reference/pgd-settings/#bdrdefault_sequence_kind). This variable
-87c87
-< The [`bdr.sequences`](/pgd/5/reference/catalogs-visible/#bdrsequences) view shows information about individual sequence kinds.
----
-> The [`bdr.sequences`](/pgd/latest/reference/catalogs-visible/#bdrsequences) view shows information about individual sequence kinds.
-223c223
-< special PGD catalog [bdr.sequence_alloc](/pgd/5/reference/catalogs-visible/#bdrsequence_alloc). This
----
-> special PGD catalog [bdr.sequence_alloc](/pgd/latest/reference/catalogs-visible/#bdrsequence_alloc). This
-diff -r 5/terminology.mdx 5.6/terminology.mdx
-30c30
-< Generically, a cluster is a group of multiple redundant systems arranged to appear to end users as one system. See also [PGD cluster](#pgd-cluster) and [Postgres cluster](#postgres-cluster).
----
-> Generically, a cluster is a group of multiple systems arranged to appear to end users as one system. See also [PGD cluster](#pgd-cluster) and [Postgres cluster](#postgres-cluster).
-diff -r 5/testingandtuning.mdx 5.6/testingandtuning.mdx
-36c36
-< [pgd_bench](/pgd/5/reference/testingandtuning#pgd_bench) is a regular command-line utility that's added to the PostgreSQL bin
----
-> [pgd_bench](/pgd/latest/reference/testingandtuning#pgd_bench) is a regular command-line utility that's added to the PostgreSQL bin
-diff -r 5/transaction-streaming.mdx 5.6/transaction-streaming.mdx
-59,60c59,60
-< - At node level, using the GUC [`bdr.default_streaming_mode`](/pgd/5/reference/pgd-settings/#transaction-streaming)
-< - At group level, using the function [`bdr.alter_node_group_config()`](/pgd/5/reference/nodes-management-interfaces/#bdralter_node_group_config)
----
-> - At node level, using the GUC [`bdr.default_streaming_mode`](/pgd/latest/reference/pgd-settings/#transaction-streaming)
-> - At group level, using the function [`bdr.alter_node_group_option()`](/pgd/latest/reference/nodes-management-interfaces/#bdralter_node_group_option)
-82c82
-< ### Group configuration using bdr.alter_node_group_config()
----
-> ### Group configuration using bdr.alter_node_group_option()
-84c84
-< You can use the parameter `streaming_mode` in the function [`bdr.alter_node_group_config()`](/pgd/5/reference/nodes-management-interfaces/#bdralter_node_group_config)
----
-> You can use the parameter `streaming_mode` in the function [`bdr.alter_node_group_option()`](/pgd/latest/reference/nodes-management-interfaces/#bdralter_node_group_option)
-98c98
-< from the view [`bdr.node_group`](/pgd/5/reference/catalogs-visible/#bdrnode_group). The value returned is
----
-> from the view [`bdr.node_group`](/pgd/latest/reference/catalogs-visible/#bdrnode_group). The value returned is
-154c154
-< You can monitor the use of transaction streaming using the [`bdr.stat_subscription`](/pgd/5/reference/catalogs-visible/#bdrstat_subscription)
----
-> You can monitor the use of transaction streaming using the [`bdr.stat_subscription`](/pgd/latest/reference/catalogs-visible/#bdrstat_subscription)
-diff -r 5/twophase.mdx 5.6/twophase.mdx
-11c11
-< Two-phase commit isn't available with Group Commit or CAMO. See [Durability limitations](durability/limitations).
----
-> Two-phase commit isn't available with Group Commit or CAMO. See [Commit scope limitations](commit-scopes/limitations).
-diff -r 5/upgrades/compatibility.mdx 5.6/upgrades/compatibility.mdx
-19c19
-< CAMO configuration is now done through [commit scopes](../durability/commit-scopes). The
----
-> CAMO configuration is now done through [commit scopes](../commit-scopes/commit-scopes). The
-29c29
-< behavior, use [Group Commit](../durability/group-commit).
----
-> behavior, use [Group Commit](../commit-scopes/group-commit).
-32c32
-< SELECT bdr.add_commit_scope(
----
-> SELECT bdr.create_commit_scope(
-46c46
-< [commit scopes](../durability/commit-scopes) for more flexible durability configuration.
----
-> [commit scopes](../commit-scopes/commit-scopes) for more flexible durability configuration.
-69c69
-< [Catalogs](/pgd/5/reference/catalogs-visible/). These
----
-> [Catalogs](/pgd/latest/reference/catalogs-visible/). These
-diff -r 5/upgrades/manual_overview.mdx 5.6/upgrades/manual_overview.mdx
-41c41
-< nodeB and nodeC have 5.5.1. In this state, the replication and group
----
-> nodeB and nodeC have 5.6.1. In this state, the replication and group
-49c49
-< [logical change records (LCRs)](../node_management/decoding_worker/#enabling) with a
----
-> [logical change records (LCRs)](../decoding_worker/#enabling) with a
-116c116
-< using the new [commit scope](../durability/commit-scopes)-based settings.
----
-> using the new [commit scope](../commit-scopes/commit-scopes)-based settings.
-131c131
-< 1. Create a [commit scope](../durability/commit-scopes) for this node
----
-> 1. Create a [commit scope](../commit-scopes/commit-scopes) for this node
-diff -r 5/upgrades/upgrade_paths.mdx 5.6/upgrades/upgrade_paths.mdx
-6a7,9
-> EDB Postgres Distributed uses [semantic versioning](https://semver.org/).
-> All changes within the same major version are backward compatible, lowering the risk when upgrading and allowing you to choose any later minor or patch release as the upgrade target.
->
-diff -r 5/upgrades/upgrading_major_rolling.mdx 5.6/upgrades/upgrading_major_rolling.mdx
-6,7c6,7
-< - /pgd/5/install-admin/admin-tpa/upgrading_major_rolling/ #generated for pgd deploy-config-planning reorg
-< - /pgd/5/admin-tpa/upgrading_major_rolling/ #generated for pgd deploy-config-planning reorg
----
-> - /pgd/latest/install-admin/admin-tpa/upgrading_major_rolling/ #generated for pgd deploy-config-planning reorg
-> - /pgd/latest/admin-tpa/upgrading_major_rolling/ #generated for pgd deploy-config-planning reorg
-172c172
-< This worked example starts with a TPA-managed PGD cluster deployed using the [AWS quick start](/pgd/5/quickstart/quick_start_aws/). The cluster has three nodes: kaboom, kaolin, and kaftan, all running Postgres 15.
----
-> This worked example starts with a TPA-managed PGD cluster deployed using the [AWS quick start](/pgd/latest/quickstart/quick_start_aws/). The cluster has three nodes: kaboom, kaolin, and kaftan, all running Postgres 15.