diff --git a/doc/sphinx/Clusters_from_Scratch/active-active.rst b/doc/sphinx/Clusters_from_Scratch/active-active.rst
index d12dfa47569..5ebc214095f 100644
--- a/doc/sphinx/Clusters_from_Scratch/active-active.rst
+++ b/doc/sphinx/Clusters_from_Scratch/active-active.rst
@@ -4,6 +4,13 @@
Convert Storage to Active/Active
--------------------------------
+.. NOTE::
+
+ GFS2 is not available in a package repo for |CFS_DISTRO| |CFS_DISTRO_VER|.
+   It can still be built from source, but doing so is beyond the scope of this
+   document.
+ The following instructions are still useful for older distributions or
+ for installation from source. They have been updated where possible.
+
The primary requirement for an active/active cluster is that the data
required for your services is available, simultaneously, on both
machines. Pacemaker makes no requirement on how this is achieved; you
@@ -68,38 +75,80 @@ Activate our new configuration, and see how the cluster responds:
* Started: [ pcmk-1 pcmk-2 ]
[root@pcmk-1 ~]# pcs resource config
Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: cidr_netmask=24 ip=192.168.122.120
- Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
- start interval=0s timeout=20s (ClusterIP-start-interval-0s)
- stop interval=0s timeout=20s (ClusterIP-stop-interval-0s)
+ Attributes: ClusterIP-instance_attributes
+ cidr_netmask=24
+ ip=192.168.122.120
+ nic=enp1s0
+ Operations:
+ monitor: ClusterIP-monitor-interval-30s
+ interval=30s
+ start: ClusterIP-start-interval-0s
+ interval=0s timeout=20s
+ stop: ClusterIP-stop-interval-0s
+ interval=0s timeout=20s
Resource: WebSite (class=ocf provider=heartbeat type=apache)
- Attributes: configfile=/etc/httpd/conf/httpd.conf statusurl=http://localhost/server-status
- Operations: monitor interval=1min (WebSite-monitor-interval-1min)
- start interval=0s timeout=40s (WebSite-start-interval-0s)
- stop interval=0s timeout=60s (WebSite-stop-interval-0s)
+ Attributes: WebSite-instance_attributes
+ configfile=/etc/httpd/conf/httpd.conf
+ statusurl=http://localhost/server-status
+ Operations:
+ monitor: WebSite-monitor-interval-1min
+ interval=1min
+ start: WebSite-start-interval-0s
+ interval=0s timeout=40s
+ stop: WebSite-stop-interval-0s
+ interval=0s timeout=60s
+ Resource: WebFS (class=ocf provider=heartbeat type=Filesystem)
+ Attributes: WebFS-instance_attributes
+ device=/dev/drbd1
+ directory=/var/www/html
+ fstype=xfs
+ Operations:
+ monitor: WebFS-monitor-interval-20s
+ interval=20s timeout=40s
+ start: WebFS-start-interval-0s
+ interval=0s timeout=60s
+ stop: WebFS-stop-interval-0s
+ interval=0s timeout=60s
Clone: WebData-clone
- Meta Attrs: clone-max=2 clone-node-max=1 notify=true promotable=true promoted-max=1 promoted-node-max=1
+ Meta Attributes: WebData-clone-meta_attributes
+ clone-max=2
+ clone-node-max=1
+ notify=true
+ promotable=true
+ promoted-max=1
+ promoted-node-max=1
Resource: WebData (class=ocf provider=linbit type=drbd)
- Attributes: drbd_resource=wwwdata
- Operations: demote interval=0s timeout=90 (WebData-demote-interval-0s)
- monitor interval=29s role=Promoted (WebData-monitor-interval-29s)
- monitor interval=31s role=Unpromoted (WebData-monitor-interval-31s)
- notify interval=0s timeout=90 (WebData-notify-interval-0s)
- promote interval=0s timeout=90 (WebData-promote-interval-0s)
- reload interval=0s timeout=30 (WebData-reload-interval-0s)
- start interval=0s timeout=240 (WebData-start-interval-0s)
- stop interval=0s timeout=100 (WebData-stop-interval-0s)
- Resource: WebFS (class=ocf provider=heartbeat type=Filesystem)
- Attributes: device=/dev/drbd1 directory=/var/www/html fstype=xfs
- Operations: monitor interval=20s timeout=40s (WebFS-monitor-interval-20s)
- start interval=0s timeout=60s (WebFS-start-interval-0s)
- stop interval=0s timeout=60s (WebFS-stop-interval-0s)
+ Attributes: WebData-instance_attributes
+ drbd_resource=wwwdata
+ Operations:
+ demote: WebData-demote-interval-0s
+ interval=0s timeout=90
+ monitor: WebData-monitor-interval-29s
+ interval=29s role=Promoted
+ monitor: WebData-monitor-interval-31s
+ interval=31s role=Unpromoted
+ notify: WebData-notify-interval-0s
+ interval=0s timeout=90
+ promote: WebData-promote-interval-0s
+ interval=0s timeout=90
+ reload: WebData-reload-interval-0s
+ interval=0s timeout=30
+ start: WebData-start-interval-0s
+ interval=0s timeout=240
+ stop: WebData-stop-interval-0s
+ interval=0s timeout=100
Clone: dlm-clone
- Meta Attrs: interleave=true ordered=true
+ Meta Attributes: dlm-clone-meta_attributes
+ interleave=true
+ ordered=true
Resource: dlm (class=ocf provider=pacemaker type=controld)
- Operations: monitor interval=60s (dlm-monitor-interval-60s)
- start interval=0s timeout=90s (dlm-start-interval-0s)
- stop interval=0s timeout=100s (dlm-stop-interval-0s)
+ Operations:
+ monitor: dlm-monitor-interval-60s
+ interval=60s
+ start: dlm-start-interval-0s
+ interval=0s timeout=90
+ stop: dlm-stop-interval-0s
+ interval=0s timeout=100
Create and Populate GFS2 Filesystem
###################################
@@ -202,11 +251,19 @@ With the ``WebFS`` resource stopped, let's update the configuration.
[root@pcmk-1 ~]# pcs resource config WebFS
Resource: WebFS (class=ocf provider=heartbeat type=Filesystem)
- Attributes: device=/dev/drbd1 directory=/var/www/html fstype=xfs
- Meta Attrs: target-role=Stopped
- Operations: monitor interval=20s timeout=40s (WebFS-monitor-interval-20s)
- start interval=0s timeout=60s (WebFS-start-interval-0s)
- stop interval=0s timeout=60s (WebFS-stop-interval-0s)
+ Attributes: WebFS-instance_attributes
+ device=/dev/drbd1
+ directory=/var/www/html
+ fstype=xfs
+ Meta Attributes: WebFS-meta_attributes
+ target-role=Stopped
+ Operations:
+ monitor: WebFS-monitor-interval-20s
+ interval=20s timeout=40s
+ start: WebFS-start-interval-0s
+ interval=0s timeout=60s
+ stop: WebFS-stop-interval-0s
+ interval=0s timeout=60s
The fstype option needs to be updated to ``gfs2`` instead of ``xfs``.
@@ -215,11 +272,19 @@ The fstype option needs to be updated to ``gfs2`` instead of ``xfs``.
[root@pcmk-1 ~]# pcs resource update WebFS fstype=gfs2
[root@pcmk-1 ~]# pcs resource config WebFS
Resource: WebFS (class=ocf provider=heartbeat type=Filesystem)
- Attributes: device=/dev/drbd1 directory=/var/www/html fstype=gfs2
- Meta Attrs: target-role=Stopped
- Operations: monitor interval=20s timeout=40s (WebFS-monitor-interval-20s)
- start interval=0s timeout=60s (WebFS-start-interval-0s)
- stop interval=0s timeout=60s (WebFS-stop-interval-0s)
+ Attributes: WebFS-instance_attributes
+ device=/dev/drbd1
+ directory=/var/www/html
+ fstype=gfs2
+ Meta Attributes: WebFS-meta_attributes
+ target-role=Stopped
+ Operations:
+ monitor: WebFS-monitor-interval-20s
+ interval=20s timeout=40s
+ start: WebFS-start-interval-0s
+ interval=0s timeout=60s
+ stop: WebFS-stop-interval-0s
+ interval=0s timeout=60s
GFS2 requires that DLM be running, so we also need to set up new colocation
and ordering constraints for it:
@@ -231,20 +296,21 @@ and ordering constraints for it:
Adding dlm-clone WebFS (kind: Mandatory) (Options: first-action=start then-action=start)
[root@pcmk-1 ~]# pcs constraint
Location Constraints:
- Resource: WebSite
- Enabled on:
- Node: pcmk-2 (score:50)
- Ordering Constraints:
- start ClusterIP then start WebSite (kind:Mandatory)
- promote WebData-clone then start WebFS (kind:Mandatory)
- start WebFS then start WebSite (kind:Mandatory)
- start dlm-clone then start WebFS (kind:Mandatory)
+ resource 'WebSite' prefers node 'pcmk-2' with score 50
Colocation Constraints:
- WebSite with ClusterIP (score:INFINITY)
- WebFS with WebData-clone (score:INFINITY) (rsc-role:Started) (with-rsc-role:Promoted)
- WebSite with WebFS (score:INFINITY)
- WebFS with dlm-clone (score:INFINITY)
- Ticket Constraints:
+ resource 'WebSite' with resource 'ClusterIP'
+ score=INFINITY
+ resource 'WebFS' with Promoted resource 'WebData-clone'
+ score=INFINITY
+ resource 'WebSite' with resource 'WebFS'
+ score=INFINITY
+ resource 'WebFS' with resource 'dlm-clone'
+ score=INFINITY
+ Order Constraints:
+ start resource 'ClusterIP' then start resource 'WebSite'
+ promote resource 'WebData-clone' then start resource 'WebFS'
+ start resource 'WebFS' then start resource 'WebSite'
+ start resource 'dlm-clone' then start resource 'WebFS'
We also need to update the ``no-quorum-policy`` property to ``freeze``. By
default, the value of ``no-quorum-policy`` is set to ``stop`` indicating that
@@ -283,20 +349,21 @@ Notice how ``pcs`` automatically updates the relevant constraints again.
[root@pcmk-1 ~]# pcs -f active_cfg resource clone WebFS
[root@pcmk-1 ~]# pcs -f active_cfg constraint
Location Constraints:
- Resource: WebSite
- Enabled on:
- Node: pcmk-2 (score:50)
- Ordering Constraints:
- start ClusterIP then start WebSite (kind:Mandatory)
- promote WebData-clone then start WebFS-clone (kind:Mandatory)
- start WebFS-clone then start WebSite (kind:Mandatory)
- start dlm-clone then start WebFS-clone (kind:Mandatory)
+ resource 'WebSite' prefers node 'pcmk-2' with score 50
Colocation Constraints:
- WebSite with ClusterIP (score:INFINITY)
- WebFS-clone with WebData-clone (score:INFINITY) (rsc-role:Started) (with-rsc-role:Promoted)
- WebSite with WebFS-clone (score:INFINITY)
- WebFS-clone with dlm-clone (score:INFINITY)
- Ticket Constraints:
+ resource 'WebSite' with resource 'ClusterIP'
+ score=INFINITY
+ resource 'WebFS-clone' with Promoted resource 'WebData-clone'
+ score=INFINITY
+ resource 'WebSite' with resource 'WebFS-clone'
+ score=INFINITY
+ resource 'WebFS-clone' with resource 'dlm-clone'
+ score=INFINITY
+ Order Constraints:
+ start resource 'ClusterIP' then start resource 'WebSite'
+ promote resource 'WebData-clone' then start resource 'WebFS-clone'
+ start resource 'WebFS-clone' then start resource 'WebSite'
+ start resource 'dlm-clone' then start resource 'WebFS-clone'
Tell the cluster that it is now allowed to promote both instances to be DRBD
Primary.
diff --git a/doc/sphinx/Clusters_from_Scratch/active-passive.rst b/doc/sphinx/Clusters_from_Scratch/active-passive.rst
index 9b7b8feb5db..7cf313b756f 100644
--- a/doc/sphinx/Clusters_from_Scratch/active-passive.rst
+++ b/doc/sphinx/Clusters_from_Scratch/active-passive.rst
@@ -11,8 +11,9 @@ Our first resource will be a floating IP address that the cluster can bring up
on either node. Regardless of where any cluster service(s) are running, end
users need to be able to communicate with them at a consistent address. Here,
we will use ``192.168.122.120`` as the floating IP address, give it the
-imaginative name ``ClusterIP``, and tell the cluster to check whether it is
-still running every 30 seconds.
+imaginative name ``ClusterIP``, assign the IP address to the physical device
+``enp1s0``, and tell the cluster to check whether it is still running every 30
+seconds.
.. WARNING::
@@ -22,8 +23,8 @@ still running every 30 seconds.
.. code-block:: console
- [root@pcmk-1 ~]# pcs resource create ClusterIP ocf:heartbeat:IPaddr2 \
- ip=192.168.122.120 cidr_netmask=24 op monitor interval=30s
+ [root@pcmk-1 ~]# pcs resource create ClusterIP ocf:heartbeat:IPaddr2 \
+ ip=192.168.122.120 cidr_netmask=24 nic=enp1s0 op monitor interval=30s
Another important piece of information here is ``ocf:heartbeat:IPaddr2``.
This tells Pacemaker three things about the resource you want to add:
@@ -87,10 +88,10 @@ now, but it's okay if it doesn't look like the one below.
[root@pcmk-1 ~]# pcs status
Cluster name: mycluster
Cluster Summary:
- * Stack: corosync
- * Current DC: pcmk-1 (version 2.1.2-4.el9-ada5c3b36e2) - partition with quorum
- * Last updated: Wed Jul 27 00:37:28 2022
- * Last change: Wed Jul 27 00:37:14 2022 by root via cibadmin on pcmk-1
+ * Stack: corosync (Pacemaker is running)
+ * Current DC: pcmk-1 (version 3.0.1-3.el10-6a90427) - partition with quorum
+ * Last updated: Tue Feb 24 15:19:53 2026 on pcmk-1
+ * Last change: Tue Feb 24 15:19:16 2026 by root via root on pcmk-1
* 2 nodes configured
* 2 resource instances configured
@@ -115,7 +116,7 @@ address has been added.
1: lo inet 127.0.0.1/8 scope host lo\ valid_lft forever preferred_lft forever
1: lo inet6 ::1/128 scope host \ valid_lft forever preferred_lft forever
2: enp1s0 inet 192.168.122.102/24 brd 192.168.122.255 scope global noprefixroute enp1s0\ valid_lft forever preferred_lft forever
- 2: enp1s0 inet 192.168.122.120/24 brd 192.168.122.255 scope global secondary enp1s0\ valid_lft forever preferred_lft forever
+ 2: enp1s0 inet 192.168.122.120/24 scope global enp1s0\ valid_lft forever preferred_lft forever
2: enp1s0 inet6 fe80::5054:ff:fe95:209/64 scope link noprefixroute \ valid_lft forever preferred_lft forever
Perform a Failover
@@ -150,7 +151,6 @@ Verify that ``pacemaker`` and ``corosync`` are no longer running:
[root@pcmk-2 ~]# pcs status
Error: error running crm_mon, is pacemaker running?
- Could not connect to pacemakerd: Connection refused
crm_mon: Connection to cluster failed: Connection refused
Go to the other node, and check the cluster status.
@@ -160,10 +160,10 @@ Go to the other node, and check the cluster status.
[root@pcmk-1 ~]# pcs status
Cluster name: mycluster
Cluster Summary:
- * Stack: corosync
- * Current DC: pcmk-1 (version 2.1.2-4.el9-ada5c3b36e2) - partition with quorum
- * Last updated: Wed Jul 27 00:43:51 2022
- * Last change: Wed Jul 27 00:43:14 2022 by root via cibadmin on pcmk-1
+ * Stack: corosync (Pacemaker is running)
+ * Current DC: pcmk-1 (version 3.0.1-3.el10-6a90427) - partition with quorum
+ * Last updated: Tue Feb 24 15:23:32 2026 on pcmk-2
+ * Last change: Tue Feb 24 15:19:16 2026 by root via root on pcmk-1
* 2 nodes configured
* 2 resource instances configured
@@ -251,10 +251,10 @@ gets going on the node, but it eventually will look like the below.)
[root@pcmk-1 ~]# pcs status
Cluster name: mycluster
Cluster Summary:
- * Stack: corosync
- * Current DC: pcmk-1 (version 2.1.2-4.el9-ada5c3b36e2) - partition with quorum
- * Last updated: Wed Jul 27 00:45:17 2022
- * Last change: Wed Jul 27 00:45:01 2022 by root via cibadmin on pcmk-1
+ * Stack: corosync (Pacemaker is running)
+ * Current DC: pcmk-1 (version 3.0.1-3.el10-6a90427) - partition with quorum
+ * Last updated: Tue Feb 24 15:27:35 2026 on pcmk-2
+ * Last change: Tue Feb 24 15:19:16 2026 by root via root on pcmk-1
* 2 nodes configured
* 2 resource instances configured
diff --git a/doc/sphinx/Clusters_from_Scratch/apache.rst b/doc/sphinx/Clusters_from_Scratch/apache.rst
index c5c155ed689..24fc2c352a7 100644
--- a/doc/sphinx/Clusters_from_Scratch/apache.rst
+++ b/doc/sphinx/Clusters_from_Scratch/apache.rst
@@ -65,7 +65,7 @@ it fails. On both nodes, configure this URL as follows:
# cat <<-END >/etc/httpd/conf.d/status.conf
SetHandler server-status
- Require local
+ Require all granted
END
@@ -105,7 +105,6 @@ tutorial, we will adjust the global operation timeout default to 240 seconds.
.. code-block:: console
[root@pcmk-1 ~]# pcs resource op defaults
- No defaults set
[root@pcmk-1 ~]# pcs resource op defaults update timeout=240s
Warning: Defaults do not apply to resources which override them with their own defined values
[root@pcmk-1 ~]# pcs resource op defaults
@@ -135,10 +134,10 @@ After a short delay, we should see the cluster start Apache.
[root@pcmk-1 ~]# pcs status
Cluster name: mycluster
Cluster Summary:
- * Stack: corosync
- * Current DC: pcmk-1 (version 2.1.2-4.el9-ada5c3b36e2) - partition with quorum
- * Last updated: Wed Jul 27 00:47:44 2022
- * Last change: Wed Jul 27 00:47:23 2022 by root via cibadmin on pcmk-1
+ * Stack: corosync (Pacemaker is running)
+ * Current DC: pcmk-1 (version 3.0.1-3.el10-6a90427) - partition with quorum
+ * Last updated: Tue Feb 24 16:34:07 2026 on pcmk-1
+ * Last change: Tue Feb 24 16:33:50 2026 by root via root on pcmk-1
* 2 nodes configured
* 3 resource instances configured
@@ -148,7 +147,7 @@ After a short delay, we should see the cluster start Apache.
Full List of Resources:
* fence_dev (stonith:some_fence_agent): Started pcmk-1
* ClusterIP (ocf:heartbeat:IPaddr2): Started pcmk-1
- * WebSite (ocf:heartbeat:apache): Started pcmk-2
+ * WebSite (ocf:heartbeat:apache): Started pcmk-2
Daemon Status:
corosync: active/disabled
@@ -211,20 +210,18 @@ is not active anywhere, ``WebSite`` will not be permitted to run.
.. code-block:: console
- [root@pcmk-1 ~]# pcs constraint colocation add WebSite with ClusterIP INFINITY
+ [root@pcmk-1 ~]# pcs constraint colocation add WebSite with ClusterIP score=INFINITY
[root@pcmk-1 ~]# pcs constraint
- Location Constraints:
- Ordering Constraints:
Colocation Constraints:
- WebSite with ClusterIP (score:INFINITY)
- Ticket Constraints:
+ resource 'WebSite' with resource 'ClusterIP'
+ score=INFINITY
[root@pcmk-1 ~]# pcs status
Cluster name: mycluster
Cluster Summary:
- * Stack: corosync
- * Current DC: pcmk-1 (version 2.1.2-4.el9-ada5c3b36e2) - partition with quorum
- * Last updated: Wed Jul 27 00:49:33 2022
- * Last change: Wed Jul 27 00:49:16 2022 by root via cibadmin on pcmk-1
+ * Stack: corosync (Pacemaker is running)
+ * Current DC: pcmk-1 (version 3.0.1-3.el10-6a90427) - partition with quorum
+ * Last updated: Tue Feb 24 16:37:25 2026 on pcmk-1
+ * Last change: Tue Feb 24 16:36:36 2026 by root via root on pcmk-1
* 2 nodes configured
* 3 resource instances configured
@@ -234,7 +231,7 @@ is not active anywhere, ``WebSite`` will not be permitted to run.
Full List of Resources:
* fence_dev (stonith:some_fence_agent): Started pcmk-1
* ClusterIP (ocf:heartbeat:IPaddr2): Started pcmk-1
- * WebSite (ocf:heartbeat:apache): Started pcmk-1
+ * WebSite (ocf:heartbeat:apache): Started pcmk-1
Daemon Status:
corosync: active/disabled
@@ -275,12 +272,11 @@ also implies that the recovery of ``ClusterIP`` will trigger the recovery of
[root@pcmk-1 ~]# pcs constraint order ClusterIP then WebSite
Adding ClusterIP WebSite (kind: Mandatory) (Options: first-action=start then-action=start)
[root@pcmk-1 ~]# pcs constraint
- Location Constraints:
- Ordering Constraints:
- start ClusterIP then start WebSite (kind:Mandatory)
Colocation Constraints:
- WebSite with ClusterIP (score:INFINITY)
- Ticket Constraints:
+ resource 'WebSite' with resource 'ClusterIP'
+ score=INFINITY
+ Order Constraints:
+ start resource 'ClusterIP' then start 'WebSite'
.. NOTE::
@@ -324,21 +320,19 @@ how strongly we'd like the resource to run at this location.
[root@pcmk-1 ~]# pcs constraint location WebSite prefers pcmk-2=50
[root@pcmk-1 ~]# pcs constraint
Location Constraints:
- Resource: WebSite
- Enabled on:
- Node: pcmk-2 (score:50)
- Ordering Constraints:
- start ClusterIP then start WebSite (kind:Mandatory)
+ resource 'WebSite' prefers node 'pcmk-2' with score 50
Colocation Constraints:
- WebSite with ClusterIP (score:INFINITY)
- Ticket Constraints:
+ resource 'WebSite' with resource 'ClusterIP'
+ score=INFINITY
+ Order Constraints:
+ start resource 'ClusterIP' then start 'WebSite'
[root@pcmk-1 ~]# pcs status
Cluster name: mycluster
Cluster Summary:
- * Stack: corosync
- * Current DC: pcmk-1 (version 2.1.2-4.el9-ada5c3b36e2) - partition with quorum
- * Last updated: Wed Jul 27 00:51:13 2022
- * Last change: Wed Jul 27 00:51:07 2022 by root via cibadmin on pcmk-1
+ * Stack: corosync (Pacemaker is running)
+ * Current DC: pcmk-1 (version 3.0.1-3.el10-6a90427) - partition with quorum
+ * Last updated: Tue Feb 24 16:44:43 2026 on pcmk-1
+ * Last change: Tue Feb 24 16:43:35 2026 by root via root on pcmk-1
* 2 nodes configured
* 3 resource instances configured
@@ -348,7 +342,7 @@ how strongly we'd like the resource to run at this location.
Full List of Resources:
* fence_dev (stonith:some_fence_agent): Started pcmk-1
* ClusterIP (ocf:heartbeat:IPaddr2): Started pcmk-1
- * WebSite (ocf:heartbeat:apache): Started pcmk-1
+ * WebSite (ocf:heartbeat:apache): Started pcmk-1
Daemon Status:
corosync: active/disabled
@@ -367,18 +361,22 @@ To see the current placement scores, you can use a tool called
.. code-block:: console
[root@pcmk-1 ~]# crm_simulate -sL
- [ pcmk-1 pcmk-2 ]
-
- fence_dev (stonith:some_fence_agent): Started pcmk-1
- ClusterIP (ocf:heartbeat:IPaddr2): Started pcmk-1
- WebSite (ocf:heartbeat:apache): Started pcmk-1
-
- pcmk__native_allocate: fence_dev allocation score on pcmk-1: 100
- pcmk__native_allocate: fence_dev allocation score on pcmk-2: 0
- pcmk__native_allocate: ClusterIP allocation score on pcmk-1: 200
- pcmk__native_allocate: ClusterIP allocation score on pcmk-2: 50
- pcmk__native_allocate: WebSite allocation score on pcmk-1: 100
- pcmk__native_allocate: WebSite allocation score on pcmk-2: -INFINITY
+ Current cluster status:
+ * Node List:
+ * Online: [ pcmk-1 pcmk-2 ]
+
+ * Full List of Resources:
+ * ClusterIP (ocf:heartbeat:IPaddr2): Started pcmk-1
+ * fence_dev (stonith:some_fence_agent): Started pcmk-1
+ * WebSite (ocf:heartbeat:apache): Started pcmk-1
+
+ Assignment Scores:
+ * pcmk__native_allocate: ClusterIP allocation score on pcmk-1: 200
+ * pcmk__native_allocate: ClusterIP allocation score on pcmk-2: 50
+ * pcmk__native_allocate: fence_dev allocation score on pcmk-1: 100
+ * pcmk__native_allocate: fence_dev allocation score on pcmk-2: -INFINITY
+ * pcmk__native_allocate: WebSite allocation score on pcmk-1: 100
+ * pcmk__native_allocate: WebSite allocation score on pcmk-2: -INFINITY
.. index::
single: resource; moving manually
@@ -407,21 +405,19 @@ as before.
resource 'WebSite' is running on node 'pcmk-2'
[root@pcmk-1 ~]# pcs constraint
Location Constraints:
- Resource: WebSite
- Enabled on:
- Node: pcmk-2 (score:50)
- Ordering Constraints:
- start ClusterIP then start WebSite (kind:Mandatory)
+ resource 'WebSite' prefers node 'pcmk-2' with score 50
Colocation Constraints:
- WebSite with ClusterIP (score:INFINITY)
- Ticket Constraints:
+ resource 'WebSite' with resource 'ClusterIP'
+ score=INFINITY
+ Order Constraints:
+ start resource 'ClusterIP' then start 'WebSite'
[root@pcmk-1 ~]# pcs status
Cluster name: mycluster
Cluster Summary:
- * Stack: corosync
- * Current DC: pcmk-1 (version 2.1.2-4.el9-ada5c3b36e2) - partition with quorum
- * Last updated: Wed Jul 27 00:54:23 2022
- * Last change: Wed Jul 27 00:53:48 2022 by root via cibadmin on pcmk-1
+ * Stack: corosync (Pacemaker is running)
+ * Current DC: pcmk-1 (version 3.0.1-3.el10-6a90427) - partition with quorum
+ * Last updated: Tue Feb 24 16:56:37 2026 on pcmk-1
+ * Last change: Tue Feb 24 16:54:28 2026 by root via root on pcmk-1
* 2 nodes configured
* 3 resource instances configured
@@ -431,7 +427,7 @@ as before.
Full List of Resources:
* fence_dev (stonith:some_fence_agent): Started pcmk-1
* ClusterIP (ocf:heartbeat:IPaddr2): Started pcmk-2
- * WebSite (ocf:heartbeat:apache): Started pcmk-2
+ * WebSite (ocf:heartbeat:apache): Started pcmk-2
Daemon Status:
corosync: active/disabled
diff --git a/doc/sphinx/Clusters_from_Scratch/cluster-setup.rst b/doc/sphinx/Clusters_from_Scratch/cluster-setup.rst
index 748ce5049d3..cad37ffc242 100644
--- a/doc/sphinx/Clusters_from_Scratch/cluster-setup.rst
+++ b/doc/sphinx/Clusters_from_Scratch/cluster-setup.rst
@@ -49,7 +49,7 @@ that will make our lives easier:
.. code-block:: console
- # dnf install -y pacemaker pcs psmisc policycoreutils-python3
+ # dnf install -y pacemaker pcs psmisc policycoreutils-python3 policycoreutils-python-utils
.. NOTE::
@@ -303,11 +303,167 @@ example of all the options available under the status category.
xml
View xml version of status (output from crm_mon -r -1 -X).
+ wait []
+ Wait for the cluster to settle into stable state. Timeout can be
+ specified as bare number which describes number of seconds or number
+ with unit (s or sec for seconds, m or min for minutes, h or hr for
+ hours). If 'timeout' is not specified or set to zero, it defaults to
+ 60 minutes.
+ Example: pcs status wait 30min
+
+ query resource exists [--quiet]
+ Query the existence of the resource.
+
+ Print 'True' and exit with 0 if the query evaluates to true. Exit with
+ 1 if an error occurs while performing the query. Print 'False' and exit
+ with 2 otherwise.
+ If --quiet is specified, do not print any output and just exit with
+ the appropriate return code.
+
+ query resource is-stonith [--quiet]
+ Query if the resource is a stonith resource.
+
+ Print 'True' and exit with 0 if the query evaluates to true. Exit with
+ 1 if an error occurs while performing the query. Print 'False' and exit
+ with 2 otherwise.
+ If --quiet is specified, do not print any output and just exit with
+ the appropriate return code.
+
+ query resource is-type [--quiet]
+ (primitive | group | clone [unique] [promotable] | bundle [unique])
+ Query if the resource is of given type. Allows to query whether clones
+ are globally unique or promotable. Allows to query whether bundles are
+ globally unique.
+
+ Print 'True' and exit with 0 if the query evaluates to true. Exit with
+ 1 if an error occurs while performing the query. Print 'False' and exit
+ with 2 otherwise.
+ If --quiet is specified, do not print any output and just exit with
+ the appropriate return code.
+
+ query resource get-type
+ Get type of the resource. The output is one of 'primitive', 'group',
+ 'clone', 'clone unique', 'clone promotable', 'clone unique promotable',
+ 'bundle' or 'bundle unique'.
+
+ query resource is-state [on-node ]
+ [members all|any|none] [instances all|any|none] [--quiet]
+ Query if the resource is in the given state. can be one of
+ 'active', 'blocked', 'demoting', 'disabled', 'enabled', 'failed',
+ 'failure_ignored', 'locked_to', 'maintenance', 'managed', 'migrating',
+ 'monitoring', 'orphaned', 'pending', 'promoted', 'promoting', 'started',
+ 'starting', 'stopped', 'stopping', 'unmanaged' or 'unpromoted'.
+
+ States 'starting', 'stopping', 'promoting', 'demoting', 'migrating' and
+ 'monitoring' describe that resource operation (start, stop, promote,
+ demote, migrate_from/to, or monitor) is currently in progress on the
+ resource. 'pending' will evaluate to true if any of these operations is
+ currently in progress on the resource.
+
+ State 'locked_to' allows to specify node name of the node that the
+ resource is locked to.
+
+ With groups, the state is read and evaluated on the group first. If the
+ state cannot be determined from only the group, evaluate the state of
+ the member resources and return true if the query is true for ALL of
+ the members. 'members' can be used to specify how to evaluate the states
+ of the members. If 'members' is specified, then always evaluate the
+ query only on the member resources without looking at the group itself.
+ This means that for example queries for 'started' and 'stopped' on a
+ group with one started and one stopped member will both evaluate as
+ false at the same time if 'members' is not specified or it is set to
+ 'all'.
+
+ With clones and bundles, the state is read and evaluated on the clone
+ or bundle first. If the state cannot be determined from only the clone
+ or bundle, evaluate the state on the instances and return true if the
+ query is true for ANY of the instances. 'instances' can be used to
+ specify how to evaluate the state of the instances. If 'instances'
+ is specified, then always evaluate the query only on the instances
+ without looking at the clone or bundle themselves. This means that for
+ example queries for 'started' and 'stopped' on a clone with one started
+ and one stopped instance will both evaluate as true at the same time if
+ 'instances' is not specified or it is set to 'any'.
+
+ 'on-node' can be used to test whether the resource is in given state on
+ the node with the given name. If 'on-node' is used, then always check
+ only the member resources for groups and instances for clones or
+ bundles.
+
+ Queries on the state of cloned or bundled resources are evaluated
+ similarly to queries on clones or bundles. Evaluate the state of every
+ instance and return true if the query is true for ANY of the instances.
+ 'instances' can be used to specify how the query should be evaluated.
+ It is possible to query single specific instance of unique clones or
+ bundles by using resource id together with the suffix that distinguishes
+ instances for unique clones and bundles.
+ Example: Query if any instance of resource is started
+ pcs status query resource resource_id is-state started
+ Example: Query if one specific instance is started
+ pcs status query resource resource_id:0 is-state started
+
+ Print 'True' and exit with 0 if the query evaluates to true. Exit with
+ 1 if an error occurs while performing the query. Print 'False' and exit
+ with 2 otherwise.
+ If --quiet is specified, do not print any output and just exit with
+ the appropriate return code.
+
+ query resource is-in-group [] [--quiet]
+ Query if the resource is in any group or in a group with the
+ specified id.
+
+ Print 'True' and exit with 0 if the query evaluates to true. Exit with
+ 1 if an error occurs while performing the query. Print 'False' and exit
+ with 2 otherwise.
+ Print the id of the group on new line if the resource is in any group.
+ If --quiet is specified, do not print any output and just exit with
+ the appropriate return code.
+
+ query resource is-in-clone [] [--quiet]
+ Query if the resource is in any clone or in a clone with the specified
+ id. Resource that is in a cloned group is considered to be in the clone
+ itself.
+
+ Print 'True' and exit with 0 if the query evaluates to true. Exit with
+ 1 if an error occurs while performing the query. Print 'False' and exit
+ with 2 otherwise.
+ Print the id of the clone on new line if the resource is in any clone.
+ If --quiet is specified, do not print any output and just exit with
+ the appropriate return code.
+
+ query resource is-in-bundle [] [--quiet]
+ Query if the resource is in any bundle or in a bundle with the
+ specified id.
+
+ Print 'True' and exit with 0 if the query evaluates to true. Exit with
+ 1 if an error occurs while performing the query. Print 'False' and exit
+ with 2 otherwise.
+ Print the id of the bundle on new line if the resource is in any bundle.
+ If --quiet is specified, do not print any output and just exit with
+ the appropriate return code.
+
+ query resource get-nodes
+ Get all nodes on which the resource is running. With groups, return
+ nodes on which any of the members is running. For clones or bundles,
+ return nodes on which any of the instances or replicas are running.
+
+ Print each node name on new line.
+
+ query resource get-members
+ Get ids of member resources of the resource. Only useful for group,
+ clone and bundle resources.
+
+ Print each member id on new line.
+
+ query resource get-index-in-group
+ Get an index of the resource in a group. The first resource in a group
+ has an index of 0. Usable only for resources that are in a group.
+
Additionally, if you are interested in the version and supported cluster stack(s)
available with your Pacemaker installation, run:
.. code-block:: console
[root@pcmk-1 ~]# pacemakerd --features
- Pacemaker 2.1.2-4.el9 (Build: ada5c3b36e2)
- Supporting v3.13.0: agent-manpages cibsecrets corosync-ge-2 default-resource-stickiness default-sbd-sync generated-manpages monotonic nagios ncurses remote systemd
+ Pacemaker 3.0.1-3.el10 (Build: 6a90427)
+ Supporting v3.21.1: agent-manpages cibsecrets corosync-ge-2 default-resource-stickiness default-sbd-sync generated-manpages lsb monotonic ncurses service systemd
diff --git a/doc/sphinx/Clusters_from_Scratch/images/ConfigureVolumeGroup.png b/doc/sphinx/Clusters_from_Scratch/images/ConfigureVolumeGroup.png
index 00ef1ba3a5d..7349b85c785 100644
Binary files a/doc/sphinx/Clusters_from_Scratch/images/ConfigureVolumeGroup.png and b/doc/sphinx/Clusters_from_Scratch/images/ConfigureVolumeGroup.png differ
diff --git a/doc/sphinx/Clusters_from_Scratch/images/ConsolePrompt.png b/doc/sphinx/Clusters_from_Scratch/images/ConsolePrompt.png
index 336ae56ff0a..6cc1027cb4c 100644
Binary files a/doc/sphinx/Clusters_from_Scratch/images/ConsolePrompt.png and b/doc/sphinx/Clusters_from_Scratch/images/ConsolePrompt.png differ
diff --git a/doc/sphinx/Clusters_from_Scratch/images/CreateUser.png b/doc/sphinx/Clusters_from_Scratch/images/CreateUser.png
new file mode 100644
index 00000000000..99a17eafb2f
Binary files /dev/null and b/doc/sphinx/Clusters_from_Scratch/images/CreateUser.png differ
diff --git a/doc/sphinx/Clusters_from_Scratch/images/InstallationDestination.png b/doc/sphinx/Clusters_from_Scratch/images/InstallationDestination.png
index d847c818819..993e8066d85 100644
Binary files a/doc/sphinx/Clusters_from_Scratch/images/InstallationDestination.png and b/doc/sphinx/Clusters_from_Scratch/images/InstallationDestination.png differ
diff --git a/doc/sphinx/Clusters_from_Scratch/images/InstallationSummary.png b/doc/sphinx/Clusters_from_Scratch/images/InstallationSummary.png
index eefe9f0b76b..e2e53bd90c4 100644
Binary files a/doc/sphinx/Clusters_from_Scratch/images/InstallationSummary.png and b/doc/sphinx/Clusters_from_Scratch/images/InstallationSummary.png differ
diff --git a/doc/sphinx/Clusters_from_Scratch/images/ManualPartitioning.png b/doc/sphinx/Clusters_from_Scratch/images/ManualPartitioning.png
index 9047c65071e..837dc4d2836 100644
Binary files a/doc/sphinx/Clusters_from_Scratch/images/ManualPartitioning.png and b/doc/sphinx/Clusters_from_Scratch/images/ManualPartitioning.png differ
diff --git a/doc/sphinx/Clusters_from_Scratch/images/NetworkAndHostName.png b/doc/sphinx/Clusters_from_Scratch/images/NetworkAndHostName.png
index 156a1f06c4b..87ad5c8d741 100644
Binary files a/doc/sphinx/Clusters_from_Scratch/images/NetworkAndHostName.png and b/doc/sphinx/Clusters_from_Scratch/images/NetworkAndHostName.png differ
diff --git a/doc/sphinx/Clusters_from_Scratch/images/RootPassword.png b/doc/sphinx/Clusters_from_Scratch/images/RootPassword.png
index fc579ea92cf..4fc99568892 100644
Binary files a/doc/sphinx/Clusters_from_Scratch/images/RootPassword.png and b/doc/sphinx/Clusters_from_Scratch/images/RootPassword.png differ
diff --git a/doc/sphinx/Clusters_from_Scratch/images/SoftwareSelection.png b/doc/sphinx/Clusters_from_Scratch/images/SoftwareSelection.png
index d400915caf0..f379509400f 100644
Binary files a/doc/sphinx/Clusters_from_Scratch/images/SoftwareSelection.png and b/doc/sphinx/Clusters_from_Scratch/images/SoftwareSelection.png differ
diff --git a/doc/sphinx/Clusters_from_Scratch/images/SummaryOfChanges.png b/doc/sphinx/Clusters_from_Scratch/images/SummaryOfChanges.png
index 746be66b354..c7fd10efd6b 100644
Binary files a/doc/sphinx/Clusters_from_Scratch/images/SummaryOfChanges.png and b/doc/sphinx/Clusters_from_Scratch/images/SummaryOfChanges.png differ
diff --git a/doc/sphinx/Clusters_from_Scratch/images/TimeAndDate.png b/doc/sphinx/Clusters_from_Scratch/images/TimeAndDate.png
index a3ea351ba6c..ae38c3c0802 100644
Binary files a/doc/sphinx/Clusters_from_Scratch/images/TimeAndDate.png and b/doc/sphinx/Clusters_from_Scratch/images/TimeAndDate.png differ
diff --git a/doc/sphinx/Clusters_from_Scratch/images/WelcomeToAlmaLinux.png b/doc/sphinx/Clusters_from_Scratch/images/WelcomeToAlmaLinux.png
index dc573ade9bc..7b838c9cd4d 100644
Binary files a/doc/sphinx/Clusters_from_Scratch/images/WelcomeToAlmaLinux.png and b/doc/sphinx/Clusters_from_Scratch/images/WelcomeToAlmaLinux.png differ
diff --git a/doc/sphinx/Clusters_from_Scratch/images/WelcomeToCentos.png b/doc/sphinx/Clusters_from_Scratch/images/WelcomeToCentos.png
deleted file mode 100644
index ae9879c7409..00000000000
Binary files a/doc/sphinx/Clusters_from_Scratch/images/WelcomeToCentos.png and /dev/null differ
diff --git a/doc/sphinx/Clusters_from_Scratch/index.rst b/doc/sphinx/Clusters_from_Scratch/index.rst
index 3477ccd5385..6d1b0f336d4 100644
--- a/doc/sphinx/Clusters_from_Scratch/index.rst
+++ b/doc/sphinx/Clusters_from_Scratch/index.rst
@@ -11,7 +11,7 @@ The example cluster will use:
* |CFS_DISTRO| |CFS_DISTRO_VER| as the host operating system
* Corosync to provide messaging and membership services
-* Pacemaker 2 as the cluster resource manager
+* Pacemaker 3 as the cluster resource manager
* DRBD as a cost-effective alternative to shared storage
* GFS2 as the cluster filesystem (in active/active mode)
diff --git a/doc/sphinx/Clusters_from_Scratch/installation.rst b/doc/sphinx/Clusters_from_Scratch/installation.rst
index 02f5be975f4..1c324674f2f 100644
--- a/doc/sphinx/Clusters_from_Scratch/installation.rst
+++ b/doc/sphinx/Clusters_from_Scratch/installation.rst
@@ -9,10 +9,10 @@ ______________________
Download the latest |CFS_DISTRO| |CFS_DISTRO_VER| DVD ISO by navigating to
the |CFS_DISTRO| `mirrors list `_,
-selecting the latest 9.x version for your machine's architecture, selecting a
-download mirror that's close to you, and finally selecting the latest .iso file
-that has “dvd” in its name. Use the image to boot a virtual machine, or burn it
-to a DVD or USB drive and boot a physical server from that.
+selecting the latest |CFS_DISTRO_VER|.x version for your machine's architecture,
+selecting a download mirror that's close to you, and finally selecting the latest
+.iso file that has “dvd” in its name. Use the image to boot a virtual machine, or
+burn it to a DVD or USB drive and boot a physical server from that.
After starting the installation, select your language and keyboard layout at
the welcome screen.
@@ -89,9 +89,9 @@ Enter the **INSTALLATION DESTINATION** section and select the disk where you
want to install the OS. Then under **Storage Configuration**, select **Custom**
and press **Done**.
-.. figure:: images/ManualPartitioning.png
+.. figure:: images/InstallationDestination.png
:align: center
- :alt: Installation Destination Screen
+ :alt: Screen
|CFS_DISTRO| |CFS_DISTRO_VER| Installation Destination Screen
@@ -144,21 +144,34 @@ settings (such as time zone or NTP server), you can do this in the
|CFS_DISTRO| |CFS_DISTRO_VER| Time & Date Screen
-
-Root Password
+Root Account
______________________________
-In order to continue to the next step, a **Root Password** must be set. Be sure
-to check the box marked **Allow root SSH login with password**.
+In the **ROOT ACCOUNT** section, it is highly recommended to disable the root
+account. We'll use a local user to perform administration tasks with sudo.
.. figure:: images/RootPassword.png
:align: center
- :alt: Root Password Screen
+ :alt: Root Account Screen
+
+ |CFS_DISTRO| |CFS_DISTRO_VER| Root Account Screen
+
+Press **Done**.
+
+Create User
+______________________________
+
+In the **CREATE USER** section, create a new user with a secure password. Make
+sure to select **Add administrative privileges to the user account (wheel group
+membership)** so the user can administer the system.
+
+.. figure:: images/CreateUser.png
+ :align: center
+ :alt: Create User Screen
- |CFS_DISTRO| |CFS_DISTRO_VER| Root Password Screen
+ |CFS_DISTRO| |CFS_DISTRO_VER| Create User Screen
-Press **Done**. (Depending on the password you chose, you may need to do so
-twice.)
+Press **Done**.
Finish Install
______________
@@ -219,7 +232,7 @@ Next, ensure that the routes are as expected:
.. code-block:: console
[root@pcmk-1 ~]# ip route
- default via 192.168.122.1 dev enp1s0 proto static metric 100
+ default via 192.168.122.1 dev enp1s0 proto static src 192.168.122.101 metric 100
192.168.122.0/24 dev enp1s0 proto kernel scope link src 192.168.122.101 metric 100
If there is no line beginning with ``default via``, then use ``nmcli`` to add a
@@ -266,7 +279,7 @@ From another host, check whether we can see the new host at all:
.. code-block:: console
- [gchin@gchin ~]$ ping -c 1 192.168.122.101
+ [chris@laptop ~]$ ping -c 1 192.168.122.101
PING 192.168.122.101 (192.168.122.101) 56(84) bytes of data.
64 bytes from 192.168.122.101: icmp_seq=1 ttl=64 time=0.344 ms
@@ -274,18 +287,25 @@ From another host, check whether we can see the new host at all:
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.344/0.344/0.344/0.000 ms
-Next, login as ``root`` via SSH.
+Next, login as the user you created during installation via SSH.
.. code-block:: console
- [gchin@gchin ~]$ ssh root@192.168.122.101
+ [chris@laptop ~]$ ssh 192.168.122.101
The authenticity of host '192.168.122.101 (192.168.122.101)' can't be established.
ECDSA key fingerprint is SHA256:NBvcRrPDLIt39Rf0Tz4/f2Rd/FA5wUiDOd9bZ9QWWjo.
+ This key is not known by any other names.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
Warning: Permanently added '192.168.122.101' (ECDSA) to the list of known hosts.
- root@192.168.122.101's password:
- Last login: Tue Jan 10 20:46:30 2021
- [root@pcmk-1 ~]#
+ chris@192.168.122.101's password:
+ Last login: Tue Feb 24 13:03:51 2026
+ [chris@pcmk-1 ~]#
+
+.. NOTE::
+
+ From here on, you'll need to be ``root`` to administer the system. The
+ ``sudo`` command can be used to switch from the user you created during
+ installation to the ``root`` user.
Apply Updates
_____________
@@ -410,46 +430,42 @@ Create a new key and allow anyone with that key to log in:
.. code-block:: console
- [root@pcmk-1 ~]# ssh-keygen -f ~/.ssh/id_rsa -N ""
- Generating public/private rsa key pair.
- Your identification has been saved in /root/.ssh/id_rsa
- Your public key has been saved in /root/.ssh/id_rsa.pub
+ [root@pcmk-1 ~]# ssh-keygen -f ~/.ssh/id_ed25519 -N ""
+ Generating public/private ed25519 key pair.
+ Your identification has been saved in /root/.ssh/id_ed25519
+ Your public key has been saved in /root/.ssh/id_ed25519.pub
The key fingerprint is:
- SHA256:h5AFPmXsGU4woOxRLYHW9lnU2wIQVOxpSRrsXbo/AX8 root@pcmk-1
+ SHA256:BhlHJU3REGOpO0FA7Mqvp60WaB/2unQOZki+0mZKfee8 root@pcmk-1
The key's randomart image is:
- +---[RSA 3072]----+
- | o+*BX*. |
- | .oo+.+*O o |
- | .+. +=% O o |
- | . . =o%.o . |
- | . .S+.. |
- | ..o E |
- | . o |
- | o |
- | . |
- +----[SHA256]-----+
-
- [root@pcmk-1 ~]# cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
-
-Install the key on the other node:
-
-.. code-block:: console
-
- [root@pcmk-1 ~]# ssh-copy-id pcmk-2
- /usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
- The authenticity of host 'pcmk-2 (192.168.122.102)' can't be established.
- ED25519 key fingerprint is SHA256:QkJnJ3fmszY7kAuuZ7wxUC5CC+eQThSCF13XYWnZJPo.
- This host key is known by the following other names/addresses:
- ~/.ssh/known_hosts:1: 192.168.122.102
- Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
- /usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
- /usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
- root@pcmk-2's password:
-
- Number of key(s) added: 1
-
- Now try logging into the machine, with: "ssh 'pcmk-2'"
- and check to make sure that only the key(s) you wanted were added.
+ +---[ED25519 256]--+
+ | .o=++=+ |
+ | o.*.. |
+ | + B |
+ | . * o |
+ | ..+oo S |
+ | +B++ . |
+ | o=*o.o |
+ |o.==oo.. |
+ | +B+.+E. |
+ +----[SHA256]------+
+
+ [root@pcmk-1 ~]# cat ~/.ssh/id_ed25519.pub >> ~/.ssh/authorized_keys
+ [root@pcmk-1 ~]# chmod 600 ~/.ssh/authorized_keys
+
+Install the key on the other node. Because you can't login as root on the
+console or over SSH yet (that's what we're setting up now), the easiest way
+to do this is as follows:
+
+#. SSH into both nodes as the regular user.
+#. Use ``sudo`` to become ``root`` on each node.
+#. On ``pcmk-1``, cat the ``/root/.ssh/id_ed25519`` file and copy its contents
+ with the mouse.
+#. On ``pcmk-2``, paste those contents into ``/root/.ssh/id_ed25519``.
+#. ``chmod 600 /root/.ssh/id_ed25519``.
+#. On ``pcmk-1``, cat the ``/root/.ssh/id_ed25519.pub`` file and copy its
+ contents with the mouse.
+#. On ``pcmk-2``, paste those contents into ``/root/.ssh/authorized_keys``.
+#. ``chmod 600 /root/.ssh/authorized_keys``
Test that you can now run commands remotely, without being prompted:
@@ -458,9 +474,8 @@ Test that you can now run commands remotely, without being prompted:
[root@pcmk-1 ~]# ssh pcmk-2 -- uname -n
pcmk-2
-Finally, repeat this same process on the other node. For convenience, you can
-also generate an SSH key on your administrative machine and use ``ssh-copy-id``
-to copy it to both cluster nodes.
+It may also be handy to generate an SSH key on your administrative machine
+as your regular user and use ``ssh-copy-id`` to copy it to both cluster nodes.
.. [#] You can also avoid this SPOF by specifying an ``addr`` option for each
node when creating the cluster. We will discuss this in a later section.
diff --git a/doc/sphinx/Clusters_from_Scratch/shared-storage.rst b/doc/sphinx/Clusters_from_Scratch/shared-storage.rst
index 898e921b0c0..e7a9db683b3 100644
--- a/doc/sphinx/Clusters_from_Scratch/shared-storage.rst
+++ b/doc/sphinx/Clusters_from_Scratch/shared-storage.rst
@@ -28,8 +28,8 @@ repository:
.. code-block:: console
- [root@pcmk-1 ~]# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
- [root@pcmk-1 ~]# dnf install -y https://www.elrepo.org/elrepo-release-9.el9.elrepo.noarch.rpm
+ [root@pcmk-1 ~]# rpm --import https://www.elrepo.org/RPM-GPG-KEY-v2-elrepo.org
+ [root@pcmk-1 ~]# dnf install -y https://www.elrepo.org/elrepo-release-10.el10.elrepo.noarch.rpm
Now, we can install the DRBD kernel module and utilities:
@@ -37,16 +37,6 @@ Now, we can install the DRBD kernel module and utilities:
# dnf install -y kmod-drbd9x drbd9x-utils
-DRBD will not be able to run under the default SELinux security policies.
-If you are familiar with SELinux, you can modify the policies in a more
-fine-grained manner, but here we will simply exempt DRBD processes from SELinux
-control:
-
-.. code-block:: console
-
- # dnf install -y policycoreutils-python-utils
- # semanage permissive -a drbd_t
-
We will configure DRBD to use port 7789, so allow that port from each host to
the other:
@@ -105,7 +95,7 @@ Repeat for the second node, making sure to use the same size:
.. code-block:: console
- [root@pcmk-1 ~]# ssh pcmk-2 -- lvcreate --name drbd-demo --size 512M cs_pcmk-2
+ [root@pcmk-1 ~]# ssh pcmk-2 -- lvcreate --name drbd-demo --size 512M almalinux_pcmk-2
Logical volume "drbd-demo" created.
Configure DRBD
@@ -179,14 +169,6 @@ Run them on one node:
.. code-block:: console
[root@pcmk-1 ~]# drbdadm create-md wwwdata
- initializing activity log
- initializing bitmap (16 KB) to all zero
- Writing meta data...
- New drbd meta data block successfully created.
- success
-
- [root@pcmk-1 ~]# modprobe drbd
- [root@pcmk-1 ~]# drbdadm up wwwdata
@@ -210,6 +192,12 @@ Run them on one node:
The server's response is:
you are the 25212th user to install this version
+ initializing activity log
+ initializing bitmap (16 KB) to all zero
+ Writing meta data...
+ New drbd meta data block successfully created.
+ success
+ [root@pcmk-1 ~]# drbdadm up wwwdata
We can confirm DRBD's status on this node:
@@ -217,7 +205,7 @@ We can confirm DRBD's status on this node:
[root@pcmk-1 ~]# drbdadm status
wwwdata role:Secondary
- disk:Inconsistent
+ disk:Inconsistent open:no
pcmk-2 connection:Connecting
Because we have not yet initialized the data, this node's data
@@ -226,18 +214,18 @@ the second node, the ``pcmk-2`` connection is ``Connecting`` (waiting for
connection).
Now, repeat the above commands on the second node, starting with creating
-``wwwdata.res``. After giving it time to connect, when we check the status of
+``wwwdata``. After giving it time to connect, when we check the status of
the first node, it shows:
.. code-block:: console
[root@pcmk-1 ~]# drbdadm status
wwwdata role:Secondary
- disk:Inconsistent
+ disk:Inconsistent open:no
pcmk-2 role:Secondary
peer-disk:Inconsistent
-You can see that ``pcmk-2 connection:Connecting`` longer appears in the
+You can see that ``pcmk-2 connection:Connecting`` no longer appears in the
output, meaning the two DRBD nodes are communicating properly, and both
nodes are in ``Secondary`` role with ``Inconsistent`` data.
@@ -261,7 +249,7 @@ If we check the status immediately, we'll see something like this:
[root@pcmk-1 ~]# drbdadm status
wwwdata role:Primary
- disk:UpToDate
+ disk:UpToDate open:no
pcmk-2 role:Secondary
peer-disk:Inconsistent
@@ -271,7 +259,7 @@ It will be quickly followed by this:
[root@pcmk-1 ~]# drbdadm status
wwwdata role:Primary
- disk:UpToDate
+ disk:UpToDate open:no
pcmk-2 role:Secondary
replication:SyncSource peer-disk:Inconsistent
@@ -285,12 +273,12 @@ After a while, the sync should finish, and you'll see something like:
[root@pcmk-1 ~]# drbdadm status
wwwdata role:Primary
- disk:UpToDate
+ disk:UpToDate open:no
pcmk-1 role:Secondary
peer-disk:UpToDate
[root@pcmk-2 ~]# drbdadm status
wwwdata role:Secondary
- disk:UpToDate
+ disk:UpToDate open:no
pcmk-1 role:Primary
peer-disk:UpToDate
@@ -309,11 +297,12 @@ create a filesystem on the DRBD device:
meta-data=/dev/drbd1 isize=512 agcount=4, agsize=32765 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=1, rmapbt=0
- = reflink=1
+ = reflink=1 bigtime=1 inobtcount=1 nrext64=1
+ = exchange=0
data = bsize=4096 blocks=131059, imaxpct=25
= sunit=0 swidth=0 blks
- naming =version 2 bsize=4096 ascii-ci=0, ftype=1
- log =internal log bsize=4096 blocks=1368, version=2
+ naming =version 2 bsize=4096 ascii-ci=0, ftype=1, parent=0
+ log =internal log bsize=4096 blocks=16384, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
Discarding blocks...Done.
@@ -362,7 +351,7 @@ resource to allow the resource to run on both nodes at the same time.
[root@pcmk-1 ~]# pcs -f drbd_cfg resource create WebData ocf:linbit:drbd \
drbd_resource=wwwdata op monitor interval=29s role=Promoted \
monitor interval=31s role=Unpromoted
- [root@pcmk-1 ~]# pcs -f drbd_cfg resource promotable WebData \
+ [root@pcmk-1 ~]# pcs -f drbd_cfg resource promotable WebData meta \
promoted-max=1 promoted-node-max=1 clone-max=2 clone-node-max=1 \
notify=true
[root@pcmk-1 ~]# pcs resource status
@@ -370,15 +359,28 @@ resource to allow the resource to run on both nodes at the same time.
* WebSite (ocf::heartbeat:apache): Started pcmk-1
[root@pcmk-1 ~]# pcs resource config
Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: cidr_netmask=24 ip=192.168.122.120
- Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
- start interval=0s timeout=20s (ClusterIP-start-interval-0s)
- stop interval=0s timeout=20s (ClusterIP-stop-interval-0s)
+ Attributes: ClusterIP-instance_attributes
+ cidr_netmask=24
+ ip=192.168.122.120
+ nic=enp1s0
+ Operations:
+ monitor: ClusterIP-monitor-interval-30s
+ interval=30s
+ start: ClusterIP-start-interval-0s
+ interval=0s timeout=20s
+ stop: ClusterIP-stop-interval-0s
+ interval=0s timeout=20s
Resource: WebSite (class=ocf provider=heartbeat type=apache)
- Attributes: configfile=/etc/httpd/conf/httpd.conf statusurl=http://localhost/server-status
- Operations: monitor interval=1min (WebSite-monitor-interval-1min)
- start interval=0s timeout=40s (WebSite-start-interval-0s)
- stop interval=0s timeout=60s (WebSite-stop-interval-0s)
+ Attributes: WebSite-instance_attributes
+ configfile=/etc/httpd/conf/httpd.conf
+ statusurl=http://localhost/server-status
+ Operations:
+ monitor: WebSite-monitor-interval-1min
+ interval=1min
+ start: WebSite-start-interval-0s
+ interval=0s timeout=40s
+ stop: WebSite-stop-interval-0s
+ interval=0s timeout=60s
After you are satisfied with all the changes, you can commit
them all at once by pushing the ``drbd_cfg`` file into the live CIB.
@@ -396,9 +398,9 @@ them all at once by pushing the ``drbd_cfg`` file into the live CIB.
[root@pcmk-1 ~]# pcs resource create WebData ocf:linbit:drbd \
drbd_resource=wwwdata op monitor interval=29s role=Promoted \
- monitor interval=31s role=Unpromoted \
- promotable promoted-max=1 promoted-node-max=1 clone-max=2 \
- clone-node-max=1 notify=true
+ monitor interval=31s role=Unpromoted promotable meta \
+ promoted-max=1 promoted-node-max=1 clone-max=2 clone-node-max=1 \
+ notify=true
Let's see what the cluster did with the new configuration:
@@ -412,27 +414,56 @@ Let's see what the cluster did with the new configuration:
* Unpromoted: [ pcmk-2 ]
[root@pcmk-1 ~]# pcs resource config
Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: cidr_netmask=24 ip=192.168.122.120
- Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
- start interval=0s timeout=20s (ClusterIP-start-interval-0s)
- stop interval=0s timeout=20s (ClusterIP-stop-interval-0s)
+ Attributes: ClusterIP-instance_attributes
+ cidr_netmask=24
+ ip=192.168.122.120
+ nic=enp1s0
+ Operations:
+ monitor: ClusterIP-monitor-interval-30s
+ interval=30s
+ start: ClusterIP-start-interval-0s
+ interval=0s timeout=20s
+ stop: ClusterIP-stop-interval-0s
+ interval=0s timeout=20s
Resource: WebSite (class=ocf provider=heartbeat type=apache)
- Attributes: configfile=/etc/httpd/conf/httpd.conf statusurl=http://localhost/server-status
- Operations: monitor interval=1min (WebSite-monitor-interval-1min)
- start interval=0s timeout=40s (WebSite-start-interval-0s)
- stop interval=0s timeout=60s (WebSite-stop-interval-0s)
+ Attributes: WebSite-instance_attributes
+ configfile=/etc/httpd/conf/httpd.conf
+ statusurl=http://localhost/server-status
+ Operations:
+ monitor: WebSite-monitor-interval-1min
+ interval=1min
+ start: WebSite-start-interval-0s
+ interval=0s timeout=40s
+ stop: WebSite-stop-interval-0s
+ interval=0s timeout=60s
Clone: WebData-clone
- Meta Attrs: clone-max=2 clone-node-max=1 notify=true promotable=true promoted-max=1 promoted-node-max=1
+ Meta Attributes: WebData-clone-meta_attributes
+ clone-max=2
+ clone-node-max=1
+ notify=true
+ promotable=true
+ promoted-max=1
+ promoted-node-max=1
Resource: WebData (class=ocf provider=linbit type=drbd)
- Attributes: drbd_resource=wwwdata
- Operations: demote interval=0s timeout=90 (WebData-demote-interval-0s)
- monitor interval=29s role=Promoted (WebData-monitor-interval-29s)
- monitor interval=31s role=Unpromoted (WebData-monitor-interval-31s)
- notify interval=0s timeout=90 (WebData-notify-interval-0s)
- promote interval=0s timeout=90 (WebData-promote-interval-0s)
- reload interval=0s timeout=30 (WebData-reload-interval-0s)
- start interval=0s timeout=240 (WebData-start-interval-0s)
- stop interval=0s timeout=100 (WebData-stop-interval-0s)
+ Attributes: WebData-instance_attributes
+ drbd_resource=wwwdata
+ Operations:
+ demote: WebData-demote-interval-0s
+ interval=0s timeout=90
+ monitor: WebData-monitor-interval-29s
+ interval=29s role=Promoted
+ monitor: WebData-monitor-interval-31s
+ interval=31s role=Unpromoted
+ notify: WebData-notify-interval-0s
+ interval=0s timeout=90
+ promote: WebData-promote-interval-0s
+ interval=0s timeout=90
+ reload: WebData-reload-interval-0s
+ interval=0s timeout=30
+ start: WebData-start-interval-0s
+ interval=0s timeout=240
+ stop: WebData-stop-interval-0s
+ interval=0s timeout=100
We can see that ``WebData-clone`` (our DRBD device) is running as ``Promoted``
(DRBD's primary role) on ``pcmk-1`` and ``Unpromoted`` (DRBD's secondary role)
@@ -498,18 +529,18 @@ Review the updated configuration.
[root@pcmk-1 ~]# pcs -f fs_cfg constraint
Location Constraints:
- Resource: WebSite
- Enabled on:
- Node: pcmk-1 (score:50)
- Ordering Constraints:
- start ClusterIP then start WebSite (kind:Mandatory)
- promote WebData-clone then start WebFS (kind:Mandatory)
- start WebFS then start WebSite (kind:Mandatory)
+ resource 'WebSite' prefers node 'pcmk-1' with score 50
Colocation Constraints:
- WebSite with ClusterIP (score:INFINITY)
- WebFS with WebData-clone (score:INFINITY) (rsc-role:Started) (with-rsc-role:Promoted)
- WebSite with WebFS (score:INFINITY)
- Ticket Constraints:
+ resource 'WebSite' with resource 'ClusterIP'
+ score=INFINITY
+ Started resource 'WebFS' with Promoted resource 'WebData-clone'
+ score=INFINITY
+ resource 'WebSite' with resource 'WebFS'
+ score=INFINITY
+ Order Constraints:
+ start resource 'ClusterIP' then start resource 'WebSite'
+ promote resource 'WebData-clone' then start resource 'WebFS'
+ start resource 'WebFS' then start resource 'WebSite'
After reviewing the new configuration, upload it and watch the
cluster put it into effect.
@@ -527,32 +558,68 @@ cluster put it into effect.
* WebFS (ocf:heartbeat:Filesystem): Started pcmk-2
[root@pcmk-1 ~]# pcs resource config
Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
- Attributes: cidr_netmask=24 ip=192.168.122.120
- Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
- start interval=0s timeout=20s (ClusterIP-start-interval-0s)
- stop interval=0s timeout=20s (ClusterIP-stop-interval-0s)
+ Attributes: ClusterIP-instance_attributes
+ cidr_netmask=24
+ ip=192.168.122.120
+ nic=enp1s0
+ Operations:
+ monitor: ClusterIP-monitor-interval-30s
+ interval=30s
+ start: ClusterIP-start-interval-0s
+ interval=0s timeout=20s
+ stop: ClusterIP-stop-interval-0s
+ interval=0s timeout=20s
Resource: WebSite (class=ocf provider=heartbeat type=apache)
- Attributes: configfile=/etc/httpd/conf/httpd.conf statusurl=http://localhost/server-status
- Operations: monitor interval=1min (WebSite-monitor-interval-1min)
- start interval=0s timeout=40s (WebSite-start-interval-0s)
- stop interval=0s timeout=60s (WebSite-stop-interval-0s)
+ Attributes: WebSite-instance_attributes
+ configfile=/etc/httpd/conf/httpd.conf
+ statusurl=http://localhost/server-status
+ Operations:
+ monitor: WebSite-monitor-interval-1min
+ interval=1min
+ start: WebSite-start-interval-0s
+ interval=0s timeout=40s
+ stop: WebSite-stop-interval-0s
+ interval=0s timeout=60s
+ Resource: WebFS (class=ocf provider=heartbeat type=Filesystem)
+ Attributes: WebFS-instance_attributes
+ device=/dev/drbd1
+ directory=/var/www/html
+ fstype=xfs
+ Operations:
+ monitor: WebFS-monitor-interval-20s
+ interval=20s timeout=40s
+ start: WebFS-start-interval-0s
+ interval=0s timeout=60s
+ stop: WebFS-stop-interval-0s
+ interval=0s timeout=60s
Clone: WebData-clone
- Meta Attrs: clone-max=2 clone-node-max=1 notify=true promotable=true promoted-max=1 promoted-node-max=1
+ Meta Attributes: WebData-clone-meta_attributes
+ clone-max=2
+ clone-node-max=1
+ notify=true
+ promotable=true
+ promoted-max=1
+ promoted-node-max=1
Resource: WebData (class=ocf provider=linbit type=drbd)
- Attributes: drbd_resource=wwwdata
- Operations: demote interval=0s timeout=90 (WebData-demote-interval-0s)
- monitor interval=29s role=Promoted (WebData-monitor-interval-29s)
- monitor interval=31s role=Unpromoted (WebData-monitor-interval-31s)
- notify interval=0s timeout=90 (WebData-notify-interval-0s)
- promote interval=0s timeout=90 (WebData-promote-interval-0s)
- reload interval=0s timeout=30 (WebData-reload-interval-0s)
- start interval=0s timeout=240 (WebData-start-interval-0s)
- stop interval=0s timeout=100 (WebData-stop-interval-0s)
- Resource: WebFS (class=ocf provider=heartbeat type=Filesystem)
- Attributes: device=/dev/drbd1 directory=/var/www/html fstype=xfs
- Operations: monitor interval=20s timeout=40s (WebFS-monitor-interval-20s)
- start interval=0s timeout=60s (WebFS-start-interval-0s)
- stop interval=0s timeout=60s (WebFS-stop-interval-0s)
+ Attributes: WebData-instance_attributes
+ drbd_resource=wwwdata
+ Operations:
+ demote: WebData-demote-interval-0s
+ interval=0s timeout=90
+ monitor: WebData-monitor-interval-29s
+ interval=29s role=Promoted
+ monitor: WebData-monitor-interval-31s
+ interval=31s role=Unpromoted
+ notify: WebData-notify-interval-0s
+ interval=0s timeout=90
+ promote: WebData-promote-interval-0s
+ interval=0s timeout=90
+ reload: WebData-reload-interval-0s
+ interval=0s timeout=30
+ start: WebData-start-interval-0s
+ interval=0s timeout=240
+ stop: WebData-stop-interval-0s
+ interval=0s timeout=100
Test Cluster Failover
#####################
@@ -577,10 +644,10 @@ it can no longer host resources, and eventually all the resources will move.
[root@pcmk-1 ~]# pcs status
Cluster name: mycluster
Cluster Summary:
- * Stack: corosync
- * Current DC: pcmk-1 (version 2.1.2-4.el9-ada5c3b36e2) - partition with quorum
- * Last updated: Wed Jul 27 05:28:01 2022
- * Last change: Wed Jul 27 05:27:57 2022 by root via cibadmin on pcmk-1
+ * Stack: corosync (Pacemaker is running)
+ * Current DC: pcmk-1 (version 3.0.1-3.el10-6a90427) - partition with quorum
+ * Last updated: Wed Feb 25 10:32:17 2026
+ * Last change: Wed Feb 25 10:32:13 2026 by root via root on pcmk-1
* 2 nodes configured
* 6 resource instances configured
@@ -612,10 +679,10 @@ eligible to host resources again.
[root@pcmk-1 ~]# pcs status
Cluster name: mycluster
Cluster Summary:
- * Stack: corosync
- * Current DC: pcmk-1 (version 2.1.2-4.el9-ada5c3b36e2) - partition with quorum
- * Last updated: Wed Jul 27 05:28:50 2022
- * Last change: Wed Jul 27 05:28:47 2022 by root via cibadmin on pcmk-1
+ * Stack: corosync (Pacemaker is running)
+ * Current DC: pcmk-1 (version 3.0.1-3.el10-6a90427) - partition with quorum
+ * Last updated: Wed Feb 25 10:32:17 2026
+ * Last change: Wed Feb 25 10:32:13 2026 by root via root on pcmk-1
* 2 nodes configured
* 6 resource instances configured
diff --git a/doc/sphinx/Clusters_from_Scratch/verification.rst b/doc/sphinx/Clusters_from_Scratch/verification.rst
index 392196683b5..a2bc8d43a88 100644
--- a/doc/sphinx/Clusters_from_Scratch/verification.rst
+++ b/doc/sphinx/Clusters_from_Scratch/verification.rst
@@ -119,12 +119,17 @@ If that looks OK, check the ``pcs status`` output:
WARNINGS:
No stonith devices and stonith-enabled is not false
+ error: Resource start-up disabled since no STONITH resources have been defined
+ error: Either configure some or disable STONITH with the stonith-enabled option
+ error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+ error: CIB did not pass schema validation
+ Configuration invalid (with errors)
Cluster Summary:
- * Stack: corosync
- * Current DC: pcmk-2 (version 2.1.2-4.el9-ada5c3b36e2) - partition with quorum
- * Last updated: Wed Jul 27 00:09:55 2022
- * Last change: Wed Jul 27 00:07:08 2022 by hacluster via crmd on pcmk-2
+ * Stack: corosync (Pacemaker is running)
+ * Current DC: pcmk-1 (version 3.0.1-3.el10-6a90427) - partition with quorum
+ * Last updated: Tue Feb 24 14:55:38 2026 on pcmk-1
+ * Last change: Tue Feb 24 14:54:39 2026 by hacluster via hacluster on pcmk-1
* 2 nodes configured
* 0 resource instances configured
@@ -168,12 +173,12 @@ configuration and status by using the ``pcs cluster cib`` command.
.. code-block:: xml
-
+
-
+
@@ -191,15 +196,25 @@ configuration and status by using the ``pcs cluster cib`` command.
-
+
+
+
+
+
+
-
+
+
+
+
+
+
@@ -211,10 +226,11 @@ the configuration.
[root@pcmk-1 ~]# pcs cluster verify --full
Error: invalid cib:
- (unpack_resources) error: Resource start-up disabled since no STONITH resources have been defined
- (unpack_resources) error: Either configure some or disable STONITH with the fencing-enabled option
- (unpack_resources) error: NOTE: Clusters with shared data need STONITH to ensure data integrity
- crm_verify: Errors found during check: config not valid
+ error: Resource start-up disabled since no STONITH resources have been defined
+ error: Either configure some or disable STONITH with the fencing-enabled option
+ error: NOTE: Clusters with shared data need STONITH to ensure data integrity
+ error: CIB did not pass schema validation
+ Configuration invalid (with errors)
Error: Errors have occurred, therefore pcs is unable to continue
diff --git a/doc/sphinx/conf.py.in b/doc/sphinx/conf.py.in
index 0a251120824..3eebad30fbe 100644
--- a/doc/sphinx/conf.py.in
+++ b/doc/sphinx/conf.py.in
@@ -1,7 +1,7 @@
""" Sphinx configuration for Pacemaker documentation
"""
-__copyright__ = "Copyright 2020-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2020-2026 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
# This file is execfile()d with the current directory set to its containing dir.
@@ -29,7 +29,7 @@ doc_license += " version 4.0 or later (CC-BY-SA v4.0+)"
# where occurrences of || in the rST will be substituted with
rst_prolog="""
.. |CFS_DISTRO| replace:: AlmaLinux
-.. |CFS_DISTRO_VER| replace:: 9
+.. |CFS_DISTRO_VER| replace:: 10
.. |CRM_BLACKBOX_DIR| replace:: ``%CRM_BLACKBOX_DIR%``
.. |CRM_CONFIG_DIR| replace:: ``%CRM_CONFIG_DIR%``
.. |CRM_DAEMON_GROUP| replace:: ``%CRM_DAEMON_GROUP%``
@@ -44,7 +44,7 @@ rst_prolog="""
.. |PCMK_CONTAINER_LOG_FILE| replace:: ``/var/log/pcmk-init.log``
.. |PCMK__REMOTE_SCHEMA_DIR| replace:: %PCMK__REMOTE_SCHEMA_DIR%
.. |REMOTE_DISTRO| replace:: AlmaLinux
-.. |REMOTE_DISTRO_VER| replace:: 9
+.. |REMOTE_DISTRO_VER| replace:: 10
"""
# If extensions (or modules to document with autodoc) are in another directory,