From e40831aeb3589717782c6041e46adbbf68a1e75c Mon Sep 17 00:00:00 2001
From: Reid Wahl
Date: Sun, 12 May 2024 22:59:57 -0700
Subject: [PATCH 1/8] Test: cts-scheduler: Test failed remote connection
 resource migrate_from

The test results are currently wrong.

Signed-off-by: Reid Wahl
---
 cts/cts-scheduler.in                          |   5 +
 cts/scheduler/dot/remote-failed-migrate1.dot  |   5 +
 cts/scheduler/dot/remote-failed-migrate2.dot  |  12 ++
 cts/scheduler/dot/remote-partial-migrate3.dot |   5 +
 cts/scheduler/exp/remote-failed-migrate1.exp  |  37 +++++
 cts/scheduler/exp/remote-failed-migrate2.exp  |  78 ++++++++++
 cts/scheduler/exp/remote-partial-migrate3.exp |  37 +++++
 .../scores/remote-failed-migrate1.scores      |   7 +
 .../scores/remote-failed-migrate2.scores      |  10 ++
 .../scores/remote-partial-migrate3.scores     |   7 +
 .../summary/remote-failed-migrate1.summary    |  29 ++++
 .../summary/remote-failed-migrate2.summary    |  35 +++++
 .../summary/remote-partial-migrate3.summary   |  29 ++++
 cts/scheduler/xml/remote-failed-migrate1.xml  | 109 ++++++++++++++
 cts/scheduler/xml/remote-failed-migrate2.xml  | 136 ++++++++++++++++++
 15 files changed, 541 insertions(+)
 create mode 100644 cts/scheduler/dot/remote-failed-migrate1.dot
 create mode 100644 cts/scheduler/dot/remote-failed-migrate2.dot
 create mode 100644 cts/scheduler/dot/remote-partial-migrate3.dot
 create mode 100644 cts/scheduler/exp/remote-failed-migrate1.exp
 create mode 100644 cts/scheduler/exp/remote-failed-migrate2.exp
 create mode 100644 cts/scheduler/exp/remote-partial-migrate3.exp
 create mode 100644 cts/scheduler/scores/remote-failed-migrate1.scores
 create mode 100644 cts/scheduler/scores/remote-failed-migrate2.scores
 create mode 100644 cts/scheduler/scores/remote-partial-migrate3.scores
 create mode 100644 cts/scheduler/summary/remote-failed-migrate1.summary
 create mode 100644 cts/scheduler/summary/remote-failed-migrate2.summary
 create mode 100644 cts/scheduler/summary/remote-partial-migrate3.summary
 create mode 100644 cts/scheduler/xml/remote-failed-migrate1.xml
 create mode 100644 cts/scheduler/xml/remote-failed-migrate2.xml

diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index 50c32f6c7cd..0b0b4dca561 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -1120,6 +1120,11 @@ TESTS = [
       "Make sure partial migrations are handled before ops on the remote node" ],
     [ "remote-partial-migrate2",
       "Make sure partial migration target is prefered for remote connection" ],
+    [ "remote-failed-migrate1",
+      "Recover immediately without fencing after failed migrate_from"],
+    [ "remote-failed-migrate2",
+      "Recover immediately without fencing after failed migrate_from "
+      + "(rsc active)"],
     [ "remote-recover-fail",
       "Make sure start failure causes fencing if rsc are active on remote" ],
     [ "remote-start-fail",
       "Make sure a start failure does not result in fencing if no active resources are on remote" ],
diff --git a/cts/scheduler/dot/remote-failed-migrate1.dot b/cts/scheduler/dot/remote-failed-migrate1.dot
new file mode 100644
index 00000000000..975b8408913
--- /dev/null
+++ b/cts/scheduler/dot/remote-failed-migrate1.dot
@@ -0,0 +1,5 @@
+ digraph "g" {
+"fastvm-fedora39-23_stop_0 fastvm-fedora39-22" [ style=bold color="green" fontcolor="black"]
+"fastvm-fedora39-23_stop_0 fastvm-fedora39-24" [ style=bold color="green" fontcolor="black"]
+"stonith 'reboot' fastvm-fedora39-23" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/remote-failed-migrate2.dot b/cts/scheduler/dot/remote-failed-migrate2.dot
new file mode 100644
index 00000000000..5378a8bf5a0
--- /dev/null
+++ b/cts/scheduler/dot/remote-failed-migrate2.dot @@ -0,0 +1,12 @@ + digraph "g" { +"dummy_monitor_10000 fastvm-fedora39-24" [ style=bold color="green" fontcolor="black"] +"dummy_start_0 fastvm-fedora39-24" -> "dummy_monitor_10000 fastvm-fedora39-24" [ style = bold] +"dummy_start_0 fastvm-fedora39-24" [ style=bold color="green" fontcolor="black"] +"dummy_stop_0 fastvm-fedora39-23" -> "dummy_start_0 fastvm-fedora39-24" [ style = bold] +"dummy_stop_0 fastvm-fedora39-23" [ style=bold color="green" fontcolor="orange"] +"fastvm-fedora39-23_stop_0 fastvm-fedora39-22" [ style=bold color="green" fontcolor="black"] +"fastvm-fedora39-23_stop_0 fastvm-fedora39-24" [ style=bold color="green" fontcolor="black"] +"stonith 'reboot' fastvm-fedora39-23" -> "dummy_start_0 fastvm-fedora39-24" [ style = bold] +"stonith 'reboot' fastvm-fedora39-23" -> "dummy_stop_0 fastvm-fedora39-23" [ style = bold] +"stonith 'reboot' fastvm-fedora39-23" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/dot/remote-partial-migrate3.dot b/cts/scheduler/dot/remote-partial-migrate3.dot new file mode 100644 index 00000000000..975b8408913 --- /dev/null +++ b/cts/scheduler/dot/remote-partial-migrate3.dot @@ -0,0 +1,5 @@ + digraph "g" { +"fastvm-fedora39-23_stop_0 fastvm-fedora39-22" [ style=bold color="green" fontcolor="black"] +"fastvm-fedora39-23_stop_0 fastvm-fedora39-24" [ style=bold color="green" fontcolor="black"] +"stonith 'reboot' fastvm-fedora39-23" [ style=bold color="green" fontcolor="black"] +} diff --git a/cts/scheduler/exp/remote-failed-migrate1.exp b/cts/scheduler/exp/remote-failed-migrate1.exp new file mode 100644 index 00000000000..5b0188b65ff --- /dev/null +++ b/cts/scheduler/exp/remote-failed-migrate1.exp @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/exp/remote-failed-migrate2.exp b/cts/scheduler/exp/remote-failed-migrate2.exp new file mode 100644 index 00000000000..12bea2f0918 --- /dev/null +++ b/cts/scheduler/exp/remote-failed-migrate2.exp @@ -0,0 +1,78 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/exp/remote-partial-migrate3.exp b/cts/scheduler/exp/remote-partial-migrate3.exp new file mode 100644 index 00000000000..5b0188b65ff --- /dev/null +++ b/cts/scheduler/exp/remote-partial-migrate3.exp @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/scores/remote-failed-migrate1.scores b/cts/scheduler/scores/remote-failed-migrate1.scores new file mode 100644 index 00000000000..d36e8d4436a --- /dev/null +++ b/cts/scheduler/scores/remote-failed-migrate1.scores @@ -0,0 +1,7 @@ + +pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-22: -INFINITY +pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-23: -INFINITY +pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-24: -INFINITY +pcmk__primitive_assign: xvm allocation score on fastvm-fedora39-22: 1 +pcmk__primitive_assign: xvm allocation score on fastvm-fedora39-23: -INFINITY +pcmk__primitive_assign: xvm allocation score on fastvm-fedora39-24: 0 diff --git a/cts/scheduler/scores/remote-failed-migrate2.scores b/cts/scheduler/scores/remote-failed-migrate2.scores new file mode 100644 index 00000000000..f023051891a --- /dev/null +++ b/cts/scheduler/scores/remote-failed-migrate2.scores @@ -0,0 +1,10 
@@ + +pcmk__primitive_assign: dummy allocation score on fastvm-fedora39-22: 0 +pcmk__primitive_assign: dummy allocation score on fastvm-fedora39-23: INFINITY +pcmk__primitive_assign: dummy allocation score on fastvm-fedora39-24: 0 +pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-22: -INFINITY +pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-23: -INFINITY +pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-24: -INFINITY +pcmk__primitive_assign: xvm allocation score on fastvm-fedora39-22: 1 +pcmk__primitive_assign: xvm allocation score on fastvm-fedora39-23: -INFINITY +pcmk__primitive_assign: xvm allocation score on fastvm-fedora39-24: 0 diff --git a/cts/scheduler/scores/remote-partial-migrate3.scores b/cts/scheduler/scores/remote-partial-migrate3.scores new file mode 100644 index 00000000000..d36e8d4436a --- /dev/null +++ b/cts/scheduler/scores/remote-partial-migrate3.scores @@ -0,0 +1,7 @@ + +pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-22: -INFINITY +pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-23: -INFINITY +pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-24: -INFINITY +pcmk__primitive_assign: xvm allocation score on fastvm-fedora39-22: 1 +pcmk__primitive_assign: xvm allocation score on fastvm-fedora39-23: -INFINITY +pcmk__primitive_assign: xvm allocation score on fastvm-fedora39-24: 0 diff --git a/cts/scheduler/summary/remote-failed-migrate1.summary b/cts/scheduler/summary/remote-failed-migrate1.summary new file mode 100644 index 00000000000..a57a0f879a0 --- /dev/null +++ b/cts/scheduler/summary/remote-failed-migrate1.summary @@ -0,0 +1,29 @@ +Using the original execution date of: 2024-05-13 04:25:37Z +Current cluster status: + * Node List: + * RemoteNode fastvm-fedora39-23: UNCLEAN (offline) + * Online: [ fastvm-fedora39-22 fastvm-fedora39-24 ] + + * Full List of Resources: + * fastvm-fedora39-23 (ocf:pacemaker:remote): FAILED [ fastvm-fedora39-22 fastvm-fedora39-24 ] + * xvm (stonith:fence_xvm): Started fastvm-fedora39-22 + +Transition Summary: + * Fence (reboot) fastvm-fedora39-23 'remote connection is unrecoverable' + * Stop fastvm-fedora39-23 ( fastvm-fedora39-22 ) due to node availability + * Stop fastvm-fedora39-23 ( fastvm-fedora39-24 ) due to node availability + +Executing Cluster Transition: + * Resource action: fastvm-fedora39-23 stop on fastvm-fedora39-22 + * Resource action: fastvm-fedora39-23 stop on fastvm-fedora39-24 + * Fencing fastvm-fedora39-23 (reboot) +Using the original execution date of: 2024-05-13 04:25:37Z + +Revised Cluster Status: + * Node List: + * RemoteNode fastvm-fedora39-23: UNCLEAN (offline) + * Online: [ fastvm-fedora39-22 fastvm-fedora39-24 ] + + * Full List of Resources: + * fastvm-fedora39-23 (ocf:pacemaker:remote): FAILED + * xvm (stonith:fence_xvm): Started fastvm-fedora39-22 diff --git a/cts/scheduler/summary/remote-failed-migrate2.summary b/cts/scheduler/summary/remote-failed-migrate2.summary new file mode 100644 index 00000000000..406f25ee295 --- /dev/null +++ b/cts/scheduler/summary/remote-failed-migrate2.summary @@ -0,0 +1,35 @@ +Using the original execution date of: 2024-05-15 10:28:29Z +Current cluster status: + * Node List: + * RemoteNode fastvm-fedora39-23: UNCLEAN (offline) + * Online: [ fastvm-fedora39-22 fastvm-fedora39-24 ] + + * Full List of Resources: + * fastvm-fedora39-23 (ocf:pacemaker:remote): FAILED [ fastvm-fedora39-24 fastvm-fedora39-22 ] + 
* xvm (stonith:fence_xvm): Started fastvm-fedora39-22 + * dummy (ocf:pacemaker:Dummy): Started fastvm-fedora39-23 (UNCLEAN) + +Transition Summary: + * Fence (reboot) fastvm-fedora39-23 'remote connection is unrecoverable' + * Stop fastvm-fedora39-23 ( fastvm-fedora39-24 ) due to node availability + * Stop fastvm-fedora39-23 ( fastvm-fedora39-22 ) due to node availability + * Move dummy ( fastvm-fedora39-23 -> fastvm-fedora39-24 ) + +Executing Cluster Transition: + * Resource action: fastvm-fedora39-23 stop on fastvm-fedora39-22 + * Resource action: fastvm-fedora39-23 stop on fastvm-fedora39-24 + * Fencing fastvm-fedora39-23 (reboot) + * Pseudo action: dummy_stop_0 + * Resource action: dummy start on fastvm-fedora39-24 + * Resource action: dummy monitor=10000 on fastvm-fedora39-24 +Using the original execution date of: 2024-05-15 10:28:29Z + +Revised Cluster Status: + * Node List: + * RemoteNode fastvm-fedora39-23: UNCLEAN (offline) + * Online: [ fastvm-fedora39-22 fastvm-fedora39-24 ] + + * Full List of Resources: + * fastvm-fedora39-23 (ocf:pacemaker:remote): FAILED + * xvm (stonith:fence_xvm): Started fastvm-fedora39-22 + * dummy (ocf:pacemaker:Dummy): Started fastvm-fedora39-24 diff --git a/cts/scheduler/summary/remote-partial-migrate3.summary b/cts/scheduler/summary/remote-partial-migrate3.summary new file mode 100644 index 00000000000..a57a0f879a0 --- /dev/null +++ b/cts/scheduler/summary/remote-partial-migrate3.summary @@ -0,0 +1,29 @@ +Using the original execution date of: 2024-05-13 04:25:37Z +Current cluster status: + * Node List: + * RemoteNode fastvm-fedora39-23: UNCLEAN (offline) + * Online: [ fastvm-fedora39-22 fastvm-fedora39-24 ] + + * Full List of Resources: + * fastvm-fedora39-23 (ocf:pacemaker:remote): FAILED [ fastvm-fedora39-22 fastvm-fedora39-24 ] + * xvm (stonith:fence_xvm): Started fastvm-fedora39-22 + +Transition Summary: + * Fence (reboot) fastvm-fedora39-23 'remote connection is unrecoverable' + * Stop fastvm-fedora39-23 ( fastvm-fedora39-22 ) due to node availability + * Stop fastvm-fedora39-23 ( fastvm-fedora39-24 ) due to node availability + +Executing Cluster Transition: + * Resource action: fastvm-fedora39-23 stop on fastvm-fedora39-22 + * Resource action: fastvm-fedora39-23 stop on fastvm-fedora39-24 + * Fencing fastvm-fedora39-23 (reboot) +Using the original execution date of: 2024-05-13 04:25:37Z + +Revised Cluster Status: + * Node List: + * RemoteNode fastvm-fedora39-23: UNCLEAN (offline) + * Online: [ fastvm-fedora39-22 fastvm-fedora39-24 ] + + * Full List of Resources: + * fastvm-fedora39-23 (ocf:pacemaker:remote): FAILED + * xvm (stonith:fence_xvm): Started fastvm-fedora39-22 diff --git a/cts/scheduler/xml/remote-failed-migrate1.xml b/cts/scheduler/xml/remote-failed-migrate1.xml new file mode 100644 index 00000000000..aeec2500f37 --- /dev/null +++ b/cts/scheduler/xml/remote-failed-migrate1.xml @@ -0,0 +1,109 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cts/scheduler/xml/remote-failed-migrate2.xml b/cts/scheduler/xml/remote-failed-migrate2.xml new file mode 100644 index 00000000000..0d30eae3cd0 --- /dev/null +++ b/cts/scheduler/xml/remote-failed-migrate2.xml @@ -0,0 +1,136 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + +

From 374a8d3a733d9c3c9f9e130d56b6b65c56ed1fe8 Mon Sep 17 00:00:00 2001
From: Reid Wahl
Date: Sun, 12 May 2024 19:52:06 -0700
Subject: [PATCH 2/8] Fix: libpe_status: Don't fence a remote node due to
 failed migrate_from

This also prevents the resource from remaining stopped until the
cluster-recheck-interval expires. That's because we no longer set
pcmk_on_fail_reset_remote after a migrate_from failure, so
pcmk__role_after_failure() no longer returns pcmk_role_stopped.

Ref T214

Signed-off-by: Reid Wahl
---
 lib/pengine/pe_actions.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/lib/pengine/pe_actions.c b/lib/pengine/pe_actions.c
index b866db61ed0..6d3962e6f8c 100644
--- a/lib/pengine/pe_actions.c
+++ b/lib/pengine/pe_actions.c
@@ -962,13 +962,16 @@ pcmk__parse_on_fail(const pcmk_resource_t *rsc, const char *action_name,
 
     /* Remote node connections are handled specially. Failures that result
      * in dropping an active connection must result in fencing. The only
-     * failures that don't are probes and starts. The user can explicitly set
-     * PCMK_META_ON_FAIL=PCMK_VALUE_FENCE to fence after start failures.
+     * failures that don't are probes, starts, and migrate_froms (which are
+     * essentially starts during a migration). The user can explicitly set
+     * PCMK_META_ON_FAIL=PCMK_VALUE_FENCE to fence after start and migrate_from
+     * failures.
      */
     if (rsc->is_remote_node
         && pcmk__is_remote_node(pcmk_find_node(rsc->cluster, rsc->id))
         && !pcmk_is_probe(action_name, interval_ms)
-        && !pcmk__str_eq(action_name, PCMK_ACTION_START, pcmk__str_none)) {
+        && !pcmk__str_any_of(action_name, PCMK_ACTION_START,
+                             PCMK_ACTION_MIGRATE_FROM, NULL)) {
         needs_remote_reset = true;
         if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
             desc = NULL; // Force default for unmanaged connections

From 11fbd77a0a3823ca325fbb91d7b47d9e8ddd854e Mon Sep 17 00:00:00 2001
From: Reid Wahl
Date: Sun, 12 May 2024 23:05:15 -0700
Subject: [PATCH 3/8] Test: cts-scheduler: Update test after remote
 migrate_from fix

Ref T214

Signed-off-by: Reid Wahl
---
 cts/scheduler/dot/remote-failed-migrate1.dot  |  6 +-
 cts/scheduler/dot/remote-failed-migrate2.dot  | 18 +++--
 cts/scheduler/dot/remote-partial-migrate3.dot |  6 +-
 cts/scheduler/exp/remote-failed-migrate1.exp  | 42 +++++++---
 cts/scheduler/exp/remote-failed-migrate2.exp  | 76 ++++++++++++-----
 cts/scheduler/exp/remote-partial-migrate3.exp | 42 +++++++---
 .../scores/remote-failed-migrate1.scores      |  4 +-
 .../scores/remote-failed-migrate2.scores      |  4 +-
 .../scores/remote-partial-migrate3.scores     |  4 +-
 .../summary/remote-failed-migrate1.summary    | 13 ++--
 .../summary/remote-failed-migrate2.summary    | 23 +++---
 .../summary/remote-partial-migrate3.summary   | 13 ++--
 12 files changed, 171 insertions(+), 80 deletions(-)

diff --git a/cts/scheduler/dot/remote-failed-migrate1.dot b/cts/scheduler/dot/remote-failed-migrate1.dot
index 975b8408913..212f674c98d 100644
--- a/cts/scheduler/dot/remote-failed-migrate1.dot
+++ b/cts/scheduler/dot/remote-failed-migrate1.dot
@@ -1,5 +1,9 @@
  digraph "g" {
+"fastvm-fedora39-23_monitor_60000 fastvm-fedora39-24" [ style=bold color="green" fontcolor="black"]
+"fastvm-fedora39-23_start_0 fastvm-fedora39-24" -> "fastvm-fedora39-23_monitor_60000 fastvm-fedora39-24" [ style = bold]
+"fastvm-fedora39-23_start_0 fastvm-fedora39-24" [ style=bold color="green" fontcolor="black"]
+"fastvm-fedora39-23_stop_0 fastvm-fedora39-22" -> "fastvm-fedora39-23_start_0 fastvm-fedora39-24" [ style = bold]
 "fastvm-fedora39-23_stop_0 fastvm-fedora39-22" [
style=bold color="green" fontcolor="black"] +"fastvm-fedora39-23_stop_0 fastvm-fedora39-24" -> "fastvm-fedora39-23_start_0 fastvm-fedora39-24" [ style = bold] "fastvm-fedora39-23_stop_0 fastvm-fedora39-24" [ style=bold color="green" fontcolor="black"] -"stonith 'reboot' fastvm-fedora39-23" [ style=bold color="green" fontcolor="black"] } diff --git a/cts/scheduler/dot/remote-failed-migrate2.dot b/cts/scheduler/dot/remote-failed-migrate2.dot index 5378a8bf5a0..9da1e562b21 100644 --- a/cts/scheduler/dot/remote-failed-migrate2.dot +++ b/cts/scheduler/dot/remote-failed-migrate2.dot @@ -1,12 +1,20 @@ digraph "g" { -"dummy_monitor_10000 fastvm-fedora39-24" [ style=bold color="green" fontcolor="black"] -"dummy_start_0 fastvm-fedora39-24" -> "dummy_monitor_10000 fastvm-fedora39-24" [ style = bold] -"dummy_start_0 fastvm-fedora39-24" [ style=bold color="green" fontcolor="black"] -"dummy_stop_0 fastvm-fedora39-23" -> "dummy_start_0 fastvm-fedora39-24" [ style = bold] +"dummy_monitor_10000 fastvm-fedora39-22" [ style=bold color="green" fontcolor="black"] +"dummy_start_0 fastvm-fedora39-22" -> "dummy_monitor_10000 fastvm-fedora39-22" [ style = bold] +"dummy_start_0 fastvm-fedora39-22" [ style=bold color="green" fontcolor="black"] +"dummy_stop_0 fastvm-fedora39-23" -> "dummy_start_0 fastvm-fedora39-22" [ style = bold] +"dummy_stop_0 fastvm-fedora39-23" -> "fastvm-fedora39-23_stop_0 fastvm-fedora39-22" [ style = bold] +"dummy_stop_0 fastvm-fedora39-23" -> "fastvm-fedora39-23_stop_0 fastvm-fedora39-24" [ style = bold] "dummy_stop_0 fastvm-fedora39-23" [ style=bold color="green" fontcolor="orange"] +"fastvm-fedora39-23_monitor_60000 fastvm-fedora39-24" [ style=bold color="green" fontcolor="black"] +"fastvm-fedora39-23_start_0 fastvm-fedora39-24" -> "fastvm-fedora39-23_monitor_60000 fastvm-fedora39-24" [ style = bold] +"fastvm-fedora39-23_start_0 fastvm-fedora39-24" [ style=bold color="green" fontcolor="black"] +"fastvm-fedora39-23_stop_0 fastvm-fedora39-22" -> "fastvm-fedora39-23_start_0 fastvm-fedora39-24" [ style = bold] "fastvm-fedora39-23_stop_0 fastvm-fedora39-22" [ style=bold color="green" fontcolor="black"] +"fastvm-fedora39-23_stop_0 fastvm-fedora39-24" -> "fastvm-fedora39-23_start_0 fastvm-fedora39-24" [ style = bold] "fastvm-fedora39-23_stop_0 fastvm-fedora39-24" [ style=bold color="green" fontcolor="black"] -"stonith 'reboot' fastvm-fedora39-23" -> "dummy_start_0 fastvm-fedora39-24" [ style = bold] +"stonith 'reboot' fastvm-fedora39-23" -> "dummy_start_0 fastvm-fedora39-22" [ style = bold] "stonith 'reboot' fastvm-fedora39-23" -> "dummy_stop_0 fastvm-fedora39-23" [ style = bold] +"stonith 'reboot' fastvm-fedora39-23" -> "fastvm-fedora39-23_start_0 fastvm-fedora39-24" [ style = bold] "stonith 'reboot' fastvm-fedora39-23" [ style=bold color="green" fontcolor="black"] } diff --git a/cts/scheduler/dot/remote-partial-migrate3.dot b/cts/scheduler/dot/remote-partial-migrate3.dot index 975b8408913..212f674c98d 100644 --- a/cts/scheduler/dot/remote-partial-migrate3.dot +++ b/cts/scheduler/dot/remote-partial-migrate3.dot @@ -1,5 +1,9 @@ digraph "g" { +"fastvm-fedora39-23_monitor_60000 fastvm-fedora39-24" [ style=bold color="green" fontcolor="black"] +"fastvm-fedora39-23_start_0 fastvm-fedora39-24" -> "fastvm-fedora39-23_monitor_60000 fastvm-fedora39-24" [ style = bold] +"fastvm-fedora39-23_start_0 fastvm-fedora39-24" [ style=bold color="green" fontcolor="black"] +"fastvm-fedora39-23_stop_0 fastvm-fedora39-22" -> "fastvm-fedora39-23_start_0 fastvm-fedora39-24" [ style = bold] "fastvm-fedora39-23_stop_0 
fastvm-fedora39-22" [ style=bold color="green" fontcolor="black"] +"fastvm-fedora39-23_stop_0 fastvm-fedora39-24" -> "fastvm-fedora39-23_start_0 fastvm-fedora39-24" [ style = bold] "fastvm-fedora39-23_stop_0 fastvm-fedora39-24" [ style=bold color="green" fontcolor="black"] -"stonith 'reboot' fastvm-fedora39-23" [ style=bold color="green" fontcolor="black"] } diff --git a/cts/scheduler/exp/remote-failed-migrate1.exp b/cts/scheduler/exp/remote-failed-migrate1.exp index 5b0188b65ff..9be4e7fbea6 100644 --- a/cts/scheduler/exp/remote-failed-migrate1.exp +++ b/cts/scheduler/exp/remote-failed-migrate1.exp @@ -1,21 +1,38 @@ - + - - - - + - + + + + + - + - + + + + + + + + + + + + + + + + + @@ -23,14 +40,15 @@ - + - - + + + - + diff --git a/cts/scheduler/exp/remote-failed-migrate2.exp b/cts/scheduler/exp/remote-failed-migrate2.exp index 12bea2f0918..fcaa3a4b0e2 100644 --- a/cts/scheduler/exp/remote-failed-migrate2.exp +++ b/cts/scheduler/exp/remote-failed-migrate2.exp @@ -1,5 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -9,11 +41,15 @@ - + + + + + - + - + @@ -21,52 +57,56 @@ - + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + diff --git a/cts/scheduler/exp/remote-partial-migrate3.exp b/cts/scheduler/exp/remote-partial-migrate3.exp index 5b0188b65ff..9be4e7fbea6 100644 --- a/cts/scheduler/exp/remote-partial-migrate3.exp +++ b/cts/scheduler/exp/remote-partial-migrate3.exp @@ -1,21 +1,38 @@ - + - - - - + - + + + + + - + - + + + + + + + + + + + + + + + + + @@ -23,14 +40,15 @@ - + - - + + + - + diff --git a/cts/scheduler/scores/remote-failed-migrate1.scores b/cts/scheduler/scores/remote-failed-migrate1.scores index d36e8d4436a..1208ccfb062 100644 --- a/cts/scheduler/scores/remote-failed-migrate1.scores +++ b/cts/scheduler/scores/remote-failed-migrate1.scores @@ -1,7 +1,7 @@ -pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-22: -INFINITY +pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-22: 0 pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-23: -INFINITY -pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-24: -INFINITY +pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-24: INFINITY pcmk__primitive_assign: xvm allocation score on fastvm-fedora39-22: 1 pcmk__primitive_assign: xvm allocation score on fastvm-fedora39-23: -INFINITY pcmk__primitive_assign: xvm allocation score on fastvm-fedora39-24: 0 diff --git a/cts/scheduler/scores/remote-failed-migrate2.scores b/cts/scheduler/scores/remote-failed-migrate2.scores index f023051891a..91ae1b2b54a 100644 --- a/cts/scheduler/scores/remote-failed-migrate2.scores +++ b/cts/scheduler/scores/remote-failed-migrate2.scores @@ -2,9 +2,9 @@ pcmk__primitive_assign: dummy allocation score on fastvm-fedora39-22: 0 pcmk__primitive_assign: dummy allocation score on fastvm-fedora39-23: INFINITY pcmk__primitive_assign: dummy allocation score on fastvm-fedora39-24: 0 -pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-22: -INFINITY +pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-22: 0 pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-23: -INFINITY -pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-24: -INFINITY +pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-24: INFINITY pcmk__primitive_assign: xvm allocation score on fastvm-fedora39-22: 1 
pcmk__primitive_assign: xvm allocation score on fastvm-fedora39-23: -INFINITY pcmk__primitive_assign: xvm allocation score on fastvm-fedora39-24: 0 diff --git a/cts/scheduler/scores/remote-partial-migrate3.scores b/cts/scheduler/scores/remote-partial-migrate3.scores index d36e8d4436a..1208ccfb062 100644 --- a/cts/scheduler/scores/remote-partial-migrate3.scores +++ b/cts/scheduler/scores/remote-partial-migrate3.scores @@ -1,7 +1,7 @@ -pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-22: -INFINITY +pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-22: 0 pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-23: -INFINITY -pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-24: -INFINITY +pcmk__primitive_assign: fastvm-fedora39-23 allocation score on fastvm-fedora39-24: INFINITY pcmk__primitive_assign: xvm allocation score on fastvm-fedora39-22: 1 pcmk__primitive_assign: xvm allocation score on fastvm-fedora39-23: -INFINITY pcmk__primitive_assign: xvm allocation score on fastvm-fedora39-24: 0 diff --git a/cts/scheduler/summary/remote-failed-migrate1.summary b/cts/scheduler/summary/remote-failed-migrate1.summary index a57a0f879a0..b048a9e559c 100644 --- a/cts/scheduler/summary/remote-failed-migrate1.summary +++ b/cts/scheduler/summary/remote-failed-migrate1.summary @@ -1,29 +1,28 @@ Using the original execution date of: 2024-05-13 04:25:37Z Current cluster status: * Node List: - * RemoteNode fastvm-fedora39-23: UNCLEAN (offline) * Online: [ fastvm-fedora39-22 fastvm-fedora39-24 ] + * RemoteOFFLINE: [ fastvm-fedora39-23 ] * Full List of Resources: * fastvm-fedora39-23 (ocf:pacemaker:remote): FAILED [ fastvm-fedora39-22 fastvm-fedora39-24 ] * xvm (stonith:fence_xvm): Started fastvm-fedora39-22 Transition Summary: - * Fence (reboot) fastvm-fedora39-23 'remote connection is unrecoverable' - * Stop fastvm-fedora39-23 ( fastvm-fedora39-22 ) due to node availability - * Stop fastvm-fedora39-23 ( fastvm-fedora39-24 ) due to node availability + * Recover fastvm-fedora39-23 ( fastvm-fedora39-22 -> fastvm-fedora39-24 ) Executing Cluster Transition: * Resource action: fastvm-fedora39-23 stop on fastvm-fedora39-22 * Resource action: fastvm-fedora39-23 stop on fastvm-fedora39-24 - * Fencing fastvm-fedora39-23 (reboot) + * Resource action: fastvm-fedora39-23 start on fastvm-fedora39-24 + * Resource action: fastvm-fedora39-23 monitor=60000 on fastvm-fedora39-24 Using the original execution date of: 2024-05-13 04:25:37Z Revised Cluster Status: * Node List: - * RemoteNode fastvm-fedora39-23: UNCLEAN (offline) * Online: [ fastvm-fedora39-22 fastvm-fedora39-24 ] + * RemoteOnline: [ fastvm-fedora39-23 ] * Full List of Resources: - * fastvm-fedora39-23 (ocf:pacemaker:remote): FAILED + * fastvm-fedora39-23 (ocf:pacemaker:remote): Started fastvm-fedora39-24 * xvm (stonith:fence_xvm): Started fastvm-fedora39-22 diff --git a/cts/scheduler/summary/remote-failed-migrate2.summary b/cts/scheduler/summary/remote-failed-migrate2.summary index 406f25ee295..ca5e04b0923 100644 --- a/cts/scheduler/summary/remote-failed-migrate2.summary +++ b/cts/scheduler/summary/remote-failed-migrate2.summary @@ -10,26 +10,27 @@ Current cluster status: * dummy (ocf:pacemaker:Dummy): Started fastvm-fedora39-23 (UNCLEAN) Transition Summary: - * Fence (reboot) fastvm-fedora39-23 'remote connection is unrecoverable' - * Stop fastvm-fedora39-23 ( fastvm-fedora39-24 ) due to node availability - * Stop fastvm-fedora39-23 ( fastvm-fedora39-22 ) 
due to node availability - * Move dummy ( fastvm-fedora39-23 -> fastvm-fedora39-24 ) + * Fence (reboot) fastvm-fedora39-23 'dummy is thought to be active there' + * Recover fastvm-fedora39-23 ( fastvm-fedora39-24 ) + * Move dummy ( fastvm-fedora39-23 -> fastvm-fedora39-22 ) Executing Cluster Transition: - * Resource action: fastvm-fedora39-23 stop on fastvm-fedora39-22 - * Resource action: fastvm-fedora39-23 stop on fastvm-fedora39-24 * Fencing fastvm-fedora39-23 (reboot) * Pseudo action: dummy_stop_0 - * Resource action: dummy start on fastvm-fedora39-24 - * Resource action: dummy monitor=10000 on fastvm-fedora39-24 + * Resource action: fastvm-fedora39-23 stop on fastvm-fedora39-22 + * Resource action: fastvm-fedora39-23 stop on fastvm-fedora39-24 + * Resource action: dummy start on fastvm-fedora39-22 + * Resource action: fastvm-fedora39-23 start on fastvm-fedora39-24 + * Resource action: dummy monitor=10000 on fastvm-fedora39-22 + * Resource action: fastvm-fedora39-23 monitor=60000 on fastvm-fedora39-24 Using the original execution date of: 2024-05-15 10:28:29Z Revised Cluster Status: * Node List: - * RemoteNode fastvm-fedora39-23: UNCLEAN (offline) * Online: [ fastvm-fedora39-22 fastvm-fedora39-24 ] + * RemoteOnline: [ fastvm-fedora39-23 ] * Full List of Resources: - * fastvm-fedora39-23 (ocf:pacemaker:remote): FAILED + * fastvm-fedora39-23 (ocf:pacemaker:remote): Started fastvm-fedora39-24 * xvm (stonith:fence_xvm): Started fastvm-fedora39-22 - * dummy (ocf:pacemaker:Dummy): Started fastvm-fedora39-24 + * dummy (ocf:pacemaker:Dummy): Started fastvm-fedora39-22 diff --git a/cts/scheduler/summary/remote-partial-migrate3.summary b/cts/scheduler/summary/remote-partial-migrate3.summary index a57a0f879a0..b048a9e559c 100644 --- a/cts/scheduler/summary/remote-partial-migrate3.summary +++ b/cts/scheduler/summary/remote-partial-migrate3.summary @@ -1,29 +1,28 @@ Using the original execution date of: 2024-05-13 04:25:37Z Current cluster status: * Node List: - * RemoteNode fastvm-fedora39-23: UNCLEAN (offline) * Online: [ fastvm-fedora39-22 fastvm-fedora39-24 ] + * RemoteOFFLINE: [ fastvm-fedora39-23 ] * Full List of Resources: * fastvm-fedora39-23 (ocf:pacemaker:remote): FAILED [ fastvm-fedora39-22 fastvm-fedora39-24 ] * xvm (stonith:fence_xvm): Started fastvm-fedora39-22 Transition Summary: - * Fence (reboot) fastvm-fedora39-23 'remote connection is unrecoverable' - * Stop fastvm-fedora39-23 ( fastvm-fedora39-22 ) due to node availability - * Stop fastvm-fedora39-23 ( fastvm-fedora39-24 ) due to node availability + * Recover fastvm-fedora39-23 ( fastvm-fedora39-22 -> fastvm-fedora39-24 ) Executing Cluster Transition: * Resource action: fastvm-fedora39-23 stop on fastvm-fedora39-22 * Resource action: fastvm-fedora39-23 stop on fastvm-fedora39-24 - * Fencing fastvm-fedora39-23 (reboot) + * Resource action: fastvm-fedora39-23 start on fastvm-fedora39-24 + * Resource action: fastvm-fedora39-23 monitor=60000 on fastvm-fedora39-24 Using the original execution date of: 2024-05-13 04:25:37Z Revised Cluster Status: * Node List: - * RemoteNode fastvm-fedora39-23: UNCLEAN (offline) * Online: [ fastvm-fedora39-22 fastvm-fedora39-24 ] + * RemoteOnline: [ fastvm-fedora39-23 ] * Full List of Resources: - * fastvm-fedora39-23 (ocf:pacemaker:remote): FAILED + * fastvm-fedora39-23 (ocf:pacemaker:remote): Started fastvm-fedora39-24 * xvm (stonith:fence_xvm): Started fastvm-fedora39-22 From 539591a288321182357ea50fc97f4bb17dd272b8 Mon Sep 17 00:00:00 2001 From: Reid Wahl Date: Mon, 13 May 2024 20:49:39 -0700 
Subject: [PATCH 4/8] Doc: Pacemaker Explained: Render footnote correctly

Signed-off-by: Reid Wahl
---
 doc/sphinx/Pacemaker_Explained/operations.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/doc/sphinx/Pacemaker_Explained/operations.rst b/doc/sphinx/Pacemaker_Explained/operations.rst
index c831f815c6a..a8df8f14d99 100644
--- a/doc/sphinx/Pacemaker_Explained/operations.rst
+++ b/doc/sphinx/Pacemaker_Explained/operations.rst
@@ -659,6 +659,7 @@ path.
 Also, if a migratable resource depends on a non-migratable resource, and both
 need to be moved, the migratable resource will be restarted.
 
+
 .. rubric:: Footnotes
 
 .. [#] Currently, anyway. Automatic monitoring operations may be added in a future

From 4d69753147a5874f17933cd107fe34c10b009dce Mon Sep 17 00:00:00 2001
From: Reid Wahl
Date: Wed, 15 May 2024 03:10:12 -0700
Subject: [PATCH 5/8] Fix: libpe_status: Fence remote node only after
 stop/monitor failure

Signed-off-by: Reid Wahl
---
 lib/pengine/pe_actions.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/lib/pengine/pe_actions.c b/lib/pengine/pe_actions.c
index 6d3962e6f8c..f42f19f29dc 100644
--- a/lib/pengine/pe_actions.c
+++ b/lib/pengine/pe_actions.c
@@ -960,18 +960,19 @@ pcmk__parse_on_fail(const pcmk_resource_t *rsc, const char *action_name,
                   action_name, rsc->id, value);
     }
 
-    /* Remote node connections are handled specially. Failures that result
-     * in dropping an active connection must result in fencing. The only
-     * failures that don't are probes, starts, and migrate_froms (which are
-     * essentially starts during a migration). The user can explicitly set
-     * PCMK_META_ON_FAIL=PCMK_VALUE_FENCE to fence after start and migrate_from
-     * failures.
+    /* Remote node connections are handled specially. Failures of stop actions
+     * and recurring monitor actions require that the remote node be fenced,
+     * because they indicate a new problem with a previously active connection
+     * to the node. Failures of probes, starts, migrations, and reloads do not.
+     *
+     * The user can explicitly set PCMK_META_ON_FAIL=PCMK_VALUE_FENCE to fence
+     * after other failures.
      */
     if (rsc->is_remote_node
         && pcmk__is_remote_node(pcmk_find_node(rsc->cluster, rsc->id))
         && !pcmk_is_probe(action_name, interval_ms)
-        && !pcmk__str_any_of(action_name, PCMK_ACTION_START,
-                             PCMK_ACTION_MIGRATE_FROM, NULL)) {
+        && pcmk__str_any_of(action_name, PCMK_ACTION_STOP, PCMK_ACTION_MONITOR,
+                            NULL)) {
         needs_remote_reset = true;
         if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
             desc = NULL; // Force default for unmanaged connections

From a935472050233606ac27fe44bdce9bf5c2094116 Mon Sep 17 00:00:00 2001
From: Reid Wahl
Date: Wed, 15 May 2024 03:16:11 -0700
Subject: [PATCH 6/8] API: libcrmcommon: New pcmk_rsc_remote_conn_lost
 pcmk_rsc_flags value

Deprecated, for internal use only.
Signed-off-by: Reid Wahl
---
 include/crm/common/resources.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/include/crm/common/resources.h b/include/crm/common/resources.h
index 9b38e68abe5..e1300d741a6 100644
--- a/include/crm/common/resources.h
+++ b/include/crm/common/resources.h
@@ -176,6 +176,9 @@ enum pcmk_rsc_flags {
 
     // Whether resource can be started or promoted only on unfenced nodes
     pcmk_rsc_needs_unfencing = (1ULL << 30),
+
+    // Whether remote connection resource failure indicates lost connection
+    pcmk_rsc_remote_conn_lost = (1ULL << 31),
 };
 
 //!@}

From 4d588a56111f07b8c4709feb07a7122961594b07 Mon Sep 17 00:00:00 2001
From: Reid Wahl
Date: Wed, 15 May 2024 03:19:56 -0700
Subject: [PATCH 7/8] WIP: Fix: libpe_status: Remote node stays online after
 failed conn migrate

Previously, if the remote node's connection resource failed to migrate, the
remote node was considered offline. If a resource was running on the remote
node, the remote node would be fenced.

This doesn't work yet.

Fixes T214

Signed-off-by: Reid Wahl
---
 lib/pengine/unpack.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index de623d49260..eed97f1d278 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -1772,7 +1772,7 @@ determine_remote_online_status(pcmk_scheduler_t *scheduler,
         this_node->details->online = FALSE;
         this_node->details->remote_requires_reset = TRUE;
 
-    } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
+    } else if (pcmk_is_set(rsc->flags, pcmk_rsc_remote_conn_lost)) {
         crm_trace("%s node %s OFFLINE because connection resource failed",
                   (container? "Guest" : "Remote"), this_node->details->id);
         this_node->details->online = FALSE;
@@ -2417,7 +2417,10 @@ process_rsc_state(pcmk_resource_t *rsc, pcmk_node_t *node,
             break;
 
         case pcmk_on_fail_reset_remote:
-            pcmk__set_rsc_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
+            pcmk__set_rsc_flags(rsc,
+                                pcmk_rsc_failed
+                                |pcmk_rsc_stop_if_failed
+                                |pcmk_rsc_remote_conn_lost);
             if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
                 tmpnode = NULL;
                 if (rsc->is_remote_node) {

From 01e161bd603f2733e5da8853d0e9363ecab16768 Mon Sep 17 00:00:00 2001
From: Reid Wahl
Date: Thu, 16 May 2024 02:28:51 -0700
Subject: [PATCH 8/8] WIP: Test: cts-scheduler: Update tests for remote
 migrate_from fence fix

This still has problems. The newly added tests work as expected, but existing
tests break.
Signed-off-by: Reid Wahl --- cts/scheduler/dot/bug-cl-5247.dot | 47 ++- .../dot/bundle-connection-with-container.dot | 8 +- cts/scheduler/dot/nested-remote-recovery.dot | 26 +- cts/scheduler/dot/remote-failed-migrate2.dot | 13 +- cts/scheduler/dot/remote-fence-unclean-3.dot | 2 - cts/scheduler/dot/remote-recover-fail.dot | 31 +- cts/scheduler/exp/bug-cl-5247.exp | 346 +++++++++--------- .../exp/bundle-connection-with-container.exp | 119 +++--- cts/scheduler/exp/nested-remote-recovery.exp | 241 ++++-------- cts/scheduler/exp/remote-failed-migrate2.exp | 60 +-- cts/scheduler/exp/remote-fence-unclean-3.exp | 20 +- cts/scheduler/exp/remote-recover-fail.exp | 116 ++---- .../bundle-connection-with-container.scores | 8 +- .../scores/nested-remote-recovery.scores | 8 +- cts/scheduler/summary/bug-cl-5247.summary | 42 +-- .../bundle-connection-with-container.summary | 24 +- .../summary/nested-remote-recovery.summary | 26 +- .../summary/remote-failed-migrate1.summary | 2 +- .../summary/remote-failed-migrate2.summary | 15 +- .../summary/remote-fence-unclean-3.summary | 4 +- .../summary/remote-recover-fail.summary | 34 +- .../summary/remote-start-fail.summary | 2 +- 22 files changed, 441 insertions(+), 753 deletions(-) diff --git a/cts/scheduler/dot/bug-cl-5247.dot b/cts/scheduler/dot/bug-cl-5247.dot index f5d6fa3015e..bd30ade8e03 100644 --- a/cts/scheduler/dot/bug-cl-5247.dot +++ b/cts/scheduler/dot/bug-cl-5247.dot @@ -30,59 +30,55 @@ "msPostgresql_demoted_0" [ style=bold color="green" fontcolor="orange"] "msPostgresql_post_notify_demoted_0" -> "msPostgresql_confirmed-post_notify_demoted_0" [ style = bold] "msPostgresql_post_notify_demoted_0" -> "pgsql_post_notify_demoted_0 pgsr01" [ style = bold] +"msPostgresql_post_notify_demoted_0" -> "pgsql_post_notify_demoted_0 pgsr02" [ style = bold] "msPostgresql_post_notify_demoted_0" [ style=bold color="green" fontcolor="orange"] "msPostgresql_post_notify_stopped_0" -> "msPostgresql_confirmed-post_notify_stopped_0" [ style = bold] -"msPostgresql_post_notify_stopped_0" -> "pgsql_post_notify_stonith_0 pgsr01" [ style = bold] +"msPostgresql_post_notify_stopped_0" -> "pgsql_post_notify_stopped_0 pgsr01" [ style = bold] "msPostgresql_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"] "msPostgresql_pre_notify_demote_0" -> "msPostgresql_confirmed-pre_notify_demote_0" [ style = bold] "msPostgresql_pre_notify_demote_0" -> "pgsql_pre_notify_demote_0 pgsr01" [ style = bold] +"msPostgresql_pre_notify_demote_0" -> "pgsql_pre_notify_demote_0 pgsr02" [ style = bold] "msPostgresql_pre_notify_demote_0" [ style=bold color="green" fontcolor="orange"] "msPostgresql_pre_notify_stop_0" -> "msPostgresql_confirmed-pre_notify_stop_0" [ style = bold] "msPostgresql_pre_notify_stop_0" -> "pgsql_pre_notify_stop_0 pgsr01" [ style = bold] +"msPostgresql_pre_notify_stop_0" -> "pgsql_pre_notify_stop_0 pgsr02" [ style = bold] "msPostgresql_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"] "msPostgresql_stop_0" -> "msPostgresql_stopped_0" [ style = bold] "msPostgresql_stop_0" -> "pgsql_stop_0 pgsr02" [ style = bold] "msPostgresql_stop_0" [ style=bold color="green" fontcolor="orange"] "msPostgresql_stopped_0" -> "msPostgresql_post_notify_stopped_0" [ style = bold] "msPostgresql_stopped_0" [ style=bold color="green" fontcolor="orange"] -"pgsql_confirmed-post_notify_stonith_0" -> "pgsql_monitor_9000 pgsr01" [ style = bold] -"pgsql_confirmed-post_notify_stonith_0" [ style=bold color="green" fontcolor="orange"] "pgsql_demote_0 pgsr02" -> "msPostgresql_demoted_0" [ 
style = bold] "pgsql_demote_0 pgsr02" -> "pgsql_stop_0 pgsr02" [ style = bold] -"pgsql_demote_0 pgsr02" [ style=bold color="green" fontcolor="orange"] +"pgsql_demote_0 pgsr02" -> "pgsr02_stop_0 bl460g8n4" [ style = bold] +"pgsql_demote_0 pgsr02" [ style=bold color="green" fontcolor="black"] "pgsql_monitor_9000 pgsr01" [ style=bold color="green" fontcolor="black"] "pgsql_post_notify_demoted_0 pgsr01" -> "msPostgresql_confirmed-post_notify_demoted_0" [ style = bold] "pgsql_post_notify_demoted_0 pgsr01" [ style=bold color="green" fontcolor="black"] -"pgsql_post_notify_stonith_0 pgsr01" -> "msPostgresql_confirmed-post_notify_stopped_0" [ style = bold] -"pgsql_post_notify_stonith_0 pgsr01" -> "pgsql_confirmed-post_notify_stonith_0" [ style = bold] -"pgsql_post_notify_stonith_0 pgsr01" [ style=bold color="green" fontcolor="black"] -"pgsql_post_notify_stonith_0" -> "pgsql_confirmed-post_notify_stonith_0" [ style = bold] -"pgsql_post_notify_stonith_0" -> "pgsql_post_notify_stonith_0 pgsr01" [ style = bold] -"pgsql_post_notify_stonith_0" [ style=bold color="green" fontcolor="orange"] +"pgsql_post_notify_demoted_0 pgsr02" -> "msPostgresql_confirmed-post_notify_demoted_0" [ style = bold] +"pgsql_post_notify_demoted_0 pgsr02" [ style=bold color="green" fontcolor="black"] +"pgsql_post_notify_stopped_0 pgsr01" -> "msPostgresql_confirmed-post_notify_stopped_0" [ style = bold] +"pgsql_post_notify_stopped_0 pgsr01" [ style=bold color="green" fontcolor="black"] "pgsql_pre_notify_demote_0 pgsr01" -> "msPostgresql_confirmed-pre_notify_demote_0" [ style = bold] "pgsql_pre_notify_demote_0 pgsr01" [ style=bold color="green" fontcolor="black"] +"pgsql_pre_notify_demote_0 pgsr02" -> "msPostgresql_confirmed-pre_notify_demote_0" [ style = bold] +"pgsql_pre_notify_demote_0 pgsr02" [ style=bold color="green" fontcolor="black"] "pgsql_pre_notify_stop_0 pgsr01" -> "msPostgresql_confirmed-pre_notify_stop_0" [ style = bold] "pgsql_pre_notify_stop_0 pgsr01" [ style=bold color="green" fontcolor="black"] +"pgsql_pre_notify_stop_0 pgsr02" -> "msPostgresql_confirmed-pre_notify_stop_0" [ style = bold] +"pgsql_pre_notify_stop_0 pgsr02" [ style=bold color="green" fontcolor="black"] "pgsql_stop_0 pgsr02" -> "msPostgresql_stopped_0" [ style = bold] -"pgsql_stop_0 pgsr02" [ style=bold color="green" fontcolor="orange"] +"pgsql_stop_0 pgsr02" -> "pgsr02_stop_0 bl460g8n4" [ style = bold] +"pgsql_stop_0 pgsr02" [ style=bold color="green" fontcolor="black"] "pgsr01_monitor_0 bl460g8n4" [ style=bold color="green" fontcolor="black"] "pgsr02_monitor_0 bl460g8n3" [ style=bold color="green" fontcolor="black"] "pgsr02_stop_0 bl460g8n4" -> "prmDB2_stop_0 bl460g8n4" [ style = bold] "pgsr02_stop_0 bl460g8n4" [ style=bold color="green" fontcolor="black"] -"prmDB2_stop_0 bl460g8n4" -> "stonith 'off' pgsr02" [ style = bold] "prmDB2_stop_0 bl460g8n4" [ style=bold color="green" fontcolor="black"] -"stonith 'off' pgsr02" -> "master-group_stop_0" [ style = bold] -"stonith 'off' pgsr02" -> "msPostgresql_stop_0" [ style = bold] -"stonith 'off' pgsr02" -> "pgsql_demote_0 pgsr02" [ style = bold] -"stonith 'off' pgsr02" -> "pgsql_post_notify_stonith_0" [ style = bold] -"stonith 'off' pgsr02" -> "pgsql_stop_0 pgsr02" [ style = bold] -"stonith 'off' pgsr02" -> "vip-master_start_0 pgsr01" [ style = bold] -"stonith 'off' pgsr02" -> "vip-master_stop_0 pgsr02" [ style = bold] -"stonith 'off' pgsr02" -> "vip-rep_start_0 pgsr01" [ style = bold] -"stonith 'off' pgsr02" -> "vip-rep_stop_0 pgsr02" [ style = bold] -"stonith 'off' pgsr02" [ style=bold color="green" 
fontcolor="orange"] "vip-master_monitor_0 pgsr01" -> "master-group_stopped_0" [ style = bold] +"vip-master_monitor_0 pgsr01" -> "pgsr02_stop_0 bl460g8n4" [ style = bold] "vip-master_monitor_0 pgsr01" -> "vip-master_start_0 pgsr01" [ style = bold] +"vip-master_monitor_0 pgsr01" -> "vip-rep_stop_0 pgsr02" [ style = bold] "vip-master_monitor_0 pgsr01" [ style=bold color="green" fontcolor="black"] "vip-master_monitor_10000 pgsr01" [ style=bold color="green" fontcolor="black"] "vip-master_start_0 pgsr01" -> "master-group_running_0" [ style = bold] @@ -90,9 +86,11 @@ "vip-master_start_0 pgsr01" -> "vip-rep_start_0 pgsr01" [ style = bold] "vip-master_start_0 pgsr01" [ style=bold color="green" fontcolor="black"] "vip-master_stop_0 pgsr02" -> "master-group_stopped_0" [ style = bold] +"vip-master_stop_0 pgsr02" -> "pgsr02_stop_0 bl460g8n4" [ style = bold] "vip-master_stop_0 pgsr02" -> "vip-master_start_0 pgsr01" [ style = bold] -"vip-master_stop_0 pgsr02" [ style=bold color="green" fontcolor="orange"] +"vip-master_stop_0 pgsr02" [ style=bold color="green" fontcolor="black"] "vip-rep_monitor_0 pgsr01" -> "master-group_stopped_0" [ style = bold] +"vip-rep_monitor_0 pgsr01" -> "pgsr02_stop_0 bl460g8n4" [ style = bold] "vip-rep_monitor_0 pgsr01" -> "vip-master_stop_0 pgsr02" [ style = bold] "vip-rep_monitor_0 pgsr01" -> "vip-rep_start_0 pgsr01" [ style = bold] "vip-rep_monitor_0 pgsr01" [ style=bold color="green" fontcolor="black"] @@ -101,7 +99,8 @@ "vip-rep_start_0 pgsr01" -> "vip-rep_monitor_10000 pgsr01" [ style = bold] "vip-rep_start_0 pgsr01" [ style=bold color="green" fontcolor="black"] "vip-rep_stop_0 pgsr02" -> "master-group_stopped_0" [ style = bold] +"vip-rep_stop_0 pgsr02" -> "pgsr02_stop_0 bl460g8n4" [ style = bold] "vip-rep_stop_0 pgsr02" -> "vip-master_stop_0 pgsr02" [ style = bold] "vip-rep_stop_0 pgsr02" -> "vip-rep_start_0 pgsr01" [ style = bold] -"vip-rep_stop_0 pgsr02" [ style=bold color="green" fontcolor="orange"] +"vip-rep_stop_0 pgsr02" [ style=bold color="green" fontcolor="black"] } diff --git a/cts/scheduler/dot/bundle-connection-with-container.dot b/cts/scheduler/dot/bundle-connection-with-container.dot index d6494a6b84b..fec6d21e3fc 100644 --- a/cts/scheduler/dot/bundle-connection-with-container.dot +++ b/cts/scheduler/dot/bundle-connection-with-container.dot @@ -26,7 +26,6 @@ "httpd-bundle-podman-0_start_0 rhel8-1" [ style=bold color="green" fontcolor="black"] "httpd-bundle-podman-0_stop_0 rhel8-1" -> "httpd-bundle-podman-0_start_0 rhel8-1" [ style = bold] "httpd-bundle-podman-0_stop_0 rhel8-1" -> "httpd-bundle_stopped_0" [ style = bold] -"httpd-bundle-podman-0_stop_0 rhel8-1" -> "stonith 'reboot' httpd-bundle-0" [ style = bold] "httpd-bundle-podman-0_stop_0 rhel8-1" [ style=bold color="green" fontcolor="black"] "httpd-bundle_running_0" [ style=bold color="green" fontcolor="orange"] "httpd-bundle_start_0" -> "httpd-bundle-clone_start_0" [ style = bold] @@ -41,11 +40,8 @@ "httpd_start_0 httpd-bundle-0" -> "httpd-bundle-clone_running_0" [ style = bold] "httpd_start_0 httpd-bundle-0" -> "httpd_monitor_15000 httpd-bundle-0" [ style = bold] "httpd_start_0 httpd-bundle-0" [ style=bold color="green" fontcolor="black"] +"httpd_stop_0 httpd-bundle-0" -> "httpd-bundle-0_stop_0 rhel8-1" [ style = bold] "httpd_stop_0 httpd-bundle-0" -> "httpd-bundle-clone_stopped_0" [ style = bold] "httpd_stop_0 httpd-bundle-0" -> "httpd_start_0 httpd-bundle-0" [ style = bold] -"httpd_stop_0 httpd-bundle-0" [ style=bold color="green" fontcolor="orange"] -"stonith 'reboot' httpd-bundle-0" -> 
"httpd-bundle-clone_stop_0" [ style = bold] -"stonith 'reboot' httpd-bundle-0" -> "httpd-bundle-podman-0_start_0 rhel8-1" [ style = bold] -"stonith 'reboot' httpd-bundle-0" -> "httpd_start_0 httpd-bundle-0" [ style = bold] -"stonith 'reboot' httpd-bundle-0" [ style=bold color="green" fontcolor="orange"] +"httpd_stop_0 httpd-bundle-0" [ style=bold color="green" fontcolor="black"] } diff --git a/cts/scheduler/dot/nested-remote-recovery.dot b/cts/scheduler/dot/nested-remote-recovery.dot index 2afc6e9b819..faf7c05fc74 100644 --- a/cts/scheduler/dot/nested-remote-recovery.dot +++ b/cts/scheduler/dot/nested-remote-recovery.dot @@ -17,7 +17,6 @@ "galera-bundle-docker-0_start_0 database-0" [ style=bold color="green" fontcolor="black"] "galera-bundle-docker-0_stop_0 database-0" -> "galera-bundle-docker-0_start_0 database-0" [ style = bold] "galera-bundle-docker-0_stop_0 database-0" -> "galera-bundle_stopped_0" [ style = bold] -"galera-bundle-docker-0_stop_0 database-0" -> "stonith 'reboot' galera-bundle-0" [ style = bold] "galera-bundle-docker-0_stop_0 database-0" [ style=bold color="green" fontcolor="black"] "galera-bundle-master_demote_0" -> "galera-bundle-master_demoted_0" [ style = bold] "galera-bundle-master_demote_0" -> "galera_demote_0 galera-bundle-0" [ style = bold] @@ -25,11 +24,9 @@ "galera-bundle-master_demoted_0" -> "galera-bundle-master_promote_0" [ style = bold] "galera-bundle-master_demoted_0" -> "galera-bundle-master_start_0" [ style = bold] "galera-bundle-master_demoted_0" -> "galera-bundle-master_stop_0" [ style = bold] -"galera-bundle-master_demoted_0" -> "galera-bundle_demoted_0" [ style = bold] "galera-bundle-master_demoted_0" [ style=bold color="green" fontcolor="orange"] "galera-bundle-master_promote_0" -> "galera_promote_0 galera-bundle-0" [ style = bold] "galera-bundle-master_promote_0" [ style=bold color="green" fontcolor="orange"] -"galera-bundle-master_promoted_0" -> "galera-bundle_promoted_0" [ style = bold] "galera-bundle-master_promoted_0" [ style=bold color="green" fontcolor="orange"] "galera-bundle-master_running_0" -> "galera-bundle-master_promote_0" [ style = bold] "galera-bundle-master_running_0" -> "galera-bundle_running_0" [ style = bold] @@ -44,17 +41,6 @@ "galera-bundle-master_stopped_0" -> "galera-bundle-master_start_0" [ style = bold] "galera-bundle-master_stopped_0" -> "galera-bundle_stopped_0" [ style = bold] "galera-bundle-master_stopped_0" [ style=bold color="green" fontcolor="orange"] -"galera-bundle_demote_0" -> "galera-bundle-master_demote_0" [ style = bold] -"galera-bundle_demote_0" -> "galera-bundle_demoted_0" [ style = bold] -"galera-bundle_demote_0" [ style=bold color="green" fontcolor="orange"] -"galera-bundle_demoted_0" -> "galera-bundle_promote_0" [ style = bold] -"galera-bundle_demoted_0" -> "galera-bundle_start_0" [ style = bold] -"galera-bundle_demoted_0" -> "galera-bundle_stop_0" [ style = bold] -"galera-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"] -"galera-bundle_promote_0" -> "galera-bundle-master_promote_0" [ style = bold] -"galera-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] -"galera-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] -"galera-bundle_running_0" -> "galera-bundle_promote_0" [ style = bold] "galera-bundle_running_0" [ style=bold color="green" fontcolor="orange"] "galera-bundle_start_0" -> "galera-bundle-docker-0_start_0 database-0" [ style = bold] "galera-bundle_start_0" -> "galera-bundle-master_start_0" [ style = bold] @@ -63,13 +49,13 @@ "galera-bundle_stop_0" -> 
"galera-bundle-master_stop_0" [ style = bold] "galera-bundle_stop_0" -> "galera_stop_0 galera-bundle-0" [ style = bold] "galera-bundle_stop_0" [ style=bold color="green" fontcolor="orange"] -"galera-bundle_stopped_0" -> "galera-bundle_promote_0" [ style = bold] "galera-bundle_stopped_0" -> "galera-bundle_start_0" [ style = bold] "galera-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"] +"galera_demote_0 galera-bundle-0" -> "galera-bundle-0_stop_0 controller-0" [ style = bold] "galera_demote_0 galera-bundle-0" -> "galera-bundle-master_demoted_0" [ style = bold] "galera_demote_0 galera-bundle-0" -> "galera_promote_0 galera-bundle-0" [ style = bold] "galera_demote_0 galera-bundle-0" -> "galera_stop_0 galera-bundle-0" [ style = bold] -"galera_demote_0 galera-bundle-0" [ style=bold color="green" fontcolor="orange"] +"galera_demote_0 galera-bundle-0" [ style=bold color="green" fontcolor="black"] "galera_monitor_10000 galera-bundle-0" [ style=bold color="green" fontcolor="black"] "galera_promote_0 galera-bundle-0" -> "galera-bundle-master_promoted_0" [ style = bold] "galera_promote_0 galera-bundle-0" -> "galera_monitor_10000 galera-bundle-0" [ style = bold] @@ -78,12 +64,8 @@ "galera_start_0 galera-bundle-0" -> "galera_monitor_10000 galera-bundle-0" [ style = bold] "galera_start_0 galera-bundle-0" -> "galera_promote_0 galera-bundle-0" [ style = bold] "galera_start_0 galera-bundle-0" [ style=bold color="green" fontcolor="black"] +"galera_stop_0 galera-bundle-0" -> "galera-bundle-0_stop_0 controller-0" [ style = bold] "galera_stop_0 galera-bundle-0" -> "galera-bundle-master_stopped_0" [ style = bold] "galera_stop_0 galera-bundle-0" -> "galera_start_0 galera-bundle-0" [ style = bold] -"galera_stop_0 galera-bundle-0" [ style=bold color="green" fontcolor="orange"] -"stonith 'reboot' galera-bundle-0" -> "galera-bundle-docker-0_start_0 database-0" [ style = bold] -"stonith 'reboot' galera-bundle-0" -> "galera-bundle-master_stop_0" [ style = bold] -"stonith 'reboot' galera-bundle-0" -> "galera_promote_0 galera-bundle-0" [ style = bold] -"stonith 'reboot' galera-bundle-0" -> "galera_start_0 galera-bundle-0" [ style = bold] -"stonith 'reboot' galera-bundle-0" [ style=bold color="green" fontcolor="orange"] +"galera_stop_0 galera-bundle-0" [ style=bold color="green" fontcolor="black"] } diff --git a/cts/scheduler/dot/remote-failed-migrate2.dot b/cts/scheduler/dot/remote-failed-migrate2.dot index 9da1e562b21..f6a28897caa 100644 --- a/cts/scheduler/dot/remote-failed-migrate2.dot +++ b/cts/scheduler/dot/remote-failed-migrate2.dot @@ -1,20 +1,11 @@ digraph "g" { -"dummy_monitor_10000 fastvm-fedora39-22" [ style=bold color="green" fontcolor="black"] -"dummy_start_0 fastvm-fedora39-22" -> "dummy_monitor_10000 fastvm-fedora39-22" [ style = bold] -"dummy_start_0 fastvm-fedora39-22" [ style=bold color="green" fontcolor="black"] -"dummy_stop_0 fastvm-fedora39-23" -> "dummy_start_0 fastvm-fedora39-22" [ style = bold] -"dummy_stop_0 fastvm-fedora39-23" -> "fastvm-fedora39-23_stop_0 fastvm-fedora39-22" [ style = bold] -"dummy_stop_0 fastvm-fedora39-23" -> "fastvm-fedora39-23_stop_0 fastvm-fedora39-24" [ style = bold] -"dummy_stop_0 fastvm-fedora39-23" [ style=bold color="green" fontcolor="orange"] +"dummy_monitor_10000 fastvm-fedora39-23" [ style=bold color="green" fontcolor="black"] "fastvm-fedora39-23_monitor_60000 fastvm-fedora39-24" [ style=bold color="green" fontcolor="black"] +"fastvm-fedora39-23_start_0 fastvm-fedora39-24" -> "dummy_monitor_10000 fastvm-fedora39-23" [ style = bold] 
"fastvm-fedora39-23_start_0 fastvm-fedora39-24" -> "fastvm-fedora39-23_monitor_60000 fastvm-fedora39-24" [ style = bold] "fastvm-fedora39-23_start_0 fastvm-fedora39-24" [ style=bold color="green" fontcolor="black"] "fastvm-fedora39-23_stop_0 fastvm-fedora39-22" -> "fastvm-fedora39-23_start_0 fastvm-fedora39-24" [ style = bold] "fastvm-fedora39-23_stop_0 fastvm-fedora39-22" [ style=bold color="green" fontcolor="black"] "fastvm-fedora39-23_stop_0 fastvm-fedora39-24" -> "fastvm-fedora39-23_start_0 fastvm-fedora39-24" [ style = bold] "fastvm-fedora39-23_stop_0 fastvm-fedora39-24" [ style=bold color="green" fontcolor="black"] -"stonith 'reboot' fastvm-fedora39-23" -> "dummy_start_0 fastvm-fedora39-22" [ style = bold] -"stonith 'reboot' fastvm-fedora39-23" -> "dummy_stop_0 fastvm-fedora39-23" [ style = bold] -"stonith 'reboot' fastvm-fedora39-23" -> "fastvm-fedora39-23_start_0 fastvm-fedora39-24" [ style = bold] -"stonith 'reboot' fastvm-fedora39-23" [ style=bold color="green" fontcolor="black"] } diff --git a/cts/scheduler/dot/remote-fence-unclean-3.dot b/cts/scheduler/dot/remote-fence-unclean-3.dot index c70a263bd71..3b8ad83e910 100644 --- a/cts/scheduler/dot/remote-fence-unclean-3.dot +++ b/cts/scheduler/dot/remote-fence-unclean-3.dot @@ -27,6 +27,4 @@ "redis-bundle-1_monitor_0 overcloud-controller-2" [ style=bold color="green" fontcolor="black"] "redis-bundle-2_monitor_0 overcloud-controller-0" [ style=bold color="green" fontcolor="black"] "redis-bundle-2_monitor_0 overcloud-controller-1" [ style=bold color="green" fontcolor="black"] -"stonith 'reboot' overcloud-novacompute-0" -> "fence1_start_0 overcloud-controller-0" [ style = bold] -"stonith 'reboot' overcloud-novacompute-0" [ style=bold color="green" fontcolor="black"] } diff --git a/cts/scheduler/dot/remote-recover-fail.dot b/cts/scheduler/dot/remote-recover-fail.dot index 1e41e02cb06..45620b26b2f 100644 --- a/cts/scheduler/dot/remote-recover-fail.dot +++ b/cts/scheduler/dot/remote-recover-fail.dot @@ -1,31 +1,18 @@ digraph "g" { -"FAKE1_monitor_10000 rhel7-auto2" [ style=bold color="green" fontcolor="black"] -"FAKE1_start_0 rhel7-auto2" -> "FAKE1_monitor_10000 rhel7-auto2" [ style = bold] -"FAKE1_start_0 rhel7-auto2" [ style=bold color="green" fontcolor="black"] -"FAKE2_monitor_10000 rhel7-auto3" [ style=bold color="green" fontcolor="black"] -"FAKE2_start_0 rhel7-auto3" -> "FAKE2_monitor_10000 rhel7-auto3" [ style = bold] -"FAKE2_start_0 rhel7-auto3" [ style=bold color="green" fontcolor="black"] -"FAKE2_stop_0 rhel7-auto4" -> "FAKE2_start_0 rhel7-auto3" [ style = bold] -"FAKE2_stop_0 rhel7-auto4" -> "rhel7-auto4_stop_0 rhel7-auto2" [ style = bold] -"FAKE2_stop_0 rhel7-auto4" [ style=bold color="green" fontcolor="orange"] +"FAKE1_monitor_10000 rhel7-auto4" [ style=bold color="green" fontcolor="black"] +"FAKE1_start_0 rhel7-auto4" -> "FAKE1_monitor_10000 rhel7-auto4" [ style = bold] +"FAKE1_start_0 rhel7-auto4" [ style=bold color="green" fontcolor="black"] +"FAKE2_monitor_10000 rhel7-auto4" [ style=bold color="green" fontcolor="black"] "FAKE3_monitor_10000 rhel7-auto2" [ style=bold color="green" fontcolor="black"] "FAKE4_monitor_10000 rhel7-auto3" [ style=bold color="green" fontcolor="black"] -"FAKE6_monitor_10000 rhel7-auto2" [ style=bold color="green" fontcolor="black"] -"FAKE6_start_0 rhel7-auto2" -> "FAKE6_monitor_10000 rhel7-auto2" [ style = bold] -"FAKE6_start_0 rhel7-auto2" [ style=bold color="green" fontcolor="black"] -"FAKE6_stop_0 rhel7-auto4" -> "FAKE6_start_0 rhel7-auto2" [ style = bold] -"FAKE6_stop_0 rhel7-auto4" -> 
"rhel7-auto4_stop_0 rhel7-auto2" [ style = bold] -"FAKE6_stop_0 rhel7-auto4" [ style=bold color="green" fontcolor="orange"] +"FAKE6_monitor_10000 rhel7-auto4" [ style=bold color="green" fontcolor="black"] "rhel7-auto4_monitor_60000 rhel7-auto2" [ style=bold color="green" fontcolor="black"] +"rhel7-auto4_start_0 rhel7-auto2" -> "FAKE1_monitor_10000 rhel7-auto4" [ style = bold] +"rhel7-auto4_start_0 rhel7-auto2" -> "FAKE1_start_0 rhel7-auto4" [ style = bold] +"rhel7-auto4_start_0 rhel7-auto2" -> "FAKE2_monitor_10000 rhel7-auto4" [ style = bold] +"rhel7-auto4_start_0 rhel7-auto2" -> "FAKE6_monitor_10000 rhel7-auto4" [ style = bold] "rhel7-auto4_start_0 rhel7-auto2" -> "rhel7-auto4_monitor_60000 rhel7-auto2" [ style = bold] "rhel7-auto4_start_0 rhel7-auto2" [ style=bold color="green" fontcolor="black"] "rhel7-auto4_stop_0 rhel7-auto2" -> "rhel7-auto4_start_0 rhel7-auto2" [ style = bold] "rhel7-auto4_stop_0 rhel7-auto2" [ style=bold color="green" fontcolor="black"] -"stonith 'reboot' rhel7-auto4" -> "FAKE1_start_0 rhel7-auto2" [ style = bold] -"stonith 'reboot' rhel7-auto4" -> "FAKE2_start_0 rhel7-auto3" [ style = bold] -"stonith 'reboot' rhel7-auto4" -> "FAKE2_stop_0 rhel7-auto4" [ style = bold] -"stonith 'reboot' rhel7-auto4" -> "FAKE6_start_0 rhel7-auto2" [ style = bold] -"stonith 'reboot' rhel7-auto4" -> "FAKE6_stop_0 rhel7-auto4" [ style = bold] -"stonith 'reboot' rhel7-auto4" -> "rhel7-auto4_start_0 rhel7-auto2" [ style = bold] -"stonith 'reboot' rhel7-auto4" [ style=bold color="green" fontcolor="black"] } diff --git a/cts/scheduler/exp/bug-cl-5247.exp b/cts/scheduler/exp/bug-cl-5247.exp index 1f5c7f6a998..709e1a65b39 100644 --- a/cts/scheduler/exp/bug-cl-5247.exp +++ b/cts/scheduler/exp/bug-cl-5247.exp @@ -8,138 +8,130 @@ - + - + - + - + - + - + - + - + - - - - + - + - + - + - + - + - + - + - + - + - - - - + - + - + - - - + + + + - - - - + - + - + - + @@ -148,60 +140,58 @@ - + - + - + - + - + - + - - - - + - - - + + + + - + - + - + @@ -210,320 +200,328 @@ - - - + + + + - - - - + - + - - - + + + + - + - - - + + + + - - - - - - - + - - - + + + + - + - + - + - - - + + + - + - + - + - + - + - + - + - + - + - + - - - - + - + - + - + + + + + + + + + + + - + - + - + - + - + + + + - + - + - + - + - + - + - + - + - + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + - + - + - + - + - + - + - + - + - + - - - - + - + - + - + - + - + @@ -531,30 +529,34 @@ - + + + + + + + + + + + + + + + + + + + + - + - + - - - - - - - - - - - - - - - diff --git a/cts/scheduler/exp/bundle-connection-with-container.exp b/cts/scheduler/exp/bundle-connection-with-container.exp index ff1c8f016d2..f6766f25082 100644 --- a/cts/scheduler/exp/bundle-connection-with-container.exp +++ b/cts/scheduler/exp/bundle-connection-with-container.exp @@ -1,57 +1,55 @@ - + - + - + + + + + + + - + - + - - - - - - - + - - - - + - - - + + + + - + - + @@ -63,7 +61,7 @@ - + @@ -78,10 +76,7 @@ - - - - + @@ -93,7 +88,7 @@ - + @@ -108,7 +103,7 @@ - + @@ -117,36 +112,33 @@ - + - + - - - - + - + - + - + @@ -159,29 +151,29 @@ - + - + - + - + - + @@ -189,7 +181,11 @@ - + + + + + @@ -200,66 +196,51 @@ - - - - - - - - - - - - - - - - + - + - + - + - + - + - + - + - + - + - + diff --git a/cts/scheduler/exp/nested-remote-recovery.exp b/cts/scheduler/exp/nested-remote-recovery.exp index 78dca95d86a..a70ca4b1934 100644 --- a/cts/scheduler/exp/nested-remote-recovery.exp +++ b/cts/scheduler/exp/nested-remote-recovery.exp @@ -1,103 +1,99 @@ - + - + - + + + + - + - + + + + - + - + - + - - - - - - - - - - + - + - + 
@@ -109,7 +105,7 @@
@@ -122,11 +118,7 @@
@@ -136,7 +128,7 @@
@@ -156,9 +148,6 @@
@@ -169,7 +158,7 @@
@@ -184,14 +173,11 @@
@@ -202,7 +188,7 @@
@@ -217,7 +203,7 @@
@@ -229,36 +215,33 @@
@@ -271,29 +254,29 @@
@@ -301,143 +284,75 @@
diff --git a/cts/scheduler/exp/remote-failed-migrate2.exp b/cts/scheduler/exp/remote-failed-migrate2.exp
index fcaa3a4b0e2..a51242c70a9 100644
--- a/cts/scheduler/exp/remote-failed-migrate2.exp
+++ b/cts/scheduler/exp/remote-failed-migrate2.exp
@@ -23,9 +23,6 @@
@@ -41,11 +38,7 @@
@@ -57,62 +50,19 @@
diff --git a/cts/scheduler/exp/remote-fence-unclean-3.exp b/cts/scheduler/exp/remote-fence-unclean-3.exp
index a51cea91d4d..29b3f34fdfa 100644
--- a/cts/scheduler/exp/remote-fence-unclean-3.exp
+++ b/cts/scheduler/exp/remote-fence-unclean-3.exp
@@ -1,20 +1,20 @@
@@ -29,9 +29,6 @@
@@ -235,15 +232,4 @@
diff --git a/cts/scheduler/exp/remote-recover-fail.exp b/cts/scheduler/exp/remote-recover-fail.exp
index d23141922b6..a206695d606 100644
--- a/cts/scheduler/exp/remote-recover-fail.exp
+++ b/cts/scheduler/exp/remote-recover-fail.exp
@@ -1,20 +1,20 @@
@@ -23,9 +23,6 @@
@@ -38,83 +35,51 @@
@@ -123,7 +88,7 @@
@@ -132,56 +97,17 @@
diff --git a/cts/scheduler/scores/bundle-connection-with-container.scores b/cts/scheduler/scores/bundle-connection-with-container.scores
index 30d63ac6ea5..6544db04d1f 100644
--- a/cts/scheduler/scores/bundle-connection-with-container.scores
+++ b/cts/scheduler/scores/bundle-connection-with-container.scores
@@ -137,20 +137,20 @@ pcmk__primitive_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel
 pcmk__primitive_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-3: -INFINITY
 pcmk__primitive_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-4: -INFINITY
 pcmk__primitive_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-5: -INFINITY
-pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on remote-rhel8-2: -INFINITY
+pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on remote-rhel8-2: 0
 pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-1: 0
 pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-2: -INFINITY
-pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-3: -INFINITY
+pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-3: 0
 pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-4: -INFINITY
 pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-5: -INFINITY
 pcmk__primitive_assign: httpd-bundle-podman-1 allocation score on remote-rhel8-2: 0
-pcmk__primitive_assign: httpd-bundle-podman-1 allocation score on rhel8-1: 0
+pcmk__primitive_assign: httpd-bundle-podman-1 allocation score on rhel8-1: -INFINITY
 pcmk__primitive_assign: httpd-bundle-podman-1 allocation score on rhel8-2: -INFINITY
 pcmk__primitive_assign: httpd-bundle-podman-1 allocation score on rhel8-3: 0
 pcmk__primitive_assign: httpd-bundle-podman-1 allocation score on rhel8-4: -INFINITY
 pcmk__primitive_assign: httpd-bundle-podman-1 allocation score on rhel8-5: -INFINITY
 pcmk__primitive_assign: httpd-bundle-podman-2 allocation score on remote-rhel8-2: 0
-pcmk__primitive_assign: httpd-bundle-podman-2 allocation score on rhel8-1: 0
+pcmk__primitive_assign: httpd-bundle-podman-2 allocation score on rhel8-1: -INFINITY
 pcmk__primitive_assign: httpd-bundle-podman-2 allocation score on rhel8-2: -INFINITY
 pcmk__primitive_assign: httpd-bundle-podman-2 allocation score on rhel8-3: -INFINITY
 pcmk__primitive_assign: httpd-bundle-podman-2 allocation score on rhel8-4: -INFINITY
diff --git a/cts/scheduler/scores/nested-remote-recovery.scores b/cts/scheduler/scores/nested-remote-recovery.scores
index e872849cbba..49ae74a61c0 100644
--- a/cts/scheduler/scores/nested-remote-recovery.scores
+++ b/cts/scheduler/scores/nested-remote-recovery.scores
@@ -429,15 +429,15 @@ pcmk__primitive_assign: galera-bundle-docker-0 allocation score on controller-0:
 pcmk__primitive_assign: galera-bundle-docker-0 allocation score on controller-1: -INFINITY
 pcmk__primitive_assign: galera-bundle-docker-0 allocation score on controller-2: -INFINITY
 pcmk__primitive_assign: galera-bundle-docker-0 allocation score on database-0: INFINITY
-pcmk__primitive_assign: galera-bundle-docker-0 allocation score on database-1: -INFINITY
-pcmk__primitive_assign: galera-bundle-docker-0 allocation score on database-2: -INFINITY
+pcmk__primitive_assign: galera-bundle-docker-0 allocation score on database-1: 0
+pcmk__primitive_assign: galera-bundle-docker-0 allocation score on database-2: 0
 pcmk__primitive_assign: galera-bundle-docker-0 allocation score on messaging-0: -INFINITY
 pcmk__primitive_assign: galera-bundle-docker-0 allocation score on messaging-1: -INFINITY
 pcmk__primitive_assign: galera-bundle-docker-0 allocation score on messaging-2: -INFINITY
 pcmk__primitive_assign: galera-bundle-docker-1 allocation score on controller-0: -INFINITY
 pcmk__primitive_assign: galera-bundle-docker-1 allocation score on controller-1: -INFINITY
 pcmk__primitive_assign: galera-bundle-docker-1 allocation score on controller-2: -INFINITY
-pcmk__primitive_assign: galera-bundle-docker-1 allocation score on database-0: 0
+pcmk__primitive_assign: galera-bundle-docker-1 allocation score on database-0: -INFINITY
 pcmk__primitive_assign: galera-bundle-docker-1 allocation score on database-1: INFINITY
 pcmk__primitive_assign: galera-bundle-docker-1 allocation score on database-2: 0
 pcmk__primitive_assign: galera-bundle-docker-1 allocation score on messaging-0: -INFINITY
@@ -446,7 +446,7 @@ pcmk__primitive_assign: galera-bundle-docker-1 allocation score on messaging-2:
 pcmk__primitive_assign: galera-bundle-docker-2 allocation score on controller-0: -INFINITY
 pcmk__primitive_assign: galera-bundle-docker-2 allocation score on controller-1: -INFINITY
 pcmk__primitive_assign: galera-bundle-docker-2 allocation score on controller-2: -INFINITY
-pcmk__primitive_assign: galera-bundle-docker-2 allocation score on database-0: 0
+pcmk__primitive_assign: galera-bundle-docker-2 allocation score on database-0: -INFINITY
 pcmk__primitive_assign: galera-bundle-docker-2 allocation score on database-1: -INFINITY
 pcmk__primitive_assign: galera-bundle-docker-2 allocation score on database-2: INFINITY
 pcmk__primitive_assign: galera-bundle-docker-2 allocation score on messaging-0: -INFINITY
diff --git a/cts/scheduler/summary/bug-cl-5247.summary b/cts/scheduler/summary/bug-cl-5247.summary
index b18bdd8b919..61b46a9bbe7 100644
--- a/cts/scheduler/summary/bug-cl-5247.summary
+++ b/cts/scheduler/summary/bug-cl-5247.summary
@@ -2,27 +2,25 @@ Using the original execution date of: 2015-08-12 02:53:40Z
 Current cluster status:
 * Node List:
 * Online: [ bl460g8n3 bl460g8n4 ]
- * GuestOnline: [ pgsr01 ]
+ * GuestOnline: [ pgsr01 pgsr02 ]
 * Full List of Resources:
 * prmDB1 (ocf:heartbeat:VirtualDomain): Started bl460g8n3
- * prmDB2 (ocf:heartbeat:VirtualDomain): FAILED bl460g8n4
+ * prmDB2 (ocf:heartbeat:VirtualDomain): Started bl460g8n4
 * Resource Group: grpStonith1:
 * prmStonith1-2 (stonith:external/ipmi): Started bl460g8n4
 * Resource Group: grpStonith2:
 * prmStonith2-2 (stonith:external/ipmi): Started bl460g8n3
 * Resource Group: master-group:
- * vip-master (ocf:heartbeat:Dummy): FAILED pgsr02
- * vip-rep (ocf:heartbeat:Dummy): FAILED pgsr02
+ * vip-master (ocf:heartbeat:Dummy): Started pgsr02
+ * vip-rep (ocf:heartbeat:Dummy): Started pgsr02
 * Clone Set: msPostgresql [pgsql] (promotable):
- * Promoted: [ pgsr01 ]
- * Stopped: [ bl460g8n3 bl460g8n4 ]
+ * Promoted: [ pgsr01 pgsr02 ]
 Transition Summary:
- * Fence (off) pgsr02 (resource: prmDB2) 'guest is unclean'
 * Stop prmDB2 ( bl460g8n4 ) due to node availability
- * Recover vip-master ( pgsr02 -> pgsr01 )
- * Recover vip-rep ( pgsr02 -> pgsr01 )
+ * Move vip-master ( pgsr02 -> pgsr01 )
+ * Move vip-rep ( pgsr02 -> pgsr01 )
 * Stop pgsql:0 ( Promoted pgsr02 ) due to node availability
 * Stop pgsr02 ( bl460g8n4 ) due to node availability
@@ -31,28 +29,29 @@ Executing Cluster Transition:
 * Resource action: vip-rep monitor on pgsr01
 * Pseudo action: msPostgresql_pre_notify_demote_0
 * Resource action: pgsr01 monitor on bl460g8n4
- * Resource action: pgsr02 stop on bl460g8n4
 * Resource action: pgsr02 monitor on bl460g8n3
- * Resource action: prmDB2 stop on bl460g8n4
+ * Resource action: pgsql notify on pgsr02
 * Resource action: pgsql notify on pgsr01
 * Pseudo action: msPostgresql_confirmed-pre_notify_demote_0
 * Pseudo action: msPostgresql_demote_0
- * Pseudo action: stonith-pgsr02-off on pgsr02
- * Pseudo action: pgsql_post_notify_stop_0
- * Pseudo action: pgsql_demote_0
+ * Resource action: pgsql demote on pgsr02
 * Pseudo action: msPostgresql_demoted_0
 * Pseudo action: msPostgresql_post_notify_demoted_0
+ * Resource action: pgsql notify on pgsr02
 * Resource action: pgsql notify on pgsr01
 * Pseudo action: msPostgresql_confirmed-post_notify_demoted_0
 * Pseudo action: msPostgresql_pre_notify_stop_0
 * Pseudo action: master-group_stop_0
- * Pseudo action: vip-rep_stop_0
+ * Resource action: vip-rep stop on pgsr02
+ * Resource action: pgsql notify on pgsr02
 * Resource action: pgsql notify on pgsr01
 * Pseudo action: msPostgresql_confirmed-pre_notify_stop_0
 * Pseudo action: msPostgresql_stop_0
- * Pseudo action: vip-master_stop_0
- * Pseudo action: pgsql_stop_0
+ * Resource action: vip-master stop on pgsr02
+ * Resource action: pgsql stop on pgsr02
 * Pseudo action: msPostgresql_stopped_0
+ * Resource action: pgsr02 stop on bl460g8n4
+ * Resource action: prmDB2 stop on bl460g8n4
 * Pseudo action: master-group_stopped_0
 * Pseudo action: master-group_start_0
 * Resource action: vip-master start on pgsr01
@@ -63,7 +62,6 @@ Executing Cluster Transition:
 * Resource action: vip-rep monitor=10000 on pgsr01
 * Resource action: pgsql notify on pgsr01
 * Pseudo action: msPostgresql_confirmed-post_notify_stopped_0
- * Pseudo action: pgsql_notified_0
 * Resource action: pgsql monitor=9000 on pgsr01
 Using the original execution date of: 2015-08-12 02:53:40Z
@@ -74,14 +72,14 @@ Revised Cluster Status:
 * Full List of Resources:
 * prmDB1 (ocf:heartbeat:VirtualDomain): Started bl460g8n3
- * prmDB2 (ocf:heartbeat:VirtualDomain): FAILED
+ * prmDB2 (ocf:heartbeat:VirtualDomain): Stopped
 * Resource Group: grpStonith1:
 * prmStonith1-2 (stonith:external/ipmi): Started bl460g8n4
 * Resource Group: grpStonith2:
 * prmStonith2-2 (stonith:external/ipmi): Started bl460g8n3
 * Resource Group: master-group:
- * vip-master (ocf:heartbeat:Dummy): FAILED [ pgsr01 pgsr02 ]
- * vip-rep (ocf:heartbeat:Dummy): FAILED [ pgsr01 pgsr02 ]
+ * vip-master (ocf:heartbeat:Dummy): Started pgsr01
+ * vip-rep (ocf:heartbeat:Dummy): Started pgsr01
 * Clone Set: msPostgresql [pgsql] (promotable):
 * Promoted: [ pgsr01 ]
- * Stopped: [ bl460g8n3 bl460g8n4 ]
+ * Stopped: [ bl460g8n3 bl460g8n4 pgsr02 ]
diff --git a/cts/scheduler/summary/bundle-connection-with-container.summary b/cts/scheduler/summary/bundle-connection-with-container.summary
index 62e0ec683ce..b5b1853bc6a 100644
--- a/cts/scheduler/summary/bundle-connection-with-container.summary
+++ b/cts/scheduler/summary/bundle-connection-with-container.summary
@@ -4,7 +4,7 @@ Current cluster status:
 * Online: [ rhel8-1 rhel8-3 rhel8-4 rhel8-5 ]
 * OFFLINE: [ rhel8-2 ]
 * RemoteOnline: [ remote-rhel8-2 ]
- * GuestOnline: [ httpd-bundle-1 httpd-bundle-2 ]
+ * GuestOnline: [ httpd-bundle-0 httpd-bundle-1 httpd-bundle-2 ]
 * Full List of Resources:
 * Fencing (stonith:fence_xvm): Started rhel8-3
@@ -13,35 +13,33 @@ Current cluster status:
 * remote-rhel8-2 (ocf:pacemaker:remote): Started rhel8-1
 * remote-rsc (ocf:pacemaker:Dummy): Started remote-rhel8-2
 * Container bundle set: httpd-bundle [localhost/pcmktest:http]:
- * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): FAILED rhel8-1
+ * httpd-bundle-0 (192.168.122.131) (ocf:heartbeat:apache): Started rhel8-1
 * httpd-bundle-1 (192.168.122.132) (ocf:heartbeat:apache): Started rhel8-3
 * httpd-bundle-2 (192.168.122.133) (ocf:heartbeat:apache): Started remote-rhel8-2
 Transition Summary:
- * Fence (reboot) httpd-bundle-0 (resource: httpd-bundle-podman-0) 'guest is unclean'
- * Recover httpd-bundle-podman-0 ( rhel8-1 )
+ * Restart httpd-bundle-podman-0 ( rhel8-1 )
 * Recover httpd-bundle-0 ( rhel8-1 )
- * Recover httpd:0 ( httpd-bundle-0 )
+ * Restart httpd:0 ( httpd-bundle-0 ) due to required httpd-bundle-podman-0 start
 Executing Cluster Transition:
- * Resource action: httpd-bundle-0 stop on rhel8-1
 * Pseudo action: httpd-bundle_stop_0
 * Pseudo action: httpd-bundle_start_0
- * Resource action: httpd-bundle-podman-0 stop on rhel8-1
- * Pseudo action: stonith-httpd-bundle-0-reboot on httpd-bundle-0
 * Pseudo action: httpd-bundle-clone_stop_0
+ * Resource action: httpd stop on httpd-bundle-0
+ * Pseudo action: httpd-bundle-clone_stopped_0
+ * Pseudo action: httpd-bundle-clone_start_0
+ * Resource action: httpd-bundle-0 stop on rhel8-1
+ * Resource action: httpd-bundle-podman-0 stop on rhel8-1
+ * Pseudo action: httpd-bundle_stopped_0
 * Resource action: httpd-bundle-podman-0 start on rhel8-1
 * Resource action: httpd-bundle-podman-0 monitor=60000 on rhel8-1
 * Resource action: httpd-bundle-0 start on rhel8-1
 * Resource action: httpd-bundle-0 monitor=30000 on rhel8-1
- * Pseudo action: httpd_stop_0
- * Pseudo action: httpd-bundle-clone_stopped_0
- * Pseudo action: httpd-bundle-clone_start_0
- * Pseudo action: httpd-bundle_stopped_0
 * Resource action: httpd start on httpd-bundle-0
+ * Resource action: httpd monitor=15000 on httpd-bundle-0
 * Pseudo action: httpd-bundle-clone_running_0
 * Pseudo action: httpd-bundle_running_0
- * Resource action: httpd monitor=15000 on httpd-bundle-0
 Using the original execution date of: 2022-07-13 22:13:26Z
 Revised Cluster Status:
diff --git a/cts/scheduler/summary/nested-remote-recovery.summary b/cts/scheduler/summary/nested-remote-recovery.summary
index fd3ccd76135..5c0e7bfa952 100644
--- a/cts/scheduler/summary/nested-remote-recovery.summary
+++ b/cts/scheduler/summary/nested-remote-recovery.summary
@@ -3,7 +3,7 @@ Current cluster status:
 * Node List:
 * Online: [ controller-0 controller-1 controller-2 ]
 * RemoteOnline: [ database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ]
- * GuestOnline: [ galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
+ * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
 * Full List of Resources:
 * database-0 (ocf:pacemaker:remote): Started controller-0
@@ -13,7 +13,7 @@ Current cluster status:
 * messaging-1 (ocf:pacemaker:remote): Started controller-1
 * messaging-2 (ocf:pacemaker:remote): Started controller-1
 * Container bundle set: galera-bundle [192.168.24.1:8787/rhosp13/openstack-mariadb:pcmklatest]:
- * galera-bundle-0 (ocf:heartbeat:galera): FAILED Promoted database-0
+ * galera-bundle-0 (ocf:heartbeat:galera): Promoted database-0
 * galera-bundle-1 (ocf:heartbeat:galera): Promoted database-1
 * galera-bundle-2 (ocf:heartbeat:galera): Promoted database-2
 * Container bundle set: rabbitmq-bundle [192.168.24.1:8787/rhosp13/openstack-rabbitmq:pcmklatest]:
@@ -47,24 +47,20 @@ Current cluster status:
 * stonith-fence_ipmilan-5254002f6d57 (stonith:fence_ipmilan): Started controller-1
 Transition Summary:
- * Fence (reboot) galera-bundle-0 (resource: galera-bundle-docker-0) 'guest is unclean'
- * Recover galera-bundle-docker-0 ( database-0 )
+ * Restart galera-bundle-docker-0 ( database-0 )
 * Recover galera-bundle-0 ( controller-0 )
- * Recover galera:0 ( Promoted galera-bundle-0 )
+ * Restart galera:0 ( Promoted galera-bundle-0 ) due to required galera-bundle-docker-0 start
 Executing Cluster Transition:
- * Resource action: galera-bundle-0 stop on controller-0
- * Pseudo action: galera-bundle_demote_0
 * Pseudo action: galera-bundle-master_demote_0
- * Pseudo action: galera_demote_0
- * Pseudo action: galera-bundle-master_demoted_0
- * Pseudo action: galera-bundle_demoted_0
 * Pseudo action: galera-bundle_stop_0
- * Resource action: galera-bundle-docker-0 stop on database-0
- * Pseudo action: stonith-galera-bundle-0-reboot on galera-bundle-0
+ * Resource action: galera demote on galera-bundle-0
+ * Pseudo action: galera-bundle-master_demoted_0
 * Pseudo action: galera-bundle-master_stop_0
- * Pseudo action: galera_stop_0
+ * Resource action: galera stop on galera-bundle-0
 * Pseudo action: galera-bundle-master_stopped_0
+ * Resource action: galera-bundle-0 stop on controller-0
+ * Resource action: galera-bundle-docker-0 stop on database-0
 * Pseudo action: galera-bundle_stopped_0
 * Pseudo action: galera-bundle_start_0
 * Pseudo action: galera-bundle-master_start_0
@@ -75,12 +71,10 @@ Executing Cluster Transition:
 * Resource action: galera start on galera-bundle-0
 * Pseudo action: galera-bundle-master_running_0
 * Pseudo action: galera-bundle_running_0
- * Pseudo action: galera-bundle_promote_0
 * Pseudo action: galera-bundle-master_promote_0
 * Resource action: galera promote on galera-bundle-0
- * Pseudo action: galera-bundle-master_promoted_0
- * Pseudo action: galera-bundle_promoted_0
 * Resource action: galera monitor=10000 on galera-bundle-0
+ * Pseudo action: galera-bundle-master_promoted_0
 Using the original execution date of: 2018-09-11 21:23:25Z
 Revised Cluster Status:
diff --git a/cts/scheduler/summary/remote-failed-migrate1.summary b/cts/scheduler/summary/remote-failed-migrate1.summary
index b048a9e559c..b770d080727 100644
--- a/cts/scheduler/summary/remote-failed-migrate1.summary
+++ b/cts/scheduler/summary/remote-failed-migrate1.summary
@@ -2,7 +2,7 @@ Using the original execution date of: 2024-05-13 04:25:37Z
 Current cluster status:
 * Node List:
 * Online: [ fastvm-fedora39-22 fastvm-fedora39-24 ]
- * RemoteOFFLINE: [ fastvm-fedora39-23 ]
+ * RemoteOnline: [ fastvm-fedora39-23 ]
 * Full List of Resources:
 * fastvm-fedora39-23 (ocf:pacemaker:remote): FAILED [ fastvm-fedora39-22 fastvm-fedora39-24 ]
diff --git a/cts/scheduler/summary/remote-failed-migrate2.summary b/cts/scheduler/summary/remote-failed-migrate2.summary
index ca5e04b0923..7e0f71fc043 100644
--- a/cts/scheduler/summary/remote-failed-migrate2.summary
+++ b/cts/scheduler/summary/remote-failed-migrate2.summary
@@ -1,27 +1,22 @@
 Using the original execution date of: 2024-05-15 10:28:29Z
 Current cluster status:
 * Node List:
- * RemoteNode fastvm-fedora39-23: UNCLEAN (offline)
 * Online: [ fastvm-fedora39-22 fastvm-fedora39-24 ]
+ * RemoteOnline: [ fastvm-fedora39-23 ]
 * Full List of Resources:
 * fastvm-fedora39-23 (ocf:pacemaker:remote): FAILED [ fastvm-fedora39-24 fastvm-fedora39-22 ]
 * xvm (stonith:fence_xvm): Started fastvm-fedora39-22
- * dummy (ocf:pacemaker:Dummy): Started fastvm-fedora39-23 (UNCLEAN)
+ * dummy (ocf:pacemaker:Dummy): Started fastvm-fedora39-23
 Transition Summary:
- * Fence (reboot) fastvm-fedora39-23 'dummy is thought to be active there'
- * Recover fastvm-fedora39-23 ( fastvm-fedora39-24 )
- * Move dummy ( fastvm-fedora39-23 -> fastvm-fedora39-22 )
+ * Recover fastvm-fedora39-23 ( fastvm-fedora39-24 )
 Executing Cluster Transition:
- * Fencing fastvm-fedora39-23 (reboot)
- * Pseudo action: dummy_stop_0
 * Resource action: fastvm-fedora39-23 stop on fastvm-fedora39-22
 * Resource action: fastvm-fedora39-23 stop on fastvm-fedora39-24
- * Resource action: dummy start on fastvm-fedora39-22
 * Resource action: fastvm-fedora39-23 start on fastvm-fedora39-24
- * Resource action: dummy monitor=10000 on fastvm-fedora39-22
+ * Resource action: dummy monitor=10000 on fastvm-fedora39-23
 * Resource action: fastvm-fedora39-23 monitor=60000 on fastvm-fedora39-24
 Using the original execution date of: 2024-05-15 10:28:29Z
@@ -33,4 +28,4 @@ Revised Cluster Status:
 * Full List of Resources:
 * fastvm-fedora39-23 (ocf:pacemaker:remote): Started fastvm-fedora39-24
 * xvm (stonith:fence_xvm): Started fastvm-fedora39-22
- * dummy (ocf:pacemaker:Dummy): Started fastvm-fedora39-22
+ * dummy (ocf:pacemaker:Dummy): Started fastvm-fedora39-23
diff --git a/cts/scheduler/summary/remote-fence-unclean-3.summary b/cts/scheduler/summary/remote-fence-unclean-3.summary
index af916ed3e55..d28f4b641f7 100644
--- a/cts/scheduler/summary/remote-fence-unclean-3.summary
+++ b/cts/scheduler/summary/remote-fence-unclean-3.summary
@@ -1,7 +1,7 @@
 Current cluster status:
 * Node List:
 * Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
- * RemoteOFFLINE: [ overcloud-novacompute-0 ]
+ * RemoteOnline: [ overcloud-novacompute-0 ]
 * GuestOnline: [ galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2 ]
 * Full List of Resources:
@@ -35,7 +35,6 @@ Current cluster status:
 * openstack-cinder-backup-docker-0 (ocf:heartbeat:docker): Started overcloud-controller-1
 Transition Summary:
- * Fence (reboot) overcloud-novacompute-0 'the connection is unrecoverable'
 * Start fence1 ( overcloud-controller-0 )
 * Stop overcloud-novacompute-0 ( overcloud-controller-0 ) due to node availability
@@ -62,7 +61,6 @@ Executing Cluster Transition:
 * Resource action: redis-bundle-1 monitor on overcloud-controller-0
 * Resource action: redis-bundle-2 monitor on overcloud-controller-1
 * Resource action: redis-bundle-2 monitor on overcloud-controller-0
- * Fencing overcloud-novacompute-0 (reboot)
 * Resource action: fence1 start on overcloud-controller-0
 * Resource action: fence1 monitor=60000 on overcloud-controller-0
diff --git a/cts/scheduler/summary/remote-recover-fail.summary b/cts/scheduler/summary/remote-recover-fail.summary
index d2399149f80..1f92a8f93d2 100644
--- a/cts/scheduler/summary/remote-recover-fail.summary
+++ b/cts/scheduler/summary/remote-recover-fail.summary
@@ -1,41 +1,33 @@
 Current cluster status:
 * Node List:
- * RemoteNode rhel7-auto4: UNCLEAN (offline)
 * Online: [ rhel7-auto2 rhel7-auto3 ]
 * OFFLINE: [ rhel7-auto1 ]
+ * RemoteOnline: [ rhel7-auto4 ]
 * Full List of Resources:
 * shooter (stonith:fence_xvm): Started rhel7-auto3
 * rhel7-auto4 (ocf:pacemaker:remote): FAILED rhel7-auto2
 * FAKE1 (ocf:heartbeat:Dummy): Stopped
- * FAKE2 (ocf:heartbeat:Dummy): Started rhel7-auto4 (UNCLEAN)
+ * FAKE2 (ocf:heartbeat:Dummy): Started rhel7-auto4
 * FAKE3 (ocf:heartbeat:Dummy): Started rhel7-auto2
 * FAKE4 (ocf:heartbeat:Dummy): Started rhel7-auto3
 * FAKE5 (ocf:heartbeat:Dummy): Started rhel7-auto3
- * FAKE6 (ocf:heartbeat:Dummy): Started rhel7-auto4 (UNCLEAN)
+ * FAKE6 (ocf:heartbeat:Dummy): Started rhel7-auto4
 Transition Summary:
- * Fence (reboot) rhel7-auto4 'FAKE2 is thought to be active there'
- * Recover rhel7-auto4 ( rhel7-auto2 )
- * Start FAKE1 ( rhel7-auto2 )
- * Move FAKE2 ( rhel7-auto4 -> rhel7-auto3 )
- * Move FAKE6 ( rhel7-auto4 -> rhel7-auto2 )
+ * Recover rhel7-auto4 ( rhel7-auto2 )
+ * Start FAKE1 ( rhel7-auto4 )
 Executing Cluster Transition:
+ * Resource action: rhel7-auto4 stop on rhel7-auto2
 * Resource action: FAKE3 monitor=10000 on rhel7-auto2
 * Resource action: FAKE4 monitor=10000 on rhel7-auto3
- * Fencing rhel7-auto4 (reboot)
- * Resource action: FAKE1 start on rhel7-auto2
- * Pseudo action: FAKE2_stop_0
- * Pseudo action: FAKE6_stop_0
- * Resource action: rhel7-auto4 stop on rhel7-auto2
- * Resource action: FAKE1 monitor=10000 on rhel7-auto2
- * Resource action: FAKE2 start on rhel7-auto3
- * Resource action: FAKE6 start on rhel7-auto2
 * Resource action: rhel7-auto4 start on rhel7-auto2
- * Resource action: FAKE2 monitor=10000 on rhel7-auto3
- * Resource action: FAKE6 monitor=10000 on rhel7-auto2
+ * Resource action: FAKE1 start on rhel7-auto4
+ * Resource action: FAKE2 monitor=10000 on rhel7-auto4
+ * Resource action: FAKE6 monitor=10000 on rhel7-auto4
 * Resource action: rhel7-auto4 monitor=60000 on rhel7-auto2
+ * Resource action: FAKE1 monitor=10000 on rhel7-auto4
 Revised Cluster Status:
 * Node List:
@@ -46,9 +38,9 @@ Revised Cluster Status:
 * Full List of Resources:
 * shooter (stonith:fence_xvm): Started rhel7-auto3
 * rhel7-auto4 (ocf:pacemaker:remote): Started rhel7-auto2
- * FAKE1 (ocf:heartbeat:Dummy): Started rhel7-auto2
- * FAKE2 (ocf:heartbeat:Dummy): Started rhel7-auto3
+ * FAKE1 (ocf:heartbeat:Dummy): Started rhel7-auto4
+ * FAKE2 (ocf:heartbeat:Dummy): Started rhel7-auto4
 * FAKE3 (ocf:heartbeat:Dummy): Started rhel7-auto2
 * FAKE4 (ocf:heartbeat:Dummy): Started rhel7-auto3
 * FAKE5 (ocf:heartbeat:Dummy): Started rhel7-auto3
- * FAKE6 (ocf:heartbeat:Dummy): Started rhel7-auto2
+ * FAKE6 (ocf:heartbeat:Dummy): Started rhel7-auto4
diff --git a/cts/scheduler/summary/remote-start-fail.summary b/cts/scheduler/summary/remote-start-fail.summary
index cf83c04e896..d0e73b31c6a 100644
--- a/cts/scheduler/summary/remote-start-fail.summary
+++ b/cts/scheduler/summary/remote-start-fail.summary
@@ -1,7 +1,7 @@
 Current cluster status:
 * Node List:
 * Online: [ rhel7-auto1 rhel7-auto2 rhel7-auto3 ]
- * RemoteOFFLINE: [ rhel7-auto4 ]
+ * RemoteOnline: [ rhel7-auto4 ]
 * Full List of Resources:
 * shooter (stonith:fence_xvm): Started rhel7-auto1