Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[crmsh-4.6] Fix: ui_cluster: Stop renaming cluster name when using qdevice #1576

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 8 additions & 4 deletions crmsh/bootstrap.py
Original file line number Diff line number Diff line change
Expand Up @@ -497,7 +497,7 @@ def is_online():
return False

# if peer_node is None, this is in the init process
if _context.cluster_node is None:
if not _context or _context.cluster_node is None:
return True
# In join process
# If the joining node is already online but can't find the init node
Expand Down Expand Up @@ -2654,9 +2654,7 @@ def remove_qdevice():
if qdevice_reload_policy == qdevice.QdevicePolicy.QDEVICE_RELOAD:
invoke("crm cluster run 'crm corosync reload'")
elif qdevice_reload_policy == qdevice.QdevicePolicy.QDEVICE_RESTART:
logger.info("Restarting cluster service")
utils.cluster_run_cmd("crm cluster restart")
wait_for_cluster()
restart_cluster()
else:
logger.warning("To remove qdevice service, need to restart cluster service manually on each node")

Expand Down Expand Up @@ -3077,4 +3075,10 @@ def sync_file(path):
utils.cluster_copy_file(path, nodes=_context.node_list_in_cluster, output=False)
else:
csync2_update(path)


def restart_cluster():
    """
    Restart the cluster service on all nodes and block until it is back up.

    Logs a notice, runs `crm cluster restart` across the cluster via
    utils.cluster_run_cmd, then calls wait_for_cluster() so callers can
    assume the cluster is online again when this returns.

    Shared helper used by the qdevice, sbd, and bootstrap code paths
    that previously duplicated these three steps inline.
    """
    logger.info("Restarting cluster service")
    utils.cluster_run_cmd("crm cluster restart")
    wait_for_cluster()
# EOF
4 changes: 1 addition & 3 deletions crmsh/qdevice.py
Original file line number Diff line number Diff line change
Expand Up @@ -597,9 +597,7 @@ def start_qdevice_service(self):
logger.info("Starting corosync-qdevice.service in cluster")
utils.cluster_run_cmd("systemctl restart corosync-qdevice")
elif self.qdevice_reload_policy == QdevicePolicy.QDEVICE_RESTART:
logger.info("Restarting cluster service")
utils.cluster_run_cmd("crm cluster restart")
bootstrap.wait_for_cluster()
bootstrap.restart_cluster()
else:
logger.warning("To use qdevice service, need to restart cluster service manually on each node")

Expand Down
4 changes: 1 addition & 3 deletions crmsh/sbd.py
Original file line number Diff line number Diff line change
Expand Up @@ -457,9 +457,7 @@ def _restart_cluster_and_configure_sbd_ra(self):
Try to configure the sbd resource, restarting the cluster if needed
"""
if not xmlutil.CrmMonXmlParser().is_any_resource_running():
logger.info("Restarting cluster service")
utils.cluster_run_cmd("crm cluster restart")
bootstrap.wait_for_cluster()
bootstrap.restart_cluster()
self.configure_sbd_resource_and_properties()
else:
logger.warning("To start sbd.service, need to restart cluster service manually on each node")
Expand Down
19 changes: 16 additions & 3 deletions crmsh/ui_cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
from . import bootstrap
from . import corosync
from . import qdevice
from . import xmlutil
from .cibconfig import cib_factory
from .prun import prun
from .service_manager import ServiceManager
Expand Down Expand Up @@ -594,8 +595,17 @@
'''
Rename the cluster.
'''
if not ServiceManager(sh.ClusterShellAdaptorForLocalShell(sh.LocalShell())).service_is_active("corosync.service"):
service_manager = ServiceManager()
if not service_manager.service_is_active("corosync.service"):

Check warning on line 599 in crmsh/ui_cluster.py

View check run for this annotation

Codecov / codecov/patch

crmsh/ui_cluster.py#L598-L599

Added lines #L598 - L599 were not covered by tests
context.fatal_error("Can't rename cluster when cluster service is stopped")
if service_manager.service_is_active("corosync-qdevice.service"):
logger.error("Can't rename cluster when QDevice service is running")
suggestion = '''Please run `crm cluster remove --qdevice` on any node in the cluster to remove the QDevice configuration;

Check warning on line 603 in crmsh/ui_cluster.py

View check run for this annotation

Codecov / codecov/patch

crmsh/ui_cluster.py#L601-L603

Added lines #L601 - L603 were not covered by tests
Then rename the cluster;
Finally run `crm cluster init qdevice` on any node in the cluster to re-deploy the QDevice.'''
logger.info(suggestion)
return

Check warning on line 607 in crmsh/ui_cluster.py

View check run for this annotation

Codecov / codecov/patch

crmsh/ui_cluster.py#L606-L607

Added lines #L606 - L607 were not covered by tests

old_name = cib_factory.get_property('cluster-name')
if old_name and new_name == old_name:
context.fatal_error("Expected a different name")
Expand All @@ -613,8 +623,11 @@
if not cib_factory.commit():
context.fatal_error("Change property cluster-name failed!")

# it's a safe way to give the user a hint that the service needs to be restarted
context.info("To apply the change, restart the cluster service at convenient time")
if xmlutil.CrmMonXmlParser().is_any_resource_running():
context.info("To apply the change, restart the cluster service at convenient time")

Check warning on line 627 in crmsh/ui_cluster.py

View check run for this annotation

Codecov / codecov/patch

crmsh/ui_cluster.py#L626-L627

Added lines #L626 - L627 were not covered by tests
else:
bootstrap.restart_cluster()

Check warning on line 629 in crmsh/ui_cluster.py

View check run for this annotation

Codecov / codecov/patch

crmsh/ui_cluster.py#L629

Added line #L629 was not covered by tests


def _parse_clustermap(self, clusters):
'''
Expand Down
Loading