From 958182481e200afa95f04022fe37044393085265 Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Tue, 8 Jun 2021 15:44:53 +0530 Subject: [PATCH] cloudstack: make code more inclusive Inclusivity changes for CloudStack - Change default git branch name from 'master' to 'main' (post renaming/changing default git branch to 'main' in git repo) - Rename some offensive words/terms as appropriate for inclusiveness. This PR updates the default git branch to 'main', as part of #4887. Signed-off-by: Suresh Kumar Anaparti Signed-off-by: Rohit Yadav --- CONTRIBUTING.md | 42 +-- ISSUE_TEMPLATE.md | 4 +- PULL_REQUEST_TEMPLATE.md | 2 +- README.md | 2 +- .../cloud/network/router/VirtualRouter.java | 2 +- .../apache/cloudstack/api/ApiConstants.java | 1 + .../user/resource/ListResourceLimitsCmd.java | 8 +- .../apache/cloudstack/query/QueryService.java | 4 +- client/conf/db.properties.in | 12 +- .../cloud/agent/api/CheckRouterAnswer.java | 4 +- .../META-INF/db/schema-41510to41600.sql | 11 + .../storage/test/EndpointSelectorTest.java | 14 +- .../test/SnapshotTestWithFakeData.java | 16 +- .../java/com/cloud/utils/db/Merovingian2.java | 8 +- .../com/cloud/utils/db/TransactionLegacy.java | 32 +- .../com/cloud/utils/db/Merovingian2Test.java | 24 +- .../acl/DynamicRoleBasedAPIAccessChecker.java | 2 +- .../acl/ProjectRoleBasedApiAccessChecker.java | 2 +- .../acl/StaticRoleBasedAPIAccessChecker.java | 2 +- .../com/cloud/utils/db/StaticStrategy.java | 34 +- .../cloud/ovm/hypervisor/OvmResourceBase.java | 8 +- .../java/com/cloud/ovm/object/OvmHost.java | 6 +- .../main/java/com/cloud/ovm/object/Test.java | 4 - .../vm/hypervisor/ovm/OvmFaultConstants.py | 2 +- .../vm/hypervisor/ovm/OvmHostModule.py | 8 +- .../hypervisor/ovm3/objects/Cluster.java | 2 +- .../cloud/hypervisor/ovm3/objects/Linux.java | 6 +- .../cloud/hypervisor/ovm3/objects/Pool.java | 10 +- .../resources/Ovm3HypervisorResource.java | 4 +- .../resources/helpers/Ovm3Configuration.java | 20 +- 
.../helpers/Ovm3HypervisorSupport.java | 44 +-- .../resources/helpers/Ovm3StoragePool.java | 22 +- .../hypervisor/ovm3/objects/LinuxTest.java | 4 +- .../hypervisor/ovm3/objects/PoolTest.java | 6 +- .../helpers/Ovm3ConfigurationTest.java | 4 +- .../helpers/Ovm3HypervisorSupportTest.java | 4 +- .../{clean_master.sh => clean_primary.sh} | 0 .../{clean_slave.sh => clean_secondary.sh} | 0 .../resources/scripts/create_pool_cluster.py | 2 +- .../ovm3/src/test/resources/scripts/info.py | 8 +- .../src/test/resources/scripts/password.py | 2 +- .../src/test/resources/scripts/repo_pool.py | 18 +- .../src/test/resources/scripts/simple_pool.py | 18 +- .../agent/manager/MockVmManagerImpl.java | 6 +- .../kubernetes/cluster/KubernetesCluster.java | 2 +- .../cluster/KubernetesClusterManagerImpl.java | 35 +- .../cluster/KubernetesClusterVO.java | 18 +- .../KubernetesClusterActionWorker.java | 20 +- .../KubernetesClusterScaleWorker.java | 6 +- .../KubernetesClusterStartWorker.java | 184 ++++----- .../KubernetesClusterUpgradeWorker.java | 4 +- .../cluster/utils/KubernetesClusterUtil.java | 16 +- .../cluster/CreateKubernetesClusterCmd.java | 14 +- .../response/KubernetesClusterResponse.java | 13 + .../KubernetesSupportedVersionResponse.java | 2 +- ...aster-add.yml => k8s-control-node-add.yml} | 0 .../{k8s-master.yml => k8s-control-node.yml} | 0 .../resources/script/upgrade-kubernetes.sh | 10 +- .../agent/api/GetControllerDataAnswer.java | 12 +- .../agent/api/GetControllerHostsAnswer.java | 20 +- .../network/bigswitch/BigSwitchBcfApi.java | 12 +- .../network/bigswitch/BigSwitchBcfUtils.java | 18 +- .../network/bigswitch/ControlClusterData.java | 20 +- .../network/bigswitch/ControllerData.java | 10 +- .../resource/BigSwitchBcfResource.java | 2 +- .../network/bigswitch/BigSwitchApiTest.java | 18 +- .../management/ServiceManagerImpl.java | 2 +- .../network/contrail/model/ModelObject.java | 2 +- .../contrail/model/ServiceInstanceModel.java | 2 +- .../management/NetworkProviderTest.java | 8 
+- .../management/PublicNetworkTest.java | 8 +- python/lib/cloud_utils.py | 2 +- .../java/com/cloud/api/ApiResponseHelper.java | 2 +- .../com/cloud/api/query/QueryManagerImpl.java | 6 +- .../api/query/dao/UserVmJoinDaoImpl.java | 4 +- .../ConfigurationManagerImpl.java | 6 +- .../com/cloud/deploy/FirstFitPlanner.java | 2 +- .../cloud/ha/HighAvailabilityManagerImpl.java | 2 +- .../network/element/VirtualRouterElement.java | 10 +- .../VirtualNetworkApplianceManagerImpl.java | 46 +-- .../com/cloud/network/vpc/VpcManagerImpl.java | 16 +- .../ResourceLimitManagerImpl.java | 4 +- ...tener.java => LockControllerListener.java} | 10 +- .../cloud/server/ManagementServerImpl.java | 18 +- .../com/cloud/user/AccountManagerImpl.java | 2 +- .../java/com/cloud/vm/UserVmManagerImpl.java | 17 +- .../spring-server-core-managers-context.xml | 2 +- .../system/spring-server-system-context.xml | 2 +- .../element/VirtualRouterElementTest.java | 14 +- ...irtualNetworkApplianceManagerImplTest.java | 2 +- .../agent/noVNC/vendor/pako/lib/zlib/trees.js | 18 +- systemvm/debian/opt/cloud/bin/checkrouter.sh | 4 +- .../bin/{master.py => configure_router.py} | 14 +- systemvm/debian/opt/cloud/bin/cs/CsAddress.py | 4 +- systemvm/debian/opt/cloud/bin/cs/CsDatabag.py | 12 +- systemvm/debian/opt/cloud/bin/cs/CsDhcp.py | 2 +- systemvm/debian/opt/cloud/bin/cs/CsHelper.py | 8 +- .../debian/opt/cloud/bin/cs/CsRedundant.py | 26 +- systemvm/debian/opt/cloud/bin/ipassoc.sh | 8 +- .../cloud/templates/check_heartbeat.sh.templ | 2 +- .../opt/cloud/templates/checkrouter.sh.templ | 4 +- .../opt/cloud/templates/keepalived.conf.templ | 6 +- .../component/maint/test_redundant_router.py | 352 +++++++++--------- ...st_redundant_router_deployment_planning.py | 24 +- .../test_redundant_router_network_rules.py | 92 ++--- .../component/test_acl_isolatednetwork.py | 146 ++++---- .../test_acl_isolatednetwork_delete.py | 40 +- .../component/test_acl_listsnapshot.py | 48 +-- test/integration/component/test_acl_listvm.py | 38 +- 
.../component/test_acl_listvolume.py | 38 +- ...cl_sharednetwork_deployVM-impersonation.py | 6 +- .../component/test_add_remove_network.py | 2 +- .../component/test_affinity_groups.py | 2 +- .../component/test_egress_fw_rules.py | 52 +-- .../component/test_ip_reservation.py | 24 +- ...st_multiple_subnets_in_isolated_network.py | 2 +- ...ultiple_subnets_in_isolated_network_rvr.py | 2 +- .../component/test_multiple_subnets_in_vpc.py | 2 +- .../test_multiple_subnets_in_vpc_rvr.py | 2 +- .../component/test_persistent_networks.py | 4 +- test/integration/component/test_public_ip.py | 8 +- .../test_redundant_router_cleanups.py | 78 ++-- .../test_redundant_router_services.py | 6 +- .../test_redundant_router_upgrades.py | 8 +- test/integration/component/test_volumes.py | 2 +- .../plugins/test_nicira_controller.py | 54 +-- .../smoke/test_kubernetes_clusters.py | 30 +- test/integration/smoke/test_privategw_acl.py | 6 +- .../smoke/test_routers_network_ops.py | 18 +- test/integration/smoke/test_vpc_redundant.py | 16 +- tools/apidoc/generatecommand.xsl | 4 +- tools/apidoc/generatecommands.xsl | 4 +- tools/apidoc/generatetoc_footer.xsl | 2 +- tools/apidoc/generatetoc_header.xsl | 2 +- tools/apidoc/includes/main.css | 6 +- tools/build/build_asf.sh | 4 +- tools/build/setnextversion.sh | 4 +- tools/devcloud4/advanced/README.md | 2 +- tools/devcloud4/basic/README.md | 2 +- tools/docker/README.md | 2 +- tools/docker/systemtpl.sh | 12 +- tools/marvin/marvin/cloudstackTestClient.py | 6 +- tools/marvin/marvin/codes.py | 2 +- tools/marvin/marvin/configGenerator.py | 2 +- tools/ngui/static/js/lib/angular.js | 18 +- ui/Dockerfile | 2 +- ui/public/locales/ar.json | 4 +- ui/public/locales/ca.json | 4 +- ui/public/locales/de_DE.json | 6 +- ui/public/locales/en.json | 4 +- ui/public/locales/es.json | 4 +- ui/public/locales/hu.json | 4 +- ui/public/locales/it_IT.json | 4 +- ui/public/locales/ko_KR.json | 4 +- ui/public/locales/nl_NL.json | 4 +- ui/public/locales/pl.json | 4 +- 
ui/public/locales/ru_RU.json | 4 +- ui/src/permission.js | 4 +- ui/src/store/modules/app.js | 2 +- .../views/compute/CreateKubernetesCluster.vue | 12 +- ui/vue.config.js | 2 +- 161 files changed, 1221 insertions(+), 1188 deletions(-) rename plugins/hypervisors/ovm3/src/test/resources/scripts/{clean_master.sh => clean_primary.sh} (100%) rename plugins/hypervisors/ovm3/src/test/resources/scripts/{clean_slave.sh => clean_secondary.sh} (100%) rename plugins/integrations/kubernetes-service/src/main/resources/conf/{k8s-master-add.yml => k8s-control-node-add.yml} (100%) rename plugins/integrations/kubernetes-service/src/main/resources/conf/{k8s-master.yml => k8s-control-node.yml} (100%) rename server/src/main/java/com/cloud/server/{LockMasterListener.java => LockControllerListener.java} (83%) rename systemvm/debian/opt/cloud/bin/{master.py => configure_router.py} (87%) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2f9bc4b4af1..13e63c8f565 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -14,14 +14,14 @@ Bug fixes It's very important that we can easily track bug fix commits, so their hashes should remain the same in all branches. Therefore, a pull request (PR) that fixes a bug, should be sent against a release branch. This can be either the "current release" or the "previous release", depending on which ones are maintained. -Since the goal is a stable master, bug fixes should be "merged forward" to the next branch in order: "previous release" -> "current release" -> master (in other words: old to new) +Since the goal is a stable main, bug fixes should be "merged forward" to the next branch in order: "previous release" -> "current release" -> main (in other words: old to new) Developing new features ----------------------- -Development should be done in a feature branch, branched off of master. -Send a PR(steps below) to get it into master (2x LGTM applies). -PR will only be merged when master is open, will be held otherwise until master is open again. 
+Development should be done in a feature branch, branched off of main. +Send a PR(steps below) to get it into main (2x LGTM applies). +PR will only be merged when main is open, will be held otherwise until main is open again. No back porting / cherry-picking features to existing branches! PendingReleaseNotes file @@ -46,9 +46,9 @@ On your computer, follow these steps to setup a local repository for working on $ git clone https://github.com/YOUR_ACCOUNT/cloudstack.git $ cd cloudstack $ git remote add upstream https://github.com/apache/cloudstack.git -$ git checkout master +$ git checkout main $ git fetch upstream -$ git rebase upstream/master +$ git rebase upstream/main ``` @@ -56,7 +56,7 @@ Making changes -------------- -It is important that you create a new branch to make changes on and that you do not change the `master` branch (other than to rebase in changes from `upstream/master`). In this example I will assume you will be making your changes to a branch called `feature_x`. This `feature_x` branch will be created on your local repository and will be pushed to your forked repository on GitHub. Once this branch is on your fork you will create a Pull Request for the changes to be added to the ACS project. +It is important that you create a new branch to make changes on and that you do not change the `main` branch (other than to rebase in changes from `upstream/main`). In this example I will assume you will be making your changes to a branch called `feature_x`. This `feature_x` branch will be created on your local repository and will be pushed to your forked repository on GitHub. Once this branch is on your fork you will create a Pull Request for the changes to be added to the ACS project. It is best practice to create a new branch each time you want to contribute to the project and only track the changes for that pull request in this branch. 
@@ -71,26 +71,26 @@ $ git commit -a -m "descriptive commit message for your changes" > The `-b` specifies that you want to create a new branch called `feature_x`. You only specify `-b` the first time you checkout because you are creating a new branch. Once the `feature_x` branch exists, you can later switch to it with only `git checkout feature_x`. -Rebase `feature_x` to include updates from `upstream/master` +Rebase `feature_x` to include updates from `upstream/main` ------------------------------------------------------------ -It is important that you maintain an up-to-date `master` branch in your local repository. This is done by rebasing in the code changes from `upstream/master` (the official ACS project repository) into your local repository. You will want to do this before you start working on a feature as well as right before you submit your changes as a pull request. I recommend you do this process periodically while you work to make sure you are working off the most recent project code. +It is important that you maintain an up-to-date `main` branch in your local repository. This is done by rebasing in the code changes from `upstream/main` (the official ACS project repository) into your local repository. You will want to do this before you start working on a feature as well as right before you submit your changes as a pull request. I recommend you do this process periodically while you work to make sure you are working off the most recent project code. This process will do the following: -1. Checkout your local `master` branch -2. Synchronize your local `master` branch with the `upstream/master` so you have all the latest changes from the project +1. Checkout your local `main` branch +2. Synchronize your local `main` branch with the `upstream/main` so you have all the latest changes from the project 3. 
Rebase the latest project code into your `feature_x` branch so it is up-to-date with the upstream code ``` bash -$ git checkout master +$ git checkout main $ git fetch upstream -$ git rebase upstream/master +$ git rebase upstream/main $ git checkout feature_x -$ git rebase master +$ git rebase main ``` -> Now your `feature_x` branch is up-to-date with all the code in `upstream/master`. +> Now your `feature_x` branch is up-to-date with all the code in `upstream/main`. Make a GitHub Pull Request to contribute your changes @@ -100,10 +100,10 @@ When you are happy with your changes and you are ready to contribute them, you w Please include JIRA id, detailed information about the bug/feature, what all tests are executed, how the reviewer can test this feature etc. Incase of UI PRs, a screenshot is preferred. -> **IMPORTANT:** Make sure you have rebased your `feature_x` branch to include the latest code from `upstream/master` _before_ you do this. +> **IMPORTANT:** Make sure you have rebased your `feature_x` branch to include the latest code from `upstream/main` _before_ you do this. ``` bash -$ git push origin master +$ git push origin main $ git push origin feature_x ``` @@ -113,7 +113,7 @@ To initiate the pull request, do the following: 1. In your browser, navigate to your forked repository: [https://github.com/YOUR_ACCOUNT/cloudstack](https://github.com/YOUR_ACCOUNT/cloudstack) 2. Click the new button called '**Compare & pull request**' that showed up just above the main area in your forked repository -3. Validate the pull request will be into the upstream `master` and will be from your `feature_x` branch +3. Validate the pull request will be into the upstream `main` and will be from your `feature_x` branch 4. Enter a detailed description of the work you have done and then click '**Send pull request**' If you are requested to make modifications to your proposed changes, make the changes locally on your `feature_x` branch, re-push the `feature_x` branch to your fork. 
The existing pull request should automatically pick up the change and update accordingly. @@ -122,14 +122,14 @@ If you are requested to make modifications to your proposed changes, make the ch Cleaning up after a successful pull request ------------------------------------------- -Once the `feature_x` branch has been committed into the `upstream/master` branch, your local `feature_x` branch and the `origin/feature_x` branch are no longer needed. If you want to make additional changes, restart the process with a new branch. +Once the `feature_x` branch has been committed into the `upstream/main` branch, your local `feature_x` branch and the `origin/feature_x` branch are no longer needed. If you want to make additional changes, restart the process with a new branch. -> **IMPORTANT:** Make sure that your changes are in `upstream/master` before you delete your `feature_x` and `origin/feature_x` branches! +> **IMPORTANT:** Make sure that your changes are in `upstream/main` before you delete your `feature_x` and `origin/feature_x` branches! You can delete these deprecated branches with the following: ``` bash -$ git checkout master +$ git checkout main $ git branch -D feature_x $ git push origin :feature_x ``` diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md index 21627321142..85c1ac9ec6a 100644 --- a/ISSUE_TEMPLATE.md +++ b/ISSUE_TEMPLATE.md @@ -1,6 +1,6 @@ @@ -23,7 +23,7 @@ Categorize the issue, e.g. API, VR, VPN, UI, etc. ##### CLOUDSTACK VERSION ~~~ diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md index 858f100d01b..16345c1d723 100644 --- a/PULL_REQUEST_TEMPLATE.md +++ b/PULL_REQUEST_TEMPLATE.md @@ -48,4 +48,4 @@ This PR... 
- + diff --git a/README.md b/README.md index e2bb618f091..3d9bc3d4fe3 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Apache CloudStack [![Build Status](https://travis-ci.org/apache/cloudstack.svg?branch=master)](https://travis-ci.org/apache/cloudstack) [![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=apachecloudstack&metric=alert_status)](https://sonarcloud.io/dashboard?id=apachecloudstack) [![Lines of Code](https://sonarcloud.io/api/project_badges/measure?project=apachecloudstack&metric=ncloc)](https://sonarcloud.io/dashboard?id=apachecloudstack) ![GitHub language count](https://img.shields.io/github/languages/count/apache/cloudstack.svg) ![GitHub top language](https://img.shields.io/github/languages/top/apache/cloudstack.svg) +# Apache CloudStack [![Build Status](https://travis-ci.org/apache/cloudstack.svg?branch=main)](https://travis-ci.org/apache/cloudstack) [![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=apachecloudstack&metric=alert_status)](https://sonarcloud.io/dashboard?id=apachecloudstack) [![Lines of Code](https://sonarcloud.io/api/project_badges/measure?project=apachecloudstack&metric=ncloc)](https://sonarcloud.io/dashboard?id=apachecloudstack) ![GitHub language count](https://img.shields.io/github/languages/count/apache/cloudstack.svg) ![GitHub top language](https://img.shields.io/github/languages/top/apache/cloudstack.svg) ![Apache CloudStack](tools/logo/apache_cloudstack.png) diff --git a/api/src/main/java/com/cloud/network/router/VirtualRouter.java b/api/src/main/java/com/cloud/network/router/VirtualRouter.java index 84c85ce6675..8bec5199047 100644 --- a/api/src/main/java/com/cloud/network/router/VirtualRouter.java +++ b/api/src/main/java/com/cloud/network/router/VirtualRouter.java @@ -35,7 +35,7 @@ public interface VirtualRouter extends VirtualMachine { boolean getIsRedundantRouter(); public enum RedundantState { - UNKNOWN, MASTER, BACKUP, FAULT + UNKNOWN, PRIMARY, BACKUP, 
FAULT } RedundantState getRedundantState(); diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 605a6c006eb..70095841ac6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -824,6 +824,7 @@ public class ApiConstants { public static final String KUBERNETES_VERSION_ID = "kubernetesversionid"; public static final String KUBERNETES_VERSION_NAME = "kubernetesversionname"; public static final String MASTER_NODES = "masternodes"; + public static final String CONTROL_NODES = "controlnodes"; public static final String MIN_SEMANTIC_VERSION = "minimumsemanticversion"; public static final String MIN_KUBERNETES_VERSION_ID = "minimumkubernetesversionid"; public static final String NODE_ROOT_DISK_SIZE = "noderootdisksize"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListResourceLimitsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListResourceLimitsCmd.java index 68c09c27fb0..a3d84f837fb 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListResourceLimitsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListResourceLimitsCmd.java @@ -54,8 +54,8 @@ public class ListResourceLimitsCmd extends BaseListProjectAndAccountResourcesCmd + "5 - Project. Number of projects an account can own. " + "6 - Network. Number of networks an account can own. " + "7 - VPC. Number of VPC an account can own. " - + "8 - CPU. Number of CPU an account can allocate for his resources. " - + "9 - Memory. Amount of RAM an account can allocate for his resources. " + + "8 - CPU. Number of CPU an account can allocate for their resources. " + + "9 - Memory. Amount of RAM an account can allocate for their resources. " + "10 - PrimaryStorage. Total primary storage space (in GiB) a user can use. 
" + "11 - SecondaryStorage. Total secondary storage space (in GiB) a user can use. ") private Integer resourceType; @@ -69,8 +69,8 @@ public class ListResourceLimitsCmd extends BaseListProjectAndAccountResourcesCmd + "project - Project. Number of projects an account can own. " + "network - Network. Number of networks an account can own. " + "vpc - VPC. Number of VPC an account can own. " - + "cpu - CPU. Number of CPU an account can allocate for his resources. " - + "memory - Memory. Amount of RAM an account can allocate for his resources. " + + "cpu - CPU. Number of CPU an account can allocate for their resources. " + + "memory - Memory. Amount of RAM an account can allocate for their resources. " + "primary_storage - PrimaryStorage. Total primary storage space (in GiB) a user can use. " + "secondary_storage - SecondaryStorage. Total secondary storage space (in GiB) a user can use. ") private String resourceTypeName; diff --git a/api/src/main/java/org/apache/cloudstack/query/QueryService.java b/api/src/main/java/org/apache/cloudstack/query/QueryService.java index 57ac963bb8b..3484de84ef4 100644 --- a/api/src/main/java/org/apache/cloudstack/query/QueryService.java +++ b/api/src/main/java/org/apache/cloudstack/query/QueryService.java @@ -92,8 +92,8 @@ public interface QueryService { ConfigKey AllowUserViewDestroyedVM = new ConfigKey<>("Advanced", Boolean.class, "allow.user.view.destroyed.vm", "false", "Determines whether users can view their destroyed or expunging vm ", true, ConfigKey.Scope.Account); - static final ConfigKey UserVMBlacklistedDetails = new ConfigKey("Advanced", String.class, - "user.vm.blacklisted.details", "rootdisksize, cpuOvercommitRatio, memoryOvercommitRatio, Message.ReservedCapacityFreed.Flag", + static final ConfigKey UserVMDeniedDetails = new ConfigKey("Advanced", String.class, + "user.vm.denied.details", "rootdisksize, cpuOvercommitRatio, memoryOvercommitRatio, Message.ReservedCapacityFreed.Flag", "Determines whether users can view certain 
VM settings. When set to empty, default value used is: rootdisksize, cpuOvercommitRatio, memoryOvercommitRatio, Message.ReservedCapacityFreed.Flag.", true); static final ConfigKey UserVMReadOnlyDetails = new ConfigKey("Advanced", String.class, diff --git a/client/conf/db.properties.in b/client/conf/db.properties.in index f94631c356a..5ea63e43de2 100644 --- a/client/conf/db.properties.in +++ b/client/conf/db.properties.in @@ -83,21 +83,21 @@ db.simulator.autoReconnect=true db.ha.enabled=false db.ha.loadBalanceStrategy=com.cloud.utils.db.StaticStrategy # cloud stack Database -db.cloud.slaves=localhost,localhost +db.cloud.replicas=localhost,localhost db.cloud.autoReconnect=true db.cloud.failOverReadOnly=false db.cloud.reconnectAtTxEnd=true db.cloud.autoReconnectForPools=true -db.cloud.secondsBeforeRetryMaster=3600 -db.cloud.queriesBeforeRetryMaster=5000 +db.cloud.secondsBeforeRetrySource=3600 +db.cloud.queriesBeforeRetrySource=5000 db.cloud.initialTimeout=3600 #usage Database -db.usage.slaves=localhost,localhost +db.usage.replicas=localhost,localhost db.usage.autoReconnect=true db.usage.failOverReadOnly=false db.usage.reconnectAtTxEnd=true db.usage.autoReconnectForPools=true -db.usage.secondsBeforeRetryMaster=3600 -db.usage.queriesBeforeRetryMaster=5000 +db.usage.secondsBeforeRetrySource=3600 +db.usage.queriesBeforeRetrySource=5000 db.usage.initialTimeout=3600 diff --git a/core/src/main/java/com/cloud/agent/api/CheckRouterAnswer.java b/core/src/main/java/com/cloud/agent/api/CheckRouterAnswer.java index 6a95ab11bc8..7f8626bf023 100644 --- a/core/src/main/java/com/cloud/agent/api/CheckRouterAnswer.java +++ b/core/src/main/java/com/cloud/agent/api/CheckRouterAnswer.java @@ -48,8 +48,8 @@ public class CheckRouterAnswer extends Answer { state = RedundantState.UNKNOWN; return false; } - if (details.startsWith("Status: MASTER")) { - state = RedundantState.MASTER; + if (details.startsWith("Status: PRIMARY")) { + state = RedundantState.PRIMARY; } else if 
(details.startsWith("Status: BACKUP")) { state = RedundantState.BACKUP; } else if (details.startsWith("Status: FAULT")) { diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql index eec9bcd671b..d5058c1b358 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql @@ -303,3 +303,14 @@ from -- Update name for global configuration user.vm.readonly.ui.details Update configuration set name='user.vm.readonly.details' where name='user.vm.readonly.ui.details'; + +-- Update name for global configuration 'user.vm.readonly.ui.details' to 'user.vm.denied.details' +UPDATE `cloud`.`configuration` SET name='user.vm.denied.details' WHERE name='user.vm.blacklisted.details'; + +-- Update name for global configuration 'blacklisted.routes' to 'denied.routes' +UPDATE `cloud`.`configuration` SET name='denied.routes', description='Routes that are denied, can not be used for Static Routes creation for the VPC Private Gateway' WHERE name='blacklisted.routes'; + +-- Rename 'master_node_count' to 'control_node_count' in kubernetes_cluster table +ALTER TABLE `cloud`.`kubernetes_cluster` CHANGE master_node_count control_node_count bigint NOT NULL default '0' COMMENT 'the number of the control nodes deployed for this Kubernetes cluster'; + +UPDATE `cloud`.`domain_router` SET redundant_state = 'PRIMARY' WHERE redundant_state = 'MASTER'; \ No newline at end of file diff --git a/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/EndpointSelectorTest.java b/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/EndpointSelectorTest.java index 29acf94b545..6256452e72a 100644 --- a/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/EndpointSelectorTest.java +++ 
b/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/EndpointSelectorTest.java @@ -72,7 +72,7 @@ import com.cloud.hypervisor.Hypervisor; import com.cloud.org.Cluster; import com.cloud.org.Managed; import com.cloud.resource.ResourceState; -import com.cloud.server.LockMasterListener; +import com.cloud.server.LockControllerListener; import com.cloud.storage.DataStoreRole; import com.cloud.storage.ScopeType; import com.cloud.storage.Storage; @@ -120,12 +120,12 @@ public class EndpointSelectorTest { @Inject AccountManager accountManager; - LockMasterListener lockMasterListener; + LockControllerListener lockControllerListener; VolumeInfo vol = null; FakePrimaryDataStoreDriver driver = new FakePrimaryDataStoreDriver(); @Inject MockStorageMotionStrategy mockStorageMotionStrategy; - Merovingian2 _lockMaster; + Merovingian2 _lockController; @Inject DataStoreManager dataStoreManager; @Inject @@ -187,12 +187,12 @@ public class EndpointSelectorTest { when(accountManager.getSystemAccount()).thenReturn(account); when(accountManager.getSystemUser()).thenReturn(user); - if (Merovingian2.getLockMaster() == null) { - _lockMaster = Merovingian2.createLockMaster(1234); + if (Merovingian2.getLockController() == null) { + _lockController = Merovingian2.createLockController(1234); } else { - _lockMaster = Merovingian2.getLockMaster(); + _lockController = Merovingian2.getLockController(); } - _lockMaster.cleanupThisServer(); + _lockController.cleanupThisServer(); ComponentContext.initComponentsLifeCycle(); } diff --git a/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/SnapshotTestWithFakeData.java b/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/SnapshotTestWithFakeData.java index a3961ace64b..152c279547c 100644 --- a/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/SnapshotTestWithFakeData.java +++ 
b/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/SnapshotTestWithFakeData.java @@ -73,7 +73,7 @@ import com.cloud.dc.dao.HostPodDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.org.Cluster; import com.cloud.org.Managed; -import com.cloud.server.LockMasterListener; +import com.cloud.server.LockControllerListener; import com.cloud.storage.CreateSnapshotPayload; import com.cloud.storage.DataStoreRole; import com.cloud.storage.ScopeType; @@ -134,12 +134,12 @@ public class SnapshotTestWithFakeData { ImageStoreVO imageStore; @Inject AccountManager accountManager; - LockMasterListener lockMasterListener; + LockControllerListener lockControllerListener; VolumeInfo vol = null; FakePrimaryDataStoreDriver driver = new FakePrimaryDataStoreDriver(); @Inject MockStorageMotionStrategy mockStorageMotionStrategy; - Merovingian2 _lockMaster; + Merovingian2 _lockController; @Inject SnapshotPolicyDao snapshotPolicyDao; @@ -189,18 +189,18 @@ public class SnapshotTestWithFakeData { when(accountManager.getSystemAccount()).thenReturn(account); when(accountManager.getSystemUser()).thenReturn(user); - if (Merovingian2.getLockMaster() == null) { - _lockMaster = Merovingian2.createLockMaster(1234); + if (Merovingian2.getLockController() == null) { + _lockController = Merovingian2.createLockController(1234); } else { - _lockMaster = Merovingian2.getLockMaster(); + _lockController = Merovingian2.getLockController(); } - _lockMaster.cleanupThisServer(); + _lockController.cleanupThisServer(); ComponentContext.initComponentsLifeCycle(); } @After public void tearDown() throws Exception { - _lockMaster.cleanupThisServer(); + _lockController.cleanupThisServer(); } private SnapshotVO createSnapshotInDb() { diff --git a/framework/db/src/main/java/com/cloud/utils/db/Merovingian2.java b/framework/db/src/main/java/com/cloud/utils/db/Merovingian2.java index d2537e369bd..956430f5c8b 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/Merovingian2.java 
+++ b/framework/db/src/main/java/com/cloud/utils/db/Merovingian2.java @@ -68,7 +68,7 @@ public class Merovingian2 extends StandardMBean implements MerovingianMBean { conn = TransactionLegacy.getStandaloneConnectionWithException(); conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); conn.setAutoCommit(true); - _concierge = new ConnectionConcierge("LockMaster", conn, true); + _concierge = new ConnectionConcierge("LockController", conn, true); } catch (SQLException e) { s_logger.error("Unable to get a new db connection", e); throw new CloudRuntimeException("Unable to initialize a connection to the database for locking purposes", e); @@ -83,8 +83,8 @@ public class Merovingian2 extends StandardMBean implements MerovingianMBean { } } - public static synchronized Merovingian2 createLockMaster(long msId) { - assert s_instance == null : "No lock can serve two masters. Either he will hate the one and love the other, or he will be devoted to the one and despise the other."; + public static synchronized Merovingian2 createLockController(long msId) { + assert s_instance == null : "No lock can serve two controllers. 
Either we will hate the one and love the other, or we will be devoted to the one and despise the other."; s_instance = new Merovingian2(msId); s_instance.cleanupThisServer(); try { @@ -95,7 +95,7 @@ public class Merovingian2 extends StandardMBean implements MerovingianMBean { return s_instance; } - public static Merovingian2 getLockMaster() { + public static Merovingian2 getLockController() { return s_instance; } diff --git a/framework/db/src/main/java/com/cloud/utils/db/TransactionLegacy.java b/framework/db/src/main/java/com/cloud/utils/db/TransactionLegacy.java index 2dde30275a5..eb6b09c31f3 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/TransactionLegacy.java +++ b/framework/db/src/main/java/com/cloud/utils/db/TransactionLegacy.java @@ -377,19 +377,19 @@ public class TransactionLegacy implements Closeable { } public boolean lock(final String name, final int timeoutSeconds) { - Merovingian2 lockMaster = Merovingian2.getLockMaster(); - if (lockMaster == null) { + Merovingian2 lockController = Merovingian2.getLockController(); + if (lockController == null) { throw new CloudRuntimeException("There's no support for locking yet"); } - return lockMaster.acquire(name, timeoutSeconds); + return lockController.acquire(name, timeoutSeconds); } public boolean release(final String name) { - Merovingian2 lockMaster = Merovingian2.getLockMaster(); - if (lockMaster == null) { + Merovingian2 lockController = Merovingian2.getLockController(); + if (lockController == null) { throw new CloudRuntimeException("There's no support for locking yet"); } - return lockMaster.release(name); + return lockController.release(name); } /** @@ -644,9 +644,9 @@ public class TransactionLegacy implements Closeable { closeConnection(); _stack.clear(); - Merovingian2 lockMaster = Merovingian2.getLockMaster(); - if (lockMaster != null) { - lockMaster.cleanupThread(); + Merovingian2 lockController = Merovingian2.getLockController(); + if (lockController != null) { + 
lockController.cleanupThread(); } } @@ -1063,11 +1063,11 @@ public class TransactionLegacy implements Closeable { final String url = dbProps.getProperty("db.cloud.url.params"); String cloudDbHAParams = null; - String cloudSlaves = null; + String cloudReplicas = null; if (s_dbHAEnabled) { cloudDbHAParams = getDBHAParams("cloud", dbProps); - cloudSlaves = dbProps.getProperty("db.cloud.slaves"); - s_logger.info("The slaves configured for Cloud Data base is/are : " + cloudSlaves); + cloudReplicas = dbProps.getProperty("db.cloud.replicas"); + s_logger.info("The replicas configured for the Cloud database are: " + cloudReplicas); } final boolean useSSL = Boolean.parseBoolean(dbProps.getProperty("db.cloud.useSSL")); @@ -1078,7 +1078,7 @@ public class TransactionLegacy implements Closeable { System.setProperty("javax.net.ssl.trustStorePassword", dbProps.getProperty("db.cloud.trustStorePassword")); } - final String cloudConnectionUri = cloudDriver + "://" + cloudHost + (s_dbHAEnabled ? "," + cloudSlaves : "") + ":" + cloudPort + "/" + cloudDbName + + final String cloudConnectionUri = cloudDriver + "://" + cloudHost + (s_dbHAEnabled ? "," + cloudReplicas : "") + ":" + cloudPort + "/" + cloudDbName + "?autoReconnect=" + cloudAutoReconnect + (url != null ? "&" + url : "") + (useSSL ? "&useSSL=true" : "") + (s_dbHAEnabled ? "&" + cloudDbHAParams : "") + (s_dbHAEnabled ? "&loadBalanceStrategy=" + loadBalanceStrategy : ""); DriverLoader.loadDriver(cloudDriver); @@ -1101,7 +1101,7 @@ public class TransactionLegacy implements Closeable { final boolean usageAutoReconnect = Boolean.parseBoolean(dbProps.getProperty("db.usage.autoReconnect")); final String usageUrl = dbProps.getProperty("db.usage.url.params"); - final String usageConnectionUri = usageDriver + "://" + usageHost + (s_dbHAEnabled ? "," + dbProps.getProperty("db.cloud.slaves") : "") + ":" + usagePort + + final String usageConnectionUri = usageDriver + "://" + usageHost + (s_dbHAEnabled ?
"," + dbProps.getProperty("db.cloud.replicas") : "") + ":" + usagePort + "/" + usageDbName + "?autoReconnect=" + usageAutoReconnect + (usageUrl != null ? "&" + usageUrl : "") + (s_dbHAEnabled ? "&" + getDBHAParams("usage", dbProps) : "") + (s_dbHAEnabled ? "&loadBalanceStrategy=" + loadBalanceStrategy : ""); DriverLoader.loadDriver(usageDriver); @@ -1196,8 +1196,8 @@ public class TransactionLegacy implements Closeable { sb.append("failOverReadOnly=" + dbProps.getProperty("db." + dbName + ".failOverReadOnly")); sb.append("&").append("reconnectAtTxEnd=" + dbProps.getProperty("db." + dbName + ".reconnectAtTxEnd")); sb.append("&").append("autoReconnectForPools=" + dbProps.getProperty("db." + dbName + ".autoReconnectForPools")); - sb.append("&").append("secondsBeforeRetryMaster=" + dbProps.getProperty("db." + dbName + ".secondsBeforeRetryMaster")); - sb.append("&").append("queriesBeforeRetryMaster=" + dbProps.getProperty("db." + dbName + ".queriesBeforeRetryMaster")); + sb.append("&").append("secondsBeforeRetrySource=" + dbProps.getProperty("db." + dbName + ".secondsBeforeRetrySource")); + sb.append("&").append("queriesBeforeRetrySource=" + dbProps.getProperty("db." + dbName + ".queriesBeforeRetrySource")); sb.append("&").append("initialTimeout=" + dbProps.getProperty("db." 
+ dbName + ".initialTimeout")); return sb.toString(); } diff --git a/framework/db/src/test/java/com/cloud/utils/db/Merovingian2Test.java b/framework/db/src/test/java/com/cloud/utils/db/Merovingian2Test.java index 9d5cf1b84c8..eb8b96dc747 100644 --- a/framework/db/src/test/java/com/cloud/utils/db/Merovingian2Test.java +++ b/framework/db/src/test/java/com/cloud/utils/db/Merovingian2Test.java @@ -26,53 +26,53 @@ import org.junit.Test; public class Merovingian2Test extends TestCase { static final Logger s_logger = Logger.getLogger(Merovingian2Test.class); - Merovingian2 _lockMaster = Merovingian2.createLockMaster(1234); + Merovingian2 _lockController = Merovingian2.createLockController(1234); @Override @Before protected void setUp() throws Exception { - _lockMaster.cleanupThisServer(); + _lockController.cleanupThisServer(); } @Override @After protected void tearDown() throws Exception { - _lockMaster.cleanupThisServer(); + _lockController.cleanupThisServer(); } @Test public void testLockAndRelease() { s_logger.info("Testing first acquire"); - boolean result = _lockMaster.acquire("first" + 1234, 5); + boolean result = _lockController.acquire("first" + 1234, 5); Assert.assertTrue(result); s_logger.info("Testing acquire of different lock"); - result = _lockMaster.acquire("second" + 1234, 5); + result = _lockController.acquire("second" + 1234, 5); Assert.assertTrue(result); s_logger.info("Testing reacquire of the same lock"); - result = _lockMaster.acquire("first" + 1234, 5); + result = _lockController.acquire("first" + 1234, 5); Assert.assertTrue(result); - int count = _lockMaster.owns("first" + 1234); + int count = _lockController.owns("first" + 1234); Assert.assertEquals(count, 2); - count = _lockMaster.owns("second" + 1234); + count = _lockController.owns("second" + 1234); Assert.assertEquals(count, 1); s_logger.info("Testing release of the first lock"); - result = _lockMaster.release("first" + 1234); + result = _lockController.release("first" + 1234); 
Assert.assertTrue(result); - count = _lockMaster.owns("first" + 1234); + count = _lockController.owns("first" + 1234); Assert.assertEquals(count, 1); s_logger.info("Testing release of the second lock"); - result = _lockMaster.release("second" + 1234); + result = _lockController.release("second" + 1234); Assert.assertTrue(result); - result = _lockMaster.release("first" + 1234); + result = _lockController.release("first" + 1234); Assert.assertTrue(result); } diff --git a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java index 02cdf2a9df4..f693bae8c33 100644 --- a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java +++ b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java @@ -58,7 +58,7 @@ public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements API } private void denyApiAccess(final String commandName) throws PermissionDeniedException { - throw new PermissionDeniedException("The API " + commandName + " is blacklisted for the account's role."); + throw new PermissionDeniedException("The API " + commandName + " is denied for the account's role."); } public boolean isDisabled() { diff --git a/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java b/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java index 5a17bb993eb..5648a96ea66 100644 --- a/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java +++ b/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java @@ -55,7 +55,7 @@ public class ProjectRoleBasedApiAccessChecker extends AdapterBase implements AP } 
private void denyApiAccess(final String commandName) throws PermissionDeniedException { - throw new PermissionDeniedException("The API " + commandName + " is blacklisted for the user's/account's project role."); + throw new PermissionDeniedException("The API " + commandName + " is denied for the user's/account's project role."); } diff --git a/plugins/acl/static-role-based/src/main/java/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java b/plugins/acl/static-role-based/src/main/java/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java index 6b40ab4ddff..7550483b230 100644 --- a/plugins/acl/static-role-based/src/main/java/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java +++ b/plugins/acl/static-role-based/src/main/java/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java @@ -90,7 +90,7 @@ public class StaticRoleBasedAPIAccessChecker extends AdapterBase implements APIA } if (commandNames.contains(commandName)) { - throw new PermissionDeniedException("The API is blacklisted. Role type=" + roleType.toString() + " is not allowed to request the api: " + commandName); + throw new PermissionDeniedException("The API is denied. 
Role type=" + roleType.toString() + " is not allowed to request the api: " + commandName); } else { throw new UnavailableCommandException("The API " + commandName + " does not exist or is not available for this account."); } diff --git a/plugins/database/mysql-ha/src/main/java/com/cloud/utils/db/StaticStrategy.java b/plugins/database/mysql-ha/src/main/java/com/cloud/utils/db/StaticStrategy.java index b3536523a06..90a6dad1674 100644 --- a/plugins/database/mysql-ha/src/main/java/com/cloud/utils/db/StaticStrategy.java +++ b/plugins/database/mysql-ha/src/main/java/com/cloud/utils/db/StaticStrategy.java @@ -44,21 +44,21 @@ public class StaticStrategy implements BalanceStrategy { SQLException ex = null; - List whiteList = new ArrayList(numHosts); - whiteList.addAll(configuredHosts); + List allowList = new ArrayList(numHosts); + allowList.addAll(configuredHosts); - Map blackList = ((LoadBalancedConnectionProxy) proxy).getGlobalBlacklist(); + Map denylist = ((LoadBalancedConnectionProxy) proxy).getGlobalBlacklist(); - whiteList.removeAll(blackList.keySet()); + allowList.removeAll(denylist.keySet()); - Map whiteListMap = this.getArrayIndexMap(whiteList); + Map allowListMap = this.getArrayIndexMap(allowList); for (int attempts = 0; attempts < numRetries;) { - if (whiteList.size() == 0) { + if (allowList.size() == 0) { throw SQLError.createSQLException("No hosts configured", null); } - String hostPortSpec = whiteList.get(0); //Always take the first host + String hostPortSpec = allowList.get(0); //Always take the first host ConnectionImpl conn = (ConnectionImpl) liveConnections.get(hostPortSpec); @@ -70,16 +70,16 @@ public class StaticStrategy implements BalanceStrategy { if (((LoadBalancedConnectionProxy) proxy).shouldExceptionTriggerFailover(sqlEx)) { - Integer whiteListIndex = whiteListMap.get(hostPortSpec); + Integer allowListIndex = allowListMap.get(hostPortSpec); // exclude this host from being picked again - if (whiteListIndex != null) { - 
whiteList.remove(whiteListIndex.intValue()); - whiteListMap = this.getArrayIndexMap(whiteList); + if (allowListIndex != null) { + allowList.remove(allowListIndex.intValue()); + allowListMap = this.getArrayIndexMap(allowList); } ((LoadBalancedConnectionProxy) proxy).addToGlobalBlacklist(hostPortSpec); - if (whiteList.size() == 0) { + if (allowList.size() == 0) { attempts++; try { Thread.sleep(250); @@ -88,12 +88,12 @@ public class StaticStrategy implements BalanceStrategy { } // start fresh - whiteListMap = new HashMap(numHosts); - whiteList.addAll(configuredHosts); - blackList = ((LoadBalancedConnectionProxy) proxy).getGlobalBlacklist(); + allowListMap = new HashMap(numHosts); + allowList.addAll(configuredHosts); + denylist = ((LoadBalancedConnectionProxy) proxy).getGlobalBlacklist(); - whiteList.removeAll(blackList.keySet()); - whiteListMap = this.getArrayIndexMap(whiteList); + allowList.removeAll(denylist.keySet()); + allowListMap = this.getArrayIndexMap(allowList); } continue; diff --git a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmResourceBase.java b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmResourceBase.java index 648bf7fc39e..a41555dadcc 100644 --- a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmResourceBase.java +++ b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmResourceBase.java @@ -224,7 +224,7 @@ public class OvmResourceBase implements ServerResource, HypervisorResource { _conn = new Connection(_ip, _agentUserName, _agentPassword); try { - OvmHost.registerAsMaster(_conn); + OvmHost.registerAsPrimary(_conn); OvmHost.registerAsVmServer(_conn); _bridges = OvmBridge.getAllBridges(_conn); } catch (XmlRpcException e) { @@ -398,11 +398,11 @@ public class OvmResourceBase implements ServerResource, HypervisorResource { try { OvmHost.Details d = OvmHost.getDetails(_conn); //TODO: cleanup halted vm - if (d.masterIp.equalsIgnoreCase(_ip)) { + if (d.primaryIp.equalsIgnoreCase(_ip)) 
{ return new ReadyAnswer(cmd); } else { - s_logger.debug("Master IP changes to " + d.masterIp + ", it should be " + _ip); - return new ReadyAnswer(cmd, "I am not the master server"); + s_logger.debug("Primary IP changes to " + d.primaryIp + ", it should be " + _ip); + return new ReadyAnswer(cmd, "I am not the primary server"); } } catch (XmlRpcException e) { s_logger.debug("XML RPC Exception" + e.getMessage(), e); diff --git a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/OvmHost.java b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/OvmHost.java index ad1b2f6f2b7..664fea3de79 100644 --- a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/OvmHost.java +++ b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/OvmHost.java @@ -26,7 +26,7 @@ public class OvmHost extends OvmObject { public static final String XEN = "xen"; public static class Details { - public String masterIp; + public String primaryIp; public Integer cpuNum; public Integer cpuSpeed; public Long totalMemory; @@ -42,9 +42,9 @@ public class OvmHost extends OvmObject { } } - public static void registerAsMaster(Connection c) throws XmlRpcException { + public static void registerAsPrimary(Connection c) throws XmlRpcException { Object[] params = {c.getIp(), c.getUserName(), c.getPassword(), c.getPort(), c.getIsSsl()}; - c.call("OvmHost.registerAsMaster", params, false); + c.call("OvmHost.registerAsPrimary", params, false); } public static void registerAsVmServer(Connection c) throws XmlRpcException { diff --git a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/Test.java b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/Test.java index a8ab4f77eea..cd1b14eeaa1 100644 --- a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/Test.java +++ b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/Test.java @@ -38,8 +38,6 @@ public class Test { //pool.registerServer("192.168.105.155", Pool.ServerType.UTILITY); 
//pool.registerServer("192.168.105.155", Pool.ServerType.XEN); System.out.println("Is:" + pool.isServerRegistered()); - //String ip = pool.getMasterIp(); - //System.out.println("IP:" + ip); System.out.println(pool.getServerConfig()); System.out.println(pool.getServerXmInfo()); System.out.println(pool.getHostInfo()); @@ -89,8 +87,6 @@ public class Test { /* This is not being used at the moment. * Coverity issue: 1012179 */ - //final String txt = - // "{\"MasterIp\": \"192.168.189.12\", \"dom0Memory\": 790626304, \"freeMemory\": 16378757120, \"totalMemory\": 17169383424, \"cpuNum\": 4, \"agentVersion\": \"2.3-38\", \"cpuSpeed\": 2261}"; //OvmHost.Details d = new GsonBuilder().create().fromJson(txt, OvmHost.Details.class); //OvmHost.Details d = Coder.fromJson(txt, OvmHost.Details.class); diff --git a/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmFaultConstants.py b/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmFaultConstants.py index d929d814da5..25b8e5a4d28 100755 --- a/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmFaultConstants.py +++ b/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmFaultConstants.py @@ -41,7 +41,7 @@ errCode = { "OvmDispatch.InvaildFunction":OvmDispatcherStub+3, "OvmVm.reboot":OvmDispatcherStub+4, - "OvmHost.registerAsMaster":OvmHostErrCodeStub+1, + "OvmHost.registerAsPrimary":OvmHostErrCodeStub+1, "OvmHost.registerAsVmServer":OvmHostErrCodeStub+2, "OvmHost.ping":OvmHostErrCodeStub+3, "OvmHost.getDetails":OvmHostErrCodeStub+4, diff --git a/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmHostModule.py b/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmHostModule.py index 3c61500630d..de50e8b1bb6 100755 --- a/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmHostModule.py +++ b/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmHostModule.py @@ -95,9 +95,9 @@ class OvmHost(OvmObject): raise NoVmFoundException("No domain id for %s 
found"%vmName) @staticmethod - def registerAsMaster(hostname, username="oracle", password="password", port=8899, isSsl=False): + def registerAsPrimary(hostname, username="oracle", password="password", port=8899, isSsl=False): try: - logger.debug(OvmHost.registerAsMaster, "ip=%s, username=%s, password=%s, port=%s, isSsl=%s"%(hostname, username, password, port, isSsl)) + logger.debug(OvmHost.registerAsPrimary, "ip=%s, username=%s, password=%s, port=%s, isSsl=%s"%(hostname, username, password, port, isSsl)) exceptionIfNoSuccess(register_server(hostname, 'site', False, username, password, port, isSsl), "Register %s as site failed"%hostname) exceptionIfNoSuccess(register_server(hostname, 'utility', False, username, password, port, isSsl), @@ -106,8 +106,8 @@ class OvmHost(OvmObject): return rs except Exception, e: errmsg = fmt_err_msg(e) - logger.error(OvmHost.registerAsMaster, errmsg) - raise XmlRpcFault(toErrCode(OvmHost, OvmHost.registerAsMaster), errmsg) + logger.error(OvmHost.registerAsPrimary, errmsg) + raise XmlRpcFault(toErrCode(OvmHost, OvmHost.registerAsPrimary), errmsg) @staticmethod def registerAsVmServer(hostname, username="oracle", password="password", port=8899, isSsl=False): diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Cluster.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Cluster.java index 344f708e6ef..0d108069388 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Cluster.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Cluster.java @@ -91,7 +91,7 @@ public class Cluster extends OvmObject { * update_clusterConfiguration, * argument: self - default: None argument: cluster_conf - default: None <( * ? 
cluster_conf can be a "dict" or a plain file: print - * master.update_clusterConfiguration( + * primary.update_clusterConfiguration( * "heartbeat:\n\tregion = 0004FB0000050000E70FBDDEB802208F\n\tcluster = ba9aaf00ae5e2d72\n\nnode:\n\tip_port = 7777\n\tip_address = 192.168.1.64\n\tnumber = 0\n\tname = ovm-1\n\tcluster = ba9aaf00ae5e2d72\n\nnode:\n\tip_port = 7777\n\tip_address = 192.168.1.65\n\tnumber = 1\n\tname = ovm-2\n\tcluster = ba9aaf00ae5e2d72\n\ncluster:\n\tnode_count = 2\n\theartbeat_mode = global\n\tname = ba9aaf00ae5e2d72\n" * ) */ diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Linux.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Linux.java index b037dd73a4d..c0c0f3fa682 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Linux.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Linux.java @@ -57,7 +57,7 @@ public class Linux extends OvmObject { * {OS_Major_Version=5, Statistic=20, Membership_State=Unowned, * OVM_Version=3.2.1-517, OS_Type=Linux, Hypervisor_Name=Xen, * CPU_Type=x86_64, Manager_Core_API_Version=3.2.1.516, - * Is_Current_Master=false, OS_Name=Oracle VM Server, + * Is_Primary=false, OS_Name=Oracle VM Server, * Server_Roles=xen,utility, Pool_Unique_Id=none, * Host_Kernel_Release=2.6.39-300.22.2.el5uek, OS_Minor_Version=7, * Agent_Version=3.2.1-183, Boot_Time=1392366638, RPM_Version=3.2.1-183, @@ -154,8 +154,8 @@ public class Linux extends OvmObject { return get("Server_Roles"); } - public boolean getIsMaster() throws Ovm3ResourceException { - return Boolean.parseBoolean(get("Is_Current_Master")); + public boolean getIsPrimary() throws Ovm3ResourceException { + return Boolean.parseBoolean(get("Is_Primary")); } public String getOvmVersion() throws Ovm3ResourceException { diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Pool.java 
b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Pool.java index cf62993a238..6306754185e 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Pool.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Pool.java @@ -42,7 +42,7 @@ public class Pool extends OvmObject { }; private List poolHosts = new ArrayList(); private final List poolRoles = new ArrayList(); - private String poolMasterVip; + private String poolPrimaryVip; private String poolAlias; private String poolId = null; @@ -50,8 +50,8 @@ public class Pool extends OvmObject { setClient(c); } - public String getPoolMasterVip() { - return poolMasterVip; + public String getPoolPrimaryVip() { + return poolPrimaryVip; } public String getPoolAlias() { @@ -115,7 +115,7 @@ public class Pool extends OvmObject { /* * public Boolean updatePoolVirtualIp(String ip) throws * Ovm3ResourceException { Object x = callWrapper("update_pool_virtual_ip", - * ip); if (x == null) { poolMasterVip = ip; return true; } return false; } + * ip); if (x == null) { poolPrimaryVip = ip; return true; } return false; } */ public Boolean leaveServerPool(String uuid) throws Ovm3ResourceException{ @@ -199,7 +199,7 @@ public class Pool extends OvmObject { String path = "//Discover_Server_Pool_Result/Server_Pool"; poolId = xmlToString(path + "/Unique_Id", xmlDocument); poolAlias = xmlToString(path + "/Pool_Alias", xmlDocument); - poolMasterVip = xmlToString(path + "/Master_Virtual_Ip", + poolPrimaryVip = xmlToString(path + "/Primary_Virtual_Ip", xmlDocument); poolHosts.addAll(xmlToList(path + "//Registered_IP", xmlDocument)); if (poolId == null) { diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorResource.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorResource.java index d3bf4f0b674..e897ca5e5ed 100644 --- 
a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorResource.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorResource.java @@ -308,7 +308,7 @@ public class Ovm3HypervisorResource extends ServerResourceBase implements Hyperv @Override public boolean configure(String name, Map params) throws ConfigurationException { LOGGER.debug("configure " + name + " with params: " + params); - /* check if we're master or not and if we can connect */ + /* check if we're primary or not and if we can connect */ try { configuration = new Ovm3Configuration(params); if (!configuration.getIsTest()) { @@ -320,7 +320,7 @@ public class Ovm3HypervisorResource extends ServerResourceBase implements Hyperv if (!configuration.getIsTest()) { hypervisorsupport.setupServer(configuration.getAgentSshKeyFileName()); } - hypervisorsupport.masterCheck(); + hypervisorsupport.primaryCheck(); } catch (Exception e) { throw new CloudRuntimeException("Base checks failed for " + configuration.getAgentHostname(), e); } diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3Configuration.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3Configuration.java index ba31236a7a8..9da760b97fb 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3Configuration.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3Configuration.java @@ -50,8 +50,8 @@ public class Ovm3Configuration { private Boolean agentOvsAgentSsl = false; private String agentSshKeyFile = "id_rsa.cloud"; private String agentOwnedByUuid = "d1a749d4295041fb99854f52ea4dea97"; - private Boolean agentIsMaster = false; - private Boolean agentHasMaster = false; + private Boolean agentIsPrimary = false; + private Boolean agentHasPrimary = false; private Boolean agentInOvm3Pool = false; private 
Boolean agentInOvm3Cluster = false; private String ovm3PoolVip = ""; @@ -266,20 +266,20 @@ public class Ovm3Configuration { this.agentOwnedByUuid = agentOwnedByUuid; } - public Boolean getAgentIsMaster() { - return agentIsMaster; + public Boolean getAgentIsPrimary() { + return agentIsPrimary; } - public void setAgentIsMaster(Boolean agentIsMaster) { - this.agentIsMaster = agentIsMaster; + public void setAgentIsPrimary(Boolean agentIsPrimary) { + this.agentIsPrimary = agentIsPrimary; } - public Boolean getAgentHasMaster() { - return agentHasMaster; + public Boolean getAgentHasPrimary() { + return agentHasPrimary; } - public void setAgentHasMaster(Boolean agentHasMaster) { - this.agentHasMaster = agentHasMaster; + public void setAgentHasPrimary(Boolean agentHasPrimary) { + this.agentHasPrimary = agentHasPrimary; } public Boolean getAgentInOvm3Pool() { diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupport.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupport.java index 55a087d9746..67a63d788f3 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupport.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupport.java @@ -246,8 +246,8 @@ public class Ovm3HypervisorSupport { d.put("private.network.device", config.getAgentPrivateNetworkName()); d.put("guest.network.device", config.getAgentGuestNetworkName()); d.put("storage.network.device", config.getAgentStorageNetworkName()); - d.put("ismaster", config.getAgentIsMaster().toString()); - d.put("hasmaster", config.getAgentHasMaster().toString()); + d.put("isprimary", config.getAgentIsPrimary().toString()); + d.put("hasprimary", config.getAgentHasPrimary().toString()); cmd.setHostDetails(d); LOGGER.debug("Add an Ovm3 host " + config.getAgentHostname() + ":" + cmd.getHostDetails()); @@ -571,13 
+571,13 @@ public class Ovm3HypervisorSupport { } /** - * materCheck + * primaryCheck * * @return */ - public boolean masterCheck() { + public boolean primaryCheck() { if ("".equals(config.getOvm3PoolVip())) { - LOGGER.debug("No cluster vip, not checking for master"); + LOGGER.debug("No cluster vip, not checking for primary"); return false; } @@ -585,26 +585,26 @@ public class Ovm3HypervisorSupport { CloudstackPlugin cSp = new CloudstackPlugin(c); if (cSp.dom0HasIp(config.getOvm3PoolVip())) { LOGGER.debug(config.getAgentHostname() - + " is a master, already has vip " + + " is a primary, already has vip " + config.getOvm3PoolVip()); - config.setAgentIsMaster(true); + config.setAgentIsPrimary(true); } else if (cSp.ping(config.getOvm3PoolVip())) { LOGGER.debug(config.getAgentHostname() - + " has a master, someone has vip " + + " has a primary, someone has vip " + config.getOvm3PoolVip()); - config.setAgentHasMaster(true); + config.setAgentHasPrimary(true); } else { LOGGER.debug(config.getAgentHostname() - + " becomes a master, no one has vip " + + " becomes a primary, no one has vip " + config.getOvm3PoolVip()); - config.setAgentIsMaster(true); + config.setAgentIsPrimary(true); } } catch (Ovm3ResourceException e) { LOGGER.debug(config.getAgentHostname() - + " can't reach master: " + e.getMessage()); - config.setAgentHasMaster(false); + + " can't reach primary: " + e.getMessage()); + config.setAgentHasPrimary(false); } - return config.getAgentIsMaster(); + return config.getAgentIsPrimary(); } /* Check if the host is in ready state for CS */ @@ -614,22 +614,22 @@ public class Ovm3HypervisorSupport { Pool pool = new Pool(c); /* only interesting when doing cluster */ - if (!host.getIsMaster() && config.getAgentInOvm3Cluster()) { - if (pool.getPoolMasterVip().equalsIgnoreCase(c.getIp())) { + if (!host.getIsPrimary() && config.getAgentInOvm3Cluster()) { + if (pool.getPoolPrimaryVip().equalsIgnoreCase(c.getIp())) { /* check pool state here */ return new ReadyAnswer(cmd); } 
else { - LOGGER.debug("Master IP changes to " - + pool.getPoolMasterVip() + ", it should be " + LOGGER.debug("Primary IP changes to " + + pool.getPoolPrimaryVip() + ", it should be " + c.getIp()); - return new ReadyAnswer(cmd, "I am not the master server"); + return new ReadyAnswer(cmd, "I am not the primary server"); } - } else if (host.getIsMaster()) { - LOGGER.debug("Master, not clustered " + } else if (host.getIsPrimary()) { + LOGGER.debug("Primary, not clustered " + config.getAgentHostname()); return new ReadyAnswer(cmd); } else { - LOGGER.debug("No master, not clustered " + LOGGER.debug("No primary, not clustered " + config.getAgentHostname()); return new ReadyAnswer(cmd); } diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3StoragePool.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3StoragePool.java index 6f76eb98150..17ff7153fcf 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3StoragePool.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3StoragePool.java @@ -138,7 +138,7 @@ public class Ovm3StoragePool { * @throws ConfigurationException */ public boolean prepareForPool() throws ConfigurationException { - /* need single master uuid */ + /* need single primary uuid */ try { Linux host = new Linux(c); Pool pool = new Pool(c); @@ -201,7 +201,7 @@ public class Ovm3StoragePool { Pool poolHost = new Pool(c); PoolOCFS2 poolFs = new PoolOCFS2(c); - if (config.getAgentIsMaster()) { + if (config.getAgentIsPrimary()) { try { LOGGER.debug("Create poolfs on " + config.getAgentHostname() + " for repo " + primUuid); @@ -218,7 +218,7 @@ public class Ovm3StoragePool { } catch (Ovm3ResourceException e) { throw e; } - } else if (config.getAgentHasMaster()) { + } else if (config.getAgentHasPrimary()) { try { poolHost.joinServerPool(poolAlias, primUuid, config.getOvm3PoolVip(), poolSize + 
1, @@ -262,15 +262,15 @@ public class Ovm3StoragePool { try { Connection m = new Connection(config.getOvm3PoolVip(), c.getPort(), c.getUserName(), c.getPassword()); - Pool poolMaster = new Pool(m); - if (poolMaster.isInAPool()) { - members.addAll(poolMaster.getPoolMemberList()); - if (!poolMaster.getPoolMemberList().contains(c.getIp()) + Pool poolPrimary = new Pool(m); + if (poolPrimary.isInAPool()) { + members.addAll(poolPrimary.getPoolMemberList()); + if (!poolPrimary.getPoolMemberList().contains(c.getIp()) && c.getIp().equals(config.getOvm3PoolVip())) { members.add(c.getIp()); } } else { - LOGGER.warn(c.getIp() + " noticed master " + LOGGER.warn(c.getIp() + " noticed primary " + config.getOvm3PoolVip() + " is not part of pool"); return false; } @@ -306,7 +306,7 @@ public class Ovm3StoragePool { try { Pool pool = new Pool(c); pool.leaveServerPool(cmd.getPool().getUuid()); - /* also connect to the master and update the pool list ? */ + /* also connect to the primary and update the pool list ? 
*/ } catch (Ovm3ResourceException e) { LOGGER.debug( "Delete storage pool on host " @@ -448,8 +448,8 @@ public class Ovm3StoragePool { GlobalLock lock = GlobalLock.getInternLock("prepare.systemvm"); try { /* double check */ - if (config.getAgentHasMaster() && config.getAgentInOvm3Pool()) { - LOGGER.debug("Skip systemvm iso copy, leave it to the master"); + if (config.getAgentHasPrimary() && config.getAgentInOvm3Pool()) { + LOGGER.debug("Skip systemvm iso copy, leave it to the primary"); return; } if (lock.lock(3600)) { diff --git a/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/LinuxTest.java b/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/LinuxTest.java index 9eb60fe131b..bb6a931b464 100644 --- a/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/LinuxTest.java +++ b/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/LinuxTest.java @@ -71,8 +71,8 @@ public class LinuxTest { + "<Registered_IP>192.168.1.64</Registered_IP>" + "<Node_Number>1</Node_Number>" + "<Server_Roles>xen,utility</Server_Roles>" - + "<Is_Current_Master>true</Is_Current_Master>" - + "<Master_Virtual_Ip>192.168.1.230</Master_Virtual_Ip>" + + "<Is_Primary>true</Is_Primary>" + + "<Primary_Virtual_Ip>192.168.1.230</Primary_Virtual_Ip>" + "<Manager_Core_API_Version>3.2.1.516</Manager_Core_API_Version>" + "<Membership_State>Pooled</Membership_State>" + "<Cluster_State>Offline</Cluster_State>" diff --git a/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/PoolTest.java b/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/PoolTest.java index f1c68357037..0f955f1e7d9 100644 --- a/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/PoolTest.java +++ b/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/PoolTest.java @@ -46,9 +46,9 @@ public class PoolTest { + " " + ALIAS + "" - + " " + + " " + VIP - + "" + + "" + " " + " " + 
" " @@ -78,7 +78,7 @@ public class PoolTest { results.basicStringTest(pool.getPoolId(), UUID); results.basicStringTest(pool.getPoolId(), UUID); results.basicStringTest(pool.getPoolAlias(), ALIAS); - results.basicStringTest(pool.getPoolMasterVip(), VIP); + results.basicStringTest(pool.getPoolPrimaryVip(), VIP); results.basicBooleanTest(pool.getPoolMemberList().contains(IP)); results.basicBooleanTest(pool.getPoolMemberList().contains(IP2)); } diff --git a/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3ConfigurationTest.java b/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3ConfigurationTest.java index 80ee54f4d6d..2665975f5ab 100644 --- a/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3ConfigurationTest.java +++ b/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3ConfigurationTest.java @@ -45,7 +45,7 @@ public class Ovm3ConfigurationTest { params.put("password", "unknown"); params.put("username", "root"); params.put("pool", "a9c1219d-817d-4242-b23e-2607801c79d5"); - params.put("ismaster", "false"); + params.put("isprimary", "false"); params.put("storage.network.device", "xenbr0"); params.put("Host.OS.Version", "5.7"); params.put("xenserver.nics.max", "7"); @@ -64,7 +64,7 @@ public class Ovm3ConfigurationTest { params.put("ip", "192.168.1.64"); params.put("guid", "19e5f1e7-22f4-3b6d-8d41-c82f89c65295"); params.put("ovm3vip", "192.168.1.230"); - params.put("hasmaster", "true"); + params.put("hasprimary", "true"); params.put("guest.network.device", "xenbr0"); params.put("cluster", "1"); params.put("xenserver.heartbeat.timeout", "120"); diff --git a/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupportTest.java b/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupportTest.java index b9a603150f0..c3614888ef3 100644 --- 
a/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupportTest.java +++ b/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupportTest.java @@ -192,9 +192,9 @@ public class Ovm3HypervisorSupportTest { } @Test - public void masterCheckTest() throws ConfigurationException { + public void primaryCheckTest() throws ConfigurationException { con = prepare(); - // System.out.println(hypervisor.masterCheck()); + // System.out.println(hypervisor.primaryCheck()); } @Test diff --git a/plugins/hypervisors/ovm3/src/test/resources/scripts/clean_master.sh b/plugins/hypervisors/ovm3/src/test/resources/scripts/clean_primary.sh similarity index 100% rename from plugins/hypervisors/ovm3/src/test/resources/scripts/clean_master.sh rename to plugins/hypervisors/ovm3/src/test/resources/scripts/clean_primary.sh diff --git a/plugins/hypervisors/ovm3/src/test/resources/scripts/clean_slave.sh b/plugins/hypervisors/ovm3/src/test/resources/scripts/clean_secondary.sh similarity index 100% rename from plugins/hypervisors/ovm3/src/test/resources/scripts/clean_slave.sh rename to plugins/hypervisors/ovm3/src/test/resources/scripts/clean_secondary.sh diff --git a/plugins/hypervisors/ovm3/src/test/resources/scripts/create_pool_cluster.py b/plugins/hypervisors/ovm3/src/test/resources/scripts/create_pool_cluster.py index 6c398519dc3..bba41ab48e0 100755 --- a/plugins/hypervisors/ovm3/src/test/resources/scripts/create_pool_cluster.py +++ b/plugins/hypervisors/ovm3/src/test/resources/scripts/create_pool_cluster.py @@ -254,7 +254,7 @@ try: for node in poolDom.getElementsByTagName('Server_Pool'): id = node.getElementsByTagName('Unique_Id')[0].firstChild.nodeValue alias = node.getElementsByTagName('Pool_Alias')[0].firstChild.nodeValue - mvip = node.getElementsByTagName('Master_Virtual_Ip')[0].firstChild.nodeValue + mvip = node.getElementsByTagName('Primary_Virtual_Ip')[0].firstChild.nodeValue print "pool: %s, %s, 
%s" % (id, mvip, alias) members = node.getElementsByTagName('Member') for member in members: diff --git a/plugins/hypervisors/ovm3/src/test/resources/scripts/info.py b/plugins/hypervisors/ovm3/src/test/resources/scripts/info.py index 19ff71e6066..be2f8b18c9b 100755 --- a/plugins/hypervisors/ovm3/src/test/resources/scripts/info.py +++ b/plugins/hypervisors/ovm3/src/test/resources/scripts/info.py @@ -68,14 +68,14 @@ def is_it_up(host, port): print "host: %s:%s UP" % (host, port) return True -# hmm master actions don't apply to a slave -master="192.168.1.161" +# hmm primary actions don't apply to a secondary +primary="192.168.1.161" port=8899 user = "oracle" password = "test123" auth = "%s:%s" % (user, password) server = getCon(auth, 'localhost', port) -mserver = getCon(auth, master, port) +mserver = getCon(auth, primary, port) poolNode=True interface = "c0a80100" role='xen,utility' @@ -93,7 +93,7 @@ try: for node in poolDom.getElementsByTagName('Server_Pool'): id = node.getElementsByTagName('Unique_Id')[0].firstChild.nodeValue alias = node.getElementsByTagName('Pool_Alias')[0].firstChild.nodeValue - mvip = node.getElementsByTagName('Master_Virtual_Ip')[0].firstChild.nodeValue + mvip = node.getElementsByTagName('Primary_Virtual_Ip')[0].firstChild.nodeValue print "pool: %s, %s, %s" % (id, mvip, alias) members = node.getElementsByTagName('Member') for member in members: diff --git a/plugins/hypervisors/ovm3/src/test/resources/scripts/password.py b/plugins/hypervisors/ovm3/src/test/resources/scripts/password.py index 748023d0127..c8ab89e7a63 100755 --- a/plugins/hypervisors/ovm3/src/test/resources/scripts/password.py +++ b/plugins/hypervisors/ovm3/src/test/resources/scripts/password.py @@ -42,7 +42,7 @@ def getCon(host, port): return server -# hmm master actions don't apply to a slave +# hmm primary actions don't apply to a secondary port = 8899 user = "oracle" password = "test123" diff --git a/plugins/hypervisors/ovm3/src/test/resources/scripts/repo_pool.py 
b/plugins/hypervisors/ovm3/src/test/resources/scripts/repo_pool.py index 38861644b63..f9a47dbf68c 100755 --- a/plugins/hypervisors/ovm3/src/test/resources/scripts/repo_pool.py +++ b/plugins/hypervisors/ovm3/src/test/resources/scripts/repo_pool.py @@ -44,14 +44,14 @@ def is_it_up(host, port): print "host: %s:%s UP" % (host, port) return True -# hmm master actions don't apply to a slave -master = "192.168.1.161" +# hmm primary actions don't apply to a secondary +primary = "192.168.1.161" port = 8899 user = "oracle" password = "*******" auth = "%s:%s" % (user, password) server = ServerProxy("http://%s:%s" % ("localhost", port)) -mserver = ServerProxy("http://%s@%s:%s" % (auth, master, port)) +mserver = ServerProxy("http://%s@%s:%s" % (auth, primary, port)) poolNode = True interface = "c0a80100" role = 'xen,utility' @@ -63,11 +63,11 @@ xserver = server print "setting up password" server.update_agent_password(user, password) -if (is_it_up(master, port)): - print "master seems to be up, slaving" +if (is_it_up(primary, port)): + print "primary seems to be up, will become secondary" xserver = mserver else: - print "no master yet, will become master" + print "no primary yet, will become primary" # other mechanism must be used to make interfaces equal... 
try: @@ -79,7 +79,7 @@ try: poolfsuuid = poolid clusterid = "ba9aaf00ae5e2d72" mgr = "d1a749d4295041fb99854f52ea4dea97" - poolmvip = master + poolmvip = primary poolfsnfsbaseuuid = "6824e646-5908-48c9-ba44-bb1a8a778084" repoid = "6824e646590848c9ba44bb1a8a778084" @@ -114,7 +114,7 @@ try: for node in poolDom.getElementsByTagName('Server_Pool'): id = node.getElementsByTagName('Unique_Id')[0].firstChild.nodeValue alias = node.getElementsByTagName('Pool_Alias')[0].firstChild.nodeValue - mvip = node.getElementsByTagName('Master_Virtual_Ip')[0].firstChild.nodeValue + mvip = node.getElementsByTagName('Primary_Virtual_Ip')[0].firstChild.nodeValue print "pool: %s, %s, %s" % (id, mvip, alias) members = node.getElementsByTagName('Member') for member in members: @@ -127,7 +127,7 @@ try: poolMembers.append(mip) except Error, v: - print "no master will become master, %s" % v + print "no primary will become primary, %s" % v if (pooled == False): # setup the repository diff --git a/plugins/hypervisors/ovm3/src/test/resources/scripts/simple_pool.py b/plugins/hypervisors/ovm3/src/test/resources/scripts/simple_pool.py index 33789a2faf7..4ded83ef005 100755 --- a/plugins/hypervisors/ovm3/src/test/resources/scripts/simple_pool.py +++ b/plugins/hypervisors/ovm3/src/test/resources/scripts/simple_pool.py @@ -55,14 +55,14 @@ def get_ip_address(ifname): struct.pack('256s', ifname[:15]) )[20:24]) -# hmm master actions don't apply to a slave -master = "192.168.1.161" +# hmm primary actions don't apply to a secondary +primary = "192.168.1.161" port = 8899 passw = 'test123' user = 'oracle' auth = "%s:%s" % (user, passw) server = getCon(auth, "localhost", port) -mserver = getCon(auth, master, port) +mserver = getCon(auth, primary, port) try: mserver.echo("test") except AttributeError, v: @@ -81,7 +81,7 @@ try: poolalias = "Pool 0" clusterid = "ba9aaf00ae5e2d72" mgr = "d1a749d4295041fb99854f52ea4dea97" - poolmvip = master + poolmvip = primary # primary primuuid = "7718562d872f47a7b4548f9cac4ffa3a" 
@@ -119,7 +119,7 @@ try: for node in poolDom.getElementsByTagName('Server_Pool'): id = node.getElementsByTagName('Unique_Id')[0].firstChild.nodeValue alias = node.getElementsByTagName('Pool_Alias')[0].firstChild.nodeValue - mvip = node.getElementsByTagName('Master_Virtual_Ip')[0].firstChild.nodeValue + mvip = node.getElementsByTagName('Primary_Virtual_Ip')[0].firstChild.nodeValue print "pool: %s, %s, %s" % (id, mvip, alias) members = node.getElementsByTagName('Member') for member in members: @@ -134,10 +134,10 @@ try: # if (pooled == False): try: if (poolCount == 0): - print "master" + print "primary" # check if a pool exists already if not create # pool if so add us to the pool - print server.configure_virtual_ip(master, ip) + print server.configure_virtual_ip(primary, ip) print server.create_pool_filesystem( fstype, fsmntpoint, @@ -157,7 +157,7 @@ try: ) else: try: - print "slave" + print "secondary" print server.join_server_pool(poolalias, primuuid, poolmvip, @@ -174,7 +174,7 @@ try: # con = getCon(auth, node, port) # print con.set_pool_member_ip_list(nodes); print mserver.dispatch("http://%s@%s:%s/api/3" % (auth, node, port), "set_pool_member_ip_list", nodes) - # print server.configure_virtual_ip(master, ip) + # print server.configure_virtual_ip(primary, ip) except Error, e: print "something went wrong: %s" % (e) diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockVmManagerImpl.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockVmManagerImpl.java index 6eaf09cad85..365dfb097d2 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockVmManagerImpl.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockVmManagerImpl.java @@ -261,9 +261,9 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { final MockVm vm = _mockVmDao.findByVmName(router_name); final String args = vm.getBootargs(); if (args.indexOf("router_pr=100") > 0) 
{ - s_logger.debug("Router priority is for MASTER"); - final CheckRouterAnswer ans = new CheckRouterAnswer(cmd, "Status: MASTER", true); - ans.setState(VirtualRouter.RedundantState.MASTER); + s_logger.debug("Router priority is for PRIMARY"); + final CheckRouterAnswer ans = new CheckRouterAnswer(cmd, "Status: PRIMARY", true); + ans.setState(VirtualRouter.RedundantState.PRIMARY); return ans; } else { s_logger.debug("Router priority is for BACKUP"); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java index aef304a03d3..0e1f91c9c8a 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java @@ -119,7 +119,7 @@ public interface KubernetesCluster extends ControlledEntity, com.cloud.utils.fsm long getNetworkId(); long getDomainId(); long getAccountId(); - long getMasterNodeCount(); + long getControlNodeCount(); long getNodeCount(); long getTotalNodeCount(); String getKeyPair(); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index f091d071690..a384a07d7b3 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -583,7 +583,8 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne DataCenterVO zone = ApiDBUtils.findZoneById(kubernetesCluster.getZoneId()); response.setZoneId(zone.getUuid()); 
response.setZoneName(zone.getName()); - response.setMasterNodes(kubernetesCluster.getMasterNodeCount()); + response.setMasterNodes(kubernetesCluster.getControlNodeCount()); + response.setControlNodes(kubernetesCluster.getControlNodeCount()); response.setClusterSize(kubernetesCluster.getNodeCount()); VMTemplateVO template = ApiDBUtils.findTemplateById(kubernetesCluster.getTemplateId()); response.setTemplateId(template.getUuid()); @@ -651,7 +652,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne final Account owner = accountService.getActiveAccountById(cmd.getEntityOwnerId()); final Long networkId = cmd.getNetworkId(); final String sshKeyPair = cmd.getSSHKeyPairName(); - final Long masterNodeCount = cmd.getMasterNodes(); + final Long controlNodeCount = cmd.getControlNodes(); final Long clusterSize = cmd.getClusterSize(); final String dockerRegistryUserName = cmd.getDockerRegistryUserName(); final String dockerRegistryPassword = cmd.getDockerRegistryPassword(); @@ -664,8 +665,8 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne throw new InvalidParameterValueException("Invalid name for the Kubernetes cluster name:" + name); } - if (masterNodeCount < 1 || masterNodeCount > 100) { - throw new InvalidParameterValueException("Invalid cluster master nodes count: " + masterNodeCount); + if (controlNodeCount < 1 || controlNodeCount > 100) { + throw new InvalidParameterValueException("Invalid cluster control nodes count: " + controlNodeCount); } if (clusterSize < 1 || clusterSize > 100) { @@ -695,7 +696,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne if (clusterKubernetesVersion.getZoneId() != null && !clusterKubernetesVersion.getZoneId().equals(zone.getId())) { throw new InvalidParameterValueException(String.format("Kubernetes version ID: %s is not available for zone ID: %s", clusterKubernetesVersion.getUuid(), zone.getUuid())); } - if (masterNodeCount > 1 ) { + if 
(controlNodeCount > 1 ) { try { if (KubernetesVersionManagerImpl.compareSemanticVersions(clusterKubernetesVersion.getSemanticVersion(), MIN_KUBERNETES_VERSION_HA_SUPPORT) < 0) { throw new InvalidParameterValueException(String.format("HA support is available only for Kubernetes version %s and above. Given version ID: %s is %s", MIN_KUBERNETES_VERSION_HA_SUPPORT, clusterKubernetesVersion.getUuid(), clusterKubernetesVersion.getSemanticVersion())); @@ -765,14 +766,14 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne } } - private Network getKubernetesClusterNetworkIfMissing(final String clusterName, final DataCenter zone, final Account owner, final int masterNodesCount, + private Network getKubernetesClusterNetworkIfMissing(final String clusterName, final DataCenter zone, final Account owner, final int controlNodesCount, final int nodesCount, final String externalLoadBalancerIpAddress, final Long networkId) throws CloudRuntimeException { Network network = null; if (networkId != null) { network = networkDao.findById(networkId); if (Network.GuestType.Isolated.equals(network.getGuestType())) { if (kubernetesClusterDao.listByNetworkId(network.getId()).isEmpty()) { - if (!validateNetwork(network, masterNodesCount + nodesCount)) { + if (!validateNetwork(network, controlNodesCount + nodesCount)) { throw new InvalidParameterValueException(String.format("Network ID: %s is not suitable for Kubernetes cluster", network.getUuid())); } networkModel.checkNetworkPermissions(owner, network); @@ -780,8 +781,8 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne throw new InvalidParameterValueException(String.format("Network ID: %s is already under use by another Kubernetes cluster", network.getUuid())); } } else if (Network.GuestType.Shared.equals(network.getGuestType())) { - if (masterNodesCount > 1 && Strings.isNullOrEmpty(externalLoadBalancerIpAddress)) { - throw new 
InvalidParameterValueException(String.format("Multi-master, HA Kubernetes cluster with %s network ID: %s needs an external load balancer IP address. %s parameter can be used", + if (controlNodesCount > 1 && Strings.isNullOrEmpty(externalLoadBalancerIpAddress)) { + throw new InvalidParameterValueException(String.format("Multi-control nodes, HA Kubernetes cluster with %s network ID: %s needs an external load balancer IP address. %s parameter can be used", network.getGuestType().toString(), network.getUuid(), ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS)); } } @@ -1005,9 +1006,9 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne validateKubernetesClusterCreateParameters(cmd); final DataCenter zone = dataCenterDao.findById(cmd.getZoneId()); - final long masterNodeCount = cmd.getMasterNodes(); + final long controlNodeCount = cmd.getControlNodes(); final long clusterSize = cmd.getClusterSize(); - final long totalNodeCount = masterNodeCount + clusterSize; + final long totalNodeCount = controlNodeCount + clusterSize; final ServiceOffering serviceOffering = serviceOfferingDao.findById(cmd.getServiceOfferingId()); final Account owner = accountService.getActiveAccountById(cmd.getEntityOwnerId()); final KubernetesSupportedVersion clusterKubernetesVersion = kubernetesSupportedVersionDao.findById(cmd.getKubernetesVersionId()); @@ -1022,17 +1023,17 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne logAndThrow(Level.ERROR, String.format("Creating Kubernetes cluster failed due to error while finding suitable deployment plan for cluster in zone : %s", zone.getName())); } - final Network defaultNetwork = getKubernetesClusterNetworkIfMissing(cmd.getName(), zone, owner, (int)masterNodeCount, (int)clusterSize, cmd.getExternalLoadBalancerIpAddress(), cmd.getNetworkId()); + final Network defaultNetwork = getKubernetesClusterNetworkIfMissing(cmd.getName(), zone, owner, (int)controlNodeCount, (int)clusterSize, 
cmd.getExternalLoadBalancerIpAddress(), cmd.getNetworkId()); final VMTemplateVO finalTemplate = getKubernetesServiceTemplate(deployDestination.getCluster().getHypervisorType()); - final long cores = serviceOffering.getCpu() * (masterNodeCount + clusterSize); - final long memory = serviceOffering.getRamSize() * (masterNodeCount + clusterSize); + final long cores = serviceOffering.getCpu() * (controlNodeCount + clusterSize); + final long memory = serviceOffering.getRamSize() * (controlNodeCount + clusterSize); final KubernetesClusterVO cluster = Transaction.execute(new TransactionCallback() { @Override public KubernetesClusterVO doInTransaction(TransactionStatus status) { KubernetesClusterVO newCluster = new KubernetesClusterVO(cmd.getName(), cmd.getDisplayName(), zone.getId(), clusterKubernetesVersion.getId(), serviceOffering.getId(), finalTemplate.getId(), defaultNetwork.getId(), owner.getDomainId(), - owner.getAccountId(), masterNodeCount, clusterSize, KubernetesCluster.State.Created, cmd.getSSHKeyPairName(), cores, memory, cmd.getNodeRootDiskSize(), ""); + owner.getAccountId(), controlNodeCount, clusterSize, KubernetesCluster.State.Created, cmd.getSSHKeyPairName(), cores, memory, cmd.getNodeRootDiskSize(), ""); kubernetesClusterDao.persist(newCluster); return newCluster; } @@ -1318,7 +1319,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne /* Kubernetes cluster scanner checks if the Kubernetes cluster is in desired state. If it detects Kubernetes cluster is not in desired state, it will trigger an event and marks the Kubernetes cluster to be 'Alert' state. For e.g a Kubernetes cluster in 'Running' state should mean all the cluster of node VM's in the custer should be running and - number of the node VM's should be of cluster size, and the master node VM's is running. It is possible due to + number of the node VM's should be of cluster size, and the control node VM's is running. 
It is possible due to out of band changes by user or hosts going down, we may end up one or more VM's in stopped state. in which case scanner detects these changes and marks the cluster in 'Alert' state. Similarly cluster in 'Stopped' state means all the cluster VM's are in stopped state any mismatch in states should get picked up by Kubernetes cluster and @@ -1442,7 +1443,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne boolean isClusterVMsInDesiredState(KubernetesCluster kubernetesCluster, VirtualMachine.State state) { List clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); - // check cluster is running at desired capacity include master nodes as well + // check cluster is running at desired capacity include control nodes as well if (clusterVMs.size() < kubernetesCluster.getTotalNodeCount()) { if (LOGGER.isDebugEnabled()) { LOGGER.debug(String.format("Found only %d VMs in the Kubernetes cluster ID: %s while expected %d VMs to be in state: %s", diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java index 9ff0be335f3..b6a37d9607c 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java @@ -69,8 +69,8 @@ public class KubernetesClusterVO implements KubernetesCluster { @Column(name = "account_id") private long accountId; - @Column(name = "master_node_count") - private long masterNodeCount; + @Column(name = "control_node_count") + private long controlNodeCount; @Column(name = "node_count") private long nodeCount; @@ -202,12 +202,12 @@ public class KubernetesClusterVO implements KubernetesCluster { } @Override - public long getMasterNodeCount() { - return 
masterNodeCount; + public long getControlNodeCount() { + return controlNodeCount; } - public void setMasterNodeCount(long masterNodeCount) { - this.masterNodeCount = masterNodeCount; + public void setControlNodeCount(long controlNodeCount) { + this.controlNodeCount = controlNodeCount; } @Override @@ -221,7 +221,7 @@ public class KubernetesClusterVO implements KubernetesCluster { @Override public long getTotalNodeCount() { - return this.masterNodeCount + this.nodeCount; + return this.controlNodeCount + this.nodeCount; } @Override @@ -308,7 +308,7 @@ public class KubernetesClusterVO implements KubernetesCluster { } public KubernetesClusterVO(String name, String description, long zoneId, long kubernetesVersionId, long serviceOfferingId, long templateId, - long networkId, long domainId, long accountId, long masterNodeCount, long nodeCount, State state, + long networkId, long domainId, long accountId, long controlNodeCount, long nodeCount, State state, String keyPair, long cores, long memory, Long nodeRootDiskSize, String endpoint) { this.uuid = UUID.randomUUID().toString(); this.name = name; @@ -320,7 +320,7 @@ public class KubernetesClusterVO implements KubernetesCluster { this.networkId = networkId; this.domainId = domainId; this.accountId = accountId; - this.masterNodeCount = masterNodeCount; + this.controlNodeCount = controlNodeCount; this.nodeCount = nodeCount; this.state = state; this.keyPair = keyPair; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index 1ca58a0dfa6..5f663dff784 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -231,9 +231,9 @@ public class KubernetesClusterActionWorker { }); } - private UserVm fetchMasterVmIfMissing(final UserVm masterVm) { - if (masterVm != null) { - return masterVm; + private UserVm fetchControlVmIfMissing(final UserVm controlVm) { + if (controlVm != null) { + return controlVm; } List clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); if (CollectionUtils.isEmpty(clusterVMs)) { @@ -248,16 +248,16 @@ public class KubernetesClusterActionWorker { return userVmDao.findById(vmIds.get(0)); } - protected String getMasterVmPrivateIp() { + protected String getControlVmPrivateIp() { String ip = null; - UserVm vm = fetchMasterVmIfMissing(null); + UserVm vm = fetchControlVmIfMissing(null); if (vm != null) { ip = vm.getPrivateIpAddress(); } return ip; } - protected Pair getKubernetesClusterServerIpSshPort(UserVm masterVm) { + protected Pair getKubernetesClusterServerIpSshPort(UserVm controlVm) { int port = CLUSTER_NODES_DEFAULT_START_SSH_PORT; KubernetesClusterDetailsVO detail = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS); if (detail != null && !Strings.isNullOrEmpty(detail.getValue())) { @@ -283,12 +283,12 @@ public class KubernetesClusterActionWorker { return new Pair<>(null, port); } else if (Network.GuestType.Shared.equals(network.getGuestType())) { port = 22; - masterVm = fetchMasterVmIfMissing(masterVm); - if (masterVm == null) { - LOGGER.warn(String.format("Unable to retrieve master VM for Kubernetes cluster : %s", kubernetesCluster.getName())); + controlVm = fetchControlVmIfMissing(controlVm); + if (controlVm == null) { + LOGGER.warn(String.format("Unable to retrieve control VM for Kubernetes cluster : %s", kubernetesCluster.getName())); return new Pair<>(null, port); } - return new Pair<>(masterVm.getPrivateIpAddress(), 
port); + return new Pair<>(controlVm.getPrivateIpAddress(), port); } LOGGER.warn(String.format("Unable to retrieve server IP address for Kubernetes cluster : %s", kubernetesCluster.getName())); return new Pair<>(null, port); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index 1fce00ba81d..3e32d8ebf4c 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -124,7 +124,7 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif throw new ManagementServerException("Firewall rule for node SSH access can't be provisioned"); } int existingFirewallRuleSourcePortEnd = firewallRule.getSourcePortEnd(); - final int scaledTotalNodeCount = clusterSize == null ? (int)kubernetesCluster.getTotalNodeCount() : (int)(clusterSize + kubernetesCluster.getMasterNodeCount()); + final int scaledTotalNodeCount = clusterSize == null ? (int)kubernetesCluster.getTotalNodeCount() : (int)(clusterSize + kubernetesCluster.getControlNodeCount()); // Provision new SSH firewall rules try { provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, CLUSTER_NODES_DEFAULT_START_SSH_PORT + scaledTotalNodeCount - 1); @@ -170,7 +170,7 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif final ServiceOffering serviceOffering = newServiceOffering == null ? serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()) : newServiceOffering; final Long serviceOfferingId = newServiceOffering == null ? null : serviceOffering.getId(); - final long size = newSize == null ? 
kubernetesCluster.getTotalNodeCount() : (newSize + kubernetesCluster.getMasterNodeCount()); + final long size = newSize == null ? kubernetesCluster.getTotalNodeCount() : (newSize + kubernetesCluster.getControlNodeCount()); final long cores = serviceOffering.getCpu() * size; final long memory = serviceOffering.getRamSize() * size; KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(cores, memory, newSize, serviceOfferingId); @@ -309,7 +309,7 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif final List originalVmList = getKubernetesClusterVMMaps(); int i = originalVmList.size() - 1; List removedVmIds = new ArrayList<>(); - while (i >= kubernetesCluster.getMasterNodeCount() + clusterSize) { + while (i >= kubernetesCluster.getControlNodeCount() + clusterSize) { KubernetesClusterVmMapVO vmMapVO = originalVmList.get(i); UserVmVO userVM = userVmDao.findById(vmMapVO.getVmId()); if (!removeKubernetesClusterNode(publicIpAddress, sshPort, userVM, 3, 30000)) { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 4a96b9ed2ba..54c3a6228d1 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -89,8 +89,8 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif return kubernetesClusterVersion; } - private Pair> getKubernetesMasterIpAddresses(final DataCenter zone, final Network network, final Account account) throws InsufficientAddressCapacityException { - String masterIp = null; + private Pair> getKubernetesControlIpAddresses(final DataCenter zone, 
final Network network, final Account account) throws InsufficientAddressCapacityException { + String controlIp = null; Map requestedIps = null; if (Network.GuestType.Shared.equals(network.getGuestType())) { List vlanIds = new ArrayList<>(); @@ -100,16 +100,16 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif } PublicIp ip = ipAddressManager.getAvailablePublicIpAddressFromVlans(zone.getId(), null, account, Vlan.VlanType.DirectAttached, vlanIds,network.getId(), null, false); if (ip != null) { - masterIp = ip.getAddress().toString(); + controlIp = ip.getAddress().toString(); } requestedIps = new HashMap<>(); Ip ipAddress = ip.getAddress(); boolean isIp6 = ipAddress.isIp6(); requestedIps.put(network.getId(), new Network.IpAddresses(ipAddress.isIp4() ? ip.getAddress().addr() : null, null)); } else { - masterIp = ipAddressManager.acquireGuestIpAddress(networkDao.findById(kubernetesCluster.getNetworkId()), null); + controlIp = ipAddressManager.acquireGuestIpAddress(networkDao.findById(kubernetesCluster.getNetworkId()), null); } - return new Pair<>(masterIp, requestedIps); + return new Pair<>(controlIp, requestedIps); } private boolean isKubernetesVersionSupportsHA() { @@ -127,10 +127,10 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif return haSupported; } - private String getKubernetesMasterConfig(final String masterIp, final String serverIp, - final String hostName, final boolean haSupported, - final boolean ejectIso) throws IOException { - String k8sMasterConfig = readResourceFile("/conf/k8s-master.yml"); + private String getKubernetesControlConfig(final String controlIp, final String serverIp, + final String hostName, final boolean haSupported, + final boolean ejectIso) throws IOException { + String k8sControlConfig = readResourceFile("/conf/k8s-control-node.yml"); final String apiServerCert = "{{ k8s_master.apiserver.crt }}"; final String apiServerKey = "{{ k8s_master.apiserver.key }}"; final String 
caCert = "{{ k8s_master.ca.crt }}"; @@ -139,8 +139,8 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif final String clusterInitArgsKey = "{{ k8s_master.cluster.initargs }}"; final String ejectIsoKey = "{{ k8s.eject.iso }}"; final List addresses = new ArrayList<>(); - addresses.add(masterIp); - if (!serverIp.equals(masterIp)) { + addresses.add(controlIp); + if (!serverIp.equals(controlIp)) { addresses.add(serverIp); } final Certificate certificate = caManager.issueCertificate(null, Arrays.asList(hostName, "kubernetes", @@ -149,9 +149,9 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif final String tlsClientCert = CertUtils.x509CertificateToPem(certificate.getClientCertificate()); final String tlsPrivateKey = CertUtils.privateKeyToPem(certificate.getPrivateKey()); final String tlsCaCert = CertUtils.x509CertificatesToPem(certificate.getCaCertificates()); - k8sMasterConfig = k8sMasterConfig.replace(apiServerCert, tlsClientCert.replace("\n", "\n ")); - k8sMasterConfig = k8sMasterConfig.replace(apiServerKey, tlsPrivateKey.replace("\n", "\n ")); - k8sMasterConfig = k8sMasterConfig.replace(caCert, tlsCaCert.replace("\n", "\n ")); + k8sControlConfig = k8sControlConfig.replace(apiServerCert, tlsClientCert.replace("\n", "\n ")); + k8sControlConfig = k8sControlConfig.replace(apiServerKey, tlsPrivateKey.replace("\n", "\n ")); + k8sControlConfig = k8sControlConfig.replace(caCert, tlsCaCert.replace("\n", "\n ")); String pubKey = "- \"" + configurationDao.getValue("ssh.publickey") + "\""; String sshKeyPair = kubernetesCluster.getKeyPair(); if (!Strings.isNullOrEmpty(sshKeyPair)) { @@ -160,8 +160,8 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; } } - k8sMasterConfig = k8sMasterConfig.replace(sshPubKey, pubKey); - k8sMasterConfig = k8sMasterConfig.replace(clusterToken, 
KubernetesClusterUtil.generateClusterToken(kubernetesCluster)); + k8sControlConfig = k8sControlConfig.replace(sshPubKey, pubKey); + k8sControlConfig = k8sControlConfig.replace(clusterToken, KubernetesClusterUtil.generateClusterToken(kubernetesCluster)); String initArgs = ""; if (haSupported) { initArgs = String.format("--control-plane-endpoint %s:%d --upload-certs --certificate-key %s ", @@ -171,55 +171,55 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif } initArgs += String.format("--apiserver-cert-extra-sans=%s", serverIp); initArgs += String.format(" --kubernetes-version=%s", getKubernetesClusterVersion().getSemanticVersion()); - k8sMasterConfig = k8sMasterConfig.replace(clusterInitArgsKey, initArgs); - k8sMasterConfig = k8sMasterConfig.replace(ejectIsoKey, String.valueOf(ejectIso)); - return k8sMasterConfig; + k8sControlConfig = k8sControlConfig.replace(clusterInitArgsKey, initArgs); + k8sControlConfig = k8sControlConfig.replace(ejectIsoKey, String.valueOf(ejectIso)); + return k8sControlConfig; } - private UserVm createKubernetesMaster(final Network network, String serverIp) throws ManagementServerException, + private UserVm createKubernetesControlNode(final Network network, String serverIp) throws ManagementServerException, ResourceUnavailableException, InsufficientCapacityException { - UserVm masterVm = null; + UserVm controlVm = null; DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); List networkIds = new ArrayList(); networkIds.add(kubernetesCluster.getNetworkId()); - Pair> ipAddresses = getKubernetesMasterIpAddresses(zone, network, owner); - String masterIp = ipAddresses.first(); + Pair> ipAddresses = getKubernetesControlIpAddresses(zone, network, owner); + String controlIp = ipAddresses.first(); Map requestedIps = ipAddresses.second(); if (Network.GuestType.Shared.equals(network.getGuestType()) 
&& Strings.isNullOrEmpty(serverIp)) { - serverIp = masterIp; + serverIp = controlIp; } - Network.IpAddresses addrs = new Network.IpAddresses(masterIp, null); + Network.IpAddresses addrs = new Network.IpAddresses(controlIp, null); long rootDiskSize = kubernetesCluster.getNodeRootDiskSize(); Map customParameterMap = new HashMap(); if (rootDiskSize > 0) { customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize)); } - String hostName = kubernetesClusterNodeNamePrefix + "-master"; - if (kubernetesCluster.getMasterNodeCount() > 1) { + String hostName = kubernetesClusterNodeNamePrefix + "-control"; + if (kubernetesCluster.getControlNodeCount() > 1) { hostName += "-1"; } hostName = getKubernetesClusterNodeAvailableName(hostName); boolean haSupported = isKubernetesVersionSupportsHA(); - String k8sMasterConfig = null; + String k8sControlConfig = null; try { - k8sMasterConfig = getKubernetesMasterConfig(masterIp, serverIp, hostName, haSupported, Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType())); + k8sControlConfig = getKubernetesControlConfig(controlIp, serverIp, hostName, haSupported, Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType())); } catch (IOException e) { - logAndThrow(Level.ERROR, "Failed to read Kubernetes master configuration file", e); + logAndThrow(Level.ERROR, "Failed to read Kubernetes control configuration file", e); } - String base64UserData = Base64.encodeBase64String(k8sMasterConfig.getBytes(StringUtils.getPreferredCharset())); - masterVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, + String base64UserData = Base64.encodeBase64String(k8sControlConfig.getBytes(StringUtils.getPreferredCharset())); + controlVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, 
kubernetesCluster.getKeyPair(), requestedIps, addrs, null, null, null, customParameterMap, null, null, null, null); if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Created master VM ID: %s, %s in the Kubernetes cluster : %s", masterVm.getUuid(), hostName, kubernetesCluster.getName())); + LOGGER.info(String.format("Created control VM ID: %s, %s in the Kubernetes cluster : %s", controlVm.getUuid(), hostName, kubernetesCluster.getName())); } - return masterVm; + return controlVm; } - private String getKubernetesAdditionalMasterConfig(final String joinIp, final boolean ejectIso) throws IOException { - String k8sMasterConfig = readResourceFile("/conf/k8s-master-add.yml"); + private String getKubernetesAdditionalControlConfig(final String joinIp, final boolean ejectIso) throws IOException { + String k8sControlConfig = readResourceFile("/conf/k8s-control-node-add.yml"); final String joinIpKey = "{{ k8s_master.join_ip }}"; final String clusterTokenKey = "{{ k8s_master.cluster.token }}"; final String sshPubKey = "{{ k8s.ssh.pub.key }}"; @@ -233,17 +233,17 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; } } - k8sMasterConfig = k8sMasterConfig.replace(sshPubKey, pubKey); - k8sMasterConfig = k8sMasterConfig.replace(joinIpKey, joinIp); - k8sMasterConfig = k8sMasterConfig.replace(clusterTokenKey, KubernetesClusterUtil.generateClusterToken(kubernetesCluster)); - k8sMasterConfig = k8sMasterConfig.replace(clusterHACertificateKey, KubernetesClusterUtil.generateClusterHACertificateKey(kubernetesCluster)); - k8sMasterConfig = k8sMasterConfig.replace(ejectIsoKey, String.valueOf(ejectIso)); - return k8sMasterConfig; + k8sControlConfig = k8sControlConfig.replace(sshPubKey, pubKey); + k8sControlConfig = k8sControlConfig.replace(joinIpKey, joinIp); + k8sControlConfig = k8sControlConfig.replace(clusterTokenKey, KubernetesClusterUtil.generateClusterToken(kubernetesCluster)); + k8sControlConfig = 
k8sControlConfig.replace(clusterHACertificateKey, KubernetesClusterUtil.generateClusterHACertificateKey(kubernetesCluster)); + k8sControlConfig = k8sControlConfig.replace(ejectIsoKey, String.valueOf(ejectIso)); + return k8sControlConfig; } - private UserVm createKubernetesAdditionalMaster(final String joinIp, final int additionalMasterNodeInstance) throws ManagementServerException, + private UserVm createKubernetesAdditionalControlNode(final String joinIp, final int additionalControlNodeInstance) throws ManagementServerException, ResourceUnavailableException, InsufficientCapacityException { - UserVm additionalMasterVm = null; + UserVm additionalControlVm = null; DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); List networkIds = new ArrayList(); @@ -254,50 +254,50 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif if (rootDiskSize > 0) { customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize)); } - String hostName = getKubernetesClusterNodeAvailableName(String.format("%s-master-%d", kubernetesClusterNodeNamePrefix, additionalMasterNodeInstance + 1)); - String k8sMasterConfig = null; + String hostName = getKubernetesClusterNodeAvailableName(String.format("%s-control-%d", kubernetesClusterNodeNamePrefix, additionalControlNodeInstance + 1)); + String k8sControlConfig = null; try { - k8sMasterConfig = getKubernetesAdditionalMasterConfig(joinIp, Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType())); + k8sControlConfig = getKubernetesAdditionalControlConfig(joinIp, Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType())); } catch (IOException e) { - logAndThrow(Level.ERROR, "Failed to read Kubernetes master configuration file", e); + logAndThrow(Level.ERROR, "Failed to read Kubernetes control configuration file", e); } - String base64UserData = 
Base64.encodeBase64String(k8sMasterConfig.getBytes(StringUtils.getPreferredCharset())); - additionalMasterVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, + String base64UserData = Base64.encodeBase64String(k8sControlConfig.getBytes(StringUtils.getPreferredCharset())); + additionalControlVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(), null, addrs, null, null, null, customParameterMap, null, null, null, null); if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Created master VM ID : %s, %s in the Kubernetes cluster : %s", additionalMasterVm.getUuid(), hostName, kubernetesCluster.getName())); + LOGGER.info(String.format("Created control VM ID : %s, %s in the Kubernetes cluster : %s", additionalControlVm.getUuid(), hostName, kubernetesCluster.getName())); } - return additionalMasterVm; + return additionalControlVm; } - private UserVm provisionKubernetesClusterMasterVm(final Network network, final String publicIpAddress) throws + private UserVm provisionKubernetesClusterControlVm(final Network network, final String publicIpAddress) throws ManagementServerException, InsufficientCapacityException, ResourceUnavailableException { - UserVm k8sMasterVM = null; - k8sMasterVM = createKubernetesMaster(network, publicIpAddress); - addKubernetesClusterVm(kubernetesCluster.getId(), k8sMasterVM.getId()); + UserVm k8sControlVM = null; + k8sControlVM = createKubernetesControlNode(network, publicIpAddress); + addKubernetesClusterVm(kubernetesCluster.getId(), k8sControlVM.getId()); if (kubernetesCluster.getNodeRootDiskSize() > 0) { - resizeNodeVolume(k8sMasterVM); + resizeNodeVolume(k8sControlVM); } - startKubernetesVM(k8sMasterVM); - k8sMasterVM = userVmDao.findById(k8sMasterVM.getId()); - if (k8sMasterVM == null) { - 
throw new ManagementServerException(String.format("Failed to provision master VM for Kubernetes cluster : %s" , kubernetesCluster.getName())); + startKubernetesVM(k8sControlVM); + k8sControlVM = userVmDao.findById(k8sControlVM.getId()); + if (k8sControlVM == null) { + throw new ManagementServerException(String.format("Failed to provision control VM for Kubernetes cluster : %s" , kubernetesCluster.getName())); } if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Provisioned the master VM : %s in to the Kubernetes cluster : %s", k8sMasterVM.getDisplayName(), kubernetesCluster.getName())); + LOGGER.info(String.format("Provisioned the control VM : %s in to the Kubernetes cluster : %s", k8sControlVM.getDisplayName(), kubernetesCluster.getName())); } - return k8sMasterVM; + return k8sControlVM; } - private List provisionKubernetesClusterAdditionalMasterVms(final String publicIpAddress) throws + private List provisionKubernetesClusterAdditionalControlVms(final String publicIpAddress) throws InsufficientCapacityException, ManagementServerException, ResourceUnavailableException { - List additionalMasters = new ArrayList<>(); - if (kubernetesCluster.getMasterNodeCount() > 1) { - for (int i = 1; i < kubernetesCluster.getMasterNodeCount(); i++) { + List additionalControlVms = new ArrayList<>(); + if (kubernetesCluster.getControlNodeCount() > 1) { + for (int i = 1; i < kubernetesCluster.getControlNodeCount(); i++) { UserVm vm = null; - vm = createKubernetesAdditionalMaster(publicIpAddress, i); + vm = createKubernetesAdditionalControlNode(publicIpAddress, i); addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId()); if (kubernetesCluster.getNodeRootDiskSize() > 0) { resizeNodeVolume(vm); @@ -305,15 +305,15 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif startKubernetesVM(vm); vm = userVmDao.findById(vm.getId()); if (vm == null) { - throw new ManagementServerException(String.format("Failed to provision additional master VM for 
Kubernetes cluster : %s" , kubernetesCluster.getName())); + throw new ManagementServerException(String.format("Failed to provision additional control VM for Kubernetes cluster : %s" , kubernetesCluster.getName())); } - additionalMasters.add(vm); + additionalControlVms.add(vm); if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Provisioned additional master VM : %s in to the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName())); + LOGGER.info(String.format("Provisioned additional control VM : %s in to the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName())); } } } - return additionalMasters; + return additionalControlVms; } private Network startKubernetesClusterNetwork(final DeployDestination destination) throws ManagementServerException { @@ -348,10 +348,10 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif account.getId(), false, NetUtils.TCP_PROTO, true); Map> vmIdIpMap = new HashMap<>(); - for (int i = 0; i < kubernetesCluster.getMasterNodeCount(); ++i) { + for (int i = 0; i < kubernetesCluster.getControlNodeCount(); ++i) { List ips = new ArrayList<>(); - Nic masterVmNic = networkModel.getNicInNetwork(clusterVMIds.get(i), kubernetesCluster.getNetworkId()); - ips.add(masterVmNic.getIPv4Address()); + Nic controlVmNic = networkModel.getNicInNetwork(clusterVMIds.get(i), kubernetesCluster.getNetworkId()); + ips.add(controlVmNic.getIPv4Address()); vmIdIpMap.put(clusterVMIds.get(i), ips); } lbService.assignToLoadBalancer(lb.getId(), null, vmIdIpMap); @@ -361,7 +361,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif * Setup network rules for Kubernetes cluster * Open up firewall port CLUSTER_API_PORT, secure port on which Kubernetes * API server is running. Also create load balancing rule to forward public - * IP traffic to master VMs' private IP. + * IP traffic to control VMs' private IP. 
* Open up firewall ports NODES_DEFAULT_START_SSH_PORT to NODES_DEFAULT_START_SSH_PORT+n * for SSH access. Also create port-forwarding rule to forward public IP traffic to all * @param network @@ -405,7 +405,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif throw new ManagementServerException(String.format("Failed to provision firewall rules for SSH access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); } - // Load balancer rule fo API access for master node VMs + // Load balancer rule fo API access for control node VMs try { provisionLoadBalancerRule(publicIp, network, owner, clusterVMIds, CLUSTER_API_PORT); } catch (NetworkRuleConflictException | InsufficientAddressCapacityException e) { @@ -450,9 +450,9 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif } String kubeConfig = KubernetesClusterUtil.getKubernetesClusterConfig(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, timeoutTime); if (!Strings.isNullOrEmpty(kubeConfig)) { - final String masterVMPrivateIpAddress = getMasterVmPrivateIp(); - if (!Strings.isNullOrEmpty(masterVMPrivateIpAddress)) { - kubeConfig = kubeConfig.replace(String.format("server: https://%s:%d", masterVMPrivateIpAddress, CLUSTER_API_PORT), + final String controlVMPrivateIpAddress = getControlVmPrivateIp(); + if (!Strings.isNullOrEmpty(controlVMPrivateIpAddress)) { + kubeConfig = kubeConfig.replace(String.format("server: https://%s:%d", controlVMPrivateIpAddress, CLUSTER_API_PORT), String.format("server: https://%s:%d", publicIpAddress, CLUSTER_API_PORT)); } kubernetesClusterDetailsDao.addDetail(kubernetesCluster.getId(), "kubeConfigData", Base64.encodeBase64String(kubeConfig.getBytes(StringUtils.getPreferredCharset())), false); @@ -503,29 +503,29 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); publicIpAddress = 
publicIpSshPort.first(); if (Strings.isNullOrEmpty(publicIpAddress) && - (Network.GuestType.Isolated.equals(network.getGuestType()) || kubernetesCluster.getMasterNodeCount() > 1)) { // Shared network, single-master cluster won't have an IP yet + (Network.GuestType.Isolated.equals(network.getGuestType()) || kubernetesCluster.getControlNodeCount() > 1)) { // Shared network, single-control node cluster won't have an IP yet logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster : %s as no public IP found for the cluster" , kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed); } List clusterVMs = new ArrayList<>(); - UserVm k8sMasterVM = null; + UserVm k8sControlVM = null; try { - k8sMasterVM = provisionKubernetesClusterMasterVm(network, publicIpAddress); + k8sControlVM = provisionKubernetesClusterControlVm(network, publicIpAddress); } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) { - logTransitStateAndThrow(Level.ERROR, String.format("Provisioning the master VM failed in the Kubernetes cluster : %s", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); + logTransitStateAndThrow(Level.ERROR, String.format("Provisioning the control VM failed in the Kubernetes cluster : %s", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); } - clusterVMs.add(k8sMasterVM); + clusterVMs.add(k8sControlVM); if (Strings.isNullOrEmpty(publicIpAddress)) { - publicIpSshPort = getKubernetesClusterServerIpSshPort(k8sMasterVM); + publicIpSshPort = getKubernetesClusterServerIpSshPort(k8sControlVM); publicIpAddress = publicIpSshPort.first(); if (Strings.isNullOrEmpty(publicIpAddress)) { logTransitStateAndThrow(Level.WARN, String.format("Failed to start Kubernetes cluster : %s as no public IP found for the cluster", kubernetesCluster.getName()), 
kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed); } } try { - List additionalMasterVMs = provisionKubernetesClusterAdditionalMasterVms(publicIpAddress); - clusterVMs.addAll(additionalMasterVMs); + List additionalControlVMs = provisionKubernetesClusterAdditionalControlVms(publicIpAddress); + clusterVMs.addAll(additionalControlVMs); } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) { - logTransitStateAndThrow(Level.ERROR, String.format("Provisioning additional master VM failed in the Kubernetes cluster : %s", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); + logTransitStateAndThrow(Level.ERROR, String.format("Provisioning additional control VM failed in the Kubernetes cluster : %s", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); } try { List nodeVMs = provisionKubernetesClusterNodeVms(kubernetesCluster.getNodeCount(), publicIpAddress); @@ -542,9 +542,9 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster : %s, unable to setup network rules", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); } attachIsoKubernetesVMs(clusterVMs); - if (!KubernetesClusterUtil.isKubernetesClusterMasterVmRunning(kubernetesCluster, publicIpAddress, publicIpSshPort.second(), startTimeoutTime)) { - String msg = String.format("Failed to setup Kubernetes cluster : %s in usable state as unable to access master node VMs of the cluster", kubernetesCluster.getName()); - if (kubernetesCluster.getMasterNodeCount() > 1 && Network.GuestType.Shared.equals(network.getGuestType())) { + if (!KubernetesClusterUtil.isKubernetesClusterControlVmRunning(kubernetesCluster, publicIpAddress, publicIpSshPort.second(), startTimeoutTime)) { + String msg = 
String.format("Failed to setup Kubernetes cluster : %s in usable state as unable to access control node VMs of the cluster", kubernetesCluster.getName()); + if (kubernetesCluster.getControlNodeCount() > 1 && Network.GuestType.Shared.equals(network.getGuestType())) { msg = String.format("%s. Make sure external load-balancer has port forwarding rules for SSH access on ports %d-%d and API access on port %d", msg, CLUSTER_NODES_DEFAULT_START_SSH_PORT, diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java index 957adea6f77..86c5c8ed70b 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java @@ -123,9 +123,9 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorke if (!KubernetesClusterUtil.uncordonKubernetesClusterNode(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, getManagementServerSshPublicKeyFile(), vm, upgradeTimeoutTime, 15000)) { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to uncordon Kubernetes node on VM : %s", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); } - if (i == 0) { // Wait for master to get in Ready state + if (i == 0) { // Wait for control node to get in Ready state if (!KubernetesClusterUtil.isKubernetesClusterNodeReady(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, getManagementServerSshPublicKeyFile(), hostName, upgradeTimeoutTime, 15000)) { - 
logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to get master Kubernetes node on VM : %s in ready state", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); + logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to get control Kubernetes node on VM : %s in ready state", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); } } if (LOGGER.isInfoEnabled()) { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java index b9bda0840d1..48a39f52e0c 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java @@ -254,25 +254,25 @@ public class KubernetesClusterUtil { return k8sApiServerSetup; } - public static boolean isKubernetesClusterMasterVmRunning(final KubernetesCluster kubernetesCluster, final String ipAddress, - final int port, final long timeoutTime) { - boolean masterVmRunning = false; - while (!masterVmRunning && System.currentTimeMillis() < timeoutTime) { + public static boolean isKubernetesClusterControlVmRunning(final KubernetesCluster kubernetesCluster, final String ipAddress, + final int port, final long timeoutTime) { + boolean controlVmRunning = false; + while (!controlVmRunning && System.currentTimeMillis() < timeoutTime) { try (Socket socket = new Socket()) { socket.connect(new InetSocketAddress(ipAddress, port), 10000); - masterVmRunning = true; + controlVmRunning = true; } catch (IOException e) { if 
(LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Waiting for Kubernetes cluster : %s master node VMs to be accessible", kubernetesCluster.getName())); + LOGGER.info(String.format("Waiting for Kubernetes cluster : %s control node VMs to be accessible", kubernetesCluster.getName())); } try { Thread.sleep(10000); } catch (InterruptedException ex) { - LOGGER.warn(String.format("Error while waiting for Kubernetes cluster : %s master node VMs to be accessible", kubernetesCluster.getName()), ex); + LOGGER.warn(String.format("Error while waiting for Kubernetes cluster : %s control node VMs to be accessible", kubernetesCluster.getName()), ex); } } } - return masterVmRunning; + return controlVmRunning; } public static boolean validateKubernetesClusterReadyNodesCount(final KubernetesCluster kubernetesCluster, diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java index 54e307c0c5b..8921d691142 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java @@ -109,9 +109,14 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd { private String sshKeyPairName; @Parameter(name=ApiConstants.MASTER_NODES, type = CommandType.LONG, - description = "number of Kubernetes cluster master nodes, default is 1") + description = "number of Kubernetes cluster master nodes, default is 1. 
This option is deprecated, please use 'controlnodes' parameter.") + @Deprecated private Long masterNodes; + @Parameter(name=ApiConstants.CONTROL_NODES, type = CommandType.LONG, + description = "number of Kubernetes cluster control nodes, default is 1") + private Long controlNodes; + @Parameter(name=ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS, type = CommandType.STRING, description = "external load balancer IP address while using shared network with Kubernetes HA cluster") private String externalLoadBalancerIpAddress; @@ -191,6 +196,13 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd { return masterNodes; } + public Long getControlNodes() { + if (controlNodes == null) { + return 1L; + } + return controlNodes; + } + public String getExternalLoadBalancerIpAddress() { return externalLoadBalancerIpAddress; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java index bb3f14f5689..682aaaca812 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java @@ -101,10 +101,15 @@ public class KubernetesClusterResponse extends BaseResponse implements Controlle @Param(description = "keypair details") private String keypair; + @Deprecated @SerializedName(ApiConstants.MASTER_NODES) @Param(description = "the master nodes count for the Kubernetes cluster") private Long masterNodes; + @SerializedName(ApiConstants.CONTROL_NODES) + @Param(description = "the control nodes count for the Kubernetes cluster") + private Long controlNodes; + @SerializedName(ApiConstants.SIZE) @Param(description = "the size (worker nodes count) of the Kubernetes cluster") private Long 
clusterSize; @@ -269,6 +274,14 @@ public class KubernetesClusterResponse extends BaseResponse implements Controlle this.masterNodes = masterNodes; } + public Long getControlNodes() { + return controlNodes; + } + + public void setControlNodes(Long controlNodes) { + this.controlNodes = controlNodes; + } + public Long getClusterSize() { return clusterSize; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java index 4deb50d4a0b..449bd957055 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java @@ -61,7 +61,7 @@ public class KubernetesSupportedVersionResponse extends BaseResponse { private String zoneName; @SerializedName(ApiConstants.SUPPORTS_HA) - @Param(description = "whether Kubernetes supported version supports HA, multi-master") + @Param(description = "whether Kubernetes supported version supports HA, multi-control nodes") private Boolean supportsHA; @SerializedName(ApiConstants.STATE) diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml similarity index 100% rename from plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml rename to plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml similarity index 100% rename from 
plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml rename to plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh b/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh index ea36d7ee897..d66176028d6 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh +++ b/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh @@ -18,14 +18,14 @@ # Version 1.14 and below needs extra flags with kubeadm upgrade node if [ $# -lt 4 ]; then - echo "Invalid input. Valid usage: ./upgrade-kubernetes.sh UPGRADE_VERSION IS_MASTER IS_OLD_VERSION IS_EJECT_ISO" + echo "Invalid input. Valid usage: ./upgrade-kubernetes.sh UPGRADE_VERSION IS_CONTROL_NODE IS_OLD_VERSION IS_EJECT_ISO" echo "eg: ./upgrade-kubernetes.sh 1.16.3 true false false" exit 1 fi UPGRADE_VERSION="${1}" -IS_MAIN_MASTER="" +IS_MAIN_CONTROL="" if [ $# -gt 1 ]; then - IS_MAIN_MASTER="${2}" + IS_MAIN_CONTROL="${2}" fi IS_OLD_VERSION="" if [ $# -gt 2 ]; then @@ -100,7 +100,7 @@ if [ -d "$BINARIES_DIR" ]; then tar -f "${BINARIES_DIR}/cni/cni-plugins-amd64.tgz" -C /opt/cni/bin -xz tar -f "${BINARIES_DIR}/cri-tools/crictl-linux-amd64.tar.gz" -C /opt/bin -xz - if [ "${IS_MAIN_MASTER}" == 'true' ]; then + if [ "${IS_MAIN_CONTROL}" == 'true' ]; then set +e kubeadm upgrade apply ${UPGRADE_VERSION} -y retval=$? 
@@ -121,7 +121,7 @@ if [ -d "$BINARIES_DIR" ]; then chmod +x {kubelet,kubectl} systemctl restart kubelet - if [ "${IS_MAIN_MASTER}" == 'true' ]; then + if [ "${IS_MAIN_CONTROL}" == 'true' ]; then kubectl apply -f ${BINARIES_DIR}/network.yaml kubectl apply -f ${BINARIES_DIR}/dashboard.yaml fi diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/agent/api/GetControllerDataAnswer.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/agent/api/GetControllerDataAnswer.java index 9f200d838db..84dd6b346ad 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/agent/api/GetControllerDataAnswer.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/agent/api/GetControllerDataAnswer.java @@ -21,26 +21,26 @@ package com.cloud.agent.api; public class GetControllerDataAnswer extends Answer { private final String _ipAddress; - private final boolean _isMaster; + private final boolean _isPrimary; public GetControllerDataAnswer(final GetControllerDataCommand cmd, - final String ipAddress, final boolean isMaster){ + final String ipAddress, final boolean isPrimary){ super(cmd); this._ipAddress = ipAddress; - this._isMaster = isMaster; + this._isPrimary = isPrimary; } public GetControllerDataAnswer(final Command command, final Exception e) { super(command, e); this._ipAddress = null; - this._isMaster = false; + this._isPrimary = false; } public String getIpAddress() { return _ipAddress; } - public boolean isMaster() { - return _isMaster; + public boolean isPrimary() { + return _isPrimary; } } diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/agent/api/GetControllerHostsAnswer.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/agent/api/GetControllerHostsAnswer.java index e4c889cbd99..f9a49c8bb57 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/agent/api/GetControllerHostsAnswer.java +++ 
b/plugins/network-elements/bigswitch/src/main/java/com/cloud/agent/api/GetControllerHostsAnswer.java @@ -22,19 +22,19 @@ package com.cloud.agent.api; import com.cloud.host.HostVO; public class GetControllerHostsAnswer { - private HostVO master; - private HostVO slave; + private HostVO primary; + private HostVO secondary; - public HostVO getMaster() { - return master; + public HostVO getPrimary() { + return primary; } - public void setMaster(final HostVO master) { - this.master = master; + public void setPrimary(final HostVO primary) { + this.primary = primary; } - public HostVO getSlave() { - return slave; + public HostVO getSecondary() { + return secondary; } - public void setSlave(final HostVO slave) { - this.slave = slave; + public void setSecondary(final HostVO secondary) { + this.secondary = secondary; } } diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfApi.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfApi.java index c1d10dd3086..86396d9b02f 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfApi.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfApi.java @@ -72,7 +72,7 @@ public class BigSwitchBcfApi { private String zoneId; private Boolean nat; - private boolean isMaster; + private boolean isPrimary; private int _port = 8000; @@ -241,7 +241,7 @@ public class BigSwitchBcfApi { } public ControllerData getControllerData() { - return new ControllerData(host, isMaster); + return new ControllerData(host, isPrimary); } private void checkInvariants() throws BigSwitchBcfApiException{ @@ -274,7 +274,7 @@ public class BigSwitchBcfApi { throw new BigSwitchBcfApiException("BCF topology sync required", true); } if (m.getStatusCode() == HttpStatus.SC_SEE_OTHER) { - isMaster = false; + isPrimary = false; set_hash(HASH_IGNORE); return HASH_IGNORE; } @@ -402,10 +402,10 @@ 
public class BigSwitchBcfApi { } if(returnValue instanceof ControlClusterStatus) { if(HASH_CONFLICT.equals(hash)) { - isMaster = true; + isPrimary = true; ((ControlClusterStatus) returnValue).setTopologySyncRequested(true); - } else if (!HASH_IGNORE.equals(hash) && !isMaster) { - isMaster = true; + } else if (!HASH_IGNORE.equals(hash) && !isPrimary) { + isPrimary = true; ((ControlClusterStatus) returnValue).setTopologySyncRequested(true); } } diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfUtils.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfUtils.java index da409df9835..449aa3c8bad 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfUtils.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfUtils.java @@ -133,10 +133,10 @@ public class BigSwitchBcfUtils { _hostDao.loadDetails(bigswitchBcfHost); GetControllerDataAnswer answer = (GetControllerDataAnswer) _agentMgr.easySend(bigswitchBcfHost.getId(), cmd); if (answer != null){ - if (answer.isMaster()) { - cluster.setMaster(bigswitchBcfHost); + if (answer.isPrimary()) { + cluster.setPrimary(bigswitchBcfHost); } else { - cluster.setSlave(bigswitchBcfHost); + cluster.setSecondary(bigswitchBcfHost); } } } @@ -471,14 +471,14 @@ public class BigSwitchBcfUtils { public BcfAnswer sendBcfCommandWithNetworkSyncCheck(BcfCommand cmd, Network network)throws IllegalArgumentException{ // get registered Big Switch controller ControlClusterData cluster = getControlClusterData(network.getPhysicalNetworkId()); - if(cluster.getMaster()==null){ + if(cluster.getPrimary()==null){ return new BcfAnswer(cmd, new CloudRuntimeException("Big Switch Network controller temporarily unavailable")); } TopologyData topo = getTopology(network.getPhysicalNetworkId()); cmd.setTopology(topo); - BcfAnswer answer = (BcfAnswer) 
_agentMgr.easySend(cluster.getMaster().getId(), cmd); + BcfAnswer answer = (BcfAnswer) _agentMgr.easySend(cluster.getPrimary().getId(), cmd); if (answer == null || !answer.getResult()) { s_logger.error ("BCF API Command failed"); @@ -487,17 +487,17 @@ public class BigSwitchBcfUtils { String newHash = answer.getHash(); if (cmd.isTopologySyncRequested()) { - newHash = syncTopologyToBcfHost(cluster.getMaster()); + newHash = syncTopologyToBcfHost(cluster.getPrimary()); } if(newHash != null){ commitTopologyHash(network.getPhysicalNetworkId(), newHash); } - HostVO slave = cluster.getSlave(); - if(slave != null){ + HostVO secondary = cluster.getSecondary(); + if(secondary != null){ TopologyData newTopo = getTopology(network.getPhysicalNetworkId()); CacheBcfTopologyCommand cacheCmd = new CacheBcfTopologyCommand(newTopo); - _agentMgr.easySend(cluster.getSlave().getId(), cacheCmd); + _agentMgr.easySend(cluster.getSecondary().getId(), cacheCmd); } return answer; diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/ControlClusterData.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/ControlClusterData.java index 7d628994a5b..05edbc4f553 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/ControlClusterData.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/ControlClusterData.java @@ -22,22 +22,22 @@ package com.cloud.network.bigswitch; import com.cloud.host.HostVO; public class ControlClusterData { - private HostVO master; - private HostVO slave; + private HostVO primary; + private HostVO secondary; - public HostVO getMaster() { - return master; + public HostVO getPrimary() { + return primary; } - public void setMaster(HostVO master) { - this.master = master; + public void setPrimary(HostVO primary) { + this.primary = primary; } - public HostVO getSlave() { - return slave; + public HostVO getSecondary() { + return secondary; } - 
public void setSlave(HostVO slave) { - this.slave = slave; + public void setSecondary(HostVO secondary) { + this.secondary = secondary; } } diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/ControllerData.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/ControllerData.java index 224a7ab1fd7..ee16a3ce88f 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/ControllerData.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/ControllerData.java @@ -21,19 +21,19 @@ package com.cloud.network.bigswitch; public class ControllerData { private final String ipAddress; - private final boolean isMaster; + private final boolean isPrimary; - public ControllerData(String ipAddress, boolean isMaster) { + public ControllerData(String ipAddress, boolean isPrimary) { this.ipAddress = ipAddress; - this.isMaster = isMaster; + this.isPrimary = isPrimary; } public String getIpAddress() { return ipAddress; } - public boolean isMaster() { - return isMaster; + public boolean isPrimary() { + return isPrimary; } } diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/resource/BigSwitchBcfResource.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/resource/BigSwitchBcfResource.java index a43cad31371..de33b8ae7b4 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/resource/BigSwitchBcfResource.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/resource/BigSwitchBcfResource.java @@ -563,7 +563,7 @@ public class BigSwitchBcfResource extends ManagerBase implements ServerResource ControllerData controller = _bigswitchBcfApi.getControllerData(); return new GetControllerDataAnswer(cmd, controller.getIpAddress(), - controller.isMaster()); + controller.isPrimary()); } private Answer executeRequest(ReadyCommand cmd) { diff --git 
a/plugins/network-elements/bigswitch/src/test/java/com/cloud/network/bigswitch/BigSwitchApiTest.java b/plugins/network-elements/bigswitch/src/test/java/com/cloud/network/bigswitch/BigSwitchApiTest.java index 5d40e5dfa8d..207f0ab32c7 100644 --- a/plugins/network-elements/bigswitch/src/test/java/com/cloud/network/bigswitch/BigSwitchApiTest.java +++ b/plugins/network-elements/bigswitch/src/test/java/com/cloud/network/bigswitch/BigSwitchApiTest.java @@ -254,13 +254,13 @@ public class BigSwitchApiTest { } @Test - public void testExecuteCreateObjectSlave() throws BigSwitchBcfApiException, IOException { + public void testExecuteCreateObjectSecondary() throws BigSwitchBcfApiException, IOException { NetworkData network = new NetworkData(); _method = mock(PostMethod.class); when(_method.getStatusCode()).thenReturn(HttpStatus.SC_SEE_OTHER); String hash = _api.executeCreateObject(network, "/", Collections. emptyMap()); assertEquals(hash, BigSwitchBcfApi.HASH_IGNORE); - assertEquals(_api.getControllerData().isMaster(), false); + assertEquals(_api.getControllerData().isPrimary(), false); } @Test(expected = BigSwitchBcfApiException.class) @@ -320,7 +320,7 @@ public class BigSwitchApiTest { } @Test - public void testExecuteUpdateObjectSlave() throws BigSwitchBcfApiException, IOException { + public void testExecuteUpdateObjectSecondary() throws BigSwitchBcfApiException, IOException { NetworkData network = new NetworkData(); _method = mock(PutMethod.class); when(_method.getStatusCode()).thenReturn(HttpStatus.SC_SEE_OTHER); @@ -396,7 +396,7 @@ public class BigSwitchApiTest { } @Test - public void testExecuteRetrieveControllerMasterStatus() throws BigSwitchBcfApiException, IOException { + public void testExecuteRetrieveControllerPrimaryStatus() throws BigSwitchBcfApiException, IOException { _method = mock(GetMethod.class); when(_method.getStatusCode()).thenReturn(HttpStatus.SC_OK); when(((HttpMethodBase)_method).getResponseBodyAsString(2048)).thenReturn("{'healthy': true, 
'topologySyncRequested': false}"); @@ -404,11 +404,11 @@ public class BigSwitchApiTest { }.getType(), "/", null); verify(_method, times(1)).releaseConnection(); verify(_client, times(1)).executeMethod(_method); - assertEquals(_api.getControllerData().isMaster(), true); + assertEquals(_api.getControllerData().isPrimary(), true); } @Test - public void testExecuteRetrieveControllerMasterStatusWithTopoConflict() throws BigSwitchBcfApiException, IOException { + public void testExecuteRetrieveControllerPrimaryStatusWithTopoConflict() throws BigSwitchBcfApiException, IOException { _method = mock(GetMethod.class); when(_method.getStatusCode()).thenReturn(HttpStatus.SC_CONFLICT); when(((HttpMethodBase)_method).getResponseBodyAsString(2048)).thenReturn("{'healthy': true, 'topologySyncRequested': true}"); @@ -416,11 +416,11 @@ public class BigSwitchApiTest { }.getType(), "/", null); verify(_method, times(1)).releaseConnection(); verify(_client, times(1)).executeMethod(_method); - assertEquals(_api.getControllerData().isMaster(), true); + assertEquals(_api.getControllerData().isPrimary(), true); } @Test - public void testExecuteRetrieveControllerSlaveStatus() throws BigSwitchBcfApiException, IOException { + public void testExecuteRetrieveControllerSecondaryStatus() throws BigSwitchBcfApiException, IOException { _method = mock(GetMethod.class); when(_method.getStatusCode()).thenReturn(HttpStatus.SC_SEE_OTHER); when(((HttpMethodBase)_method).getResponseBodyAsString(1024)).thenReturn("{'healthy': true, 'topologySyncRequested': false}"); @@ -428,6 +428,6 @@ public class BigSwitchApiTest { }.getType(), "/", null); verify(_method, times(1)).releaseConnection(); verify(_client, times(1)).executeMethod(_method); - assertEquals(_api.getControllerData().isMaster(), false); + assertEquals(_api.getControllerData().isPrimary(), false); } } diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServiceManagerImpl.java 
b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServiceManagerImpl.java index e6f3b4dfd1c..3bf5bba9c43 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServiceManagerImpl.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServiceManagerImpl.java @@ -79,7 +79,7 @@ public class ServiceManagerImpl implements ServiceManager { ContrailManager _manager; /** - * In the case of service instance the master object is in the contrail API server. This object stores the + * In the case of service instance the primary object is in the contrail API server. This object stores the * service instance parameters in the database. * * @param owner Used to determine the project. diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ModelObject.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ModelObject.java index acfff7de291..f829d3c4529 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ModelObject.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ModelObject.java @@ -33,7 +33,7 @@ import com.cloud.exception.InternalErrorException; * * The object constructor should set the uuid and the internal id of the cloudstack objects. * - * The build method reads the master database (typically cloudstack mysql) and derives the state that + * The build method reads the primary database (typically cloudstack mysql) and derives the state that * we wish to reflect in the contrail API. This method should not modify the Contrail API state. * * The verify method reads the API server state and compares with cached properties. 
diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ServiceInstanceModel.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ServiceInstanceModel.java index e79053ca4f3..7a074a0036c 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ServiceInstanceModel.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ServiceInstanceModel.java @@ -110,7 +110,7 @@ public class ServiceInstanceModel extends ModelObjectBase { } /** - * Recreate the model object from the Contrail API which is the master for this type of object. + * Recreate the model object from the Contrail API which is main for this type of object. * @param siObj */ public void build(ModelController controller, ServiceInstance siObj) { diff --git a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/NetworkProviderTest.java b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/NetworkProviderTest.java index f9a478713c7..3ad36acc160 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/NetworkProviderTest.java +++ b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/NetworkProviderTest.java @@ -116,7 +116,7 @@ public class NetworkProviderTest extends TestCase { private ApiConnector _api; private static int s_mysqlSrverPort; private static long s_msId; - private static Merovingian2 s_lockMaster; + private static Merovingian2 s_lockController; public static boolean s_initDone = false; @BeforeClass @@ -127,14 +127,14 @@ public class NetworkProviderTest extends TestCase { s_logger.info("mysql server launched on port " + s_mysqlSrverPort); s_msId = 
ManagementServerNode.getManagementServerId(); - s_lockMaster = Merovingian2.createLockMaster(s_msId); + s_lockController = Merovingian2.createLockController(s_msId); } @AfterClass public static void globalTearDown() throws Exception { - s_lockMaster.cleanupForServer(s_msId); + s_lockController.cleanupForServer(s_msId); JmxUtil.unregisterMBean("Locks", "Locks"); - s_lockMaster = null; + s_lockController = null; AbstractApplicationContext ctx = (AbstractApplicationContext)ComponentContext.getApplicationContext(); Map lifecycleComponents = ctx.getBeansOfType(ComponentLifecycle.class); diff --git a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/PublicNetworkTest.java b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/PublicNetworkTest.java index c85bc659e31..9564ec0a24a 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/PublicNetworkTest.java +++ b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/PublicNetworkTest.java @@ -70,7 +70,7 @@ public class PublicNetworkTest extends TestCase { private static boolean s_initDone = false; private static int s_mysqlServerPort; private static long s_msId; - private static Merovingian2 s_lockMaster; + private static Merovingian2 s_lockController; private ManagementServerMock _server; private ApiConnector _spy; @@ -81,14 +81,14 @@ public class PublicNetworkTest extends TestCase { s_mysqlServerPort = TestDbSetup.init(null); s_logger.info("mysql server launched on port " + s_mysqlServerPort); s_msId = ManagementServerNode.getManagementServerId(); - s_lockMaster = Merovingian2.createLockMaster(s_msId); + s_lockController = Merovingian2.createLockController(s_msId); } @AfterClass public static void globalTearDown() throws Exception { - s_lockMaster.cleanupForServer(s_msId); + 
s_lockController.cleanupForServer(s_msId); JmxUtil.unregisterMBean("Locks", "Locks"); - s_lockMaster = null; + s_lockController = null; AbstractApplicationContext ctx = (AbstractApplicationContext)ComponentContext.getApplicationContext(); Map lifecycleComponents = ctx.getBeansOfType(ComponentLifecycle.class); diff --git a/python/lib/cloud_utils.py b/python/lib/cloud_utils.py index be908281be2..ecef6f46d97 100644 --- a/python/lib/cloud_utils.py +++ b/python/lib/cloud_utils.py @@ -1161,7 +1161,7 @@ class MigrationStep: You develop your own steps, and then pass a list of those steps to the Migrator instance that will run them in order. - When the migrator runs, it will take the list of steps you gave him, + When the migrator runs, it will take the list of steps you gave, and, for each step: a) instantiate it, passing the context you gave to the migrator diff --git a/server/src/main/java/com/cloud/api/ApiResponseHelper.java b/server/src/main/java/com/cloud/api/ApiResponseHelper.java index 5c5ba55f197..a7968cc7f3a 100644 --- a/server/src/main/java/com/cloud/api/ApiResponseHelper.java +++ b/server/src/main/java/com/cloud/api/ApiResponseHelper.java @@ -1946,7 +1946,7 @@ public class ApiResponseHelper implements ResponseGenerator { //check permissions if (_accountMgr.isNormalUser(caller.getId())) { - //regular user can see only jobs he owns + //regular users can see only jobs they own if (caller.getId() != jobOwner.getId()) { throw new PermissionDeniedException("Account " + caller + " is not authorized to see job id=" + job.getId()); } diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index f204ead3055..ae8212034ed 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -3746,10 +3746,10 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q throw new 
CloudRuntimeException("Resource type not supported."); } if (CallContext.current().getCallingAccount().getType() != Account.ACCOUNT_TYPE_ADMIN) { - final List userBlacklistedSettings = Stream.of(QueryService.UserVMBlacklistedDetails.value().split(",")) + final List userDenyListedSettings = Stream.of(QueryService.UserVMDeniedDetails.value().split(",")) .map(item -> (item).trim()) .collect(Collectors.toList()); - for (final String detail : userBlacklistedSettings) { + for (final String detail : userDenyListedSettings) { if (options.containsKey(detail)) { options.remove(detail); } @@ -4149,6 +4149,6 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] {AllowUserViewDestroyedVM, UserVMBlacklistedDetails, UserVMReadOnlyDetails, SortKeyAscending, AllowUserViewAllDomainAccounts}; + return new ConfigKey[] {AllowUserViewDestroyedVM, UserVMDeniedDetails, UserVMReadOnlyDetails, SortKeyAscending, AllowUserViewAllDomainAccounts}; } } diff --git a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java index 3d75ff7f160..09eaee356e7 100644 --- a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java @@ -344,9 +344,9 @@ public class UserVmJoinDaoImpl extends GenericDaoBaseWithTagInformation getCapacityThresholdMap() { // Lets build this real time so that the admin wont have to restart MS - // if he changes these values + // if anyone changes these values Map disableThresholdMap = new HashMap(); String cpuDisableThresholdString = ClusterCPUCapacityDisableThreshold.value().toString(); diff --git a/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java b/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java index 8dd9c9b6d6f..e05bc5cb5bc 100644 --- 
a/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java +++ b/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java @@ -90,7 +90,7 @@ import com.cloud.vm.dao.VMInstanceDao; * state. If a Investigator finds the VM is dead, then HA process is started on the VM, skipping step 2. 2. If the list of * Investigators can not determine if the VM is dead or alive. The list of FenceBuilders is invoked to fence off the VM so that * it won't do any damage to the storage and network. 3. The VM is marked as stopped. 4. The VM is started again via the normal - * process of starting VMs. Note that once the VM is marked as stopped, the user may have started the VM himself. 5. VMs that + * process of starting VMs. Note that once the VM is marked as stopped, the user may have started the VM explicitly. 5. VMs that * have re-started more than the configured number of times are marked as in Error state and the user is not allowed to restart * the VM. * diff --git a/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java b/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java index bbc6aa77b79..9191ddddb5b 100644 --- a/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java +++ b/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java @@ -617,7 +617,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ } NetworkDetailVO updateInSequence=_networkDetailsDao.findDetail(network.getId(), Network.updatingInSequence); if(network.isRedundant() && updateInSequence!=null && "true".equalsIgnoreCase(updateInSequence.getValue())){ - List masterRouters=new ArrayList(); + List primaryRouters=new ArrayList(); int noOfrouters=routers.size(); while (noOfrouters>0){ DomainRouterVO router = routers.get(0); @@ -632,16 +632,16 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ continue; } 
if(router.getRedundantState()!=VirtualRouter.RedundantState.BACKUP) { - masterRouters.add(router); + primaryRouters.add(router); routers.remove(router); } noOfrouters--; } - if(routers.size()==0 && masterRouters.size()==0){ + if(routers.size()==0 && primaryRouters.size()==0){ return null; } - if(routers.size()==0 && masterRouters.size()!=0){ - routers=masterRouters; + if(routers.size()==0 && primaryRouters.size()!=0){ + routers=primaryRouters; } routers=routers.subList(0,1); routers.get(0).setUpdateState(VirtualRouter.UpdateState.UPDATE_IN_PROGRESS); diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index ebcf16afe3c..7ff911393f1 100644 --- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -805,7 +805,7 @@ Configurable, StateListener routers) { final Set checkedNetwork = new HashSet(); @@ -1000,16 +1000,16 @@ Configurable, StateListener routers) { + private void checkDuplicatePrimary(final List routers) { final Map networkRouterMaps = new HashMap(); for (final DomainRouterVO router : routers) { final List routerGuestNtwkIds = _routerDao.getRouterNetworks(router.getId()); @@ -1035,13 +1035,13 @@ Configurable, StateListener 0) { Long routerGuestNtwkId = vpcId != null ? vpcId : routerGuestNtwkIds.get(0); - if (router.getRedundantState() == RedundantState.MASTER) { + if (router.getRedundantState() == RedundantState.PRIMARY) { if (networkRouterMaps.containsKey(routerGuestNtwkId)) { final DomainRouterVO dupRouter = networkRouterMaps.get(routerGuestNtwkId); - final String title = "More than one redundant virtual router is in MASTER state! Router " + router.getHostName() + " and router " + final String title = "More than one redundant virtual router is in PRIMARY state! 
Router " + router.getHostName() + " and router " + dupRouter.getHostName(); final String context = "Virtual router (name: " + router.getHostName() + ", id: " + router.getId() + " and router (name: " + dupRouter.getHostName() - + ", id: " + router.getId() + ") are both in MASTER state! If the problem persist, restart both of routers. "; + + ", id: " + router.getId() + ") are both in PRIMARY state! If the problem persist, restart both of routers. "; _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER, router.getDataCenterId(), router.getPodIdToDeployIn(), title, context); s_logger.warn(context); } else { @@ -1083,7 +1083,7 @@ Configurable, StateListener() { @@ -2317,14 +2317,14 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis }); } - protected boolean isCidrBlacklisted(final String cidr, final long zoneId) { + protected boolean isCidrDenylisted(final String cidr, final long zoneId) { final String routesStr = NetworkOrchestrationService.GuestDomainSuffix.valueIn(zoneId); if (routesStr != null && !routesStr.isEmpty()) { - final String[] cidrBlackList = routesStr.split(","); + final String[] cidrDenyList = routesStr.split(","); - if (cidrBlackList != null && cidrBlackList.length > 0) { - for (final String blackListedRoute : cidrBlackList) { - if (NetUtils.isNetworksOverlap(blackListedRoute, cidr)) { + if (cidrDenyList != null && cidrDenyList.length > 0) { + for (final String denyListedRoute : cidrDenyList) { + if (NetUtils.isNetworksOverlap(denyListedRoute, cidr)) { return true; } } diff --git a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java index bd4e96f6aa1..af7c2a2acb8 100644 --- a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java +++ b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java @@ -714,8 +714,8 @@ public class ResourceLimitManagerImpl extends ManagerBase 
implements ResourceLim } if ((caller.getAccountId() == accountId.longValue()) && (_accountMgr.isDomainAdmin(caller.getId()) || caller.getType() == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN)) { - // If the admin is trying to update his own account, disallow. - throw new PermissionDeniedException("Unable to update resource limit for his own account " + accountId + ", permission denied"); + // If the admin is trying to update their own account, disallow. + throw new PermissionDeniedException("Unable to update resource limit for their own account " + accountId + ", permission denied"); } if (account.getType() == Account.ACCOUNT_TYPE_PROJECT) { diff --git a/server/src/main/java/com/cloud/server/LockMasterListener.java b/server/src/main/java/com/cloud/server/LockControllerListener.java similarity index 83% rename from server/src/main/java/com/cloud/server/LockMasterListener.java rename to server/src/main/java/com/cloud/server/LockControllerListener.java index 27cf74f4375..ba555b31343 100644 --- a/server/src/main/java/com/cloud/server/LockMasterListener.java +++ b/server/src/main/java/com/cloud/server/LockControllerListener.java @@ -26,11 +26,11 @@ import com.cloud.utils.db.Merovingian2; * when a management server is down. 
* */ -public class LockMasterListener implements ClusterManagerListener { - Merovingian2 _lockMaster; +public class LockControllerListener implements ClusterManagerListener { + Merovingian2 _lockController; - public LockMasterListener(long msId) { - _lockMaster = Merovingian2.createLockMaster(msId); + public LockControllerListener(long msId) { + _lockController = Merovingian2.createLockController(msId); } @Override @@ -40,7 +40,7 @@ public class LockMasterListener implements ClusterManagerListener { @Override public void onManagementNodeLeft(List nodeList, long selfNodeId) { for (ManagementServerHost node : nodeList) { - _lockMaster.cleanupForServer(node.getMsid()); + _lockController.cleanupForServer(node.getMsid()); } } diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 2e7c8fe49a1..98937cad0e3 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -879,7 +879,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Inject private VpcDao _vpcDao; - private LockMasterListener _lockMasterListener; + private LockControllerListener _lockControllerListener; private final ScheduledExecutorService _eventExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("EventChecker")); private final ScheduledExecutorService _alertExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("AlertChecker")); @@ -985,11 +985,11 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe // Set human readable sizes NumbersUtil.enableHumanReadableSizes = _configDao.findByName("display.human.readable.sizes").getValue().equals("true"); - if (_lockMasterListener == null) { - _lockMasterListener = new LockMasterListener(ManagementServerNode.getManagementServerId()); + if (_lockControllerListener == null) { + 
_lockControllerListener = new LockControllerListener(ManagementServerNode.getManagementServerId()); } - _clusterMgr.registerListener(_lockMasterListener); + _clusterMgr.registerListener(_lockControllerListener); enableAdminUser("password"); return true; @@ -3815,7 +3815,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe String signature = ""; try { - // get the user obj to get his secret key + // get the user obj to get their secret key user = _accountMgr.getActiveUser(userId); final String secretKey = user.getSecretKey(); final String input = cloudIdentifier; @@ -4551,12 +4551,12 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe _storagePoolAllocators = storagePoolAllocators; } - public LockMasterListener getLockMasterListener() { - return _lockMasterListener; + public LockControllerListener getLockControllerListener() { + return _lockControllerListener; } - public void setLockMasterListener(final LockMasterListener lockMasterListener) { - _lockMasterListener = lockMasterListener; + public void setLockControllerListener(final LockControllerListener lockControllerListener) { + _lockControllerListener = lockControllerListener; } } diff --git a/server/src/main/java/com/cloud/user/AccountManagerImpl.java b/server/src/main/java/com/cloud/user/AccountManagerImpl.java index 054c3342204..b6f4e5e3600 100644 --- a/server/src/main/java/com/cloud/user/AccountManagerImpl.java +++ b/server/src/main/java/com/cloud/user/AccountManagerImpl.java @@ -581,7 +581,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M @Override public Long checkAccessAndSpecifyAuthority(Account caller, Long zoneId) { - // We just care for resource domain admin for now. He should be permitted to see only his zone. + // We just care for resource domain admins for now, and they should be permitted to see only their zone. 
if (isResourceDomainAdmin(caller.getAccountId())) { if (zoneId == null) { return getZoneIdForAccount(caller); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 579a33d6ee8..d9dc32d673f 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -2545,7 +2545,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir scanLock.releaseRef(); } } - } @Override @@ -2581,7 +2580,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir updateDisplayVmFlag(isDisplayVm, id, vmInstance); } final Account caller = CallContext.current().getCallingAccount(); - final List userBlacklistedSettings = Stream.of(QueryService.UserVMBlacklistedDetails.value().split(",")) + final List userDenyListedSettings = Stream.of(QueryService.UserVMDeniedDetails.value().split(",")) .map(item -> (item).trim()) .collect(Collectors.toList()); final List userReadOnlySettings = Stream.of(QueryService.UserVMReadOnlyDetails.value().split(",")) @@ -2592,7 +2591,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir userVmDetailsDao.removeDetails(id); } else { for (final UserVmDetailVO detail : userVmDetailsDao.listDetails(id)) { - if (detail != null && !userBlacklistedSettings.contains(detail.getName()) + if (detail != null && !userDenyListedSettings.contains(detail.getName()) && !userReadOnlySettings.contains(detail.getName())) { userVmDetailsDao.removeDetail(id, detail.getName()); } @@ -2605,18 +2604,18 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (caller != null && caller.getType() != Account.ACCOUNT_TYPE_ADMIN) { - // Ensure blacklisted or read-only detail is not passed by non-root-admin user + // Ensure denied or read-only detail is not passed by non-root-admin user for (final String detailName : details.keySet()) 
{ - if (userBlacklistedSettings.contains(detailName)) { + if (userDenyListedSettings.contains(detailName)) { throw new InvalidParameterValueException("You're not allowed to add or edit the restricted setting: " + detailName); } if (userReadOnlySettings.contains(detailName)) { throw new InvalidParameterValueException("You're not allowed to add or edit the read-only setting: " + detailName); } } - // Add any hidden/blacklisted or read-only detail + // Add any hidden/denied or read-only detail for (final UserVmDetailVO detail : userVmDetailsDao.listDetails(id)) { - if (userBlacklistedSettings.contains(detail.getName()) || userReadOnlySettings.contains(detail.getName())) { + if (userDenyListedSettings.contains(detail.getName()) || userReadOnlySettings.contains(detail.getName())) { details.put(detail.getName(), detail.getValue()); } } @@ -5569,7 +5568,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir * @param vm */ protected void persistExtraConfigKvm(String decodedUrl, UserVm vm) { - // validate config against blacklisted cfg commands + // validate config against denied cfg commands validateKvmExtraConfig(decodedUrl); String[] extraConfigs = decodedUrl.split("\n\n"); for (String cfg : extraConfigs) { @@ -5591,7 +5590,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir /** * This method is called by the persistExtraConfigKvm - * Validates passed extra configuration data for KVM and validates against blacklist of unwanted commands + * Validates passed extra configuration data for KVM and validates against deny-list of unwanted commands * controlled by Root admin * @param decodedUrl string containing xml configuration to be validated */ diff --git a/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml b/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml index 672f7c0d414..e9905c52d93 100644 --- 
a/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml +++ b/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml @@ -51,7 +51,7 @@ - + - + diff --git a/server/src/test/java/com/cloud/network/element/VirtualRouterElementTest.java b/server/src/test/java/com/cloud/network/element/VirtualRouterElementTest.java index 7c2a3c78178..1b5343881b8 100644 --- a/server/src/test/java/com/cloud/network/element/VirtualRouterElementTest.java +++ b/server/src/test/java/com/cloud/network/element/VirtualRouterElementTest.java @@ -250,7 +250,7 @@ public class VirtualRouterElementTest { public void testGetRouters2(){ Network networkUpdateInprogress=new NetworkVO(2l,null,null,null,1l,1l,1l,1l,"d","d","d",null,1l,1l,null,true,null,true); mockDAOs((NetworkVO)networkUpdateInprogress,testOffering); - //alwyas return backup routers first when both master and backup need update. + //alwyas return backup routers first when both primary and backup need update. List routers=virtualRouterElement.getRouters(networkUpdateInprogress); assertTrue(routers.size()==1); assertTrue(routers.get(0).getRedundantState()==RedundantState.BACKUP && routers.get(0).getUpdateState()==VirtualRouter.UpdateState.UPDATE_IN_PROGRESS); @@ -260,7 +260,7 @@ public class VirtualRouterElementTest { public void testGetRouters3(){ Network network=new NetworkVO(3l,null,null,null,1l,1l,1l,1l,"d","d","d",null,1l,1l,null,true,null,true); mockDAOs((NetworkVO)network,testOffering); - //alwyas return backup routers first when both master and backup need update. + //alwyas return backup routers first when both primary and backup need update. 
List routers=virtualRouterElement.getRouters(network); assertTrue(routers.size()==4); } @@ -376,7 +376,7 @@ public class VirtualRouterElementTest { /* stopPending */ false, /* vpcId */ null); routerNeedUpdateBackup.setUpdateState(VirtualRouter.UpdateState.UPDATE_NEEDED); - final DomainRouterVO routerNeedUpdateMaster = new DomainRouterVO(/* id */ 3L, + final DomainRouterVO routerNeedUpdatePrimary = new DomainRouterVO(/* id */ 3L, /* serviceOfferingId */ 1L, /* elementId */ 0L, "name", @@ -387,11 +387,11 @@ public class VirtualRouterElementTest { /* accountId */ 1L, /* userId */ 1L, /* isRedundantRouter */ false, - RedundantState.MASTER, + RedundantState.PRIMARY, /* haEnabled */ false, /* stopPending */ false, /* vpcId */ null); - routerNeedUpdateMaster.setUpdateState(VirtualRouter.UpdateState.UPDATE_NEEDED); + routerNeedUpdatePrimary.setUpdateState(VirtualRouter.UpdateState.UPDATE_NEEDED); final DomainRouterVO routerUpdateComplete = new DomainRouterVO(/* id */ 4L, /* serviceOfferingId */ 1L, /* elementId */ 0L, @@ -427,12 +427,12 @@ public class VirtualRouterElementTest { List routerList1=new ArrayList<>(); routerList1.add(routerUpdateComplete); routerList1.add(routerNeedUpdateBackup); - routerList1.add(routerNeedUpdateMaster); + routerList1.add(routerNeedUpdatePrimary); routerList1.add(routerUpdateInProgress); List routerList2=new ArrayList<>(); routerList2.add(routerUpdateComplete); routerList2.add(routerNeedUpdateBackup); - routerList2.add(routerNeedUpdateMaster); + routerList2.add(routerNeedUpdatePrimary); List routerList3=new ArrayList<>(); routerList3.add(routerUpdateComplete); routerList3.add(routerUpdateInProgress); diff --git a/server/src/test/java/com/cloud/network/router/VirtualNetworkApplianceManagerImplTest.java b/server/src/test/java/com/cloud/network/router/VirtualNetworkApplianceManagerImplTest.java index aed4769ab96..7b2906f2a1a 100644 --- a/server/src/test/java/com/cloud/network/router/VirtualNetworkApplianceManagerImplTest.java +++ 
b/server/src/test/java/com/cloud/network/router/VirtualNetworkApplianceManagerImplTest.java @@ -263,7 +263,7 @@ public class VirtualNetworkApplianceManagerImplTest { @Test public void testUpdateSite2SiteVpnConnectionState() throws Exception{ - DomainRouterVO router = new DomainRouterVO(1L, 1L, 1L, "First testing router", 1L, Hypervisor.HypervisorType.XenServer, 1L, 1L, 1L, 1L, false, VirtualRouter.RedundantState.MASTER, true, true, 1L); + DomainRouterVO router = new DomainRouterVO(1L, 1L, 1L, "First testing router", 1L, Hypervisor.HypervisorType.XenServer, 1L, 1L, 1L, 1L, false, VirtualRouter.RedundantState.PRIMARY, true, true, 1L); router.setState(VirtualMachine.State.Running); router.setPrivateIpAddress("192.168.50.15"); diff --git a/systemvm/agent/noVNC/vendor/pako/lib/zlib/trees.js b/systemvm/agent/noVNC/vendor/pako/lib/zlib/trees.js index a69b8a592fe..be5d0a9675b 100644 --- a/systemvm/agent/noVNC/vendor/pako/lib/zlib/trees.js +++ b/systemvm/agent/noVNC/vendor/pako/lib/zlib/trees.js @@ -951,9 +951,9 @@ function send_all_trees(s, lcodes, dcodes, blcodes) * Check if the data type is TEXT or BINARY, using the following algorithm: * - TEXT if the two conditions below are satisfied: * a) There are no non-portable control characters belonging to the - * "black list" (0..6, 14..25, 28..31). + * "deny list" (0..6, 14..25, 28..31). * b) There is at least one printable character belonging to the - * "white list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255). + * "allow list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255). * - BINARY otherwise. * - The following partially-portable control characters form a * "gray list" that is ignored in this detection algorithm: @@ -961,21 +961,21 @@ function send_all_trees(s, lcodes, dcodes, blcodes) * IN assertion: the fields Freq of dyn_ltree are set. 
*/ function detect_data_type(s) { - /* black_mask is the bit mask of black-listed bytes + /* deny_mask is the bit mask of deny-listed bytes * set bits 0..6, 14..25, and 28..31 * 0xf3ffc07f = binary 11110011111111111100000001111111 */ - var black_mask = 0xf3ffc07f; + var deny_mask = 0xf3ffc07f; var n; - /* Check for non-textual ("black-listed") bytes. */ - for (n = 0; n <= 31; n++, black_mask >>>= 1) { - if ((black_mask & 1) && (s.dyn_ltree[n * 2]/*.Freq*/ !== 0)) { + /* Check for non-textual ("deny-listed") bytes. */ + for (n = 0; n <= 31; n++, deny_mask >>>= 1) { + if ((deny_mask & 1) && (s.dyn_ltree[n * 2]/*.Freq*/ !== 0)) { return Z_BINARY; } } - /* Check for textual ("white-listed") bytes. */ + /* Check for textual ("allow-listed") bytes. */ if (s.dyn_ltree[9 * 2]/*.Freq*/ !== 0 || s.dyn_ltree[10 * 2]/*.Freq*/ !== 0 || s.dyn_ltree[13 * 2]/*.Freq*/ !== 0) { return Z_TEXT; @@ -986,7 +986,7 @@ function detect_data_type(s) { } } - /* There are no "black-listed" or "white-listed" bytes: + /* There are no "deny-listed" or "allow-listed" bytes: * this stream either is empty or has tolerated ("gray-listed") bytes only. 
*/ return Z_BINARY; diff --git a/systemvm/debian/opt/cloud/bin/checkrouter.sh b/systemvm/debian/opt/cloud/bin/checkrouter.sh index c0d2ea77019..ae3aff7eb10 100755 --- a/systemvm/debian/opt/cloud/bin/checkrouter.sh +++ b/systemvm/debian/opt/cloud/bin/checkrouter.sh @@ -27,13 +27,13 @@ fi ROUTER_TYPE=$(cat /etc/cloudstack/cmdline.json | grep type | awk '{print $2;}' | sed -e 's/[,\"]//g') if [ "$ROUTER_TYPE" = "router" ] then - ROUTER_STATE=$(ip addr show dev eth0 | grep inet | wc -l | xargs bash -c 'if [ $0 == 2 ]; then echo "MASTER"; else echo "BACKUP"; fi') + ROUTER_STATE=$(ip addr show dev eth0 | grep inet | wc -l | xargs bash -c 'if [ $0 == 2 ]; then echo "PRIMARY"; else echo "BACKUP"; fi') STATUS=$ROUTER_STATE else ROUTER_STATE=$(ip addr show dev eth1 | grep state | awk '{print $9;}') if [ "$ROUTER_STATE" = "UP" ] then - STATUS=MASTER + STATUS=PRIMARY elif [ "$ROUTER_STATE" = "DOWN" ] then STATUS=BACKUP diff --git a/systemvm/debian/opt/cloud/bin/master.py b/systemvm/debian/opt/cloud/bin/configure_router.py similarity index 87% rename from systemvm/debian/opt/cloud/bin/master.py rename to systemvm/debian/opt/cloud/bin/configure_router.py index 26de8b93bb3..8d1f790e472 100755 --- a/systemvm/debian/opt/cloud/bin/master.py +++ b/systemvm/debian/opt/cloud/bin/configure_router.py @@ -25,9 +25,9 @@ import logging from optparse import OptionParser parser = OptionParser() -parser.add_option("-m", "--master", - action="store_true", default=False, dest="master", - help="Set router master") +parser.add_option("-p", "--primary", + action="store_true", default=False, dest="primary", + help="Set router primary") parser.add_option("-b", "--backup", action="store_true", default=False, dest="backup", help="Set router backup") @@ -42,15 +42,15 @@ logging.basicConfig(filename=config.get_logger(), format=config.get_format()) config.cmdline() cl = CsCmdLine("cmdline", config) -# Update the configuration to set state as backup and let keepalived decide who the real Master is! 
-cl.set_master_state(False) +# Update the configuration to set state as backup and let keepalived decide who the real Primary is! +cl.set_primary_state(False) cl.save() config.set_address() red = CsRedundant(config) -if options.master: - red.set_master() +if options.primary: + red.set_primary() if options.backup: red.set_backup() diff --git a/systemvm/debian/opt/cloud/bin/cs/CsAddress.py b/systemvm/debian/opt/cloud/bin/cs/CsAddress.py index be0c521cd03..0bc5d44ac53 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsAddress.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsAddress.py @@ -608,13 +608,13 @@ class CsIP: app.setup() # If redundant then this is dealt with - # by the master backup functions + # by the primary backup functions if not cmdline.is_redundant(): if method == "add": CsPasswdSvc(self.address['public_ip']).start() elif method == "delete": CsPasswdSvc(self.address['public_ip']).stop() - elif cmdline.is_master(): + elif cmdline.is_primary(): if method == "add": CsPasswdSvc(self.get_gateway() + "," + self.address['public_ip']).start() elif method == "delete": diff --git a/systemvm/debian/opt/cloud/bin/cs/CsDatabag.py b/systemvm/debian/opt/cloud/bin/cs/CsDatabag.py index adb9a1aae74..aa738dfe805 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsDatabag.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsDatabag.py @@ -103,23 +103,23 @@ class CsCmdLine(CsDataBag): else: return "unknown" - def is_master(self): + def is_primary(self): if not self.is_redundant(): return False if "redundant_state" in self.idata(): - return self.idata()['redundant_state'] == "MASTER" + return self.idata()['redundant_state'] == "PRIMARY" return False def set_fault_state(self): self.idata()['redundant_state'] = "FAULT" - self.idata()['redundant_master'] = False + self.idata()['redundant_primary'] = False - def set_master_state(self, value): + def set_primary_state(self, value): if value: - self.idata()['redundant_state'] = "MASTER" + self.idata()['redundant_state'] = "PRIMARY" else: 
self.idata()['redundant_state'] = "BACKUP" - self.idata()['redundant_master'] = value + self.idata()['redundant_primary'] = value def get_router_id(self): if "router_id" in self.idata(): diff --git a/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py b/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py index 91b95c6c676..01d2c46b8ee 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py @@ -71,7 +71,7 @@ class CsDhcp(CsDataBag): self.write_hosts() - if not self.cl.is_redundant() or self.cl.is_master(): + if not self.cl.is_redundant() or self.cl.is_primary(): if restart_dnsmasq: CsHelper.service("dnsmasq", "restart") else: diff --git a/systemvm/debian/opt/cloud/bin/cs/CsHelper.py b/systemvm/debian/opt/cloud/bin/cs/CsHelper.py index 00aa4cb6408..c892b5df910 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsHelper.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsHelper.py @@ -29,8 +29,8 @@ from netaddr import * PUBLIC_INTERFACES = {"router": "eth2", "vpcrouter": "eth1"} -STATE_COMMANDS = {"router": "ip addr show dev eth0 | grep inet | wc -l | xargs bash -c 'if [ $0 == 2 ]; then echo \"MASTER\"; else echo \"BACKUP\"; fi'", - "vpcrouter": "ip addr show dev eth1 | grep state | awk '{print $9;}' | xargs bash -c 'if [ $0 == \"UP\" ]; then echo \"MASTER\"; else echo \"BACKUP\"; fi'"} +STATE_COMMANDS = {"router": "ip addr show dev eth0 | grep inet | wc -l | xargs bash -c 'if [ $0 == 2 ]; then echo \"PRIMARY\"; else echo \"BACKUP\"; fi'", + "vpcrouter": "ip addr show dev eth1 | grep state | awk '{print $9;}' | xargs bash -c 'if [ $0 == \"UP\" ]; then echo \"PRIMARY\"; else echo \"BACKUP\"; fi'"} def reconfigure_interfaces(router_config, interfaces): @@ -41,14 +41,14 @@ def reconfigure_interfaces(router_config, interfaces): cmd = "ip link set %s up" % interface.get_device() # If redundant only bring up public interfaces that are not eth1. # Reason: private gateways are public interfaces. 
- # master.py and keepalived will deal with eth1 public interface. + # configure_router.py and keepalived will deal with eth1 public interface. if router_config.is_redundant() and interface.is_public(): state_cmd = STATE_COMMANDS[router_config.get_type()] logging.info("Check state command => %s" % state_cmd) state = execute(state_cmd)[0] logging.info("Route state => %s" % state) - if interface.get_device() != PUBLIC_INTERFACES[router_config.get_type()] and state == "MASTER": + if interface.get_device() != PUBLIC_INTERFACES[router_config.get_type()] and state == "PRIMARY": execute(cmd) else: execute(cmd) diff --git a/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py b/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py index 190de1ab82e..23622fdbf5d 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py @@ -199,20 +199,20 @@ class CsRedundant(object): if keepalived_conf.is_changed() or force_keepalived_restart: keepalived_conf.commit() os.chmod(self.KEEPALIVED_CONF, 0o644) - if force_keepalived_restart or not self.cl.is_master(): + if force_keepalived_restart or not self.cl.is_primary(): CsHelper.service("keepalived", "restart") else: CsHelper.service("keepalived", "reload") def release_lock(self): try: - os.remove("/tmp/master_lock") + os.remove("/tmp/primary_lock") except OSError: pass def set_lock(self): """ - Make sure that master state changes happen sequentially + Make sure that primary state changes happen sequentially """ iterations = 10 time_between = 1 @@ -220,13 +220,13 @@ class CsRedundant(object): for iter in range(0, iterations): try: s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - s.bind('/tmp/master_lock') + s.bind('/tmp/primary_lock') return s except socket.error, e: error_code = e.args[0] error_string = e.args[1] print "Process already running (%d:%s). 
Exiting" % (error_code, error_string) - logging.info("Master is already running, waiting") + logging.info("Primary is already running, waiting") sleep(time_between) def set_fault(self): @@ -290,7 +290,7 @@ class CsRedundant(object): CsHelper.service("dnsmasq", "stop") - self.cl.set_master_state(False) + self.cl.set_primary_state(False) self.cl.save() self.release_lock() @@ -298,14 +298,14 @@ class CsRedundant(object): CsHelper.reconfigure_interfaces(self.cl, interfaces) logging.info("Router switched to backup mode") - def set_master(self): - """ Set the current router to master """ + def set_primary(self): + """ Set the current router to primary """ if not self.cl.is_redundant(): - logging.error("Set master called on non-redundant router") + logging.error("Set primary called on non-redundant router") return self.set_lock() - logging.debug("Setting router to master") + logging.debug("Setting router to primary") dev = '' interfaces = [interface for interface in self.address.get_interfaces() if interface.is_public()] @@ -348,7 +348,7 @@ class CsRedundant(object): CsPasswdSvc(interface.get_gateway() + "," + interface.get_ip()).restart() CsHelper.service("dnsmasq", "restart") - self.cl.set_master_state(True) + self.cl.set_primary_state(True) self.cl.save() self.release_lock() @@ -362,7 +362,7 @@ class CsRedundant(object): public_devices.sort() # Ensure the default route is added, or outgoing traffic from VMs with static NAT on - # the subsequent interfaces will go from he wrong IP + # the subsequent interfaces will go from the wrong IP route = CsRoute() dev = '' for interface in interfaces: @@ -381,7 +381,7 @@ class CsRedundant(object): if interface.get_device() == device: CsHelper.execute("arping -I %s -U %s -c 1" % (device, interface.get_ip())) - logging.info("Router switched to master mode") + logging.info("Router switched to primary mode") def _collect_ignore_ips(self): """ diff --git a/systemvm/debian/opt/cloud/bin/ipassoc.sh 
b/systemvm/debian/opt/cloud/bin/ipassoc.sh index 9bcb13279d7..e653e72aa62 100755 --- a/systemvm/debian/opt/cloud/bin/ipassoc.sh +++ b/systemvm/debian/opt/cloud/bin/ipassoc.sh @@ -358,7 +358,7 @@ cflag= nflag= op="" -is_master=0 +is_primary=0 is_redundant=0 if_keep_state=0 IFACEGWIPFILE='/var/cache/cloud/ifaceGwIp' @@ -366,13 +366,13 @@ grep "redundant_router=1" /var/cache/cloud/cmdline > /dev/null if [ $? -eq 0 ] then is_redundant=1 - sudo /opt/cloud/bin/checkrouter.sh --no-lock|grep "Status: MASTER" > /dev/null 2>&1 + sudo /opt/cloud/bin/checkrouter.sh --no-lock|grep "Status: PRIMARY" > /dev/null 2>&1 if [ $? -eq 0 ] then - is_master=1 + is_primary=1 fi fi -if [ $is_redundant -eq 1 -a $is_master -ne 1 ] +if [ $is_redundant -eq 1 -a $is_primary -ne 1 ] then if_keep_state=1 fi diff --git a/systemvm/debian/opt/cloud/templates/check_heartbeat.sh.templ b/systemvm/debian/opt/cloud/templates/check_heartbeat.sh.templ index 62a2b180e6c..47db33e716e 100755 --- a/systemvm/debian/opt/cloud/templates/check_heartbeat.sh.templ +++ b/systemvm/debian/opt/cloud/templates/check_heartbeat.sh.templ @@ -58,7 +58,7 @@ then systemctl stop --now conntrackd >> $ROUTER_LOG 2>&1 #Set fault so we have the same effect as a KeepaliveD fault. 
- python /opt/cloud/bin/master.py --fault + python /opt/cloud/bin/configure_router.py --fault pkill -9 keepalived >> $ROUTER_LOG 2>&1 || true pkill -9 conntrackd >> $ROUTER_LOG 2>&1 || true diff --git a/systemvm/debian/opt/cloud/templates/checkrouter.sh.templ b/systemvm/debian/opt/cloud/templates/checkrouter.sh.templ index fcfc58d5b95..2aff777b9a5 100755 --- a/systemvm/debian/opt/cloud/templates/checkrouter.sh.templ +++ b/systemvm/debian/opt/cloud/templates/checkrouter.sh.templ @@ -21,13 +21,13 @@ INTERFACE=eth1 ROUTER_TYPE=$(cat /etc/cloudstack/cmdline.json | grep type | awk '{print $2;}' | sed -e 's/[,\"]//g') if [ $ROUTER_TYPE = "router" ] then - ROUTER_STATE=$(ip addr | grep eth0 | grep inet | wc -l | xargs bash -c 'if [ $0 == 2 ]; then echo "MASTER"; else echo "BACKUP"; fi') + ROUTER_STATE=$(ip addr | grep eth0 | grep inet | wc -l | xargs bash -c 'if [ $0 == 2 ]; then echo "PRIMARY"; else echo "BACKUP"; fi') STATUS=$ROUTER_STATE else ROUTER_STATE=$(ip addr | grep $INTERFACE | grep state | awk '{print $9;}') if [ $ROUTER_STATE = "UP" ] then - STATUS=MASTER + STATUS=PRIMARY elif [ $ROUTER_STATE = "DOWN" ] then STATUS=BACKUP diff --git a/systemvm/debian/opt/cloud/templates/keepalived.conf.templ b/systemvm/debian/opt/cloud/templates/keepalived.conf.templ index ca9f231a541..a6bd6620241 100644 --- a/systemvm/debian/opt/cloud/templates/keepalived.conf.templ +++ b/systemvm/debian/opt/cloud/templates/keepalived.conf.templ @@ -48,7 +48,7 @@ vrrp_instance inside_network { heartbeat } - notify_backup "/opt/cloud/bin/master.py --backup" - notify_master "/opt/cloud/bin/master.py --master" - notify_fault "/opt/cloud/bin/master.py --fault" + notify_backup "/opt/cloud/bin/configure_router.py --backup" + notify_master "/opt/cloud/bin/configure_router.py --primary" + notify_fault "/opt/cloud/bin/configure_router.py --fault" } diff --git a/test/integration/component/maint/test_redundant_router.py b/test/integration/component/maint/test_redundant_router.py index 
94ddae255f0..fe27888bbd1 100644 --- a/test/integration/component/maint/test_redundant_router.py +++ b/test/integration/component/maint/test_redundant_router.py @@ -211,7 +211,7 @@ class TestCreateRvRNetwork(cloudstackTestCase): # - same public IP # - same MAC address of public NIC # - different guestip address - # - redundant state (MASTER or BACKUP) + # - redundant state (PRIMARY or BACKUP) # - same gateway for the public traffic # 6. all routers, networks and user VMs are cleaned up @@ -284,34 +284,34 @@ class TestCreateRvRNetwork(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] self.debug("Redundant states: %s, %s" % ( - master_router.redundantstate, + primary_router.redundantstate, backup_router.redundantstate )) self.assertEqual( - master_router.publicip, + primary_router.publicip, backup_router.publicip, - "Public Ip should be same for both(MASTER & BACKUP)" + "Public Ip should be same for both(PRIMARY & BACKUP)" ) self.assertEqual( - master_router.redundantstate, - "MASTER", - "Redundant state of router should be MASTER" + primary_router.redundantstate, + "PRIMARY", + "Redundant state of router should be PRIMARY" ) self.assertEqual( backup_router.redundantstate, @@ -319,15 +319,15 @@ class TestCreateRvRNetwork(cloudstackTestCase): "Redundant state of router should be BACKUP" ) self.assertNotEqual( - master_router.guestipaddress, + primary_router.guestipaddress, backup_router.guestipaddress, - "Both (MASTER 
& BACKUP) routers should not have same guest IP" + "Both (PRIMARY & BACKUP) routers should not have same guest IP" ) self.assertNotEqual( - master_router.guestmacaddress, + primary_router.guestmacaddress, backup_router.guestmacaddress, - "Both (MASTER & BACKUP) routers should not have same guestMAC" + "Both (PRIMARY & BACKUP) routers should not have same guestMAC" ) return @@ -413,7 +413,7 @@ class TestCreateRvRNetworkNonDefaultGuestCidr(cloudstackTestCase): # - same public IP # - same MAC address of public NIC # - different guestip address - # - redundant state (MASTER or BACKUP) + # - redundant state (PRIMARY or BACKUP) # - same gateway for the public traffic # 6. all routers, networks and user VMs are cleaned up @@ -498,30 +498,30 @@ class TestCreateRvRNetworkNonDefaultGuestCidr(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] self.assertEqual( - master_router.publicip, + primary_router.publicip, backup_router.publicip, - "Public Ip should be same for both(MASTER & BACKUP)" + "Public Ip should be same for both(PRIMARY & BACKUP)" ) self.assertEqual( - master_router.redundantstate, - "MASTER", - "Redundant state of router should be MASTER" + primary_router.redundantstate, + "PRIMARY", + "Redundant state of router should be PRIMARY" ) self.assertEqual( backup_router.redundantstate, @@ -529,15 +529,15 @@ class TestCreateRvRNetworkNonDefaultGuestCidr(cloudstackTestCase): "Redundant state of router should be BACKUP" 
) self.assertNotEqual( - master_router.guestipaddress, + primary_router.guestipaddress, backup_router.guestipaddress, - "Both (MASTER & BACKUP) routers should not have same guest IP" + "Both (PRIMARY & BACKUP) routers should not have same guest IP" ) self.assertNotEqual( - master_router.guestmacaddress, + primary_router.guestmacaddress, backup_router.guestmacaddress, - "Both (MASTER & BACKUP) routers should not have same guestMAC" + "Both (PRIMARY & BACKUP) routers should not have same guestMAC" ) return @@ -622,13 +622,13 @@ class TestRVRInternals(cloudstackTestCase): # Validate the following: # 1. listNetworks lists network in Allocated state # 2. listRouters lists no routers created yet - # 3. listRouters returns Master and Backup routers + # 3. listRouters returns Primary and Backup routers # 4. ssh in to both routers and verify: - # - MASTER router has eth2 with public Ip address + # - PRIMARY router has eth2 with public Ip address # - BACKUP router has only guest eth0 and link local eth1 - # - Broadcast on MASTER eth2 is non-zero (0.0.0.0) + # - Broadcast on PRIMARY eth2 is non-zero (0.0.0.0) # - execute checkrouter.sh in router home and check if it is status - # "MASTER|BACKUP" as returned by the listRouters API + # "PRIMARY|BACKUP" as returned by the listRouters API # 5. 
DNS of the user VM is set to RedundantRouter Gateway # (/etc/resolv.conf) # Check that the default gateway for the guest is the rvr gateway @@ -703,35 +703,35 @@ class TestRVRInternals(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] self.debug("Fetching the host details for double hop into router") hosts = Host.list( self.apiclient, - id=master_router.hostid + id=primary_router.hostid ) self.assertEqual( isinstance(hosts, list), True, "List hosts should return a valid list" ) - master_host = hosts[0] - self.debug("Host for master router: %s" % master_host.name) - self.debug("Host for master router: %s" % master_host.ipaddress) + primary_host = hosts[0] + self.debug("Host for primary router: %s" % primary_host.name) + self.debug("Host for primary router: %s" % primary_host.ipaddress) hosts = Host.list( self.apiclient, @@ -745,37 +745,37 @@ class TestRVRInternals(cloudstackTestCase): backup_host = hosts[0] self.debug("Host for backup router: %s" % backup_host.name) self.debug("Host for backup router: %s" % backup_host.ipaddress) - self.debug(master_router.linklocalip) + self.debug(primary_router.linklocalip) - # Check eth2 port for master router + # Check eth2 port for primary router if self.hypervisor.lower() in ('vmware', 'hyperv'): result = get_process_status( self.apiclient.connection.mgtSvr, 22, self.apiclient.connection.user, self.apiclient.connection.passwd, - master_router.linklocalip, + 
primary_router.linklocalip, 'ip addr show eth2', hypervisor=self.hypervisor ) else: result = get_process_status( - master_host.ipaddress, + primary_host.ipaddress, 22, self.testdata['configurableData']['host']["username"], self.testdata['configurableData']['host']["password"], - master_router.linklocalip, + primary_router.linklocalip, "ip addr show eth2" ) res = str(result) self.debug("Command 'ip addr show eth2': %s" % result) - self.debug("Router's public Ip: %s" % master_router.publicip) + self.debug("Router's public Ip: %s" % primary_router.publicip) self.assertEqual( res.count("state UP"), 1, - "MASTER router's public interface should be UP" + "PRIMARY router's public interface should be UP" ) self.assertEqual( result.count('brd 0.0.0.0'), @@ -831,8 +831,8 @@ class TestRVRInternals(cloudstackTestCase): self.assertNotEqual( vm.nic[0].gateway, - master_router.publicip, - "The gateway of user VM should be same as master router" + primary_router.publicip, + "The gateway of user VM should be same as primary router" ) self.assertNotEqual( @@ -943,8 +943,8 @@ class TestRvRRedundancy(cloudstackTestCase): return @attr(tags=["advanced", "advancedns", "ssh"]) - def test_01_stopMasterRvR(self): - """Test stop master RVR + def test_01_stopPrimaryRvR(self): + """Test stop primary RVR """ # Steps to validate @@ -954,17 +954,17 @@ class TestRvRRedundancy(cloudstackTestCase): # network # 3. deployVM in above user account in the created network. VM is # successfully Running - # 4. listRouters that has redundantstate=MASTER. only one router is - # returned with redundantstate = MASTER for this network - # 5. stopRouter that is Master. Router goes to stopped state + # 4. listRouters that has redundantstate=PRIMARY. only one router is + # returned with redundantstate = PRIMARY for this network + # 5. stopRouter that is Primary. Router goes to stopped state # successfully - # 6. listRouters in the account and in the network. Lists old MASTER + # 6. 
listRouters in the account and in the network. Lists old PRIMARY # router in redundantstate=UNKNOWN, and the old BACKUP router as - # new MASTER + # new PRIMARY # 7. start the stopped router. Stopped rvr starts up successfully and # is in Running state # 8. listRouters in the account and in the network. Router shows up as - # BACKUP and NOT MASTER, should have only one BACKUP and one MASTER + # BACKUP and NOT PRIMARY, should have only one BACKUP and one PRIMARY # at the end, public IP of the SourceNAT should remain same after # reboot # 9. delete the account @@ -978,26 +978,26 @@ class TestRvRRedundancy(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] - self.debug("Stopping the MASTER router") + self.debug("Stopping the PRIMARY router") try: - Router.stop(self.apiclient, id=master_router.id) + Router.stop(self.apiclient, id=primary_router.id) except Exception as e: - self.fail("Failed to stop master router: %s" % e) + self.fail("Failed to stop primary router: %s" % e) # wait for VR to update state time.sleep(self.testdata["sleep"]) @@ -1005,17 +1005,17 @@ class TestRvRRedundancy(cloudstackTestCase): self.debug("Listing routers for network: %s" % self.network.name) routers = Router.list( self.apiclient, - id=master_router.id, + id=primary_router.id, listall=True ) self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and 
backup routers" ) self.assertIn( routers[0].redundantstate, [ - 'UNKNOWN', 'FAULT'], "Redundant state of the master router\ + 'UNKNOWN', 'FAULT'], "Redundant state of the primary router\ should be UNKNOWN/FAULT but is %s" % routers[0].redundantstate) @@ -1034,26 +1034,26 @@ class TestRvRRedundancy(cloudstackTestCase): ) self.assertEqual( routers[0].redundantstate, - 'MASTER', - "Redundant state of the router should be MASTER but is %s" % + 'PRIMARY', + "Redundant state of the router should be PRIMARY but is %s" % routers[0].redundantstate) - self.debug("Starting the old MASTER router") + self.debug("Starting the old PRIMARY router") try: - Router.start(self.apiclient, id=master_router.id) - self.debug("old MASTER router started") + Router.start(self.apiclient, id=primary_router.id) + self.debug("old PRIMARY router started") except Exception as e: - self.fail("Failed to start master router: %s" % e) + self.fail("Failed to start primary router: %s" % e) # wait for VR to update state time.sleep(self.testdata["sleep"]) self.debug( - "Checking state of the master router in %s" % + "Checking state of the primary router in %s" % self.network.name) routers = Router.list( self.apiclient, - id=master_router.id, + id=primary_router.id, listall=True ) self.assertEqual( @@ -1067,7 +1067,7 @@ class TestRvRRedundancy(cloudstackTestCase): "Redundant state of the router should be BACKUP but is %s" % routers[0].redundantstate) self.assertEqual( - master_router.publicip, + primary_router.publicip, routers[0].publicip, "Public IP should be same after reboot" ) @@ -1085,16 +1085,16 @@ class TestRvRRedundancy(cloudstackTestCase): # network # 3. deployVM in above user account in the created network. VM is # successfully Running - # 4. listRouters that has redundantstate=MASTER. only one router is - # returned with redundantstate = MASTER for this network + # 4. listRouters that has redundantstate=PRIMARY. only one router is + # returned with redundantstate = PRIMARY for this network # 5. 
stopRouter that is BACKUP. Router goes to stopped state # successfully - # 6. listRouters in the account and in the network. Lists old MASTER + # 6. listRouters in the account and in the network. Lists old PRIMARY # router in redundantstate=UNKNOWN # 7. start the stopped router. Stopped rvr starts up successfully and # is in Running state # 8. listRouters in the account and in the network. Router shows up as - # BACKUP and NOT MASTER, should have only one BACKUP and one MASTER + # BACKUP and NOT PRIMARY, should have only one BACKUP and one PRIMARY # at the end, public IP of the SourceNAT should remain same after # reboot # 9. delete the account @@ -1108,19 +1108,19 @@ class TestRvRRedundancy(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] self.debug("Stopping the BACKUP router") @@ -1143,7 +1143,7 @@ class TestRvRRedundancy(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertIn( routers[0].redundantstate, [ @@ -1152,22 +1152,22 @@ class TestRvRRedundancy(cloudstackTestCase): routers[0].redundantstate) self.debug( - "Checking state of the master router in %s" % + "Checking state of the primary router in %s" % self.network.name) routers = Router.list( self.apiclient, - id=master_router.id, + id=primary_router.id, listall=True ) self.assertEqual( isinstance(routers, 
list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( routers[0].redundantstate, - 'MASTER', - "Redundant state of the router should be MASTER but is %s" % + 'PRIMARY', + "Redundant state of the router should be PRIMARY but is %s" % routers[0].redundantstate) self.debug("Starting the old BACKUP router") @@ -1175,7 +1175,7 @@ class TestRvRRedundancy(cloudstackTestCase): Router.start(self.apiclient, id=backup_router.id) self.debug("old BACKUP router started") except Exception as e: - self.fail("Failed to stop master router: %s" % e) + self.fail("Failed to stop primary router: %s" % e) # wait for VR to start and update state time.sleep(self.testdata["sleep"]) @@ -1206,8 +1206,8 @@ class TestRvRRedundancy(cloudstackTestCase): return @attr(tags=["advanced", "advancedns", "ssh"]) - def test_03_rebootMasterRvR(self): - """Test reboot master RVR + def test_03_rebootPrimaryRvR(self): + """Test reboot primary RVR """ # Steps to validate @@ -1217,12 +1217,12 @@ class TestRvRRedundancy(cloudstackTestCase): # network # 3. deployVM in above user account in the created network. VM is # successfully Running - # 4. listRouters that has redundantstate=MASTER. only one router is - # returned with redundantstate = MASTER for this network - # 5. reboot router that is MASTER. Router reboots state + # 4. listRouters that has redundantstate=PRIMARY. only one router is + # returned with redundantstate = PRIMARY for this network + # 5. reboot router that is PRIMARY. Router reboots state # successfully - # 6. lists old MASTER router in redundantstate=BACKUP and the old - # BACKUP router as new MASTER + public IP of the SourceNAT should + # 6. 
lists old PRIMARY router in redundantstate=BACKUP and the old + # BACKUP router as new PRIMARY + public IP of the SourceNAT should # remain same after the reboot self.debug("Listing routers for network: %s" % self.network.name) @@ -1234,42 +1234,42 @@ class TestRvRRedundancy(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] - self.debug("Rebooting the master router") + self.debug("Rebooting the primary router") try: - Router.reboot(self.apiclient, id=master_router.id) + Router.reboot(self.apiclient, id=primary_router.id) except Exception as e: - self.fail("Failed to reboot MASTER router: %s" % e) + self.fail("Failed to reboot PRIMARY router: %s" % e) # wait for VR to update state time.sleep(self.testdata["sleep"]) self.debug( - "Checking state of the master router in %s" % + "Checking state of the primary router in %s" % self.network.name) routers = Router.list( self.apiclient, - id=master_router.id, + id=primary_router.id, listall=True ) self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( routers[0].redundantstate, @@ -1288,15 +1288,15 @@ class TestRvRRedundancy(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( 
routers[0].redundantstate, - 'MASTER', - "Redundant state of the router should be MASTER but is %s" % + 'PRIMARY', + "Redundant state of the router should be PRIMARY but is %s" % routers[0].redundantstate) self.assertEqual( - master_router.publicip, + primary_router.publicip, routers[0].publicip, "Public IP should be same after reboot" ) @@ -1314,12 +1314,12 @@ class TestRvRRedundancy(cloudstackTestCase): # network # 3. deployVM in above user account in the created network. VM is # successfully Running - # 4. listRouters that has redundantstate=MASTER. only one router is - # returned with redundantstate = MASTER for this network + # 4. listRouters that has redundantstate=PRIMARY. only one router is + # returned with redundantstate = PRIMARY for this network # 5. reboot router that is BACKUP. Router reboots state # successfully # 6. lists old BACKUP router in redundantstate=BACKUP, and the old - # MASTER router is still MASTER+ public IP of the SourceNAT should + # PRIMARY router is still PRIMARY+ public IP of the SourceNAT should # remain same after the reboot self.debug("Listing routers for network: %s" % self.network.name) @@ -1331,19 +1331,19 @@ class TestRvRRedundancy(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] self.debug("Rebooting the backup router") @@ -1366,7 +1366,7 @@ class TestRvRRedundancy(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master 
and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( routers[0].redundantstate, @@ -1375,25 +1375,25 @@ class TestRvRRedundancy(cloudstackTestCase): routers[0].redundantstate) self.debug( - "Checking state of the master router in %s" % + "Checking state of the Primary router in %s" % self.network.name) routers = Router.list( self.apiclient, - id=master_router.id, + id=primary_router.id, listall=True ) self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( routers[0].redundantstate, - 'MASTER', - "Redundant state of the router should be MASTER but is %s" % + 'PRIMARY', + "Redundant state of the router should be PRIMARY but is %s" % routers[0].redundantstate) self.assertEqual( - master_router.publicip, + primary_router.publicip, routers[0].publicip, "Public IP should be same after reboot" ) @@ -1411,8 +1411,8 @@ class TestRvRRedundancy(cloudstackTestCase): # network # 3. deployVM in above user account in the created network. VM is # successfully Running - # 4. listRouters that has redundantstate=MASTER. only one router is - # returned with redundantstate = MASTER for this network + # 4. listRouters that has redundantstate=PRIMARY. only one router is + # returned with redundantstate = PRIMARY for this network # 5. stop router that is BACKUP. # 6. listRouters in the account and in the network # 7. 
deployVM in the user account in the created network @@ -1428,15 +1428,15 @@ class TestRvRRedundancy(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) - if routers[0].redundantstate == 'MASTER': + if routers[0].redundantstate == 'PRIMARY': backup_router = routers[1] else: backup_router = routers[0] @@ -1461,7 +1461,7 @@ class TestRvRRedundancy(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertIn( routers[0].redundantstate, @@ -1512,7 +1512,7 @@ class TestRvRRedundancy(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( routers[0].redundantstate, @@ -1537,9 +1537,9 @@ class TestRvRRedundancy(cloudstackTestCase): - def get_master_and_backupRouter(self): + def get_primary_and_backupRouter(self): retry = 4 - master_router = backup_router=None + primary_router = backup_router=None while retry > 0: routers = Router.list( self.apiclient, @@ -1549,22 +1549,22 @@ class TestRvRRedundancy(cloudstackTestCase): retry = retry-1 if len(routers) < 2: continue - if not (routers[0].redundantstate == 'MASTER' or routers[1].redundantstate == 'MASTER'): + if not (routers[0].redundantstate == 'PRIMARY' or routers[1].redundantstate == 'PRIMARY'): continue; - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] break else: - master_router = routers[1] + primary_router = routers[1] 
backup_router = routers[0] break - self.info("master_router: %s, backup_router: %s" % (master_router, backup_router)) - return master_router, backup_router + self.info("primary_router: %s, backup_router: %s" % (primary_router, backup_router)) + return primary_router, backup_router def chek_for_new_backupRouter(self,old_backup_router): - master_router, backup_router = self.get_master_and_backupRouter() + primary_router, backup_router = self.get_primary_and_backupRouter() retry = 4 self.info("Checking if new router is getting created.") self.info("old_backup_router:"+old_backup_router.name+" new_backup_router:"+backup_router.name) @@ -1574,7 +1574,7 @@ class TestRvRRedundancy(cloudstackTestCase): if retry == 0: break; time.sleep(self.testdata["sleep"]) - master_router, backup_router = self.get_master_and_backupRouter() + primary_router, backup_router = self.get_primary_and_backupRouter() if retry == 0: self.fail("New router creation taking too long, timed out") @@ -1602,18 +1602,18 @@ class TestRvRRedundancy(cloudstackTestCase): # Steps to validate # update network to a new offering - # check if the master router is running while backup is starting. - # check if the backup is running while master is starting. + # check if the primary router is running while backup is starting. + # check if the backup is running while primary is starting. # check if both the routers are running after the update is complete. #clean up the network to make sure it is in proper state. 
self.network.restart(self.apiclient,cleanup=True) time.sleep(self.testdata["sleep"]) self.wait_untill_router_stabilises() - old_master_router, old_backup_router = self.get_master_and_backupRouter() - self.info("old_master_router:"+old_master_router.name+" old_backup_router"+old_backup_router.name) + old_primary_router, old_backup_router = self.get_primary_and_backupRouter() + self.info("old_primary_router:"+old_primary_router.name+" old_backup_router"+old_backup_router.name) #chek if the network is in correct state - self.assertEqual(old_master_router.state, "Running", "The master router is not running, network is not in a correct state to start the test") + self.assertEqual(old_primary_router.state, "Running", "The primary router is not running, network is not in a correct state to start the test") self.assertEqual(old_backup_router.state, "Running", "The backup router is not running, network is not in a correct state to start the test") worker, monitor = multiprocessing.Pipe() @@ -1627,30 +1627,30 @@ class TestRvRRedundancy(cloudstackTestCase): self.info("Network update Started, the old backup router will get destroyed and a new router will be created") self.chek_for_new_backupRouter(old_backup_router) - master_router, new_backup_router=self.get_master_and_backupRouter() - #the state of the master router should be running. while backup is being updated - self.assertEqual(master_router.state, "Running", "State of the master router is not running") - self.assertEqual(master_router.redundantstate, 'MASTER', "Redundant state of the master router should be MASTER, but it is %s"%master_router.redundantstate) + primary_router, new_backup_router=self.get_primary_and_backupRouter() + #the state of the primary router should be running. 
while backup is being updated + self.assertEqual(primary_router.state, "Running", "State of the primary router is not running") + self.assertEqual(primary_router.redundantstate, 'PRIMARY', "Redundant state of the primary router should be PRIMARY, but it is %s"%primary_router.redundantstate) self.info("Old backup router:"+old_backup_router.name+" is destroyed and new router:"+new_backup_router.name+" got created") - #wait for the new backup to become master. + #wait for the new backup to become primary. retry = 4 - while new_backup_router.name != master_router.name: + while new_backup_router.name != primary_router.name: retry = retry-1 if retry == 0: break time.sleep(self.testdata["sleep"]) - self.info("wating for backup router to become master router name:"+new_backup_router.name) - master_router, backup_router = self.get_master_and_backupRouter() + self.info("wating for backup router to become primary router name:"+new_backup_router.name) + primary_router, backup_router = self.get_primary_and_backupRouter() if retry == 0: - self.fail("timed out while waiting for new backup router to change state to MASTER.") + self.fail("timed out while waiting for new backup router to change state to PRIMARY.") - #new backup router has become master. - self.info("newly created router:"+new_backup_router.name+" has changed state to Master") - self.info("old master router:"+old_master_router.name+"is destroyed") - #old master will get destroyed and a new backup will be created. + #new backup router has become primary. + self.info("newly created router:"+new_backup_router.name+" has changed state to Primary") + self.info("old primary router:"+old_primary_router.name+"is destroyed") + #old primary will get destroyed and a new backup will be created. 
#wait until new backup changes state from unknown to backup - master_router, backup_router = self.get_master_and_backupRouter() + primary_router, backup_router = self.get_primary_and_backupRouter() retry = 4 while backup_router.redundantstate != 'BACKUP': retry = retry-1 @@ -1658,14 +1658,14 @@ class TestRvRRedundancy(cloudstackTestCase): if retry == 0: break time.sleep(self.testdata["sleep"]) - master_router, backup_router = self.get_master_and_backupRouter() - self.assertEqual(master_router.state, "Running", "State of the master router is not running") - self.assertEqual(master_router.redundantstate, 'MASTER', "Redundant state of the master router should be MASTER, but it is %s"%master_router.redundantstate) + primary_router, backup_router = self.get_primary_and_backupRouter() + self.assertEqual(primary_router.state, "Running", "State of the primary router is not running") + self.assertEqual(primary_router.redundantstate, 'PRIMARY', "Redundant state of the primary router should be PRIMARY, but it is %s"%primary_router.redundantstate) if retry == 0: - self.fail("timed out while waiting for new backup rotuer to change state to MASTER.") + self.fail("timed out while waiting for new backup rotuer to change state to PRIMARY.") #the network update is complete.finally both the router should be running. 
- new_master_router, new_backup_router=self.get_master_and_backupRouter() - self.assertEqual(new_master_router.state, "Running", "State of the master router:"+new_master_router.name+" is not running") + new_primary_router, new_backup_router=self.get_primary_and_backupRouter() + self.assertEqual(new_primary_router.state, "Running", "State of the primary router:"+new_primary_router.name+" is not running") self.assertEqual(new_backup_router.state, "Running", "State of the backup router:"+new_backup_router.name+" is not running") worker_process.join() diff --git a/test/integration/component/maint/test_redundant_router_deployment_planning.py b/test/integration/component/maint/test_redundant_router_deployment_planning.py index b63cda94e58..eb68c435250 100644 --- a/test/integration/component/maint/test_redundant_router_deployment_planning.py +++ b/test/integration/component/maint/test_redundant_router_deployment_planning.py @@ -215,7 +215,7 @@ class TestRvRDeploymentPlanning(cloudstackTestCase): # 1. listNetworkOfferings should show created offering for RvR # 2. listNetworks should show the created network in allocated state # 3. VM should be deployed and in Running state - # 4. There should be two routers (MASTER and BACKUP) for this network + # 4. There should be two routers (PRIMARY and BACKUP) for this network # ensure both routers should be on different pods self.debug("Checking if the current zone has 2 active pods in it..") @@ -317,12 +317,12 @@ class TestRvRDeploymentPlanning(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) self.assertNotEqual( routers[0].podid, @@ -350,7 +350,7 @@ class TestRvRDeploymentPlanning(cloudstackTestCase): # 1. 
listNetworkOfferings should show created offering for RvR # 2. listNetworks should show the created network in allocated state # 3. VM should be deployed and in Running state - # 4. There should be two routers (MASTER and BACKUP) for this network + # 4. There should be two routers (PRIMARY and BACKUP) for this network # ensure both routers should be on different pods self.debug("Checking if the current zone has 2 active pods in it..") @@ -481,12 +481,12 @@ class TestRvRDeploymentPlanning(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) hosts = Host.list( @@ -555,7 +555,7 @@ class TestRvRDeploymentPlanning(cloudstackTestCase): # 2. listNetworks should show the created network in allocated state # 3. VM should be deployed and in Running state and on the specified # host - # 4. There should be two routers (MASTER and BACKUP) for this network + # 4. There should be two routers (PRIMARY and BACKUP) for this network # ensure both routers should be on different storage pools self.debug( @@ -732,12 +732,12 @@ class TestRvRDeploymentPlanning(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) self.assertNotEqual( routers[0].hostid, @@ -792,7 +792,7 @@ class TestRvRDeploymentPlanning(cloudstackTestCase): # 1. listNetworkOfferings should show created offering for RvR # 2. listNetworks should show the created network in allocated state # 3. 
VM should be deployed and in Running state and on specified host - # 4. There should be two routers (MASTER and BACKUP) for this network + # 4. There should be two routers (PRIMARY and BACKUP) for this network # ensure both routers should be on different hosts self.debug( @@ -969,12 +969,12 @@ class TestRvRDeploymentPlanning(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) self.assertNotEqual( routers[0].hostid, diff --git a/test/integration/component/maint/test_redundant_router_network_rules.py b/test/integration/component/maint/test_redundant_router_network_rules.py index da873823eae..7348d162953 100644 --- a/test/integration/component/maint/test_redundant_router_network_rules.py +++ b/test/integration/component/maint/test_redundant_router_network_rules.py @@ -198,14 +198,14 @@ class TestRedundantRouterRulesLifeCycle(cloudstackTestCase): @attr(tags=["advanced", "advancedns", "ssh"], required_hardware="true") def test_networkRules_afterRebootRouters(self): - """Test network rules after master & backup routers rebooted + """Test network rules after primary & backup routers rebooted """ # Steps to validate # 1. listNetworks should show the created network in allocated state # 2. listRouters returns no running routers # 3. VMs should be deployed and in Running state - # 4. should list MASTER and BACKUP routers + # 4. should list PRIMARY and BACKUP routers # 5. listPublicIpAddresses for networkid should show acquired IP addr # 6. listStaticNats for the network associated # 7. listFirewallRules should show allowed ports open @@ -217,9 +217,9 @@ class TestRedundantRouterRulesLifeCycle(cloudstackTestCase): # 13 and 14. 
listLoadBalancerRules should show associated VMs for # public IP # 15. ssh should succeed to the user VMs - # 16. listRouters should show one Router in MASTER state and Running + # 16. listRouters should show one Router in PRIMARY state and Running # 17. ssh should work for PF, FW, and LB ips - # 18. listRouters should show both routers MASTER and BACKUP in + # 18. listRouters should show both routers PRIMARY and BACKUP in # Running state # 19. listPortForwardingRules, listFirewallRules, listLoadBalancerRule # should return empty response @@ -308,19 +308,19 @@ class TestRedundantRouterRulesLifeCycle(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] self.debug("Associating public IP for network: %s" % network.name) @@ -435,11 +435,11 @@ class TestRedundantRouterRulesLifeCycle(cloudstackTestCase): )) lb_rule.assign(self.apiclient, [virtual_machine]) - self.debug("Starting router ID: %s" % master_router.id) + self.debug("Starting router ID: %s" % primary_router.id) for router in routers: try: - self.debug("Rebooting router ID: %s" % master_router.id) + self.debug("Rebooting router ID: %s" % primary_router.id) #Stop the router cmd = rebootRouter.rebootRouterCmd() cmd.id = router.id @@ -456,12 +456,12 @@ class TestRedundantRouterRulesLifeCycle(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup 
routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) for router in routers: self.assertEqual( @@ -510,7 +510,7 @@ class TestRedundantRouterRulesLifeCycle(cloudstackTestCase): # 1. listNetworks should show the created network in allocated state # 2. listRouters returns no running routers # 3. VMs should be deployed and in Running state - # 4. should list MASTER and BACKUP routers + # 4. should list PRIMARY and BACKUP routers # 5. listPublicIpAddresses for networkid should show acquired IP addr # 6. listStaticNats for the network associated # 7. listFirewallRules should show allowed ports open @@ -522,10 +522,10 @@ class TestRedundantRouterRulesLifeCycle(cloudstackTestCase): # 13 and 14. listLoadBalancerRules should show associated VMs for # public IP # 15. ssh should succeed to the user VMs - # 16. listRouters should show one Router in MASTER state and Running & + # 16. listRouters should show one Router in PRIMARY state and Running & # one in BACKUP and Running # 17. ssh should work for PF, FW, and LB ips - # 18. listRouters should show one Router in MASTER state and Running & + # 18. listRouters should show one Router in PRIMARY state and Running & # one in BACKUP and Running # 19. ssh should work for PF, FW, and LB ips # 20. 
listPortForwardingRules, listFirewallRules, listLoadBalancerRule @@ -615,19 +615,19 @@ class TestRedundantRouterRulesLifeCycle(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] self.debug("Associating public IP for network: %s" % network.name) @@ -759,12 +759,12 @@ class TestRedundantRouterRulesLifeCycle(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) for router in routers: self.assertEqual( @@ -819,12 +819,12 @@ class TestRedundantRouterRulesLifeCycle(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) for router in routers: self.assertEqual( @@ -872,7 +872,7 @@ class TestRedundantRouterRulesLifeCycle(cloudstackTestCase): # 1. listNetworks should show the created network in allocated state # 2. listRouters returns no running routers # 3. VMs should be deployed and in Running state - # 4. should list MASTER and BACKUP routers + # 4. 
should list PRIMARY and BACKUP routers # 5. listPublicIpAddresses for networkid should show acquired IP # 6. listRemoteAccessVpns for the network associated should show the # VPN created @@ -962,12 +962,12 @@ class TestRedundantRouterRulesLifeCycle(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) self.debug("Associating public IP for network: %s" % network.name) @@ -1114,15 +1114,15 @@ class TestRedundantRouterRulesLifeCycle(cloudstackTestCase): return @attr(tags=["advanced", "advancedns", "ssh", "needle"], required_hardware="true") - def test_applyNetworkRules_MasterDown_deleteNetworkRules(self): - """Test apply network rules when master down and delete network rules + def test_applyNetworkRules_PrimaryDown_deleteNetworkRules(self): + """Test apply network rules when primary down and delete network rules """ # Steps to validate # 1. listNetworks should show the created network in allocated state # 2. listRouters returns no running routers # 3. VMs should be deployed and in Running state - # 4. should list MASTER and BACKUP routers + # 4. should list PRIMARY and BACKUP routers # 5. listPublicIpAddresses for networkid should show acquired IP addr # 6. listStaticNats for the network associated # 7. listFirewallRules should show allowed ports open @@ -1134,9 +1134,9 @@ class TestRedundantRouterRulesLifeCycle(cloudstackTestCase): # 13 and 14. listLoadBalancerRules should show associated VMs for # public IP # 15. ssh should succeed to the user VMs - # 16. listRouters should show one Router in MASTER state and Running + # 16. listRouters should show one Router in PRIMARY state and Running # 17. ssh should work for PF, FW, and LB ips - # 18. 
listRouters should show both routers MASTER and BACKUP in + # 18. listRouters should show both routers PRIMARY and BACKUP in # Running state # 19. listPortForwardingRules, listFirewallRules, listLoadBalancerRule # should return empty response @@ -1229,27 +1229,27 @@ class TestRedundantRouterRulesLifeCycle(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] - self.debug("Stopping router ID: %s" % master_router.id) + self.debug("Stopping router ID: %s" % primary_router.id) try: - Router.stop(self.apiclient, id=master_router.id) + Router.stop(self.apiclient, id=primary_router.id) except Exception as e: - self.fail("Failed to stop master router becaues of %s" % e) + self.fail("Failed to stop primary router because of %s" % e) self.debug("Associating public IP for network: %s" % network.name) public_ip = PublicIPAddress.create( @@ -1394,12 +1394,12 @@ class TestRedundantRouterRulesLifeCycle(cloudstackTestCase): except Exception as e: self.fail("SSH to guest VM failed: %s" % e) - self.debug("Starting router ID: %s" % master_router.id) + self.debug("Starting router ID: %s" % primary_router.id) try: - Router.start(self.apiclient, id=master_router.id) + Router.start(self.apiclient, id=primary_router.id) except Exception as e: - self.fail("Failed to start master router..") + self.fail("Failed to start primary router..") self.debug("Listing routers for network: %s" % network.name) routers = Router.list( @@ -1410,12 +1410,12 @@ 
class TestRedundantRouterRulesLifeCycle(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) for router in routers: self.assertEqual( diff --git a/test/integration/component/test_acl_isolatednetwork.py b/test/integration/component/test_acl_isolatednetwork.py index a567c26ef69..a1deb93b262 100644 --- a/test/integration/component/test_acl_isolatednetwork.py +++ b/test/integration/component/test_acl_isolatednetwork.py @@ -364,7 +364,7 @@ class TestIsolatedNetwork(cloudstackTestCase): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_01_createNetwork_admin(self): """ - # Validate that Admin should be able to create network for himslef + # Validate that Admin should be able to create network with self-ownership """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -380,12 +380,12 @@ class TestIsolatedNetwork(cloudstackTestCase): self.cleanup.append(network) self.assertEqual(network.state.lower() == ALLOCATED.lower(), True, - "Admin User is not able to create a network for himself") + "Admin User is not able to create a network with self-ownership") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_02_createNetwork_admin_foruserinsamedomain(self): """ - # Validate that Admin should be able to create network for users in his domain + # Validate that Admin should be able to create network for users in their domain """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -403,13 +403,13 @@ class TestIsolatedNetwork(cloudstackTestCase): self.cleanup.append(network) 
self.assertEqual(network.state.lower() == ALLOCATED.lower(), True, - "Admin User is not able to create a network for other users in his domain") + "Admin User is not able to create a network for other users in their domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_03_createNetwork_admin_foruserinotherdomain(self): """ - # Validate that Admin should be able to create network for users in his sub domain + # Validate that Admin should be able to create network for users in their sub domain """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -435,7 +435,7 @@ class TestIsolatedNetwork(cloudstackTestCase): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_04_createNetwork_domaindmin(self): """ - # Validate that Domain admin should be able to create network for himslef + # Validate that Domain admin should be able to create network with self-ownership """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -451,13 +451,13 @@ class TestIsolatedNetwork(cloudstackTestCase): self.cleanup.append(network) self.assertEqual(network.state.lower() == ALLOCATED.lower(), True, - "Domain admin User is not able to create a network for himself") + "Domain admin User is not able to create a network with self-ownership") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_05_createNetwork_domaindmin_foruserinsamedomain(self): """ - # Validate that Domain admin should be able to create network for users in his domain + # Validate that Domain admin should be able to create network for users in their domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -475,13 +475,13 @@ class TestIsolatedNetwork(cloudstackTestCase): self.cleanup.append(network) 
self.assertEqual(network.state.lower() == ALLOCATED.lower(), True, - "Domain admin User is not able to create a network for other users in his domain") + "Domain admin User is not able to create a network for other users in their domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_06_createNetwork_domaindmin_foruserinsubdomain(self): """ - # Validate that Domain admin should be able to create network for users in his sub domain + # Validate that Domain admin should be able to create network for users in their sub domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -499,13 +499,13 @@ class TestIsolatedNetwork(cloudstackTestCase): self.cleanup.append(network) self.assertEqual(network.state.lower() == ALLOCATED.lower(), True, - "Domain admin User is not able to create a network for other users in his sub domain") + "Domain admin User is not able to create a network for other users in their sub domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_07_createNetwork_domaindmin_forcrossdomainuser(self): """ - # Validate that Domain admin should not be able to create network for users in his sub domain + # Validate that Domain admin should not be able to create network for users in their sub domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -521,18 +521,18 @@ class TestIsolatedNetwork(cloudstackTestCase): domainid=self.account_d2a.domainid ) self.cleanup.append(network) - self.fail("Domain admin is allowed to create network for users not in his domain ") + self.fail("Domain admin is allowed to create network for users not in their domain ") except Exception as e: - self.debug("When Domain admin tries to create network for users in his sub domain %s" % e) + self.debug("When Domain admin tries to create network for users in their sub domain %s" % 
e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN): - self.fail("Error message validation failed when Domain admin tries to create network for users not in his domain ") + self.fail("Error message validation failed when Domain admin tries to create network for users not in their domain ") ## Test cases relating to createNetwork as regular user @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_08_createNetwork_user(self): """ - # Validate that Regular should be able to create network for himslef + # Validate that Regular should be able to create network with self-ownership """ self.apiclient.connection.apiKey = self.user_d1a_apikey self.apiclient.connection.securityKey = self.user_d1a_secretkey @@ -549,12 +549,12 @@ class TestIsolatedNetwork(cloudstackTestCase): self.assertEqual(network.state.lower() == ALLOCATED.lower(), True, - "User is not able to create a network for himself") + "User is not able to create a network with self-ownership") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_09_createNetwork_user_foruserinsamedomain(self): """ - # Validate that Regular user should NOT be able to create network for users in his domain + # Validate that Regular user should NOT be able to create network for users in their domain """ self.apiclient.connection.apiKey = self.user_d1a_apikey self.apiclient.connection.securityKey = self.user_d1a_secretkey @@ -571,11 +571,11 @@ class TestIsolatedNetwork(cloudstackTestCase): domainid=self.account_d1b.domainid ) self.cleanup.append(network) - self.fail("User is allowed to create network for other users in his domain ") + self.fail("User is allowed to create network for other users in their domain ") except Exception as e: - self.debug("When user tries to create network for users in his domain %s" % e) + self.debug("When user tries to create network for users in their domain %s" % e) if not 
CloudstackAclException.verifyMsginException(e, CloudstackAclException.UNABLE_TO_LIST_NETWORK_ACCOUNT): - self.fail("Error message validation failed when when User tries to create network for other users in his domain ") + self.fail("Error message validation failed when when User tries to create network for other users in their domain ") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_10_createNetwork_user_foruserinotherdomain(self): @@ -597,18 +597,18 @@ class TestIsolatedNetwork(cloudstackTestCase): domainid=self.account_d11a.domainid ) self.cleanup.append(network) - self.fail("User is allowed to create network for users not in his domain ") + self.fail("User is allowed to create network for users not in their domain ") except Exception as e: self.debug("When user tries to create network for users in other domain %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.UNABLE_TO_LIST_NETWORK_ACCOUNT): - self.fail("Error message validation failed when User tries to create network for users not in his domain ") + self.fail("Error message validation failed when User tries to create network for users not in their domain ") ## Test cases relating to Deploying VM in a network as admin user @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_11_deployvm_admin(self): """ - # Validate that Admin should be able to deploy VM in the networks he owns + # Validate that Admin should be able to deploy VM in the networks if it is self-owned """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -626,12 +626,12 @@ class TestIsolatedNetwork(cloudstackTestCase): self.assertEqual(vm.state.lower() == RUNNING.lower(), True, - "Admin User is not able to deploy VM in his own network") + "Admin User is not able to deploy VM in their own network") @attr("simulator_only", tags=["advanced"], required_hardware="false") def 
test_12_deployvm_admin_foruserinsamedomain(self): """ - # Validate that Admin should be able to deploy Vm for users in his domain + # Validate that Admin should be able to deploy Vm for users in their domain """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -650,12 +650,12 @@ class TestIsolatedNetwork(cloudstackTestCase): self.cleanup.append(vm) self.assertEqual(vm.state.lower() == RUNNING.lower() and vm.account == self.account_roota.name and vm.domainid == self.account_roota.domainid, True, - "Admin User is not able to deploy VM for users in his domain") + "Admin User is not able to deploy VM for users in their domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_13_deployvm_admin_foruserinotherdomain(self): """ - # Validate that Admin should be able to deploy VM for users in his sub domain + # Validate that Admin should be able to deploy VM for users in their sub domain """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -707,7 +707,7 @@ class TestIsolatedNetwork(cloudstackTestCase): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_14_deployvm_domaindmin(self): """ - # Validate that Domain admin should be able to deploy vm for himslef + # Validate that Domain admin should be able to deploy vm with self-ownership """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -725,12 +725,12 @@ class TestIsolatedNetwork(cloudstackTestCase): self.assertEqual(vm.state.lower() == RUNNING.lower(), True, - "Domain admin User is not able to deploy VM for himself") + "Domain admin User is not able to deploy VM with self-ownership") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_15_deployvm_domaindmin_foruserinsamedomain(self): """ - # Validate that Domain admin 
should be able to deploy vm for users in his domain + # Validate that Domain admin should be able to deploy vm for users in their domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -749,12 +749,12 @@ class TestIsolatedNetwork(cloudstackTestCase): self.cleanup.append(vm) self.assertEqual(vm.state.lower() == RUNNING.lower() and vm.account == self.account_d1a.name and vm.domainid == self.account_d1a.domainid, True, - "Domain admin User is not able to deploy VM for other users in his domain") + "Domain admin User is not able to deploy VM for other users in their domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_16_deployvm_domaindmin_foruserinsubdomain(self): """ - # Validate that Domain admin should be able to deploy vm for users in his sub domain + # Validate that Domain admin should be able to deploy vm for users in their sub domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -773,12 +773,12 @@ class TestIsolatedNetwork(cloudstackTestCase): self.cleanup.append(vm) self.assertEqual(vm.state.lower() == RUNNING.lower() and vm.account == self.account_d11a.name and vm.domainid == self.account_d11a.domainid, True, - "Domain admin User is not able to deploy vm for himself") + "Domain admin User is not able to deploy vm with self-ownership") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_17_deployvm_domaindmin_forcrossdomainuser(self): """ - # Validate that Domain admin should not be able allowed to deploy vm for users not in his sub domain + # Validate that Domain admin should not be able allowed to deploy vm for users not in their sub domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -798,7 +798,7 @@ class TestIsolatedNetwork(cloudstackTestCase): 
self.cleanup.append(vm) self.fail("Domain admin is allowed to deploy vm for users not in hos domain ") except Exception as e: - self.debug("When Domain admin tries to deploy vm for users in his sub domain %s" % e) + self.debug("When Domain admin tries to deploy vm for users in their sub domain %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN): self.fail("Error message validation failed when Domain admin tries to deploy vm for users not in hos domain ") @@ -822,18 +822,18 @@ class TestIsolatedNetwork(cloudstackTestCase): domainid=self.account_d11a.domainid ) self.cleanup.append(vm) - self.fail("Domain admin is allowed to deploy vm for users in a network that does not belong to him ") + self.fail("Domain admin is allowed to deploy vm for users in a network that is not self-owned ") except Exception as e: self.debug("When domain admin tries to deploy vm for users in network that does not belong to the user %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.UNABLE_TO_USE_NETWORK): - self.fail("Error message validation failed when Domain admin tries to deploy vm for users in a network that does not belong to him ") + self.fail("Error message validation failed when Domain admin tries to deploy vm for users in a network that is not self-owned ") ## Test cases relating to deploying VM as regular user @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_18_deployvm_user(self): """ - # Validate that Regular should be able to deploy vm for himslef + # Validate that Regular should be able to deploy vm with self-ownership """ self.apiclient.connection.apiKey = self.user_d1a_apikey self.apiclient.connection.securityKey = self.user_d1a_secretkey @@ -850,12 +850,12 @@ class TestIsolatedNetwork(cloudstackTestCase): self.cleanup.append(vm) self.assertEqual(vm.state.lower() == RUNNING.lower(), True, - "User is not able to deploy vm for himself") + "User is 
not able to deploy vm with self-ownership") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_19_deployvm_user_foruserinsamedomain(self): """ - # Validate that Regular user should NOT be able to deploy vm for users in his domain + # Validate that Regular user should NOT be able to deploy vm for users in their domain """ self.apiclient.connection.apiKey = self.user_d1a_apikey self.apiclient.connection.securityKey = self.user_d1a_secretkey @@ -873,16 +873,16 @@ class TestIsolatedNetwork(cloudstackTestCase): domainid=self.account_d1b.domainid ) self.cleanup.append(vm) - self.fail("Regular user is allowed to deploy vm for other users in his domain ") + self.fail("Regular user is allowed to deploy vm for other users in their domain ") except Exception as e: - self.debug("When user tries to deploy vm for users in his domain %s" % e) + self.debug("When user tries to deploy vm for users in their domain %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_ACCOUNT): - self.fail("Error message validation failed when Regular user tries to deploy vm for other users in his domain ") + self.fail("Error message validation failed when Regular user tries to deploy vm for other users in their domain ") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_20_deployvm_user_foruserincrossdomain(self): """ - # Validate that Regular user should NOT be able to deploy vm for users in his domain + # Validate that Regular user should NOT be able to deploy vm for users in their domain """ self.apiclient.connection.apiKey = self.user_d1a_apikey self.apiclient.connection.securityKey = self.user_d1a_secretkey @@ -900,16 +900,16 @@ class TestIsolatedNetwork(cloudstackTestCase): domainid=self.account_d2a.domainid ) self.cleanup.append(vm) - self.fail("Regular user is allowed to deploy vm for users not in his domain ") + self.fail("Regular user is allowed to deploy vm for users not in 
their domain ") except Exception as e: self.debug("When user tries to deploy vm for users n different domain %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_ACCOUNT): - self.fail("Error message validation failed when Regular user tries to deploy vm for users not in his domain ") + self.fail("Error message validation failed when Regular user tries to deploy vm for users not in their domain ") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_20_1_deployvm_user_incrossnetwork(self): """ - #Validate that User should not be able deploy VM in a network that does not belong to him + #Validate that User should not be able deploy VM in a network that is not self-owned """ self.apiclient.connection.apiKey = self.user_d11a_apikey self.apiclient.connection.securityKey = self.user_d11a_secretkey @@ -924,18 +924,18 @@ class TestIsolatedNetwork(cloudstackTestCase): networkids=self.network_d11b.id, ) self.cleanup.append(vm) - self.fail("User is allowed to deploy VM in a network that does not belong to him ") + self.fail("User is allowed to deploy VM in a network that is not self-owned ") except Exception as e: self.debug("When user tries to deploy vm in a network that does not belong to him %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.UNABLE_TO_USE_NETWORK): - self.fail("Error message validation failed when User is allowed to deploy VM in a network that does not belong to him ") + self.fail("Error message validation failed when User is allowed to deploy VM in a network that is not self-owned ") ## Test cases relating to restart Network as admin user @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_21_restartNetwork_admin(self): """ - #Validate that Admin should be able to restart network for networks he owns + #Validate that Admin should be able to restart network for networks if it is self-owned """ 
self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -944,12 +944,12 @@ class TestIsolatedNetwork(cloudstackTestCase): self.assertEqual(restartResponse.success, True, - "Admin User is not able to restart network he owns") + "Admin User is not able to restart network if it is self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_22_restartNetwork_admin_foruserinsamedomain(self): """ - # Validate that Admin should be able to restart network for users in his domain + # Validate that Admin should be able to restart network for users in their domain """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -958,12 +958,12 @@ class TestIsolatedNetwork(cloudstackTestCase): self.assertEqual(restartResponse.success, True, - "Admin User is not able to restart network owned by users his domain") + "Admin User is not able to restart network owned by users in their domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_23_restartNetwork_admin_foruserinotherdomain(self): """ - # Validate that Admin should be able to restart network for users in his sub domain + # Validate that Admin should be able to restart network for users in their sub domain """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -980,7 +980,7 @@ class TestIsolatedNetwork(cloudstackTestCase): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_24_restartNetwork_domaindmin(self): """ - # Validate that Domain admin should be able to restart network for himslef + # Validate that Domain admin should be able to restart network with self-ownership """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -989,12 +989,12 @@ class 
TestIsolatedNetwork(cloudstackTestCase): self.assertEqual(restartResponse.success, True, - "Domain admin User is not able to restart network for himself") + "Domain admin User is not able to restart network with self-ownership") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_25_restartNetwork_domaindmin_foruserinsamedomain(self): """ - # Validate that Domain admin should be able to restart network for users in his domain + # Validate that Domain admin should be able to restart network for users in their domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -1002,12 +1002,12 @@ class TestIsolatedNetwork(cloudstackTestCase): restartResponse = self.network_d1a.restart(self.apiclient) self.assertEqual(restartResponse.success, True, - "Domain admin User is not able to restart network for other users in his domain") + "Domain admin User is not able to restart network for other users in their domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_26_restartNetwork_domaindmin_foruserinsubdomain(self): """ - # Validate that Domain admin should be able to restart network for users in his sub domain + # Validate that Domain admin should be able to restart network for users in their sub domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -1015,30 +1015,30 @@ class TestIsolatedNetwork(cloudstackTestCase): restartResponse = self.network_d11a.restart(self.apiclient) self.assertEqual(restartResponse.success, True, - "Domain admin User is not able to restart network he owns") + "Domain admin User is not able to restart network if it is self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_27_restartNetwork_domaindmin_forcrossdomainuser(self): """ - # Validate that Domain admin should be able to restart network for users in 
his sub domain + # Validate that Domain admin should be able to restart network for users in their sub domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey try: restartResponse = self.network_d2a.restart(self.apiclient) - self.fail("Domain admin is allowed to restart network for users not in his domain ") + self.fail("Domain admin is allowed to restart network for users not in their domain ") except Exception as e: - self.debug("When Domain admin tries to restart network for users in his sub domain %s" % e) + self.debug("When Domain admin tries to restart network for users in their sub domain %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN): - self.fail("Error message validation failed when Domain admin tries to restart network for users not in his domain ") + self.fail("Error message validation failed when Domain admin tries to restart network for users not in their domain ") ## Test cases relating restart network as regular user @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_28_restartNetwork_user(self): """ - #Validate that Regular should be able to restart network for himslef + #Validate that Regular should be able to restart network with self-ownership """ self.apiclient.connection.apiKey = self.user_d1a_apikey self.apiclient.connection.securityKey = self.user_d1a_secretkey @@ -1046,23 +1046,23 @@ class TestIsolatedNetwork(cloudstackTestCase): restartResponse = self.network_d1a.restart(self.apiclient) self.assertEqual(restartResponse.success, True, - "User is not able to restart network he owns") + "User is not able to restart network if it is self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_29_restartNetwork_user_foruserinsamedomain(self): """ - #Validate that Regular user should NOT be able to restart network for users in his domain + 
#Validate that Regular user should NOT be able to restart network for users in their domain """ self.apiclient.connection.apiKey = self.user_d1a_apikey self.apiclient.connection.securityKey = self.user_d1a_secretkey try: restartResponse = self.network_d1b.restart(self.apiclient) - self.fail("Regular user is allowed to restart network for users in his domain ") + self.fail("Regular user is allowed to restart network for users in their domain ") except Exception as e: - self.debug("When user tries to restart network for users in his domain %s" % e) + self.debug("When user tries to restart network for users in their domain %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_ACCOUNT): - self.fail("Error message validation failed when Regular user tries to restart network for users in his domain ") + self.fail("Error message validation failed when Regular user tries to restart network for users in their domain ") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_30_restartNetwork_user_foruserinotherdomain(self): @@ -1074,11 +1074,11 @@ class TestIsolatedNetwork(cloudstackTestCase): try: restartResponse = self.network_d11a.restart(self.apiclient) - self.fail("Regular user is allowed to restart network for users not in his domain ") + self.fail("Regular user is allowed to restart network for users not in their domain ") except Exception as e: self.debug("When user tries to restart network for users in other domain %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_ACCOUNT): - self.fail("Error message validation failed when Regular user is allowed to restart network for users not in his domain ") + self.fail("Error message validation failed when Regular user is allowed to restart network for users not in their domain ") @staticmethod def generateKeysForUser(apiclient, account): diff --git 
a/test/integration/component/test_acl_isolatednetwork_delete.py b/test/integration/component/test_acl_isolatednetwork_delete.py index 379ed3d6e96..bf464d6d6d9 100644 --- a/test/integration/component/test_acl_isolatednetwork_delete.py +++ b/test/integration/component/test_acl_isolatednetwork_delete.py @@ -348,7 +348,7 @@ class TestIsolatedNetworkDelete(cloudstackTestCase): @attr("simulator_only",tags=["advanced"],required_hardware="false") def test_deleteNetwork_admin(self): """ - Validate that Admin should be able to delete network he owns + Validate that Admin should be able to delete network that is self-owned """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -358,14 +358,14 @@ class TestIsolatedNetworkDelete(cloudstackTestCase): self.assertEqual(response, None, - "Admin User is not able to restart network he owns") + "Admin User is not able to restart network that is self-owned") self._cleanup.remove(self.network_root) @attr("simulator_only",tags=["advanced"],required_hardware="false") def test_deleteNetwork_admin_foruserinsamedomain(self): """ - Validate that Admin should be able to delete network for users in his domain + Validate that Admin should be able to delete network for users in their domain """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -375,12 +375,12 @@ class TestIsolatedNetworkDelete(cloudstackTestCase): self.assertEqual(response, None, - "Admin User is not able to delete network owned by users his domain") + "Admin User is not able to delete network owned by users in their domain") self._cleanup.remove(self.network_roota) @attr("simulator_only",tags=["advanced"],required_hardware="false") def test_deleteNetwork_admin_foruserinotherdomain(self): - # Validate that Admin should be able to delete network for users in his sub domain + # Validate that Admin should be able to delete network for 
users in their sub domain self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -397,7 +397,7 @@ class TestIsolatedNetworkDelete(cloudstackTestCase): @attr("simulator_only",tags=["advanced"],required_hardware="false") def test_deleteNetwork_domaindmin(self): """ - Validate that Domain admin should be able to delete network for himslef + Validate that Domain admin should be able to delete network with self-ownership """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -407,13 +407,13 @@ class TestIsolatedNetworkDelete(cloudstackTestCase): self.assertEqual(response, None, - "Domain admin User is not able to delete a network he owns") + "Domain admin User is not able to delete a network that is self-owned") self._cleanup.remove(self.network_d1) @attr("simulator_only",tags=["advanced"],required_hardware="false") def test_deleteNetwork_domaindmin_foruserinsamedomain(self): """ - Validate that Domain admin should be able to delete network for users in his domain + Validate that Domain admin should be able to delete network for users in their domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -428,7 +428,7 @@ class TestIsolatedNetworkDelete(cloudstackTestCase): @attr("simulator_only",tags=["advanced"],required_hardware="false") def test_deleteNetwork_domaindmin_foruserinsubdomain(self): """ - Validate that Domain admin should be able to delete network for users in his sub domain + Validate that Domain admin should be able to delete network for users in their sub domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -444,7 +444,7 @@ class TestIsolatedNetworkDelete(cloudstackTestCase): @attr("simulator_only",tags=["advanced"],required_hardware="false") def 
test_deleteNetwork_domaindmin_forcrossdomainuser(self): """ - Validate that Domain admin should be able to delete network for users in his sub domain + Validate that Domain admin should be able to delete network for users in their sub domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -452,18 +452,18 @@ class TestIsolatedNetworkDelete(cloudstackTestCase): try: response = self.network_d2a.delete(self.apiclient) self._cleanup.remove(self.network_d2a) - self.fail("Domain admin is allowed to delete network for users not in his domain ") + self.fail("Domain admin is allowed to delete network for users not in their domain ") except Exception as e: self.debug ("When Domain admin tries to delete network for user in a different domain %s" %e) if not CloudstackAclException.verifyMsginException(e,CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN): - self.fail("Error message validation failed when Domain admin tries to delete network for users not in his domain ") + self.fail("Error message validation failed when Domain admin tries to delete network for users not in their domain ") ## Test cases relating deleting network as regular user @attr("simulator_only",tags=["advanced"],required_hardware="false") def test_deleteNetwork_user(self): """ - Validate that Regular should be able to delete network for himslef + Validate that Regular should be able to delete network with self-ownership """ self.apiclient.connection.apiKey = self.user_d111a_apikey self.apiclient.connection.securityKey = self.user_d111a_secretkey @@ -473,13 +473,13 @@ class TestIsolatedNetworkDelete(cloudstackTestCase): self.assertEqual(response, None, - "User is not able to delete a network he owns") + "User is not able to delete a network that is self-owned") self._cleanup.remove(self.network_d111a) @attr("simulator_only",tags=["advanced"],required_hardware="false") def test_deleteNetwork_user_foruserinsamedomain(self): """ - 
Validate that Regular user should NOT be able to delete network for users in his domain + Validate that Regular user should NOT be able to delete network for users in their domain """ self.apiclient.connection.apiKey = self.user_d111a_apikey self.apiclient.connection.securityKey = self.user_d111a_secretkey @@ -487,11 +487,11 @@ class TestIsolatedNetworkDelete(cloudstackTestCase): try: response = self.network_d111b.delete(self.apiclient) self._cleanup.remove(self.network_d111b) - self.fail("Regular user is allowed to delete network for users in his domain ") + self.fail("Regular user is allowed to delete network for users in their domain ") except Exception as e: - self.debug ("When user tries to delete network for users in his domain %s" %e) + self.debug ("When user tries to delete network for users in their domain %s" %e) if not CloudstackAclException.verifyMsginException(e,CloudstackAclException.NO_PERMISSION_TO_OPERATE_ACCOUNT): - self.fail("Regular user is allowed to delete network for users in his domain ") + self.fail("Regular user is allowed to delete network for users in their domain ") @attr("simulator_only",tags=["advanced"],required_hardware="false") def test_deleteNetwork_user_foruserinotherdomain(self): @@ -505,11 +505,11 @@ class TestIsolatedNetworkDelete(cloudstackTestCase): try: response = self.network_d11b.delete(self.apiclient) self._cleanup.remove(self.network_d11b) - self.fail("Regular user is allowed to delete network for users not in his domain ") + self.fail("Regular user is allowed to delete network for users not in their domain ") except Exception as e: self.debug ("When user tries to delete network for users in other domain %s" %e) if not CloudstackAclException.verifyMsginException(e,CloudstackAclException.NO_PERMISSION_TO_OPERATE_ACCOUNT): - self.fail("Error message validation failed when Regular user tries to delete network for users not in his domain ") + self.fail("Error message validation failed when Regular user tries to delete 
network for users not in their domain ") @staticmethod def generateKeysForUser(apiclient,account): diff --git a/test/integration/component/test_acl_listsnapshot.py b/test/integration/component/test_acl_listsnapshot.py index be280b4f223..8bf674ba688 100644 --- a/test/integration/component/test_acl_listsnapshot.py +++ b/test/integration/component/test_acl_listsnapshot.py @@ -2623,7 +2623,7 @@ class TestSnapshotList(cloudstackTestCase): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listSnapshot_by_id_as_domainadmin_owns(self): """ - Domain admin should be able to list Snapshots that he owns by passing uuid in "id" parameter + Domain admin should be able to list Snapshots that are self-owned by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2632,16 +2632,16 @@ class TestSnapshotList(cloudstackTestCase): self.assertNotEqual(SnapshotList, None, - "Domain Admin is not able to list Snapshotss that he owns") + "Domain Admin is not able to list Snapshots that are self-owned") self.assertEqual(len(SnapshotList), 1, - "Domain Admin is not able to list Snapshotss that belongs to him") + "Domain Admin is not able to list Snapshots that are self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listSnapshot_by_id_as_domainadmin_ownedbyusersindomain(self): """ - Domain admin should be able to list Snapshots that is owned by any account in his domain by passing uuid in "id" parameter + Domain admin should be able to list Snapshots that is owned by any account in their domain by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2650,16 +2650,16 @@ class TestSnapshotList(cloudstackTestCase): self.assertNotEqual(SnapshotList1, None, - "Domain Admin is not able to list Snapshotss from his domain") + "Domain Admin is not able to list Snapshots from their domain") self.assertEqual(len(SnapshotList1), 1, - "Domain Admin is not 
able to list Snapshotss from his domain") + "Domain Admin is not able to list Snapshots from their domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listSnapshot_by_id_as_domainadmin_ownedbyusersinsubdomain(self): """ - Domain admin should be able to list Snapshots that is owned by any account in his sub-domain by passing uuid in "id" parameter + Domain admin should be able to list Snapshots that is owned by any account in their sub-domain by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2668,16 +2668,16 @@ class TestSnapshotList(cloudstackTestCase): self.assertNotEqual(SnapshotList2, None, - "Domain Admin is not able to list Snapshotss from his sub domain") + "Domain Admin is not able to list Snapshots from their sub domain") self.assertEqual(len(SnapshotList2), 1, - "Domain Admin is not able to list Snapshotss from his sub domain") + "Domain Admin is not able to list Snapshots from their sub domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listSnapshot_by_id_as_domainadmin_ownedbyusersnotindomain(self): """ - Domain admin should not be able to list Snapshots that is owned by account that is not in his domain by passing uuid in "id" parameter + Domain admin should not be able to list Snapshots that is owned by account that is not in their domain by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2686,12 +2686,12 @@ class TestSnapshotList(cloudstackTestCase): self.assertEqual(SnapshotList3, None, - "Domain Admin is able to list Snapshotss from other domains!!!") + "Domain Admin is able to list Snapshots from other domains!!!") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listSnapshot_by_id_as_domainadmin_ownedbyusersinsubdomain2(self): """ - Domain admin should be able to list Snapshots that is owned by account that is in his sub domains by passing uuid in "id" 
parameter + Domain admin should be able to list Snapshots that is owned by account that is in their sub domains by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2700,16 +2700,16 @@ class TestSnapshotList(cloudstackTestCase): self.assertNotEqual(SnapshotList4, None, - "Domain Admin is not able to list Snapshotss from his subdomain") + "Domain Admin is not able to list Snapshots from their subdomain") self.assertEqual(len(SnapshotList4), 1, - "Domain Admin is not able to list Snapshotss from his sub domains") + "Domain Admin is not able to list Snapshots from their sub domains") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listSnapshot_by_id_as_rootadmin_owns(self): """ - ROOT admin should be able to list Snapshots that is owned by account in his domains by passing uuid in "id" parameter + ROOT admin should be able to list Snapshots that is owned by account in their domains by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_a_apikey @@ -2717,10 +2717,10 @@ class TestSnapshotList(cloudstackTestCase): SnapshotList1 = Snapshot.list(self.apiclient, id=self.vm_a_snapshot.id) self.assertNotEqual(SnapshotList1, None, - "ROOT Admin not able to list Snapshotss that he owns") + "ROOT Admin not able to list Snapshots that are self-owned") self.assertEqual(len(SnapshotList1), 1, - "ROOT Admin not able to list Snapshotss that he owns") + "ROOT Admin not able to list Snapshots that are self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listSnapshot_by_id_as_rootadmin_Snapshotsownedbyothers(self): @@ -2734,22 +2734,22 @@ class TestSnapshotList(cloudstackTestCase): SnapshotList2 = Snapshot.list(self.apiclient, id=self.vm_d11a_snapshot.id) self.assertNotEqual(SnapshotList1, None, - "ROOT Admin not able to list Snapshotss from other domains") + "ROOT Admin not able to list Snapshots from other domains") 
self.assertNotEqual(SnapshotList2, None, - "ROOT Admin not able to list Snapshotss from other domains") + "ROOT Admin not able to list Snapshots from other domains") self.assertEqual(len(SnapshotList1), 1, - "ROOT Admin not able to list Snapshotss from other domains") + "ROOT Admin not able to list Snapshots from other domains") self.assertEqual(len(SnapshotList2), 1, - "ROOT Admin not able to list Snapshotss from other domains") + "ROOT Admin not able to list Snapshots from other domains") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listSnapshot_by_id_as_user_own(self): """ - Regular user should be able to list Snapshots that is owned by him by passing uuid in "id" parameter + Regular user should be able to list Snapshots that are self-owned by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d11a_apikey @@ -2758,11 +2758,11 @@ class TestSnapshotList(cloudstackTestCase): self.assertNotEqual(SnapshotList1, None, - "Regular User is not able to list Snapshotss that he owns") + "Regular User is not able to list Snapshots that are self-owned") self.assertEqual(len(SnapshotList1), 1, - "Regular User is not able to list Snapshotss that he owns") + "Regular User is not able to list Snapshots that are self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listSnapshot_by_id_as_user_snapshotfromsamedomaindifferentaccount(self): diff --git a/test/integration/component/test_acl_listvm.py b/test/integration/component/test_acl_listvm.py index 91d25a8ef03..b49461617c0 100644 --- a/test/integration/component/test_acl_listvm.py +++ b/test/integration/component/test_acl_listvm.py @@ -2597,7 +2597,7 @@ class TestVMList(cloudstackTestCase): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVM_by_id_as_domainadmin_owns(self): """ - # Domain admin should be able to list Vm that he owns by passing uuid in "id" parameter + # Domain admin should be able 
to list Vms that are self-owned by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2606,16 +2606,16 @@ class TestVMList(cloudstackTestCase): self.assertNotEqual(VMList, None, - "Domain Admin is not able to list Vms that he owns") + "Domain Admin is not able to list Vms that are self-owned") self.assertEqual(len(VMList), 1, - "Domain Admin is not able to list Vms that belongs to him") + "Domain Admin is not able to list Vms that are self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVM_by_id_as_domainadmin_ownedbyusersindomain(self): """ - # Domain admin should be able to list Vm that is owned by any account in his domain by passing uuid in "id" parameter + # Domain admin should be able to list Vm that is owned by any account in their domain by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2624,16 +2624,16 @@ class TestVMList(cloudstackTestCase): self.assertNotEqual(VMList1, None, - "Domain Admin is not able to list Vms from his domain") + "Domain Admin is not able to list Vms from their domain") self.assertEqual(len(VMList1), 1, - "Domain Admin is not able to list Vms from his domain") + "Domain Admin is not able to list Vms from their domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVM_by_id_as_domainadmin_ownedbyusersinsubdomain(self): """ - # Domain admin should be able to list Vm that is owned by any account in his sub-domain by passing uuid in "id" parameter + # Domain admin should be able to list Vm that is owned by any account in their sub-domain by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2642,16 +2642,16 @@ class TestVMList(cloudstackTestCase): self.assertNotEqual(VMList2, None, - "Domain Admin is not able to list Vms from his sub domain") + "Domain Admin is not able to list Vms from their sub domain") 
self.assertEqual(len(VMList2), 1, - "Domain Admin is not able to list Vms from his sub domain") + "Domain Admin is not able to list Vms from their sub domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVM_by_id_as_domainadmin_ownedbyusersnotindomain(self): """ - # Domain admin should not be able to list Vm that is owned by account that is not in his domain by passing uuid in "id" parameter + # Domain admin should not be able to list Vm that is owned by account that is not in their domain by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2665,7 +2665,7 @@ class TestVMList(cloudstackTestCase): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVM_by_id_as_domainadmin_ownedbyusersinsubdomain2(self): """ - # Domain admin should be able to list Vm that is owned by account that is in his sub domains by passing uuid in "id" parameter + # Domain admin should be able to list Vm that is owned by account that is in their sub domains by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2674,16 +2674,16 @@ class TestVMList(cloudstackTestCase): self.assertNotEqual(VMList4, None, - "Domain Admin is not able to list Vms from his subdomain") + "Domain Admin is not able to list Vms from their sub domains") self.assertEqual(len(VMList4), 1, - "Domain Admin is not able to list Vms from his sub domains") + "Domain Admin is not able to list Vms from their sub domains") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVM_by_id_as_rootadmin_owns(self): """ - # Domain admin should be able to list Vm that is owned by account that is in his sub domains by passing uuid in "id" parameter + # Domain admin should be able to list Vm that is owned by account that is in their sub domains by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_a_apikey @@ -2691,10 +2691,10 @@ 
class TestVMList(cloudstackTestCase): VMList1 = VirtualMachine.list(self.apiclient, id=self.vm_a.id) self.assertNotEqual(VMList1, None, - "ROOT Admin not able to list Vms that he owns") + "ROOT Admin not able to list Vms that are self-owned") self.assertEqual(len(VMList1), 1, - "ROOT Admin not able to list Vms that he owns") + "ROOT Admin not able to list Vms that are self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVM_by_id_as_rootadmin_Vmsownedbyothers(self): @@ -2723,7 +2723,7 @@ class TestVMList(cloudstackTestCase): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVM_by_id_as_user_own(self): """ - # Regular user should be able to list Vm that is owned by him by passing uuid in "id" parameter + # Regular user should be able to list Vms that are self-owned by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d11a_apikey @@ -2732,11 +2732,11 @@ class TestVMList(cloudstackTestCase): self.assertNotEqual(VMList1, None, - "Regular User is not able to list Vms that he owns") + "Regular User is not able to list Vms that are self-owned") self.assertEqual(len(VMList1), 1, - "Regular User is not able to list Vms that he owns") + "Regular User is not able to list Vms that are self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVM_by_id_as_user_vmfromsamedomaindifferentaccount(self): diff --git a/test/integration/component/test_acl_listvolume.py b/test/integration/component/test_acl_listvolume.py index 463bba310d2..be79da8e529 100644 --- a/test/integration/component/test_acl_listvolume.py +++ b/test/integration/component/test_acl_listvolume.py @@ -2603,7 +2603,7 @@ class TestVolumeList(cloudstackTestCase): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVolume_by_id_as_domainadmin_owns(self): """ - # Domain admin should be able to list Volumes that he owns by passing uuid in "id" 
parameter + # Domain admin should be able to list Volumes that are self-owned by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2612,16 +2612,16 @@ class TestVolumeList(cloudstackTestCase): self.assertNotEqual(VMList, None, - "Domain Admin is not able to list Volumes that he owns") + "Domain Admin is not able to list Volumes that are self-owned") self.assertEqual(len(VMList), 1, - "Domain Admin is not able to list Volumes that belongs to him") + "Domain Admin is not able to list Volumes that are self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVolume_by_id_as_domainadmin_ownedbyusersindomain(self): """ - # Domain admin should be able to list Volumes that is owned by any account in his domain by passing uuid in "id" parameter + # Domain admin should be able to list Volumes that is owned by any account in their domain by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2630,16 +2630,16 @@ class TestVolumeList(cloudstackTestCase): self.assertNotEqual(VMList1, None, - "Domain Admin is not able to list Volumes from his domain") + "Domain Admin is not able to list Volumes from their domain") self.assertEqual(len(VMList1), 1, - "Domain Admin is not able to list Volumes from his domain") + "Domain Admin is not able to list Volumes from their domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVolume_by_id_as_domainadmin_ownedbyusersinsubdomain(self): """ - # Domain admin should be able to list Volumes that is owned by any account in his sub-domain by passing uuid in "id" parameter + # Domain admin should be able to list Volumes that is owned by any account in their sub-domain by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2648,16 +2648,16 @@ class TestVolumeList(cloudstackTestCase): self.assertNotEqual(VMList2, None, - "Domain Admin is not able to 
list Volumes from his sub domain") + "Domain Admin is not able to list Volumes from their sub domain") self.assertEqual(len(VMList2), 1, - "Domain Admin is not able to list Volumes from his sub domain") + "Domain Admin is not able to list Volumes from their sub domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVolume_by_id_as_domainadmin_ownedbyusersnotindomain(self): """ - # Domain admin should not be able to list Volumes that is owned by account that is not in his domain by passing uuid in "id" parameter + # Domain admin should not be able to list Volumes that is owned by account that is not in their domain by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2671,7 +2671,7 @@ class TestVolumeList(cloudstackTestCase): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVolume_by_id_as_domainadmin_ownedbyusersinsubdomain2(self): """ - # Domain admin should be able to list Volumes that is owned by account that is in his sub domains by passing uuid in "id" parameter + # Domain admin should be able to list Volumes that is owned by account that is in their sub domains by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2680,16 +2680,16 @@ class TestVolumeList(cloudstackTestCase): self.assertNotEqual(VMList4, None, - "Domain Admin is not able to list Volumes from his subdomain") + "Domain Admin is not able to list Volumes from their subdomain") self.assertEqual(len(VMList4), 1, - "Domain Admin is not able to list Volumes from his sub domains") + "Domain Admin is not able to list Volumes from their sub domains") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVolume_by_id_as_rootadmin_owns(self): """ - # ROOT admin should be able to list Volumes that is owned by accounts in his domain by passing uuid in "id" parameter + # ROOT admin should be able to list Volumes that is owned 
by accounts in their domain by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_a_apikey @@ -2697,10 +2697,10 @@ class TestVolumeList(cloudstackTestCase): VMList1 = Volume.list(self.apiclient, id=self.vm_a_volume[0].id) self.assertNotEqual(VMList1, None, - "ROOT Admin not able to list Volumes that he owns") + "ROOT Admin not able to list Volumes that are self-owned") self.assertEqual(len(VMList1), 1, - "ROOT Admin not able to list Volumes that he owns") + "ROOT Admin not able to list Volumes that are self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVolume_by_id_as_rootadmin_Volumesownedbyothers(self): @@ -2729,7 +2729,7 @@ class TestVolumeList(cloudstackTestCase): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVolume_by_id_as_user_own(self): """ - # Regular user should be able to list Volumes that is owned by him by passing uuid in "id" parameter + # Regular user should be able to list Volumes that are self-owned by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d11a_apikey @@ -2738,11 +2738,11 @@ class TestVolumeList(cloudstackTestCase): self.assertNotEqual(VMList1, None, - "Regular User is not able to list Volumes that he owns") + "Regular User is not able to list Volumes that are self-owned") self.assertEqual(len(VMList1), 1, - "Regular User is not able to list Volumes that he owns") + "Regular User is not able to list Volumes that are self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVolume_by_id_as_user_volumefromsamedomaindifferentaccount(self): diff --git a/test/integration/component/test_acl_sharednetwork_deployVM-impersonation.py b/test/integration/component/test_acl_sharednetwork_deployVM-impersonation.py index d59ccb6165d..2bd62dc4470 100644 --- a/test/integration/component/test_acl_sharednetwork_deployVM-impersonation.py +++ 
b/test/integration/component/test_acl_sharednetwork_deployVM-impersonation.py @@ -1196,12 +1196,12 @@ class TestSharedNetworkImpersonation(cloudstackTestCase): accountid=self.account_d2a.name, domainid=self.account_d2a.domainid ) - self.fail("Domain admin user is able to Deploy VM for a domain user he does not have access to in a shared network with scope=domain with no subdomain access ") + self.fail("Domain admin user is able to Deploy VM for a domain user, but there is no access to in a shared network with scope=domain with no subdomain access ") except Exception as e: - self.debug("When a Domain admin user deploys a VM for a domain user he does not have access to in a shared network with scope=domain with no subdomain access %s" % e) + self.debug("When a Domain admin user deploys a VM for a domain user, but there is no access to in a shared network with scope=domain with no subdomain access %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN): self.fail( - "Error mesage validation failed when Domain admin user tries to Deploy VM for a domain user he does not have access to in a shared network with scope=domain with no subdomain access ") + "Error mesage validation failed when Domain admin user tries to Deploy VM for a domain user, but there is no access to in a shared network with scope=domain with no subdomain access ") ## Test cases relating to deploying Virtual Machine as Domain admin for other users in shared network with scope=Domain and no subdomain access diff --git a/test/integration/component/test_add_remove_network.py b/test/integration/component/test_add_remove_network.py index 91baa3fdb31..ac0ecc7c57b 100644 --- a/test/integration/component/test_add_remove_network.py +++ b/test/integration/component/test_add_remove_network.py @@ -309,7 +309,7 @@ class TestAddNetworkToVirtualMachine(cloudstackTestCase): self.debug("Filtered nics list: %s:" % nics) # Only the nics added to 
self.virtual_machine should be added to this list - # Nics added to his list are removed before execution of next test case because we are using + # Nics added to their list are removed before execution of next test case because we are using # same virtual machine in all test cases, so it is important that the common # virtual machine should contain only the default nic whenever new test case # execution starts diff --git a/test/integration/component/test_affinity_groups.py b/test/integration/component/test_affinity_groups.py index 445364877a7..9d4c486b33a 100644 --- a/test/integration/component/test_affinity_groups.py +++ b/test/integration/component/test_affinity_groups.py @@ -1648,7 +1648,7 @@ class TestAffinityGroupsAdminUser(cloudstackTestCase): list_aff_grps = AffinityGroup.list(self.api_client) self.assertNotEqual(list_aff_grps, [], "Admin not able to list Affinity " - "Groups belonging to him") + "Groups are owned by the admin") grp_names = [aff_grp1.name, aff_grp2.name] list_names = [] for grp in list_aff_grps: diff --git a/test/integration/component/test_egress_fw_rules.py b/test/integration/component/test_egress_fw_rules.py index fc64cc6a3df..e1b33bfa5eb 100644 --- a/test/integration/component/test_egress_fw_rules.py +++ b/test/integration/component/test_egress_fw_rules.py @@ -848,7 +848,7 @@ class TestEgressFWRules(cloudstackTestCase): @attr(tags=["advanced"], required_hardware="true") def test_13_egress_fr13(self): - """Test Redundant Router : Master failover + """Test Redundant Router : Primary failover """ # Validate the following: # 1. deploy VM using network offering with egress policy true. 
@@ -865,36 +865,36 @@ class TestEgressFWRules(cloudstackTestCase): listall=True) self.assertEqual(isinstance(routers, list), True, - "list router should return Master and backup routers") + "list router should return Primary and backup routers") self.assertEqual(len(routers), 2, - "Length of the list router should be 2 (Backup & master)") + "Length of the list router should be 2 (Backup & primary)") - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] - self.debug("Redundant states: %s, %s" % (master_router.redundantstate, + self.debug("Redundant states: %s, %s" % (primary_router.redundantstate, backup_router.redundantstate)) - self.debug("Stopping the Master router") + self.debug("Stopping the Primary router") try: - Router.stop(self.apiclient, id=master_router.id) + Router.stop(self.apiclient, id=primary_router.id) except Exception as e: - self.fail("Failed to stop master router: %s" % e) + self.fail("Failed to stop primary router: %s" % e) # wait for VR update state time.sleep(60) - self.debug("Checking state of the master router in %s" % self.network.name) + self.debug("Checking state of the primary router in %s" % self.network.name) routers = Router.list(self.apiclient, - id=master_router.id, + id=primary_router.id, listall=True) self.assertEqual(isinstance(routers, list), True, - "list router should return Master and backup routers") + "list router should return Primary and backup routers") self.exec_script_on_user_vm('ping -c 1 www.google.com', "| grep -oP \'\d+(?=% packet loss)\'", @@ -903,7 +903,7 @@ class TestEgressFWRules(cloudstackTestCase): @attr(tags=["advanced"], required_hardware="true") def test_13_1_egress_fr13(self): - """Test Redundant Router : Master failover + """Test Redundant Router : Primary failover """ # Validate the following: 
# 1. deploy VM using network offering with egress policy false. @@ -920,36 +920,36 @@ class TestEgressFWRules(cloudstackTestCase): listall=True) self.assertEqual(isinstance(routers, list), True, - "list router should return Master and backup routers") + "list router should return Primary and backup routers") self.assertEqual(len(routers), 2, - "Length of the list router should be 2 (Backup & master)") + "Length of the list router should be 2 (Backup & primary)") - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] - self.debug("Redundant states: %s, %s" % (master_router.redundantstate, + self.debug("Redundant states: %s, %s" % (primary_router.redundantstate, backup_router.redundantstate)) - self.debug("Stopping the Master router") + self.debug("Stopping the Primary router") try: - Router.stop(self.apiclient, id=master_router.id) + Router.stop(self.apiclient, id=primary_router.id) except Exception as e: - self.fail("Failed to stop master router: %s" % e) + self.fail("Failed to stop primary router: %s" % e) # wait for VR update state time.sleep(60) - self.debug("Checking state of the master router in %s" % self.network.name) + self.debug("Checking state of the primary router in %s" % self.network.name) routers = Router.list(self.apiclient, - id=master_router.id, + id=primary_router.id, listall=True) self.assertEqual(isinstance(routers, list), True, - "list router should return Master and backup routers") + "list router should return Primary and backup routers") self.exec_script_on_user_vm('ping -c 1 www.google.com', "| grep -oP \'\d+(?=% packet loss)\'", diff --git a/test/integration/component/test_ip_reservation.py b/test/integration/component/test_ip_reservation.py index c5341516f3a..7eaaec46cc6 100644 --- 
a/test/integration/component/test_ip_reservation.py +++ b/test/integration/component/test_ip_reservation.py @@ -38,7 +38,7 @@ from marvin.lib.common import (get_zone, createEnabledNetworkOffering, createNetworkRulesForVM, verifyNetworkState) -from marvin.codes import (PASS, FAIL, FAILED, UNKNOWN, FAULT, MASTER, +from marvin.codes import (PASS, FAIL, FAILED, UNKNOWN, FAULT, PRIMARY, NAT_RULE, STATIC_NAT_RULE) import netaddr @@ -375,7 +375,7 @@ class TestIpReservation(cloudstackTestCase): # steps # 1. create vm in isolated network with RVR and ip in guestvmcidr # 2. update guestvmcidr - # 3. List routers and stop the master router, wait till backup router comes up + # 3. List routers and stop the primary router, wait till backup router comes up # 4. create another VM # # validation @@ -383,7 +383,7 @@ class TestIpReservation(cloudstackTestCase): # 2. Existing guest vm ip should not be changed after reservation # 3. Newly created VM should get ip in guestvmcidr # 4. Verify that the network has two routers associated with it - # 5. Backup router should come up when master router is stopped""" + # 5. 
Backup router should come up when primary router is stopped""" subnet = "10.1."+str(random.randrange(1,254)) gateway = subnet +".1" @@ -413,29 +413,29 @@ class TestIpReservation(cloudstackTestCase): self.debug("Listing routers for network: %s" % isolated_network_RVR.name) routers = Router.list(self.apiclient, networkid=isolated_network_RVR.id, listall=True) self.assertEqual(validateList(routers)[0], PASS, "Routers list validation failed") - self.assertEqual(len(routers), 2, "Length of the list router should be 2 (Backup & master)") + self.assertEqual(len(routers), 2, "Length of the list router should be 2 (Backup & primary)") - if routers[0].redundantstate == MASTER: - master_router = routers[0] + if routers[0].redundantstate == PRIMARY: + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] - self.debug("Stopping router ID: %s" % master_router.id) + self.debug("Stopping router ID: %s" % primary_router.id) try: - Router.stop(self.apiclient, id=master_router.id) + Router.stop(self.apiclient, id=primary_router.id) except Exception as e: - self.fail("Failed to stop master router due to error %s" % e) + self.fail("Failed to stop primary router due to error %s" % e) # wait for VR to update state wait_for_cleanup(self.apiclient, ["router.check.interval"]) - result = verifyRouterState(master_router.id, [UNKNOWN,FAULT]) + result = verifyRouterState(primary_router.id, [UNKNOWN,FAULT]) if result[0] == FAIL: self.fail(result[1]) - result = verifyRouterState(backup_router.id, [MASTER]) + result = verifyRouterState(backup_router.id, [PRIMARY]) if result[0] == FAIL: self.fail(result[1]) diff --git a/test/integration/component/test_multiple_subnets_in_isolated_network.py b/test/integration/component/test_multiple_subnets_in_isolated_network.py index 80334955a81..278728b2adb 100644 --- a/test/integration/component/test_multiple_subnets_in_isolated_network.py +++ 
b/test/integration/component/test_multiple_subnets_in_isolated_network.py @@ -209,7 +209,7 @@ class TestMultiplePublicIpSubnets(cloudstackTestCase): if redundant_state == "FAULT": self.logger.debug("Skip as redundant_state is %s" % redundant_state) return - elif redundant_state == "MASTER": + elif redundant_state == "PRIMARY": command = 'ip link show |grep BROADCAST | egrep "%s" |grep "state DOWN" |wc -l' % publicNics elif redundant_state == "BACKUP": command = 'ip link show |grep BROADCAST | egrep "%s" |grep "state UP" |wc -l' % publicNics diff --git a/test/integration/component/test_multiple_subnets_in_isolated_network_rvr.py b/test/integration/component/test_multiple_subnets_in_isolated_network_rvr.py index 0565b98d8e4..d83571f9210 100644 --- a/test/integration/component/test_multiple_subnets_in_isolated_network_rvr.py +++ b/test/integration/component/test_multiple_subnets_in_isolated_network_rvr.py @@ -209,7 +209,7 @@ class TestMultiplePublicIpSubnets(cloudstackTestCase): if redundant_state == "FAULT": self.logger.debug("Skip as redundant_state is %s" % redundant_state) return - elif redundant_state == "MASTER": + elif redundant_state == "PRIMARY": command = 'ip link show |grep BROADCAST | egrep "%s" |grep "state DOWN" |wc -l' % publicNics elif redundant_state == "BACKUP": command = 'ip link show |grep BROADCAST | egrep "%s" |grep "state UP" |wc -l' % publicNics diff --git a/test/integration/component/test_multiple_subnets_in_vpc.py b/test/integration/component/test_multiple_subnets_in_vpc.py index f20f7c4a8bc..9167f156e19 100644 --- a/test/integration/component/test_multiple_subnets_in_vpc.py +++ b/test/integration/component/test_multiple_subnets_in_vpc.py @@ -213,7 +213,7 @@ class TestMultiplePublicIpSubnets(cloudstackTestCase): if redundant_state == "FAULT": self.logger.debug("Skip as redundant_state is %s" % redundant_state) return - elif redundant_state == "MASTER": + elif redundant_state == "PRIMARY": command = 'ip link show |grep BROADCAST | egrep "%s" 
|grep "state DOWN" |wc -l' % publicNics elif redundant_state == "BACKUP": command = 'ip link show |grep BROADCAST | egrep "%s" |grep "state UP" |wc -l' % publicNics diff --git a/test/integration/component/test_multiple_subnets_in_vpc_rvr.py b/test/integration/component/test_multiple_subnets_in_vpc_rvr.py index ca7731472b6..7c7127307b5 100644 --- a/test/integration/component/test_multiple_subnets_in_vpc_rvr.py +++ b/test/integration/component/test_multiple_subnets_in_vpc_rvr.py @@ -213,7 +213,7 @@ class TestMultiplePublicIpSubnets(cloudstackTestCase): if redundant_state == "FAULT": self.logger.debug("Skip as redundant_state is %s" % redundant_state) return - elif redundant_state == "MASTER": + elif redundant_state == "PRIMARY": command = 'ip link show |grep BROADCAST | egrep "%s" |grep "state DOWN" |wc -l' % publicNics elif redundant_state == "BACKUP": command = 'ip link show |grep BROADCAST | egrep "%s" |grep "state UP" |wc -l' % publicNics diff --git a/test/integration/component/test_persistent_networks.py b/test/integration/component/test_persistent_networks.py index 97b868802d6..079677d4ffe 100644 --- a/test/integration/component/test_persistent_networks.py +++ b/test/integration/component/test_persistent_networks.py @@ -879,7 +879,7 @@ class TestPersistentNetworks(cloudstackTestCase): # 1. create account and isolated network with network # offering which has ispersistent field enabled # and supporting Redundant Virtual Router in it - # 2. Check the Master and Backup Routers are present + # 2. Check the Primary and Backup Routers are present # 3. Deploy VM ,acquire IP, create Firewall, NAT rules # 4. 
Verify the working of NAT, Firewall rules # @@ -928,7 +928,7 @@ class TestPersistentNetworks(cloudstackTestCase): self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)") + "Length of the list router should be 2 (Backup & Primary)") # Check if routers are reachable from the host for router in routers: diff --git a/test/integration/component/test_public_ip.py b/test/integration/component/test_public_ip.py index a37226f32fc..f131d0b71ab 100644 --- a/test/integration/component/test_public_ip.py +++ b/test/integration/component/test_public_ip.py @@ -473,7 +473,7 @@ class TestPublicIp(cloudstackTestCase): def test_03_list_publicip_user_domain(self): """ A regular user should be able to display public ip address - only in his domain + only in their domain Step 1: Create an isolated network in the user domain Step 2: Display all public ip address in that domain @@ -511,7 +511,7 @@ class TestPublicIp(cloudstackTestCase): listall=True, forvirtualnetwork=True) - # Step 3: Ensure that sub domain can list only the ip address in his domain + # Step 3: Ensure that sub domain can list only the ip address in their domain self.assertEqual( len(ipAddresses), 10, @@ -610,7 +610,7 @@ class TestPublicIp(cloudstackTestCase): def test_04_list_publicip_all_subdomains(self): """ A domain admin should be able to display public ip address - in his domain and also all child domains + in their domain and also all child domains Step 1: Display all public ip address in that domain and sub domain Step 2: Ensure that the count is 11 (all ip from parent domain and allocated from sub domain) @@ -677,7 +677,7 @@ class TestPublicIp(cloudstackTestCase): def test_05_list_publicip_user_domain(self): """ A domain admin should be able to display public ip address - in his domain and also all child domains + in their domain and also all child domains Step 1: Display all public ip address in that domain and sub domain Step 2: Ensure that the count is 20 diff --git 
a/test/integration/component/test_redundant_router_cleanups.py b/test/integration/component/test_redundant_router_cleanups.py index 34b1fb3bef9..2c0805f9fba 100644 --- a/test/integration/component/test_redundant_router_cleanups.py +++ b/test/integration/component/test_redundant_router_cleanups.py @@ -306,12 +306,12 @@ class TestRedundantRouterNetworkCleanups(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) self.debug("restarting network with cleanup=False") @@ -329,12 +329,12 @@ class TestRedundantRouterNetworkCleanups(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) for router in routers: self.assertEqual( @@ -440,12 +440,12 @@ class TestRedundantRouterNetworkCleanups(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) self.debug("restarting network with cleanup=True") @@ -463,12 +463,12 @@ class TestRedundantRouterNetworkCleanups(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup 
& master)" + "Length of the list router should be 2 (Backup & primary)" ) for router in routers: self.assertEqual( @@ -490,7 +490,7 @@ class TestRedundantRouterNetworkCleanups(cloudstackTestCase): # 4. stop the running user VM # 5. wait for network.gc time # 6. listRouters - # 7. start the routers MASTER and BACK + # 7. start the routers PRIMARY and BACK # 8. wait for network.gc time and listRouters # 9. delete the account @@ -577,12 +577,12 @@ class TestRedundantRouterNetworkCleanups(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) self.debug("Stopping the user VM: %s" % virtual_machine.name) @@ -616,7 +616,7 @@ class TestRedundantRouterNetworkCleanups(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) for router in routers: self.assertEqual( @@ -637,7 +637,7 @@ class TestRedundantRouterNetworkCleanups(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) for router in routers: self.assertEqual( @@ -658,7 +658,7 @@ class TestRedundantRouterNetworkCleanups(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) for router in routers: self.assertEqual( @@ -669,15 +669,15 @@ class TestRedundantRouterNetworkCleanups(cloudstackTestCase): return @attr(tags=["advanced", "advancedns"], required_hardware="false") - def test_restart_network_with_destroyed_masterVR(self): - """Test restarting 
RvR network without cleanup after destroying master VR + def test_restart_network_with_destroyed_primaryVR(self): + """Test restarting RvR network without cleanup after destroying primary VR """ # Steps to validate # 1. createNetwork using network offering for redundant virtual router # 2. listRouters in above network # 3. deployVM in above user account in the created network - # 4. Destroy master VR + # 4. Destroy primary VR # 5. restartNetwork cleanup=false # 6. Verify RVR status after network restart @@ -741,46 +741,46 @@ class TestRedundantRouterNetworkCleanups(cloudstackTestCase): self.assertEqual( validateList(routers)[0], PASS, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) - if routers[0].redundantstate == 'MASTER' and\ + if routers[0].redundantstate == 'PRIMARY' and\ routers[1].redundantstate == 'BACKUP': - master_router = routers[0] + primary_router = routers[0] backup_router = routers[1] - elif routers[1].redundantstate == 'MASTER' and \ + elif routers[1].redundantstate == 'PRIMARY' and \ routers[0].redundantstate == 'BACKUP': - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] else: self.fail("Both the routers in RVR are in BackupState - CLOUDSTACK-9015") Router.stop( self.apiclient, - id=master_router.id + id=primary_router.id ) Router.destroy( self.apiclient, - id=master_router.id + id=primary_router.id ) - masterVR = Router.list( + primaryVR = Router.list( self.apiclient, - id=master_router.id + id=primary_router.id ) - self.assertIsNone(masterVR, "Router is not destroyed") - new_master = Router.list( + self.assertIsNone(primaryVR, "Router is not destroyed") + new_primary = Router.list( self.apiclient, id=backup_router.id ) - self.assertEqual(validateList(new_master)[0], PASS, "Invalid response 
after vr destroy") + self.assertEqual(validateList(new_primary)[0], PASS, "Invalid response after vr destroy") self.assertEqual( - new_master[0].redundantstate, - "MASTER", - "Backup didn't switch to Master after destroying Master VR" + new_primary[0].redundantstate, + "PRIMARY", + "Backup didn't switch to Primary after destroying Primary VR" ) self.debug("restarting network with cleanup=False") @@ -798,12 +798,12 @@ class TestRedundantRouterNetworkCleanups(cloudstackTestCase): self.assertEqual( validateList(routers)[0], PASS, - "list router should return Master and backup routers afrer network restart" + "list router should return Primary and backup routers after network restart" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) for router in routers: self.assertEqual( @@ -811,12 +811,12 @@ class TestRedundantRouterNetworkCleanups(cloudstackTestCase): "Running", "Router state should be running" ) - if routers[0].redundantstate == 'MASTER' and\ + if routers[0].redundantstate == 'PRIMARY' and\ routers[1].redundantstate == 'BACKUP': - self.debug("Found master and backup VRs after network restart") + self.debug("Found primary and backup VRs after network restart") elif routers[0].redundantstate == 'BACKUP' and \ - routers[1].redundantstate == 'MASTER': - self.debug("Found master and backup routers") + routers[1].redundantstate == 'PRIMARY': + self.debug("Found primary and backup routers") else: self.fail("RVR is not in proper start after network restart") return diff --git a/test/integration/component/test_redundant_router_services.py b/test/integration/component/test_redundant_router_services.py index ba282b28834..02d4d72682f 100644 --- a/test/integration/component/test_redundant_router_services.py +++ b/test/integration/component/test_redundant_router_services.py @@ -209,7 +209,7 @@ class TestEnableVPNOverRvR(cloudstackTestCase): # 1.
listNetworks should show the created network in allocated state # 2. listRouters returns no running routers # 3. VMs should be deployed and in Running state - # 4. should list MASTER and BACKUP routers + # 4. should list PRIMARY and BACKUP routers # 5. listPublicIpAddresses for networkid should show acquired IP addr # 6. listRemoteAccessVpns for the network associated should show VPN # created @@ -299,12 +299,12 @@ class TestEnableVPNOverRvR(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) self.debug("Associating public IP for network: %s" % network.name) diff --git a/test/integration/component/test_redundant_router_upgrades.py b/test/integration/component/test_redundant_router_upgrades.py index 6a0efb02014..4e8454762de 100644 --- a/test/integration/component/test_redundant_router_upgrades.py +++ b/test/integration/component/test_redundant_router_upgrades.py @@ -228,7 +228,7 @@ class TestRvRUpgradeDowngrade(cloudstackTestCase): # one Router running for this network # 3. listNetworkOfferings should show craeted offering for RvR # 4. listNetworks shows the network still successfully implemented - # 5. listRouters shows two routers Up and Running (MASTER and BACKUP) + # 5. listRouters shows two routers Up and Running (PRIMARY and BACKUP) network_offerings = NetworkOffering.list( self.apiclient, @@ -349,7 +349,7 @@ class TestRvRUpgradeDowngrade(cloudstackTestCase): self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (MASTER & BACKUP)" + "Length of the list router should be 2 (PRIMARY & BACKUP)" ) return @@ -372,7 +372,7 @@ class TestRvRUpgradeDowngrade(cloudstackTestCase): # 1. listNetworkOfferings should show craeted offering for RvR # 2. 
listNetworks should show the created network in allocated state # 3. VM should be deployed and in Running state and there should be - # two routers (MASTER and BACKUP) for this network + # two routers (PRIMARY and BACKUP) for this network # 4. listNetworkOfferings should show craeted offering for VR # 5. listNetworks shows the network still successfully implemented # 6. listRouters shows only one router for this network in Running @@ -456,7 +456,7 @@ class TestRvRUpgradeDowngrade(cloudstackTestCase): self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (MASTER & BACKUP)" + "Length of the list router should be 2 (PRIMARY & BACKUP)" ) network_offerings = NetworkOffering.list( diff --git a/test/integration/component/test_volumes.py b/test/integration/component/test_volumes.py index 9662f6aa695..8f11473089e 100644 --- a/test/integration/component/test_volumes.py +++ b/test/integration/component/test_volumes.py @@ -1191,7 +1191,7 @@ class TestVolumes(cloudstackTestCase): 2. Create a user within this domain 3. As user in step 2. create a volume with standard disk offering 4. 
Ensure the volume is created in the domain and available to the - user in his listVolumes call + user in their listVolumes call """ dom = Domain.create( self.apiclient, diff --git a/test/integration/plugins/test_nicira_controller.py b/test/integration/plugins/test_nicira_controller.py index 9524a516fc1..aa3786679a5 100644 --- a/test/integration/plugins/test_nicira_controller.py +++ b/test/integration/plugins/test_nicira_controller.py @@ -127,13 +127,13 @@ class TestNiciraContoller(cloudstackTestCase): 'password': 'admin' } - cls.nicira_master_controller = cls.determine_master_controller( + cls.nicira_primary_controller = cls.determine_primary_controller( cls.nicira_hosts, cls.nicira_credentials ) cls.transport_zone_uuid = cls.get_transport_zone_from_controller( - cls.nicira_master_controller, + cls.nicira_primary_controller, cls.nicira_credentials ) @@ -213,7 +213,7 @@ class TestNiciraContoller(cloudstackTestCase): @classmethod - def determine_master_controller(cls, hosts, credentials): + def determine_primary_controller(cls, hosts, credentials): for host in hosts: r1 = requests.post("https://%s/ws.v1/login" % host, credentials, verify=False) r2 = requests.get("https://%s/ws.v1/control-cluster/status" % host, verify=False, cookies=r1.cookies) @@ -260,12 +260,12 @@ class TestNiciraContoller(cloudstackTestCase): return PhysicalNetwork.list(cls.api_client, name=nicira_physical_network_name)[0].id - def determine_slave_conroller(self, hosts, master_controller): - slaves = [ s for s in hosts if s != master_controller ] - if len(slaves) > 0: - return slaves[0] + def determine_secondary_conroller(self, hosts, primary_controller): + secondary = [ s for s in hosts if s != primary_controller ] + if len(secondary) > 0: + return secondary[0] else: - raise Exception("None of the supplied hosts (%s) is a Nicira slave" % hosts) + raise Exception("None of the supplied hosts (%s) is a Nicira secondary" % hosts) def add_nicira_device(self, hostname, l2gatewayserviceuuid=None): @@ 
-404,29 +404,29 @@ class TestNiciraContoller(cloudstackTestCase): ) - def get_master_router(self, routers): - master = [r for r in routers if r.redundantstate == 'MASTER'] - self.logger.debug("Found %s master router(s): %s" % (master.size(), master)) - return master[0] + def get_primary_router(self, routers): + primary = [r for r in routers if r.redundantstate == 'PRIMARY'] + self.logger.debug("Found %s primary router(s): %s" % (primary.size(), primary)) + return primary[0] def distribute_vm_and_routers_by_hosts(self, virtual_machine, routers): if len(routers) > 1: router = self.get_router(routers) - self.logger.debug("Master Router VM is %s" % router) + self.logger.debug("Primary Router VM is %s" % router) else: router = routers[0] if router.hostid == virtual_machine.hostid: - self.logger.debug("Master Router VM is on the same host as VM") + self.logger.debug("Primary Router VM is on the same host as VM") host = findSuitableHostForMigration(self.api_client, router.id) if host is not None: migrate_router(self.api_client, router.id, host.id) - self.logger.debug("Migrated Master Router VM to host %s" % host.name) + self.logger.debug("Migrated Primary Router VM to host %s" % host.name) else: - self.fail('No suitable host to migrate Master Router VM to') + self.fail('No suitable host to migrate Primary Router VM to') else: - self.logger.debug("Master Router VM is not on the same host as VM: %s, %s" % (router.hostid, virtual_machine.hostid)) + self.logger.debug("Primary Router VM is not on the same host as VM: %s, %s" % (router.hostid, virtual_machine.hostid)) def acquire_publicip(self, network): @@ -459,7 +459,7 @@ class TestNiciraContoller(cloudstackTestCase): @attr(tags = ["advanced", "smoke", "nicira"], required_hardware="true") def test_01_nicira_controller(self): - self.add_nicira_device(self.nicira_master_controller) + self.add_nicira_device(self.nicira_primary_controller) network = self.create_guest_isolated_network() virtual_machine = 
self.create_virtual_machine(network) @@ -478,19 +478,19 @@ class TestNiciraContoller(cloudstackTestCase): @attr(tags = ["advanced", "smoke", "nicira"], required_hardware="true") def test_02_nicira_controller_redirect(self): """ - Nicira clusters will redirect clients (in this case ACS) to the master node. + Nicira clusters will redirect clients (in this case ACS) to the primary node. This test assumes that a Nicira cluster is present and configured properly, and that it has at least two controller nodes. The test will check that ASC follows redirects by: - - adding a Nicira Nvp device that points to one of the cluster's slave controllers, + - adding a Nicira Nvp device that points to one of the cluster's secondary controllers, - create a VM in a Nicira backed network - If all is well, no matter what controller is specified (slaves or master), the vm (and respective router VM) + If all is well, no matter what controller is specified (secondary or primary), the vm (and respective router VM) should be created without issues. 
""" - nicira_slave = self.determine_slave_conroller(self.nicira_hosts, self.nicira_master_controller) - self.logger.debug("Nicira slave controller is: %s " % nicira_slave) + nicira_secondary = self.determine_secondary_conroller(self.nicira_hosts, self.nicira_primary_controller) + self.logger.debug("Nicira secondary controller is: %s " % nicira_secondary) - self.add_nicira_device(nicira_slave) + self.add_nicira_device(nicira_secondary) network = self.create_guest_isolated_network() virtual_machine = self.create_virtual_machine(network) @@ -508,7 +508,7 @@ class TestNiciraContoller(cloudstackTestCase): @attr(tags = ["advanced", "smoke", "nicira"], required_hardware="true") def test_03_nicira_tunnel_guest_network(self): - self.add_nicira_device(self.nicira_master_controller) + self.add_nicira_device(self.nicira_primary_controller) network = self.create_guest_isolated_network() virtual_machine = self.create_virtual_machine(network) public_ip = self.acquire_publicip(network) @@ -548,7 +548,7 @@ class TestNiciraContoller(cloudstackTestCase): CASE 1) Numerical VLAN_ID provided in network creation """ self.debug("Starting test case 1 for Shared Networks") - self.add_nicira_device(self.nicira_master_controller, self.l2gatewayserviceuuid) + self.add_nicira_device(self.nicira_primary_controller, self.l2gatewayserviceuuid) network = self.create_guest_shared_network_numerical_vlanid() virtual_machine = self.create_virtual_machine_shared_networks(network) @@ -569,7 +569,7 @@ class TestNiciraContoller(cloudstackTestCase): CASE 2) Logical Router's UUID as VLAN_ID provided in network creation """ self.debug("Starting test case 2 for Shared Networks") - self.add_nicira_device(self.nicira_master_controller, self.l2gatewayserviceuuid) + self.add_nicira_device(self.nicira_primary_controller, self.l2gatewayserviceuuid) network = self.create_guest_shared_network_uuid_vlanid() virtual_machine = self.create_virtual_machine_shared_networks(network) diff --git 
a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index 5ec2d49b039..d78f3a0afb3 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -573,13 +573,13 @@ class TestKubernetesCluster(cloudstackTestCase): return clusterResponse[0] return clusterResponse - def createKubernetesCluster(self, name, version_id, size=1, master_nodes=1): + def createKubernetesCluster(self, name, version_id, size=1, control_nodes=1): createKubernetesClusterCmd = createKubernetesCluster.createKubernetesClusterCmd() createKubernetesClusterCmd.name = name createKubernetesClusterCmd.description = name + "-description" createKubernetesClusterCmd.kubernetesversionid = version_id createKubernetesClusterCmd.size = size - createKubernetesClusterCmd.masternodes = master_nodes + createKubernetesClusterCmd.controlnodes = control_nodes createKubernetesClusterCmd.serviceofferingid = self.cks_service_offering.id createKubernetesClusterCmd.zoneid = self.zone.id createKubernetesClusterCmd.noderootdisksize = 10 @@ -622,10 +622,10 @@ class TestKubernetesCluster(cloudstackTestCase): response = self.apiclient.scaleKubernetesCluster(scaleKubernetesClusterCmd) return response - def getValidKubernetesCluster(self, size=1, master_nodes=1): + def getValidKubernetesCluster(self, size=1, control_nodes=1): cluster = k8s_cluster version = self.kubernetes_version_2 - if master_nodes != 1: + if control_nodes != 1: version = self.kubernetes_version_3 valid = True if cluster == None: @@ -642,7 +642,7 @@ class TestKubernetesCluster(cloudstackTestCase): self.debug("Existing cluster, k8s_cluster ID: %s not returned by list API" % cluster_id) if valid == True: try: - self.verifyKubernetesCluster(cluster, cluster.name, None, size, master_nodes) + self.verifyKubernetesCluster(cluster, cluster.name, None, size, control_nodes) self.debug("Existing Kubernetes cluster available with name %s" % cluster.name) 
except AssertionError as error: valid = False @@ -652,15 +652,15 @@ class TestKubernetesCluster(cloudstackTestCase): self.debug("Creating for Kubernetes cluster with name %s" % name) try: self.deleteAllLeftoverClusters() - cluster = self.createKubernetesCluster(name, version.id, size, master_nodes) - self.verifyKubernetesCluster(cluster, name, version.id, size, master_nodes) + cluster = self.createKubernetesCluster(name, version.id, size, control_nodes) + self.verifyKubernetesCluster(cluster, name, version.id, size, control_nodes) except Exception as ex: self.fail("Kubernetes cluster deployment failed: %s" % ex) except AssertionError as err: self.fail("Kubernetes cluster deployment failed during cluster verification: %s" % err) return cluster - def verifyKubernetesCluster(self, cluster_response, name, version_id=None, size=1, master_nodes=1): + def verifyKubernetesCluster(self, cluster_response, name, version_id=None, size=1, control_nodes=1): """Check if Kubernetes cluster is valid""" self.verifyKubernetesClusterState(cluster_response, 'Running') @@ -681,7 +681,7 @@ class TestKubernetesCluster(cloudstackTestCase): "Check KubernetesCluster zone {}, {}".format(cluster_response.zoneid, self.zone.id) ) - self.verifyKubernetesClusterSize(cluster_response, size, master_nodes) + self.verifyKubernetesClusterSize(cluster_response, size, control_nodes) db_cluster_name = self.dbclient.execute("select name from kubernetes_cluster where uuid = '%s';" % cluster_response.id)[0][0] @@ -709,7 +709,7 @@ class TestKubernetesCluster(cloudstackTestCase): "Check KubernetesCluster version {}, {}".format(cluster_response.kubernetesversionid, version_id) ) - def verifyKubernetesClusterSize(self, cluster_response, size=1, master_nodes=1): + def verifyKubernetesClusterSize(self, cluster_response, size=1, control_nodes=1): """Check if Kubernetes cluster node sizes are valid""" self.assertEqual( @@ -719,9 +719,9 @@ class TestKubernetesCluster(cloudstackTestCase): ) self.assertEqual( - 
cluster_response.masternodes, - master_nodes, - "Check KubernetesCluster master nodes {}, {}".format(cluster_response.masternodes, master_nodes) + cluster_response.controlnodes, + control_nodes, + "Check KubernetesCluster control nodes {}, {}".format(cluster_response.controlnodes, control_nodes) ) def verifyKubernetesClusterUpgrade(self, cluster_response, version_id): @@ -730,11 +730,11 @@ class TestKubernetesCluster(cloudstackTestCase): self.verifyKubernetesClusterState(cluster_response, 'Running') self.verifyKubernetesClusterVersion(cluster_response, version_id) - def verifyKubernetesClusterScale(self, cluster_response, size=1, master_nodes=1): + def verifyKubernetesClusterScale(self, cluster_response, size=1, control_nodes=1): """Check if Kubernetes cluster state and node sizes are valid after upgrade""" self.verifyKubernetesClusterState(cluster_response, 'Running') - self.verifyKubernetesClusterSize(cluster_response, size, master_nodes) + self.verifyKubernetesClusterSize(cluster_response, size, control_nodes) def stopAndVerifyKubernetesCluster(self, cluster_id): """Stop Kubernetes cluster and check if it is really stopped""" diff --git a/test/integration/smoke/test_privategw_acl.py b/test/integration/smoke/test_privategw_acl.py index 1111a488c90..da0ae6a0020 100644 --- a/test/integration/smoke/test_privategw_acl.py +++ b/test/integration/smoke/test_privategw_acl.py @@ -454,7 +454,7 @@ class TestPrivateGwACL(cloudstackTestCase): self.check_pvt_gw_connectivity(vm1, public_ip_1, [vm2.nic[0].ipaddress, vm3.nic[0].ipaddress, vm4.nic[0].ipaddress]) self.check_pvt_gw_connectivity(vm2, public_ip_2, [vm2.nic[0].ipaddress, vm3.nic[0].ipaddress, vm4.nic[0].ipaddress]) - self.stop_router_by_type("MASTER") + self.stop_router_by_type("PRIMARY") self.check_routers_state() self.check_private_gateway_interfaces() @@ -852,10 +852,10 @@ class TestPrivateGwACL(cloudstackTestCase): else: self.assertTrue(check_state == 1, "Routers private gateway interface should should have been 
removed!") - def check_routers_state(self, status_to_check="MASTER", expected_count=1): + def check_routers_state(self, status_to_check="PRIMARY", expected_count=1): routers = self.query_routers() - vals = ["MASTER", "BACKUP", "UNKNOWN"] + vals = ["PRIMARY", "BACKUP", "UNKNOWN"] cnts = [0, 0, 0] result = "UNKNOWN" diff --git a/test/integration/smoke/test_routers_network_ops.py b/test/integration/smoke/test_routers_network_ops.py index 2f122a281e2..cc1774cfc32 100644 --- a/test/integration/smoke/test_routers_network_ops.py +++ b/test/integration/smoke/test_routers_network_ops.py @@ -233,12 +233,12 @@ class TestRedundantIsolateNetworks(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) public_ips = list_publicIP( @@ -398,12 +398,12 @@ class TestRedundantIsolateNetworks(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) public_ips = list_publicIP( @@ -573,15 +573,15 @@ class TestRedundantIsolateNetworks(cloudstackTestCase): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) - vals = ["MASTER", "BACKUP", "UNKNOWN"] + vals = ["PRIMARY", "BACKUP", "UNKNOWN"] cnts = [0, 0, 0] result = "UNKNOWN" @@ -632,8 +632,8 @@ class 
TestRedundantIsolateNetworks(cloudstackTestCase): if result.count(vals[0]) == 1: cnts[vals.index(vals[0])] += 1 - if cnts[vals.index('MASTER')] != 1: - self.fail("No Master or too many master routers found %s" % cnts[vals.index('MASTER')]) + if cnts[vals.index('PRIMARY')] != 1: + self.fail("No Primary or too many primary routers found %s" % cnts[vals.index('PRIMARY')]) return diff --git a/test/integration/smoke/test_vpc_redundant.py b/test/integration/smoke/test_vpc_redundant.py index 9144985d6ce..d7dadb872b7 100644 --- a/test/integration/smoke/test_vpc_redundant.py +++ b/test/integration/smoke/test_vpc_redundant.py @@ -300,11 +300,11 @@ class TestVPCRedundancy(cloudstackTestCase): "Check that %s routers were indeed created" % count) def wait_for_vrrp(self): - # Wait until 3*advert_int+skew time to get one of the routers as MASTER + # Wait until 3*advert_int+skew time to get one of the routers as PRIMARY time.sleep(3 * self.advert_int + 5) - def check_routers_state(self,count=2, status_to_check="MASTER", expected_count=1, showall=False): - vals = ["MASTER", "BACKUP", "UNKNOWN", "FAULT"] + def check_routers_state(self,count=2, status_to_check="PRIMARY", expected_count=1, showall=False): + vals = ["PRIMARY", "BACKUP", "UNKNOWN", "FAULT"] cnts = [0, 0, 0, 0] self.wait_for_vrrp() @@ -543,7 +543,7 @@ class TestVPCRedundancy(cloudstackTestCase): self.add_nat_rules() self.do_vpc_test(False) - self.stop_router_by_type("MASTER") + self.stop_router_by_type("PRIMARY") self.check_routers_state(1) self.do_vpc_test(False) @@ -578,11 +578,11 @@ class TestVPCRedundancy(cloudstackTestCase): self.add_nat_rules() self.do_vpc_test(False) - self.reboot_router_by_type("MASTER") + self.reboot_router_by_type("PRIMARY") self.check_routers_state() self.do_vpc_test(False) - self.reboot_router_by_type("MASTER") + self.reboot_router_by_type("PRIMARY") self.check_routers_state() self.do_vpc_test(False) @@ -615,7 +615,7 @@ class TestVPCRedundancy(cloudstackTestCase): # Router will be in FAULT 
state, i.e. keepalived is stopped self.check_routers_state(status_to_check="FAULT", expected_count=2) self.start_vm() - self.check_routers_state(status_to_check="MASTER") + self.check_routers_state(status_to_check="PRIMARY") @attr(tags=["advanced", "intervlan"], required_hardware="true") def test_05_rvpc_multi_tiers(self): @@ -636,7 +636,7 @@ class TestVPCRedundancy(cloudstackTestCase): network.get_net().delete(self.apiclient) self.networks.remove(network) - self.check_routers_state(status_to_check="MASTER") + self.check_routers_state(status_to_check="PRIMARY") self.do_vpc_test(False) def destroy_vm(self, network): diff --git a/tools/apidoc/generatecommand.xsl b/tools/apidoc/generatecommand.xsl index 11530f219cf..8b53f9fe19a 100644 --- a/tools/apidoc/generatecommand.xsl +++ b/tools/apidoc/generatecommand.xsl @@ -40,7 +40,7 @@ version="1.0"> -
+
@@ -147,7 +147,7 @@ version="1.0">