diff --git a/api/src/main/java/com/cloud/configuration/ConfigurationService.java b/api/src/main/java/com/cloud/configuration/ConfigurationService.java index 97d4b42974b..13a44ef05b0 100644 --- a/api/src/main/java/com/cloud/configuration/ConfigurationService.java +++ b/api/src/main/java/com/cloud/configuration/ConfigurationService.java @@ -201,11 +201,12 @@ public interface ConfigurationService { * TODO * @param allocationState * TODO + * @param storageAccessGroups * @return the new pod if successful, null otherwise * @throws * @throws */ - Pod createPod(long zoneId, String name, String startIp, String endIp, String gateway, String netmask, String allocationState); + Pod createPod(long zoneId, String name, String startIp, String endIp, String gateway, String netmask, String allocationState, List storageAccessGroups); /** * Creates a mutual exclusive IP range in the pod with same gateway, netmask. diff --git a/api/src/main/java/com/cloud/dc/Pod.java b/api/src/main/java/com/cloud/dc/Pod.java index 1cbab36f3bd..17c5b615d4b 100644 --- a/api/src/main/java/com/cloud/dc/Pod.java +++ b/api/src/main/java/com/cloud/dc/Pod.java @@ -43,4 +43,6 @@ public interface Pod extends InfrastructureEntity, Grouping, Identity, InternalI AllocationState getAllocationState(); boolean getExternalDhcp(); + + String getStorageAccessGroups(); } diff --git a/api/src/main/java/com/cloud/event/EventTypes.java b/api/src/main/java/com/cloud/event/EventTypes.java index 815bd2363d5..e68da0f5182 100644 --- a/api/src/main/java/com/cloud/event/EventTypes.java +++ b/api/src/main/java/com/cloud/event/EventTypes.java @@ -465,6 +465,7 @@ public class EventTypes { public static final String EVENT_ENABLE_PRIMARY_STORAGE = "ENABLE.PS"; public static final String EVENT_DISABLE_PRIMARY_STORAGE = "DISABLE.PS"; public static final String EVENT_SYNC_STORAGE_POOL = "SYNC.STORAGE.POOL"; + public static final String EVENT_CONFIGURE_STORAGE_ACCESS = "CONFIGURE.STORAGE.ACCESS"; public static final String 
EVENT_CHANGE_STORAGE_POOL_SCOPE = "CHANGE.STORAGE.POOL.SCOPE"; // VPN diff --git a/api/src/main/java/com/cloud/host/Host.java b/api/src/main/java/com/cloud/host/Host.java index afac6df5631..8b9aa4ed791 100644 --- a/api/src/main/java/com/cloud/host/Host.java +++ b/api/src/main/java/com/cloud/host/Host.java @@ -213,4 +213,6 @@ public interface Host extends StateObject, Identity, Partition, HAResour ResourceState getResourceState(); CPU.CPUArch getArch(); + + String getStorageAccessGroups(); } diff --git a/api/src/main/java/com/cloud/org/Cluster.java b/api/src/main/java/com/cloud/org/Cluster.java index 5124168084c..b0aa6bb04cf 100644 --- a/api/src/main/java/com/cloud/org/Cluster.java +++ b/api/src/main/java/com/cloud/org/Cluster.java @@ -41,4 +41,6 @@ public interface Cluster extends Grouping, Partition { ManagedState getManagedState(); CPU.CPUArch getArch(); + + String getStorageAccessGroups(); } diff --git a/api/src/main/java/com/cloud/resource/ResourceService.java b/api/src/main/java/com/cloud/resource/ResourceService.java index 562c3c418df..3cdf8fc64e9 100644 --- a/api/src/main/java/com/cloud/resource/ResourceService.java +++ b/api/src/main/java/com/cloud/resource/ResourceService.java @@ -95,4 +95,11 @@ public interface ResourceService { boolean releaseHostReservation(Long hostId); + void updatePodStorageAccessGroups(long podId, List newStorageAccessGroups); + + void updateZoneStorageAccessGroups(long zoneId, List newStorageAccessGroups); + + void updateClusterStorageAccessGroups(Long clusterId, List newStorageAccessGroups); + + void updateHostStorageAccessGroups(Long hostId, List newStorageAccessGroups); } diff --git a/api/src/main/java/com/cloud/storage/StorageService.java b/api/src/main/java/com/cloud/storage/StorageService.java index b8df75cd3e4..6f7b62911b6 100644 --- a/api/src/main/java/com/cloud/storage/StorageService.java +++ b/api/src/main/java/com/cloud/storage/StorageService.java @@ -22,6 +22,7 @@ import java.util.Map; import 
org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd; import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd; +import org.apache.cloudstack.api.command.admin.storage.ConfigureStorageAccessCmd; import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd; @@ -99,6 +100,8 @@ public interface StorageService { StoragePool disablePrimaryStoragePool(Long id); + boolean configureStorageAccess(ConfigureStorageAccessCmd cmd); + StoragePool getStoragePool(long id); boolean deleteImageStore(DeleteImageStoreCmd cmd); diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index acce2bc7726..45c3ce0b78b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -496,6 +496,11 @@ public class ApiConstants { public static final String SYSTEM_VM_TYPE = "systemvmtype"; public static final String TAGS = "tags"; public static final String STORAGE_TAGS = "storagetags"; + public static final String STORAGE_ACCESS_GROUPS = "storageaccessgroups"; + public static final String STORAGE_ACCESS_GROUP = "storageaccessgroup"; + public static final String CLUSTER_STORAGE_ACCESS_GROUPS = "clusterstorageaccessgroups"; + public static final String POD_STORAGE_ACCESS_GROUPS = "podstorageaccessgroups"; + public static final String ZONE_STORAGE_ACCESS_GROUPS = "zonestorageaccessgroups"; public static final String SUCCESS = "success"; public static final String SUITABLE_FOR_VM = "suitableforvirtualmachine"; public static final String SUPPORTS_STORAGE_SNAPSHOT = "supportsstoragesnapshot"; diff --git a/api/src/main/java/org/apache/cloudstack/api/ResponseGenerator.java 
b/api/src/main/java/org/apache/cloudstack/api/ResponseGenerator.java index ea0d946ee41..e2d132c2ae6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ResponseGenerator.java +++ b/api/src/main/java/org/apache/cloudstack/api/ResponseGenerator.java @@ -310,6 +310,8 @@ public interface ResponseGenerator { PodResponse createPodResponse(Pod pod, Boolean showCapacities); + PodResponse createMinimalPodResponse(Pod pod); + ZoneResponse createZoneResponse(ResponseView view, DataCenter dataCenter, Boolean showCapacities, Boolean showResourceIcon); DataCenterGuestIpv6PrefixResponse createDataCenterGuestIpv6PrefixResponse(DataCenterGuestIpv6Prefix prefix); @@ -324,6 +326,8 @@ public interface ResponseGenerator { ClusterResponse createClusterResponse(Cluster cluster, Boolean showCapacities); + ClusterResponse createMinimalClusterResponse(Cluster cluster); + FirewallRuleResponse createPortForwardingRuleResponse(PortForwardingRule fwRule); IpForwardingRuleResponse createIpForwardingRuleResponse(StaticNatRule fwRule); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java index 69cb43ce40e..15265f561e7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java @@ -118,6 +118,12 @@ public class AddClusterCmd extends BaseCmd { private String ovm3cluster; @Parameter(name = ApiConstants.OVM3_VIP, type = CommandType.STRING, required = false, description = "Ovm3 vip to use for pool (and cluster)") private String ovm3vip; + @Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS, + type = CommandType.LIST, collectionType = CommandType.STRING, + description = "comma separated list of storage access groups for the hosts in the cluster", + since = "4.21.0") + private List storageAccessGroups; + public String getOvm3Pool() { return 
ovm3pool; } @@ -192,6 +198,10 @@ public class AddClusterCmd extends BaseCmd { this.clusterType = type; } + public List getStorageAccessGroups() { + return storageAccessGroups; + } + @Override public long getEntityOwnerId() { return Account.ACCOUNT_ID_SYSTEM; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/ListClustersCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/ListClustersCmd.java index 362913a1138..9cc39503fbf 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/ListClustersCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/ListClustersCmd.java @@ -74,6 +74,11 @@ public class ListClustersCmd extends BaseListCmd { since = "4.20.1") private String arch; + @Parameter(name = ApiConstants.STORAGE_ACCESS_GROUP, type = CommandType.STRING, + description = "the name of the storage access group", + since = "4.21.0") + private String storageAccessGroup; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -122,6 +127,18 @@ public class ListClustersCmd extends BaseListCmd { return StringUtils.isBlank(arch) ? 
null : CPU.CPUArch.fromType(arch); } + public String getStorageAccessGroup() { + return storageAccessGroup; + } + + public ListClustersCmd() { + + } + + public ListClustersCmd(String storageAccessGroup) { + this.storageAccessGroup = storageAccessGroup; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java index c4ee87380ed..816285e3430 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java @@ -130,7 +130,7 @@ public class UpdateClusterCmd extends BaseCmd { } Cluster result = _resourceService.updateCluster(this); if (result != null) { - ClusterResponse clusterResponse = _responseGenerator.createClusterResponse(cluster, false); + ClusterResponse clusterResponse = _responseGenerator.createClusterResponse(result, false); clusterResponse.setResponseName(getCommandName()); this.setResponseObject(clusterResponse); } else { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddHostCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddHostCmd.java index ca27837aa88..6531444b52e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddHostCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddHostCmd.java @@ -75,6 +75,12 @@ public class AddHostCmd extends BaseCmd { @Parameter(name = ApiConstants.HOST_TAGS, type = CommandType.LIST, collectionType = CommandType.STRING, description = "list of tags to be added to the host") private List hostTags; + @Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS, + type = CommandType.LIST, collectionType = 
CommandType.STRING, + description = "comma separated list of storage access groups for the host", + since = "4.21.0") + private List storageAccessGroups; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -115,6 +121,10 @@ public class AddHostCmd extends BaseCmd { return hostTags; } + public List getStorageAccessGroups() { + return storageAccessGroups; + } + public String getAllocationState() { return allocationState; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java index af4ed50fa61..a71150a69e2 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java @@ -113,6 +113,11 @@ public class ListHostsCmd extends BaseListCmd { @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, description = "CPU Arch of the host", since = "4.20.1") private String arch; + @Parameter(name = ApiConstants.STORAGE_ACCESS_GROUP, type = CommandType.STRING, + description = "the name of the storage access group", + since = "4.21.0") + private String storageAccessGroup; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -205,6 +210,18 @@ public class ListHostsCmd extends BaseListCmd { return StringUtils.isBlank(arch) ? 
null : CPU.CPUArch.fromType(arch); } + public String getStorageAccessGroup() { + return storageAccessGroup; + } + + public ListHostsCmd() { + + } + + public ListHostsCmd(String storageAccessGroup) { + this.storageAccessGroup = storageAccessGroup; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/CreatePodCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/CreatePodCmd.java index c1d9a6db429..36ad00e6e4a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/CreatePodCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/CreatePodCmd.java @@ -30,6 +30,8 @@ import org.apache.cloudstack.api.response.ZoneResponse; import com.cloud.dc.Pod; import com.cloud.user.Account; +import java.util.List; + @APICommand(name = "createPod", description = "Creates a new Pod.", responseObject = PodResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreatePodCmd extends BaseCmd { @@ -63,6 +65,12 @@ public class CreatePodCmd extends BaseCmd { @Parameter(name = ApiConstants.ALLOCATION_STATE, type = CommandType.STRING, description = "Allocation state of this Pod for allocation of new resources") private String allocationState; + @Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS, + type = CommandType.LIST, collectionType = CommandType.STRING, + description = "comma separated list of storage access groups for the hosts in the pod", + since = "4.21.0") + private List storageAccessGroups; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -95,6 +103,10 @@ public class CreatePodCmd extends BaseCmd { return allocationState; } + public List getStorageAccessGroups() { + return 
storageAccessGroups; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -111,7 +123,7 @@ public class CreatePodCmd extends BaseCmd { @Override public void execute() { - Pod result = _configService.createPod(getZoneId(), getPodName(), getStartIp(), getEndIp(), getGateway(), getNetmask(), getAllocationState()); + Pod result = _configService.createPod(getZoneId(), getPodName(), getStartIp(), getEndIp(), getGateway(), getNetmask(), getAllocationState(), getStorageAccessGroups()); if (result != null) { PodResponse response = _responseGenerator.createPodResponse(result, false); response.setResponseName(getCommandName()); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/ListPodsByCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/ListPodsByCmd.java index 5ad0b457ced..ca5635d4fe4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/ListPodsByCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/ListPodsByCmd.java @@ -55,6 +55,11 @@ public class ListPodsByCmd extends BaseListCmd { @Parameter(name = ApiConstants.SHOW_CAPACITIES, type = CommandType.BOOLEAN, description = "flag to display the capacity of the pods") private Boolean showCapacities; + @Parameter(name = ApiConstants.STORAGE_ACCESS_GROUP, type = CommandType.STRING, + description = "the name of the storage access group", + since = "4.21.0") + private String storageAccessGroup; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -79,6 +84,18 @@ public class ListPodsByCmd extends BaseListCmd { return showCapacities; } + public String getStorageAccessGroup() { + return storageAccessGroup; + } + + public ListPodsByCmd() { + + } + + public ListPodsByCmd(String storageAccessGroup) { + 
this.storageAccessGroup = storageAccessGroup; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ConfigureStorageAccessCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ConfigureStorageAccessCmd.java new file mode 100644 index 00000000000..bfa2589921f --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ConfigureStorageAccessCmd.java @@ -0,0 +1,135 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.admin.storage; + +import java.util.List; + +import com.cloud.event.EventTypes; +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.response.ClusterResponse; +import org.apache.cloudstack.api.response.HostResponse; +import org.apache.cloudstack.api.response.PodResponse; +import org.apache.cloudstack.api.response.StoragePoolResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.api.response.ZoneResponse; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; + +import com.cloud.user.Account; + +@APICommand(name = "configureStorageAccess", description = "Configure the storage access groups on zone/pod/cluster/host and storage, accordingly connections to the storage pools", responseObject = SuccessResponse.class, since = "4.21.0", + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +public class ConfigureStorageAccessCmd extends BaseAsyncCmd { + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, description = "UUID of the zone") + private Long zoneId; + + @Parameter(name = ApiConstants.POD_ID, type = CommandType.UUID, entityType = PodResponse.class, description = "UUID of the pod") + private Long podId; + + @Parameter(name = ApiConstants.CLUSTER_ID, type = CommandType.UUID, entityType = ClusterResponse.class, description = "UUID of the cluster") + private Long clusterId; + + @Parameter(name = ApiConstants.HOST_ID, type = CommandType.UUID, entityType = HostResponse.class, 
description = "UUID of the host") + private Long hostId; + + @Parameter(name = ApiConstants.STORAGE_ID, type = CommandType.UUID, entityType = StoragePoolResponse.class, description = "UUID of the Storage Pool") + private Long storageId; + + @Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS, type = CommandType.LIST, collectionType = CommandType.STRING, + description = "comma separated list of storage access groups for connecting the storage pools and the hosts", + since = "4.21.0") + private List storageAccessGroups; + + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getZoneId() { + return zoneId; + } + + public Long getPodId() { + return podId; + } + + public Long getClusterId() { + return clusterId; + } + + public Long getHostId() { + return hostId; + } + + public Long getStorageId() { + return storageId; + } + + public List getStorageAccessGroups() { + return storageAccessGroups; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public ApiCommandResourceType getApiResourceType() { + return ApiCommandResourceType.StoragePool; + } + + @Override + public void execute() { + try { + boolean result = _storageService.configureStorageAccess(this); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to configure storage access"); + } + } catch (Exception e) { + logger.debug("Failed to configure storage access ", e); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to configure storage access, " + e.getMessage()); + } + } + + @Override + public String 
getEventType() { + return EventTypes.EVENT_CONFIGURE_STORAGE_ACCESS; + } + + @Override + public String getEventDescription() { + return "configuring storage access groups"; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java index 75813a7aabf..cbe4b8c06b3 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java @@ -61,6 +61,10 @@ public class CreateStoragePoolCmd extends BaseCmd { @Parameter(name = ApiConstants.TAGS, type = CommandType.STRING, description = "the tags for the storage pool") private String tags; + @Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS, type = CommandType.STRING, + description = "comma separated list of storage access groups for connecting to hosts having those specific groups", since = "4.21.0") + private String storageAccessGroups; + @Parameter(name = ApiConstants.URL, type = CommandType.STRING, required = true, description = "the URL of the storage pool") private String url; @@ -115,6 +119,10 @@ public class CreateStoragePoolCmd extends BaseCmd { return tags; } + public String getStorageAccessGroups() { + return storageAccessGroups; + } + public String getUrl() { return url; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStorageAccessGroupsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStorageAccessGroupsCmd.java new file mode 100644 index 00000000000..d2a1757839f --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStorageAccessGroupsCmd.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.api.command.admin.storage; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.response.StorageAccessGroupResponse; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.response.ListResponse; + +@APICommand(name = "listStorageAccessGroups", description = "Lists storage access groups", responseObject = StorageAccessGroupResponse.class, since = "4.21.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +public class ListStorageAccessGroupsCmd extends BaseListCmd { + + // /////////////////////////////////////////////////// + // ////////////// API parameters ///////////////////// + // /////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.NAME, type = BaseCmd.CommandType.STRING, description = "Name of the Storage access group") + private String name; + + // /////////////////////////////////////////////////// + // ///////////////// Accessors /////////////////////// + // /////////////////////////////////////////////////// + + public String getName() 
{ + return name; + } + + // /////////////////////////////////////////////////// + // ///////////// API Implementation/////////////////// + // /////////////////////////////////////////////////// + + @Override + public ApiCommandResourceType getApiResourceType() { + return ApiCommandResourceType.StoragePool; + } + + @Override + public void execute() { + ListResponse response = _queryService.searchForStorageAccessGroups(this); + + response.setResponseName(getCommandName()); + + setResponseObject(response); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java index 57a87939b6b..0f2c9f3416e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java @@ -41,7 +41,7 @@ public class ListStoragePoolsCmd extends BaseListCmd { @Parameter(name = ApiConstants.CLUSTER_ID, type = CommandType.UUID, entityType = ClusterResponse.class, - description = "list storage pools belongig to the specific cluster") + description = "list storage pools belonging to the specific cluster") private Long clusterId; @Parameter(name = ApiConstants.IP_ADDRESS, type = CommandType.STRING, description = "the IP address for the storage pool") @@ -74,6 +74,10 @@ public class ListStoragePoolsCmd extends BaseListCmd { @Parameter(name = ApiConstants.STORAGE_CUSTOM_STATS, type = CommandType.BOOLEAN, description = "If true, lists the custom stats of the storage pool", since = "4.18.1") private Boolean customStats; + + @Parameter(name = ApiConstants.STORAGE_ACCESS_GROUP, type = CommandType.STRING, description = "the name of the storage access group", since = "4.21.0") + private String storageAccessGroup; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// 
///////////////////////////////////////////////////// @@ -134,6 +138,17 @@ public class ListStoragePoolsCmd extends BaseListCmd { return customStats != null && customStats; } + public String getStorageAccessGroup() { + return storageAccessGroup; + } + + public ListStoragePoolsCmd() { + } + + public ListStoragePoolsCmd(String storageAccessGroup) { + this.storageAccessGroup = storageAccessGroup; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/CreateZoneCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/CreateZoneCmd.java index 24660e41ed9..f3c1bab260b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/CreateZoneCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/CreateZoneCmd.java @@ -31,6 +31,8 @@ import org.apache.cloudstack.context.CallContext; import com.cloud.dc.DataCenter; import com.cloud.user.Account; +import java.util.List; + @APICommand(name = "createZone", description = "Creates a Zone.", responseObject = ZoneResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateZoneCmd extends BaseCmd { @@ -88,6 +90,11 @@ public class CreateZoneCmd extends BaseCmd { @Parameter(name = ApiConstants.IS_EDGE, type = CommandType.BOOLEAN, description = "true if the zone is an edge zone, false otherwise", since = "4.18.0") private Boolean isEdge; + @Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS, + type = CommandType.LIST, collectionType = CommandType.STRING, + description = "comma separated list of storage access groups for the hosts in the zone", + since = "4.21.0") + private List storageAccessGroups; ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// @@ -162,6 +169,10 @@ public class CreateZoneCmd 
extends BaseCmd { return isEdge; } + public List getStorageAccessGroups() { + return storageAccessGroups; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// @Override diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/zone/ListZonesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/zone/ListZonesCmd.java index d926257437e..a5e26f30dfb 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/zone/ListZonesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/zone/ListZonesCmd.java @@ -69,6 +69,11 @@ public class ListZonesCmd extends BaseListCmd implements UserCmd { @Parameter(name = ApiConstants.SHOW_RESOURCE_ICON, type = CommandType.BOOLEAN, description = "flag to display the resource image for the zones") private Boolean showIcon; + @Parameter(name = ApiConstants.STORAGE_ACCESS_GROUP, type = CommandType.STRING, + description = "the name of the storage access group", + since = "4.21.0") + private String storageAccessGroup; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -109,6 +114,18 @@ public class ListZonesCmd extends BaseListCmd implements UserCmd { return showIcon != null ? 
showIcon : false; } + public String getStorageAccessGroup() { + return storageAccessGroup; + } + + public ListZonesCmd() { + + } + + public ListZonesCmd(String storageAccessGroup) { + this.storageAccessGroup = storageAccessGroup; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ClusterResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ClusterResponse.java index 1c69849239f..17c86072b98 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ClusterResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ClusterResponse.java @@ -95,6 +95,18 @@ public class ClusterResponse extends BaseResponseWithAnnotations { @Param(description = "CPU Arch of the hosts in the cluster", since = "4.20") private String arch; + @SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS) + @Param(description = "comma-separated list of storage access groups for the host", since = "4.21.0") + private String storageAccessGroups; + + @SerializedName(ApiConstants.POD_STORAGE_ACCESS_GROUPS) + @Param(description = "comma-separated list of storage access groups on the pod", since = "4.21.0") + private String podStorageAccessGroups; + + @SerializedName(ApiConstants.ZONE_STORAGE_ACCESS_GROUPS) + @Param(description = "comma-separated list of storage access groups on the zone", since = "4.21.0") + private String zoneStorageAccessGroups; + public String getId() { return id; } @@ -259,4 +271,28 @@ public class ClusterResponse extends BaseResponseWithAnnotations { public String getArch() { return arch; } + + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + + public String getPodStorageAccessGroups() { + return podStorageAccessGroups; + } 
+ + public void setPodStorageAccessGroups(String podStorageAccessGroups) { + this.podStorageAccessGroups = podStorageAccessGroups; + } + + public String getZoneStorageAccessGroups() { + return zoneStorageAccessGroups; + } + + public void setZoneStorageAccessGroups(String zoneStorageAccessGroups) { + this.zoneStorageAccessGroups = zoneStorageAccessGroups; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java index 091d6391b31..342a1eb7df3 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java @@ -302,6 +302,22 @@ public class HostResponse extends BaseResponseWithAnnotations { @Param(description = "CPU Arch of the host", since = "4.20") private String arch; + @SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS) + @Param(description = "comma-separated list of storage access groups for the host", since = "4.21.0") + private String storageAccessGroups; + + @SerializedName(ApiConstants.CLUSTER_STORAGE_ACCESS_GROUPS) + @Param(description = "comma-separated list of storage access groups on the cluster", since = "4.21.0") + private String clusterStorageAccessGroups; + + @SerializedName(ApiConstants.POD_STORAGE_ACCESS_GROUPS) + @Param(description = "comma-separated list of storage access groups on the pod", since = "4.21.0") + private String podStorageAccessGroups; + + @SerializedName(ApiConstants.ZONE_STORAGE_ACCESS_GROUPS) + @Param(description = "comma-separated list of storage access groups on the zone", since = "4.21.0") + private String zoneStorageAccessGroups; + @Override public String getObjectId() { return this.getId(); @@ -491,6 +507,38 @@ public class HostResponse extends BaseResponseWithAnnotations { this.hostTags = hostTags; } + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public void setStorageAccessGroups(String 
storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + + public String getClusterStorageAccessGroups() { + return clusterStorageAccessGroups; + } + + public void setClusterStorageAccessGroups(String clusterStorageAccessGroups) { + this.clusterStorageAccessGroups = clusterStorageAccessGroups; + } + + public String getPodStorageAccessGroups() { + return podStorageAccessGroups; + } + + public void setPodStorageAccessGroups(String podStorageAccessGroups) { + this.podStorageAccessGroups = podStorageAccessGroups; + } + + public String getZoneStorageAccessGroups() { + return zoneStorageAccessGroups; + } + + public void setZoneStorageAccessGroups(String zoneStorageAccessGroups) { + this.zoneStorageAccessGroups = zoneStorageAccessGroups; + } + public String getExplicitHostTags() { return explicitHostTags; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/PodResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/PodResponse.java index 587fabfae8d..6a1afaecbcf 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/PodResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/PodResponse.java @@ -85,6 +85,14 @@ public class PodResponse extends BaseResponseWithAnnotations { @Param(description = "the capacity of the Pod", responseObject = CapacityResponse.class) private List capacities; + @SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS) + @Param(description = "comma-separated list of storage access groups for the pod", since = "4.21.0") + private String storageAccessGroups; + + @SerializedName(ApiConstants.ZONE_STORAGE_ACCESS_GROUPS) + @Param(description = "comma-separated list of storage access groups on the zone", since = "4.21.0") + private String zoneStorageAccessGroups; + public String getId() { return id; } @@ -184,4 +192,20 @@ public class PodResponse extends BaseResponseWithAnnotations { public void setCapacities(List capacities) { this.capacities = capacities; } + + public String 
getStorageAccessGroups() { + return storageAccessGroups; + } + + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + + public String getZoneStorageAccessGroups() { + return zoneStorageAccessGroups; + } + + public void setZoneStorageAccessGroups(String zoneStorageAccessGroups) { + this.zoneStorageAccessGroups = zoneStorageAccessGroups; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java index 0622b936f6e..4e71d39cb8d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java @@ -80,7 +80,7 @@ public class ServiceOfferingResponse extends BaseResponseWithAnnotations { @Param(description = "true if the vm needs to be volatile, i.e., on every reboot of vm from API root disk is discarded and creates a new root disk") private Boolean isVolatile; - @SerializedName("storagetags") + @SerializedName(ApiConstants.STORAGE_TAGS) @Param(description = "the tags for the service offering") private String tags; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/StorageAccessGroupResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/StorageAccessGroupResponse.java new file mode 100644 index 00000000000..a6324dd62a9 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/response/StorageAccessGroupResponse.java @@ -0,0 +1,108 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response; + +import com.google.gson.annotations.SerializedName; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; +import com.cloud.serializer.Param; + +public class StorageAccessGroupResponse extends BaseResponse { + @SerializedName(ApiConstants.ID) + @Param(description = "the ID of the storage access group") + private String id; + + @SerializedName(ApiConstants.NAME) + @Param(description = "the name of the storage access group") + private String name; + + @SerializedName("hosts") + @Param(description = "List of Hosts in the Storage Access Group") + private ListResponse hostResponseList; + + @SerializedName("clusters") + @Param(description = "List of Clusters in the Storage Access Group") + private ListResponse clusterResponseList; + + @SerializedName("pods") + @Param(description = "List of Pods in the Storage Access Group") + private ListResponse podResponseList; + + @SerializedName("zones") + @Param(description = "List of Zones in the Storage Access Group") + private ListResponse zoneResponseList; + + @SerializedName("storagepools") + @Param(description = "List of Storage Pools in the Storage Access Group") + private ListResponse storagePoolResponseList; + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public ListResponse getHostResponseList() { + return hostResponseList; + } + + public void 
setHostResponseList(ListResponse hostResponseList) { + this.hostResponseList = hostResponseList; + } + + public ListResponse getClusterResponseList() { + return clusterResponseList; + } + + public void setClusterResponseList(ListResponse clusterResponseList) { + this.clusterResponseList = clusterResponseList; + } + + public ListResponse getPodResponseList() { + return podResponseList; + } + + public void setPodResponseList(ListResponse podResponseList) { + this.podResponseList = podResponseList; + } + + public ListResponse getZoneResponseList() { + return zoneResponseList; + } + + public void setZoneResponseList(ListResponse zoneResponseList) { + this.zoneResponseList = zoneResponseList; + } + + public ListResponse getStoragePoolResponseList() { + return storagePoolResponseList; + } + + public void setStoragePoolResponseList(ListResponse storagePoolResponseList) { + this.storagePoolResponseList = storagePoolResponseList; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java index 51efb6d42cb..abc674ff0f9 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java @@ -109,6 +109,10 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations { @Param(description = "the tags for the storage pool") private String tags; + @SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS) + @Param(description = "the storage access groups for the storage pool", since = "4.21.0") + private String storageAccessGroups; + @SerializedName(ApiConstants.NFS_MOUNT_OPTIONS) @Param(description = "the nfs mount options for the storage pool", since = "4.19.1") private String nfsMountOpts; @@ -344,6 +348,14 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations { this.tags = tags; } + public String getStorageAccessGroups() { + return 
storageAccessGroups; + } + + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + public Boolean getIsTagARule() { return isTagARule; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ZoneResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ZoneResponse.java index 4a5279753a1..8e9a993bac6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ZoneResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ZoneResponse.java @@ -95,7 +95,7 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso @SerializedName("securitygroupsenabled") @Param(description = "true if security groups support is enabled, false otherwise") - private boolean securityGroupsEnabled; + private Boolean securityGroupsEnabled; @SerializedName("allocationstate") @Param(description = "the allocation state of the cluster") @@ -115,7 +115,7 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso @SerializedName(ApiConstants.LOCAL_STORAGE_ENABLED) @Param(description = "true if local storage offering enabled, false otherwise") - private boolean localStorageEnabled; + private Boolean localStorageEnabled; @SerializedName(ApiConstants.TAGS) @Param(description = "the list of resource tags associated with zone.", responseObject = ResourceTagResponse.class, since = "4.3") @@ -161,11 +161,19 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso @Param(description = "true, if routed network/vpc is enabled", since = "4.20.1") private boolean routedModeEnabled = false; + @SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS) + @Param(description = "comma-separated list of storage access groups for the zone", since = "4.21.0") + private String storageAccessGroups; + public ZoneResponse() { tags = new LinkedHashSet(); } + public ZoneResponse(Set tags) { + this.tags = tags; + } + public void setId(String id) { 
this.id = id; } @@ -402,6 +410,14 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso return type; } + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + public void setNsxEnabled(boolean nsxEnabled) { this.nsxEnabled = nsxEnabled; } diff --git a/api/src/main/java/org/apache/cloudstack/query/QueryService.java b/api/src/main/java/org/apache/cloudstack/query/QueryService.java index 0a5721abdc1..4278c9217b5 100644 --- a/api/src/main/java/org/apache/cloudstack/query/QueryService.java +++ b/api/src/main/java/org/apache/cloudstack/query/QueryService.java @@ -32,6 +32,7 @@ import org.apache.cloudstack.api.command.admin.router.ListRoutersCmd; import org.apache.cloudstack.api.command.admin.storage.ListImageStoresCmd; import org.apache.cloudstack.api.command.admin.storage.ListObjectStoragePoolsCmd; import org.apache.cloudstack.api.command.admin.storage.ListSecondaryStagingStoresCmd; +import org.apache.cloudstack.api.command.admin.storage.ListStorageAccessGroupsCmd; import org.apache.cloudstack.api.command.admin.storage.ListStoragePoolsCmd; import org.apache.cloudstack.api.command.admin.storage.ListStorageTagsCmd; import org.apache.cloudstack.api.command.admin.storage.heuristics.ListSecondaryStorageSelectorsCmd; @@ -87,6 +88,7 @@ import org.apache.cloudstack.api.response.SecondaryStorageHeuristicsResponse; import org.apache.cloudstack.api.response.SecurityGroupResponse; import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.cloudstack.api.response.SnapshotResponse; +import org.apache.cloudstack.api.response.StorageAccessGroupResponse; import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.StorageTagResponse; import org.apache.cloudstack.api.response.TemplateResponse; @@ -197,6 +199,8 @@ public interface QueryService { 
ListResponse searchForStorageTags(ListStorageTagsCmd cmd); + ListResponse searchForStorageAccessGroups(ListStorageAccessGroupsCmd cmd); + ListResponse searchForHostTags(ListHostTagsCmd cmd); ListResponse listManagementServers(ListMgmtsCmd cmd); diff --git a/core/src/test/java/org/apache/cloudstack/api/agent/test/CheckOnHostCommandTest.java b/core/src/test/java/org/apache/cloudstack/api/agent/test/CheckOnHostCommandTest.java index be7563be045..a696049608e 100644 --- a/core/src/test/java/org/apache/cloudstack/api/agent/test/CheckOnHostCommandTest.java +++ b/core/src/test/java/org/apache/cloudstack/api/agent/test/CheckOnHostCommandTest.java @@ -284,6 +284,11 @@ public class CheckOnHostCommandTest { public CPU.CPUArch getArch() { return CPU.CPUArch.amd64; } + + @Override + public String getStorageAccessGroups() { + return null; + } }; CheckOnHostCommand cohc = new CheckOnHostCommand(host); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java index 1b18264df15..adb77e69e90 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreParameters.java @@ -30,6 +30,7 @@ public class PrimaryDataStoreParameters { private String providerName; private Map details; private String tags; + private String storageAccessGroups; private StoragePoolType type; private HypervisorType hypervisorType; private String host; @@ -165,6 +166,21 @@ public class PrimaryDataStoreParameters { this.tags = tags; } + /** + * @return the storageAccessGroups + */ + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + /** + * @param storageAccessGroups + * the storageAccessGroups to set + */ + public void setStorageAccessGroups(String 
storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + /** * @return the details */ diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/StoragePoolAllocator.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/StoragePoolAllocator.java index 6a78f6fe253..9a2dc734685 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/StoragePoolAllocator.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/StoragePoolAllocator.java @@ -64,4 +64,5 @@ public interface StoragePoolAllocator extends Adapter { static int RETURN_UPTO_ALL = -1; List reorderPools(List pools, VirtualMachineProfile vmProfile, DeploymentPlan plan, DiskProfile dskCh); + } diff --git a/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java b/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java index 1694b19c33f..f172bead7aa 100644 --- a/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java +++ b/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java @@ -147,12 +147,12 @@ public interface ConfigurationManager { * @param startIp * @param endIp * @param allocationState - * @param skipGatewayOverlapCheck - * (true if it is ok to not validate that gateway IP address overlap with Start/End IP of the POD) + * @param skipGatewayOverlapCheck (true if it is ok to not validate that gateway IP address overlap with Start/End IP of the POD) + * @param storageAccessGroups * @return Pod */ HostPodVO createPod(long userId, String podName, DataCenter zone, String gateway, String cidr, String startIp, String endIp, String allocationState, - boolean skipGatewayOverlapCheck); + boolean skipGatewayOverlapCheck, List storageAccessGroups); /** * Creates a new zone @@ -170,13 +170,14 @@ public interface ConfigurationManager { * @param 
isSecurityGroupEnabled * @param ip6Dns1 * @param ip6Dns2 + * @param storageAccessGroups * @return * @throws * @throws */ DataCenterVO createZone(long userId, String zoneName, String dns1, String dns2, String internalDns1, String internalDns2, String guestCidr, String domain, Long domainId, NetworkType zoneType, String allocationState, String networkDomain, boolean isSecurityGroupEnabled, boolean isLocalStorageEnabled, String ip6Dns1, - String ip6Dns2, boolean isEdge); + String ip6Dns2, boolean isEdge, List storageAccessGroups); /** * Deletes a VLAN from the database, along with all of its IP addresses. Will not delete VLANs that have allocated diff --git a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java index 34309e942d3..83f9768a62a 100755 --- a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java +++ b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java @@ -21,6 +21,8 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; @@ -236,4 +238,12 @@ public interface ResourceManager extends ResourceService, Configurable { HostVO findOneRandomRunningHostByHypervisor(HypervisorType type, Long dcId); boolean cancelMaintenance(final long hostId); + + void updateStoragePoolConnectionsOnHosts(Long poolId, List storageAccessGroups); + + List getEligibleUpHostsInClusterForStorageConnection(PrimaryDataStoreInfo primaryStore); + + List getEligibleUpAndEnabledHostsInClusterForStorageConnection(PrimaryDataStoreInfo primaryStore); + + List getEligibleUpAndEnabledHostsInZoneForStorageConnection(DataStore dataStore, long zoneId, HypervisorType 
hypervisorType); } diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index 58db613c253..3fc6d80befe 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -410,4 +410,9 @@ public interface StorageManager extends StorageService { void validateChildDatastoresToBeAddedInUpState(StoragePoolVO datastoreClusterPool, List childDatastoreAnswerList); + boolean checkIfHostAndStoragePoolHasCommonStorageAccessGroups(Host host, StoragePool pool); + + Pair checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(StoragePool destPool, Volume volume); + + String[] getStorageAccessGroups(Long zoneId, Long podId, Long clusterId, Long hostId); } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java index 19b0e773cd0..39ab83fab60 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java @@ -114,6 +114,9 @@ public class EngineClusterVO implements EngineCluster, Identity { @Column(name = "engine_state", updatable = true, nullable = false, length = 32) protected State state = null; + @Column(name = "storage_access_groups") + String storageAccessGroups; + public EngineClusterVO() { clusterType = Cluster.ClusterType.CloudManaged; allocationState = Grouping.AllocationState.Enabled; @@ -176,6 +179,11 @@ public class EngineClusterVO implements EngineCluster, Identity { return managedState; } + @Override + public String getStorageAccessGroups() { + return storageAccessGroups; + } + public void 
setManagedState(ManagedState managedState) { this.managedState = managedState; } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java index 95931d5b72d..cd3f6b857a2 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java @@ -89,6 +89,9 @@ public class EngineHostPodVO implements EnginePod, Identity { @Temporal(value = TemporalType.TIMESTAMP) protected Date lastUpdated; + @Column(name = "storage_access_groups") + String storageAccessGroups; + /** * Note that state is intentionally missing the setter. Any updates to * the state machine needs to go through the DAO object because someone @@ -202,6 +205,11 @@ public class EngineHostPodVO implements EnginePod, Identity { return externalDhcp; } + @Override + public String getStorageAccessGroups() { + return storageAccessGroups; + } + public void setExternalDhcp(boolean use) { externalDhcp = use; } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java index 8ef2de3f74d..eec2b011b3e 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java @@ -405,6 +405,9 @@ public class EngineHostVO implements EngineHost, Identity { @Column(name = "engine_state", updatable = true, nullable = false, length = 32) protected State orchestrationState = null; + @Column(name = "storage_access_groups") + private String storageAccessGroups = null; 
+ public EngineHostVO(String guid) { this.guid = guid; this.status = Status.Creating; @@ -807,4 +810,13 @@ public class EngineHostVO implements EngineHost, Identity { public PartitionType partitionType() { return PartitionType.Host; } + + @Override + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } } diff --git a/engine/schema/src/main/java/com/cloud/dc/ClusterVO.java b/engine/schema/src/main/java/com/cloud/dc/ClusterVO.java index 434901ef5b3..a18097db6d6 100644 --- a/engine/schema/src/main/java/com/cloud/dc/ClusterVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/ClusterVO.java @@ -85,6 +85,10 @@ public class ClusterVO implements Cluster { @Column(name = "uuid") String uuid; + @Column(name = "storage_access_groups") + String storageAccessGroups; + + public ClusterVO() { clusterType = Cluster.ClusterType.CloudManaged; allocationState = Grouping.AllocationState.Enabled; @@ -215,6 +219,14 @@ public class ClusterVO implements Cluster { this.arch = arch; } + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + @Override public String toString() { return String.format("Cluster {id: \"%s\", name: \"%s\", uuid: \"%s\"}", id, name, uuid); diff --git a/engine/schema/src/main/java/com/cloud/dc/DataCenterVO.java b/engine/schema/src/main/java/com/cloud/dc/DataCenterVO.java index 827b72b58b0..9b24e51a1a8 100644 --- a/engine/schema/src/main/java/com/cloud/dc/DataCenterVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/DataCenterVO.java @@ -142,6 +142,9 @@ public class DataCenterVO implements DataCenter { @Enumerated(value = EnumType.STRING) private DataCenter.Type type; + @Column(name = "storage_access_groups") + String storageAccessGroups; + @Override public String 
getDnsProvider() { return dnsProvider; @@ -485,6 +488,14 @@ public class DataCenterVO implements DataCenter { this.type = type; } + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + @Override public String toString() { return String.format("Zone {\"id\": \"%s\", \"name\": \"%s\", \"uuid\": \"%s\"}", id, name, uuid); diff --git a/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java b/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java index fdda38fbc39..99ebcf2346c 100644 --- a/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java @@ -71,6 +71,9 @@ public class HostPodVO implements Pod { @Column(name = "uuid") private String uuid; + @Column(name = "storage_access_groups") + String storageAccessGroups; + public HostPodVO(String name, long dcId, String gateway, String cidrAddress, int cidrSize, String description) { this.name = name; this.dataCenterId = dcId; @@ -199,6 +202,14 @@ public class HostPodVO implements Pod { this.uuid = uuid; } + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + @Override public String toString() { return String.format("HostPod %s", diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java index 69b5f0e146e..a6fe3123c4e 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java @@ -57,4 +57,6 @@ public interface ClusterDao extends GenericDao { List getClustersArchsByZone(long zoneId); List listClustersByArchAndZoneId(long zoneId, CPU.CPUArch arch); + + List listDistinctStorageAccessGroups(String name, String keyword); 
} diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java index 59614b54745..7c0d0c53814 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java @@ -346,4 +346,36 @@ public class ClusterDaoImpl extends GenericDaoBase implements C sc.setParameters("arch", arch); return listBy(sc); } + + @Override + public List listDistinctStorageAccessGroups(String name, String keyword) { + GenericSearchBuilder searchBuilder = createSearchBuilder(String.class); + + searchBuilder.select(null, SearchCriteria.Func.DISTINCT, searchBuilder.entity().getStorageAccessGroups()); + if (name != null) { + searchBuilder.and().op("storageAccessGroupExact", searchBuilder.entity().getStorageAccessGroups(), Op.EQ); + searchBuilder.or("storageAccessGroupPrefix", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE); + searchBuilder.or("storageAccessGroupSuffix", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE); + searchBuilder.or("storageAccessGroupMiddle", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE); + searchBuilder.cp(); + } + if (keyword != null) { + searchBuilder.and("keyword", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE); + } + searchBuilder.done(); + + SearchCriteria sc = searchBuilder.create(); + if (name != null) { + sc.setParameters("storageAccessGroupExact", name); + sc.setParameters("storageAccessGroupPrefix", name + ",%"); + sc.setParameters("storageAccessGroupSuffix", "%," + name); + sc.setParameters("storageAccessGroupMiddle", "%," + name + ",%"); + } + + if (keyword != null) { + sc.setParameters("keyword", "%" + keyword + "%"); + } + + return customSearch(sc, null); + } } diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDao.java b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDao.java index dddbce31772..0ba88f39b23 100644 --- 
a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDao.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDao.java @@ -117,4 +117,6 @@ public interface DataCenterDao extends GenericDao { List listAllZones(); List listByIds(List ids); + + List listDistinctStorageAccessGroups(String name, String keyword); } diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDaoImpl.java index 7719e5adfc7..d8ab12e82e6 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDaoImpl.java @@ -25,6 +25,7 @@ import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.utils.db.GenericSearchBuilder; import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; @@ -441,4 +442,36 @@ public class DataCenterDaoImpl extends GenericDaoBase implem sc.setParameters("ids", ids.toArray()); return listBy(sc); } + + @Override + public List listDistinctStorageAccessGroups(String name, String keyword) { + GenericSearchBuilder searchBuilder = createSearchBuilder(String.class); + + searchBuilder.select(null, SearchCriteria.Func.DISTINCT, searchBuilder.entity().getStorageAccessGroups()); + if (name != null) { + searchBuilder.and().op("storageAccessGroupExact", searchBuilder.entity().getStorageAccessGroups(), SearchCriteria.Op.EQ); + searchBuilder.or("storageAccessGroupPrefix", searchBuilder.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE); + searchBuilder.or("storageAccessGroupSuffix", searchBuilder.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE); + searchBuilder.or("storageAccessGroupMiddle", searchBuilder.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE); + searchBuilder.cp(); + } + if (keyword != null) { + searchBuilder.and("keyword", 
searchBuilder.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE); + } + searchBuilder.done(); + + SearchCriteria sc = searchBuilder.create(); + if (name != null) { + sc.setParameters("storageAccessGroupExact", name); + sc.setParameters("storageAccessGroupPrefix", name + ",%"); + sc.setParameters("storageAccessGroupSuffix", "%," + name); + sc.setParameters("storageAccessGroupMiddle", "%," + name + ",%"); + } + + if (keyword != null) { + sc.setParameters("keyword", "%" + keyword + "%"); + } + + return customSearch(sc, null); + } } diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/HostPodDao.java b/engine/schema/src/main/java/com/cloud/dc/dao/HostPodDao.java index b2e9b898606..2549a0555e8 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/HostPodDao.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/HostPodDao.java @@ -34,4 +34,6 @@ public interface HostPodDao extends GenericDao { public List listAllPods(Long zoneId); public List listAllPodsByCidr(long zoneId, String cidr); + + List listDistinctStorageAccessGroups(String name, String keyword); } diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/HostPodDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/HostPodDaoImpl.java index f1835067380..08901c9a61e 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/HostPodDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/HostPodDaoImpl.java @@ -143,4 +143,36 @@ public class HostPodDaoImpl extends GenericDaoBase implements H return listBy(sc); } + @Override + public List listDistinctStorageAccessGroups(String name, String keyword) { + GenericSearchBuilder searchBuilder = createSearchBuilder(String.class); + + searchBuilder.select(null, SearchCriteria.Func.DISTINCT, searchBuilder.entity().getStorageAccessGroups()); + if (name != null) { + searchBuilder.and().op("storageAccessGroupExact", searchBuilder.entity().getStorageAccessGroups(), Op.EQ); + searchBuilder.or("storageAccessGroupPrefix", 
searchBuilder.entity().getStorageAccessGroups(), Op.LIKE); + searchBuilder.or("storageAccessGroupSuffix", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE); + searchBuilder.or("storageAccessGroupMiddle", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE); + searchBuilder.cp(); + } + if (keyword != null) { + searchBuilder.and("keyword", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE); + } + searchBuilder.done(); + + SearchCriteria sc = searchBuilder.create(); + if (name != null) { + sc.setParameters("storageAccessGroupExact", name); + sc.setParameters("storageAccessGroupPrefix", name + ",%"); + sc.setParameters("storageAccessGroupSuffix", "%," + name); + sc.setParameters("storageAccessGroupMiddle", "%," + name + ",%"); + } + + if (keyword != null) { + sc.setParameters("keyword", "%" + keyword + "%"); + } + + return customSearch(sc, null); + } + } diff --git a/engine/schema/src/main/java/com/cloud/host/HostVO.java b/engine/schema/src/main/java/com/cloud/host/HostVO.java index bd6768fa0dd..d51b4eca057 100644 --- a/engine/schema/src/main/java/com/cloud/host/HostVO.java +++ b/engine/schema/src/main/java/com/cloud/host/HostVO.java @@ -165,6 +165,9 @@ public class HostVO implements Host { @Column(name = "uuid") private String uuid; + @Column(name = "storage_access_groups") + String storageAccessGroups; + // This is a delayed load value. If the value is null, // then this field has not been loaded yet. // Call host dao to load it. 
@@ -357,6 +360,15 @@ public class HostVO implements Host { return isTagARule; } + @Override + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public void setStorageAccessGroups(String storageAccessGroups) { + this.storageAccessGroups = storageAccessGroups; + } + public HashMap> getGpuGroupDetails() { return groupDetails; } diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java index 4e07e6f5c37..2b8a23a1b51 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java @@ -84,6 +84,10 @@ public interface HostDao extends GenericDao, StateDao findHypervisorHostInCluster(long clusterId); + List findHypervisorHostInPod(long podId); + + List findHypervisorHostInZone(long zoneId); + HostVO findAnyStateHypervisorHostInCluster(long clusterId); HostVO findOldestExistentHypervisorHostInCluster(long clusterId); @@ -96,10 +100,14 @@ public interface HostDao extends GenericDao, StateDao findByPodId(Long podId); + List findByPodId(Long podId, Type type); + List listIdsByPodId(Long podId); List findByClusterId(Long clusterId); + List findByClusterId(Long clusterId, Type type); + List listIdsByClusterId(Long clusterId); List listIdsForUpRouting(Long zoneId, Long podId, Long clusterId); @@ -221,4 +229,6 @@ public interface HostDao extends GenericDao, StateDao listByIds(final List ids); Long findClusterIdByVolumeInfo(VolumeInfo volumeInfo); + + List listDistinctStorageAccessGroups(String name, String keyword); } diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java index 7cda0a367aa..61fa3edcf22 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java @@ -107,7 +107,7 @@ public class HostDaoImpl extends GenericDaoBase 
implements HostDao protected SearchBuilder IdStatusSearch; protected SearchBuilder TypeDcSearch; protected SearchBuilder TypeDcStatusSearch; - protected SearchBuilder TypeClusterStatusSearch; + protected SearchBuilder TypeStatusStateSearch; protected SearchBuilder MsStatusSearch; protected SearchBuilder DcPrivateIpAddressSearch; protected SearchBuilder DcStorageIpAddressSearch; @@ -266,12 +266,14 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao TypeDcStatusSearch.and("resourceState", TypeDcStatusSearch.entity().getResourceState(), SearchCriteria.Op.EQ); TypeDcStatusSearch.done(); - TypeClusterStatusSearch = createSearchBuilder(); - TypeClusterStatusSearch.and("type", TypeClusterStatusSearch.entity().getType(), SearchCriteria.Op.EQ); - TypeClusterStatusSearch.and("cluster", TypeClusterStatusSearch.entity().getClusterId(), SearchCriteria.Op.EQ); - TypeClusterStatusSearch.and("status", TypeClusterStatusSearch.entity().getStatus(), SearchCriteria.Op.EQ); - TypeClusterStatusSearch.and("resourceState", TypeClusterStatusSearch.entity().getResourceState(), SearchCriteria.Op.EQ); - TypeClusterStatusSearch.done(); + TypeStatusStateSearch = createSearchBuilder(); + TypeStatusStateSearch.and("type", TypeStatusStateSearch.entity().getType(), SearchCriteria.Op.EQ); + TypeStatusStateSearch.and("cluster", TypeStatusStateSearch.entity().getClusterId(), SearchCriteria.Op.EQ); + TypeStatusStateSearch.and("pod", TypeStatusStateSearch.entity().getPodId(), SearchCriteria.Op.EQ); + TypeStatusStateSearch.and("zone", TypeStatusStateSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + TypeStatusStateSearch.and("status", TypeStatusStateSearch.entity().getStatus(), SearchCriteria.Op.EQ); + TypeStatusStateSearch.and("resourceState", TypeStatusStateSearch.entity().getResourceState(), SearchCriteria.Op.EQ); + TypeStatusStateSearch.done(); IdsSearch = createSearchBuilder(); IdsSearch.and("id", IdsSearch.entity().getId(), SearchCriteria.Op.IN); @@ -328,10 +330,12 @@ 
public class HostDaoImpl extends GenericDaoBase implements HostDao PodSearch = createSearchBuilder(); PodSearch.and("podId", PodSearch.entity().getPodId(), SearchCriteria.Op.EQ); + PodSearch.and("type", PodSearch.entity().getType(), Op.EQ); PodSearch.done(); ClusterSearch = createSearchBuilder(); ClusterSearch.and("clusterId", ClusterSearch.entity().getClusterId(), SearchCriteria.Op.EQ); + ClusterSearch.and("type", ClusterSearch.entity().getType(), Op.EQ); ClusterSearch.done(); TypeSearch = createSearchBuilder(); @@ -1238,8 +1242,16 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao @Override public List findByPodId(Long podId) { + return findByPodId(podId, null); + } + + @Override + public List findByPodId(Long podId, Type type) { SearchCriteria sc = PodSearch.create(); sc.setParameters("podId", podId); + if (type != null) { + sc.setParameters("type", type); + } return listBy(sc); } @@ -1250,8 +1262,16 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao @Override public List findByClusterId(Long clusterId) { + return findByClusterId(clusterId, null); + } + + @Override + public List findByClusterId(Long clusterId, Type type) { SearchCriteria sc = ClusterSearch.create(); sc.setParameters("clusterId", clusterId); + if (type != null) { + sc.setParameters("type", type); + } return listBy(sc); } @@ -1355,7 +1375,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao @Override public List findHypervisorHostInCluster(long clusterId) { - SearchCriteria sc = TypeClusterStatusSearch.create(); + SearchCriteria sc = TypeStatusStateSearch.create(); sc.setParameters("type", Host.Type.Routing); sc.setParameters("cluster", clusterId); sc.setParameters("status", Status.Up); @@ -1364,9 +1384,31 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return listBy(sc); } + @Override + public List findHypervisorHostInZone(long zoneId) { + SearchCriteria sc = TypeStatusStateSearch.create(); + 
sc.setParameters("type", Host.Type.Routing); + sc.setParameters("zone", zoneId); + sc.setParameters("status", Status.Up); + sc.setParameters("resourceState", ResourceState.Enabled); + + return listBy(sc); + } + + @Override + public List findHypervisorHostInPod(long podId) { + SearchCriteria sc = TypeStatusStateSearch.create(); + sc.setParameters("type", Host.Type.Routing); + sc.setParameters("pod", podId); + sc.setParameters("status", Status.Up); + sc.setParameters("resourceState", ResourceState.Enabled); + + return listBy(sc); + } + @Override public HostVO findAnyStateHypervisorHostInCluster(long clusterId) { - SearchCriteria sc = TypeClusterStatusSearch.create(); + SearchCriteria sc = TypeStatusStateSearch.create(); sc.setParameters("type", Host.Type.Routing); sc.setParameters("cluster", clusterId); List list = listBy(sc, new Filter(1)); @@ -1375,7 +1417,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao @Override public HostVO findOldestExistentHypervisorHostInCluster(long clusterId) { - SearchCriteria sc = TypeClusterStatusSearch.create(); + SearchCriteria sc = TypeStatusStateSearch.create(); sc.setParameters("type", Host.Type.Routing); sc.setParameters("cluster", clusterId); sc.setParameters("status", Status.Up); @@ -1876,4 +1918,36 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return host.getClusterId(); } + + @Override + public List listDistinctStorageAccessGroups(String name, String keyword) { + GenericSearchBuilder searchBuilder = createSearchBuilder(String.class); + + searchBuilder.select(null, SearchCriteria.Func.DISTINCT, searchBuilder.entity().getStorageAccessGroups()); + if (name != null) { + searchBuilder.and().op("storageAccessGroupExact", searchBuilder.entity().getStorageAccessGroups(), Op.EQ); + searchBuilder.or("storageAccessGroupPrefix", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE); + searchBuilder.or("storageAccessGroupSuffix", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE); + 
searchBuilder.or("storageAccessGroupMiddle", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE); + searchBuilder.cp(); + } + if (keyword != null) { + searchBuilder.and("keyword", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE); + } + searchBuilder.done(); + + SearchCriteria sc = searchBuilder.create(); + if (name != null) { + sc.setParameters("storageAccessGroupExact", name); + sc.setParameters("storageAccessGroupPrefix", name + ",%"); + sc.setParameters("storageAccessGroupSuffix", "%," + name); + sc.setParameters("storageAccessGroupMiddle", "%," + name + ",%"); + } + + if (keyword != null) { + sc.setParameters("keyword", "%" + keyword + "%"); + } + + return customSearch(sc, null); + } } diff --git a/engine/schema/src/main/java/com/cloud/storage/StoragePoolAndAccessGroupMapVO.java b/engine/schema/src/main/java/com/cloud/storage/StoragePoolAndAccessGroupMapVO.java new file mode 100644 index 00000000000..5690324340c --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/storage/StoragePoolAndAccessGroupMapVO.java @@ -0,0 +1,64 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.storage; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import org.apache.cloudstack.api.InternalIdentity; + +@Entity +@Table(name = "storage_pool_and_access_group_map") +public class StoragePoolAndAccessGroupMapVO implements InternalIdentity { + + protected StoragePoolAndAccessGroupMapVO() { + } + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "pool_id") + private long poolId; + + @Column(name = "storage_access_group") + private String storageAccessGroup; + + public StoragePoolAndAccessGroupMapVO(long poolId, String storageAccessGroup) { + this.poolId = poolId; + this.storageAccessGroup = storageAccessGroup; + } + + @Override + public long getId() { + return this.id; + } + + public long getPoolId() { + return poolId; + } + + public String getStorageAccessGroup() { + return storageAccessGroup; + } + +} diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolAndAccessGroupMapDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolAndAccessGroupMapDao.java new file mode 100644 index 00000000000..3ff797f7e74 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolAndAccessGroupMapDao.java @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.storage.dao; + +import java.util.List; + +import com.cloud.storage.StoragePoolAndAccessGroupMapVO; + +import com.cloud.utils.db.GenericDao; + +public interface StoragePoolAndAccessGroupMapDao extends GenericDao { + + void persist(long poolId, List storageAccessGroups); + List getStorageAccessGroups(long poolId); + void deleteStorageAccessGroups(long poolId); + List listDistinctStorageAccessGroups(String name, String keyword); +} diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolAndAccessGroupMapDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolAndAccessGroupMapDaoImpl.java new file mode 100644 index 00000000000..63e82b79748 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolAndAccessGroupMapDaoImpl.java @@ -0,0 +1,105 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.storage.dao; + +import java.util.ArrayList; +import java.util.List; + +import com.cloud.storage.StoragePoolAndAccessGroupMapVO; + +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.TransactionLegacy; + +public class StoragePoolAndAccessGroupMapDaoImpl extends GenericDaoBase implements StoragePoolAndAccessGroupMapDao { + + protected final SearchBuilder StoragePoolAccessGroupSearch; + + public StoragePoolAndAccessGroupMapDaoImpl() { + StoragePoolAccessGroupSearch = createSearchBuilder(); + StoragePoolAccessGroupSearch.and("poolId", StoragePoolAccessGroupSearch.entity().getPoolId(), SearchCriteria.Op.EQ); + StoragePoolAccessGroupSearch.done(); + } + + @Override + public void persist(long poolId, List storageAccessGroups) { + TransactionLegacy txn = TransactionLegacy.currentTxn(); + + txn.start(); + SearchCriteria sc = StoragePoolAccessGroupSearch.create(); + sc.setParameters("poolId", poolId); + expunge(sc); + + for (String sag : storageAccessGroups) { + sag = sag.trim(); + if (sag.length() > 0) { + StoragePoolAndAccessGroupMapVO vo = new StoragePoolAndAccessGroupMapVO(poolId, sag); + persist(vo); + } + } + txn.commit(); + } + + @Override + public List getStorageAccessGroups(long poolId) { + SearchCriteria sc = StoragePoolAccessGroupSearch.create(); + sc.setParameters("poolId", poolId); + + List results = search(sc, null); + List storagePoolAccessGroups = new ArrayList(results.size()); + for (StoragePoolAndAccessGroupMapVO result : results) { + storagePoolAccessGroups.add(result.getStorageAccessGroup()); + } + + return storagePoolAccessGroups; + } + + @Override + public void deleteStorageAccessGroups(long poolId) { + TransactionLegacy txn = TransactionLegacy.currentTxn(); + txn.start(); + 
SearchCriteria sc = StoragePoolAccessGroupSearch.create(); + sc.setParameters("poolId", poolId); + expunge(sc); + txn.commit(); + } + + @Override + public List listDistinctStorageAccessGroups(String name, String keyword) { + GenericSearchBuilder searchBuilder = createSearchBuilder(String.class); + + searchBuilder.select(null, SearchCriteria.Func.DISTINCT, searchBuilder.entity().getStorageAccessGroup()); + searchBuilder.and("name", searchBuilder.entity().getStorageAccessGroup(), SearchCriteria.Op.EQ); + searchBuilder.and("keyword", searchBuilder.entity().getStorageAccessGroup(), SearchCriteria.Op.LIKE); + searchBuilder.done(); + + SearchCriteria sc = searchBuilder.create(); + + if (name != null) { + sc.setParameters("name", name); + } + + if (keyword != null) { + sc.setParameters("keyword", "%" + keyword + "%"); + } + + return customSearch(sc, null); + } + +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java index d205379cdb2..7600cdb9b81 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java @@ -58,9 +58,9 @@ public interface PrimaryDataStoreDao extends GenericDao { */ void updateCapacityIops(long id, long capacityIops); - StoragePoolVO persist(StoragePoolVO pool, Map details, List tags, Boolean isTagARule); + StoragePoolVO persist(StoragePoolVO pool, Map details, List tags, Boolean isTagARule, List storageAccessGroups); - StoragePoolVO persist(StoragePoolVO pool, Map details, List tags, Boolean isTagARule, boolean displayDetails); + StoragePoolVO persist(StoragePoolVO pool, Map details, List tags, Boolean isTagARule, boolean displayDetails, List storageAccessGroups); /** * Find pool by name. 
@@ -84,7 +84,9 @@ public interface PrimaryDataStoreDao extends GenericDao { */ List findPoolsByDetails(long dcId, long podId, Long clusterId, Map details, ScopeType scope); - List findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, boolean validateTagRule, long ruleExecuteTimeout); + List findPoolsByTags(long dcId, long podId, Long clusterId, ScopeType scope, String[] tags, boolean validateTagRule, long ruleExecuteTimeout); + + List findPoolsByAccessGroupsForHostConnection(Long dcId, Long podId, Long clusterId, ScopeType scope, String[] storageAccessGroups); List findDisabledPoolsByScope(long dcId, Long podId, Long clusterId, ScopeType scope); @@ -127,6 +129,10 @@ public interface PrimaryDataStoreDao extends GenericDao { List findZoneWideStoragePoolsByTags(long dcId, String[] tags, boolean validateTagRule); + List findZoneWideStoragePoolsByAccessGroupsForHostConnection(long dcId, String[] storageAccessGroups); + + List findZoneWideStoragePoolsByAccessGroupsAndHypervisorTypeForHostConnection(long dcId, String[] storageAccessGroups, HypervisorType type); + List findZoneWideStoragePoolsByHypervisor(long dataCenterId, HypervisorType hypervisorType); List findZoneWideStoragePoolsByHypervisor(long dataCenterId, HypervisorType hypervisorType, String keyword); @@ -143,6 +149,8 @@ public interface PrimaryDataStoreDao extends GenericDao { void deletePoolTags(long poolId); + void deleteStoragePoolAccessGroups(long poolId); + List listChildStoragePoolsInDatastoreCluster(long poolId); Integer countAll(); @@ -154,8 +162,10 @@ public interface PrimaryDataStoreDao extends GenericDao { List listStoragePoolsWithActiveVolumesByOfferingId(long offeringid); Pair, Integer> searchForIdsAndCount(Long storagePoolId, String storagePoolName, Long zoneId, - String path, Long podId, Long clusterId, String address, ScopeType scopeType, StoragePoolStatus status, - String keyword, Filter searchFilter); + String path, Long podId, Long clusterId, Long hostId, String address, 
ScopeType scopeType, StoragePoolStatus status, + String keyword, String storageAccessGroup, Filter searchFilter); List listByIds(List ids); + + List findStoragePoolsByEmptyStorageAccessGroups(Long dcId, Long podId, Long clusterId, ScopeType scope, HypervisorType hypervisorType); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java index ef29ddcde86..71d5c93f027 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java @@ -28,6 +28,8 @@ import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.storage.StoragePoolAndAccessGroupMapVO; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import org.apache.commons.collections.CollectionUtils; import com.cloud.host.Status; @@ -70,15 +72,25 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase private StoragePoolHostDao _hostDao; @Inject private StoragePoolTagsDao _tagsDao; + @Inject + StoragePoolAndAccessGroupMapDao _storagePoolAccessGroupMapDao; protected final String DetailsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? 
and ("; protected final String DetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?"; + protected final String DetailsForHostConnectionSqlSuffix = ") GROUP BY storage_pool_details.pool_id"; private final String ZoneWideTagsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_tags ON storage_pool.id = storage_pool_tags.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' AND storage_pool_tags.is_tag_a_rule = 0 and storage_pool.data_center_id = ? and storage_pool.scope = ? and ("; private final String ZoneWideTagsSqlSuffix = ") GROUP BY storage_pool_tags.pool_id HAVING COUNT(storage_pool_tags.tag) >= ?"; + private final String ZoneWideStorageAccessGroupsForHostConnectionSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_and_access_group_map ON storage_pool.id = storage_pool_and_access_group_map.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and storage_pool.scope = ? and ("; + private final String ZoneWideStorageAccessGroupsForHostConnectionSqlSuffix = ") GROUP BY storage_pool_and_access_group_map.pool_id"; + private final String ZoneWideStorageAccessGroupsWithHypervisorTypeSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_and_access_group_map ON storage_pool.id = storage_pool_and_access_group_map.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.hypervisor = ? and storage_pool.data_center_id = ? and storage_pool.scope = ? 
and ("; + private final String ZoneWideStorageAccessGroupsWithHypervisorTypeSqlSuffix = ") GROUP BY storage_pool_and_access_group_map.pool_id"; // Storage tags are now separate from storage_pool_details, leaving only details on that table protected final String TagsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_tags ON storage_pool.id = storage_pool_tags.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' AND storage_pool_tags.is_tag_a_rule = 0 and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? and ("; protected final String TagsSqlSuffix = ") GROUP BY storage_pool_tags.pool_id HAVING COUNT(storage_pool_tags.tag) >= ?"; + protected final String SAGsForHostConnectionSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_and_access_group_map ON storage_pool.id = storage_pool_and_access_group_map.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? 
and ("; + + protected final String SAGsForHostConnectionSqlSuffix = ") GROUP BY storage_pool_and_access_group_map.pool_id"; private static final String GET_STORAGE_POOLS_OF_VOLUMES_WITHOUT_OR_NOT_HAVING_TAGS = "SELECT s.* " + "FROM volumes vol " + @@ -296,13 +308,13 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase } @Override - public StoragePoolVO persist(StoragePoolVO pool, Map details, List tags, Boolean isTagARule) { - return persist(pool, details, tags, isTagARule, true); + public StoragePoolVO persist(StoragePoolVO pool, Map details, List tags, Boolean isTagARule, List storageAccessGroups) { + return persist(pool, details, tags, isTagARule, true, storageAccessGroups); } @Override @DB - public StoragePoolVO persist(StoragePoolVO pool, Map details, List tags, Boolean isTagARule, boolean displayDetails) { + public StoragePoolVO persist(StoragePoolVO pool, Map details, List tags, Boolean isTagARule, boolean displayDetails, List storageAccessGroups) { TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); pool = super.persist(pool); @@ -315,6 +327,9 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase if (CollectionUtils.isNotEmpty(tags)) { _tagsDao.persist(pool.getId(), tags, isTagARule); } + if (CollectionUtils.isNotEmpty(storageAccessGroups)) { + _storagePoolAccessGroupMapDao.persist(pool.getId(), storageAccessGroups); + } txn.commit(); return pool; } @@ -338,6 +353,13 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase return searchStoragePoolsPreparedStatement(sql, dcId, podId, clusterId, scope, valuesLength); } + protected List findPoolsByDetailsOrTagsForHostConnectionInternal(long dcId, long podId, Long clusterId, ScopeType scope, String sqlValues, ValueType valuesType) { + String sqlPrefix = valuesType.equals(ValueType.DETAILS) ? DetailsSqlPrefix : SAGsForHostConnectionSqlPrefix; + String sqlSuffix = valuesType.equals(ValueType.DETAILS) ? 
DetailsForHostConnectionSqlSuffix : SAGsForHostConnectionSqlSuffix; + String sql = getSqlPreparedStatement(sqlPrefix, sqlSuffix, sqlValues, clusterId); + return searchStoragePoolsPreparedStatement(sql, dcId, podId, clusterId, scope, null); + } + /** * Search storage pools in a transaction * @param sql prepared statement sql @@ -349,7 +371,50 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase * @return storage pools matching criteria */ @DB - protected List searchStoragePoolsPreparedStatement(String sql, long dcId, Long podId, Long clusterId, ScopeType scope, int valuesLength) { + protected List searchStoragePoolsWithHypervisorTypesPreparedStatement(String sql, HypervisorType type, long dcId, Long podId, Long clusterId, ScopeType scope, Integer valuesLength) { + TransactionLegacy txn = TransactionLegacy.currentTxn(); + List pools = new ArrayList(); + try (PreparedStatement pstmt = txn.prepareStatement(sql);) { + if (pstmt != null) { + int i = 1; + pstmt.setString(i++, type.toString()); + pstmt.setLong(i++, dcId); + if (podId != null) { + pstmt.setLong(i++, podId); + } + pstmt.setString(i++, scope.toString()); + if (clusterId != null) { + pstmt.setLong(i++, clusterId); + } + if (valuesLength != null) { + pstmt.setInt(i++, valuesLength); + } + try (ResultSet rs = pstmt.executeQuery();) { + while (rs.next()) { + pools.add(toEntityBean(rs, false)); + } + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to execute :" + e.getMessage(), e); + } + } + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to execute :" + e.getMessage(), e); + } + return pools; + } + + /** + * Search storage pools in a transaction + * @param sql prepared statement sql + * @param dcId data center id + * @param podId pod id + * @param clusterId cluster id + * @param scope scope + * @param valuesLength values length + * @return storage pools matching criteria + */ + @DB + protected List searchStoragePoolsPreparedStatement(String sql, long dcId, Long 
podId, Long clusterId, ScopeType scope, Integer valuesLength) { TransactionLegacy txn = TransactionLegacy.currentTxn(); List pools = new ArrayList(); try (PreparedStatement pstmt = txn.prepareStatement(sql);) { @@ -363,7 +428,9 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase if (clusterId != null) { pstmt.setLong(i++, clusterId); } - pstmt.setInt(i++, valuesLength); + if (valuesLength != null) { + pstmt.setInt(i++, valuesLength); + } try (ResultSet rs = pstmt.executeQuery();) { while (rs.next()) { pools.add(toEntityBean(rs, false)); @@ -420,6 +487,22 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase return sqlValues.toString(); } + /** + * Return SQL string from storage pool access group map, to be placed between SQL Prefix and SQL Suffix when creating storage tags PreparedStatement. + * @param storageAccessGroups storage tags array + * @return SQL string containing storage tag values to be placed between Prefix and Suffix when creating PreparedStatement. + * @throws NullPointerException if tags is null + * @throws IndexOutOfBoundsException if tags is not null, but empty + */ + protected String getSqlValuesFromStorageAccessGroups(String[] storageAccessGroups) throws NullPointerException, IndexOutOfBoundsException { + StringBuilder sqlValues = new StringBuilder(); + for (String tag : storageAccessGroups) { + sqlValues.append("(storage_pool_and_access_group_map.storage_access_group='").append(tag).append("') OR "); + } + sqlValues.delete(sqlValues.length() - 4, sqlValues.length()); + return sqlValues.toString(); + } + @DB @Override public List findPoolsByDetails(long dcId, long podId, Long clusterId, Map details, ScopeType scope) { @@ -428,10 +511,10 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase } @Override - public List findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, boolean validateTagRule, long ruleExecuteTimeout) { + public List findPoolsByTags(long dcId, long podId, Long clusterId, ScopeType 
scope, String[] tags, boolean validateTagRule, long ruleExecuteTimeout) { List storagePools = null; if (tags == null || tags.length == 0) { - storagePools = listBy(dcId, podId, clusterId, ScopeType.CLUSTER); + storagePools = listBy(dcId, podId, clusterId, scope); if (validateTagRule) { storagePools = getPoolsWithoutTagRule(storagePools); @@ -439,7 +522,20 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase } else { String sqlValues = getSqlValuesFromStorageTags(tags); - storagePools = findPoolsByDetailsOrTagsInternal(dcId, podId, clusterId, ScopeType.CLUSTER, sqlValues, ValueType.TAGS, tags.length); + storagePools = findPoolsByDetailsOrTagsInternal(dcId, podId, clusterId, scope, sqlValues, ValueType.TAGS, tags.length); + } + + return storagePools; + } + + @Override + public List findPoolsByAccessGroupsForHostConnection(Long dcId, Long podId, Long clusterId, ScopeType scope, String[] storageAccessGroups) { + List storagePools = null; + if (storageAccessGroups == null || storageAccessGroups.length == 0) { + storagePools = listBy(dcId, podId, clusterId, scope); + } else { + String sqlValues = getSqlValuesFromStorageAccessGroups(storageAccessGroups); + storagePools = findPoolsByDetailsOrTagsForHostConnectionInternal(dcId, podId, clusterId, scope, sqlValues, ValueType.TAGS); } return storagePools; @@ -556,6 +652,77 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase return storagePoolsToReturn; } + @Override + public List findZoneWideStoragePoolsByAccessGroupsForHostConnection(long dcId, String[] storageAccessGroups) { + if (storageAccessGroups == null || storageAccessGroups.length == 0) { + QueryBuilder sc = QueryBuilder.create(StoragePoolVO.class); + sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId); + sc.and(sc.entity().getStatus(), Op.EQ, Status.Up); + sc.and(sc.entity().getScope(), Op.EQ, ScopeType.ZONE); + return sc.list(); + } else { + String sqlValues = getSqlValuesFromStorageAccessGroups(storageAccessGroups); + String sql = 
getSqlPreparedStatement(ZoneWideStorageAccessGroupsForHostConnectionSqlPrefix, ZoneWideStorageAccessGroupsForHostConnectionSqlSuffix, sqlValues, null); + return searchStoragePoolsPreparedStatement(sql, dcId, null, null, ScopeType.ZONE, null); + } + } + + @Override + public List findZoneWideStoragePoolsByAccessGroupsAndHypervisorTypeForHostConnection(long dcId, String[] storageAccessGroups, HypervisorType type) { + if (storageAccessGroups == null || storageAccessGroups.length == 0) { + QueryBuilder sc = QueryBuilder.create(StoragePoolVO.class); + sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId); + sc.and(sc.entity().getStatus(), Op.EQ, Status.Up); + sc.and(sc.entity().getScope(), Op.EQ, ScopeType.ZONE); + sc.and(sc.entity().getHypervisor(), Op.EQ, type); + return sc.list(); + } else { + String sqlValues = getSqlValuesFromStorageAccessGroups(storageAccessGroups); + String sql = getSqlPreparedStatement(ZoneWideStorageAccessGroupsWithHypervisorTypeSqlPrefix, ZoneWideStorageAccessGroupsWithHypervisorTypeSqlSuffix, sqlValues, null); + return searchStoragePoolsWithHypervisorTypesPreparedStatement(sql, type, dcId, null, null, ScopeType.ZONE, null); + } + } + + @Override + public List findStoragePoolsByEmptyStorageAccessGroups(Long dcId, Long podId, Long clusterId, ScopeType scope, HypervisorType hypervisorType) { + SearchBuilder poolSearch = createSearchBuilder(); + SearchBuilder storageAccessGroupsPoolSearch = _storagePoolAccessGroupMapDao.createSearchBuilder(); + // Set criteria for pools + poolSearch.and("scope", poolSearch.entity().getScope(), Op.EQ); + poolSearch.and("removed", poolSearch.entity().getRemoved(), Op.NULL); + poolSearch.and("status", poolSearch.entity().getStatus(), Op.EQ); + poolSearch.and("datacenterid", poolSearch.entity().getDataCenterId(), Op.EQ); + poolSearch.and("podid", poolSearch.entity().getPodId(), Op.EQ); + poolSearch.and("clusterid", poolSearch.entity().getClusterId(), Op.EQ); + poolSearch.and("hypervisortype", 
poolSearch.entity().getHypervisor(), Op.EQ); + + // Set StoragePoolAccessGroupMapVO.pool_id IS NULL. This ensures only pools without tags are returned + storageAccessGroupsPoolSearch.and("poolid", storageAccessGroupsPoolSearch.entity().getPoolId(), Op.NULL); + poolSearch.join("tagJoin", storageAccessGroupsPoolSearch, poolSearch.entity().getId(), storageAccessGroupsPoolSearch.entity().getPoolId(), JoinBuilder.JoinType.LEFT); + + SearchCriteria sc = poolSearch.create(); + sc.setParameters("scope", scope.toString()); + sc.setParameters("status", Status.Up.toString()); + + if (dcId != null) { + sc.setParameters("datacenterid", dcId); + } + + if (podId != null) { + sc.setParameters("podid", podId); + } + + if (clusterId != null) { + sc.setParameters("clusterid", clusterId); + } + + if (hypervisorType != null) { + sc.setParameters("hypervisortype", hypervisorType); + } + + return listBy(sc); + } + @Override public List searchForStoragePoolTags(long poolId) { return _tagsDao.getStoragePoolTags(poolId); @@ -659,6 +826,11 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase _tagsDao.deleteTags(poolId); } + @Override + public void deleteStoragePoolAccessGroups(long poolId) { + _storagePoolAccessGroupMapDao.deleteStorageAccessGroups(poolId); + } + @Override public List listChildStoragePoolsInDatastoreCluster(long poolId) { QueryBuilder sc = QueryBuilder.create(StoragePoolVO.class); @@ -725,9 +897,10 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase @Override public Pair, Integer> searchForIdsAndCount(Long storagePoolId, String storagePoolName, Long zoneId, - String path, Long podId, Long clusterId, String address, ScopeType scopeType, StoragePoolStatus status, - String keyword, Filter searchFilter) { - SearchCriteria sc = createStoragePoolSearchCriteria(storagePoolId, storagePoolName, zoneId, path, podId, clusterId, address, scopeType, status, keyword); + String path, Long podId, Long clusterId, Long hostId, String address, ScopeType scopeType, 
StoragePoolStatus status, + String keyword, String storageAccessGroup, Filter searchFilter) { + SearchCriteria sc = createStoragePoolSearchCriteria(storagePoolId, storagePoolName, zoneId, path, podId, clusterId, + hostId, address, scopeType, status, keyword, storageAccessGroup); Pair, Integer> uniquePair = searchAndCount(sc, searchFilter); List idList = uniquePair.first().stream().map(StoragePoolVO::getId).collect(Collectors.toList()); return new Pair<>(idList, uniquePair.second()); @@ -744,8 +917,8 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase } private SearchCriteria createStoragePoolSearchCriteria(Long storagePoolId, String storagePoolName, - Long zoneId, String path, Long podId, Long clusterId, String address, ScopeType scopeType, - StoragePoolStatus status, String keyword) { + Long zoneId, String path, Long podId, Long clusterId, Long hostId, String address, ScopeType scopeType, + StoragePoolStatus status, String keyword, String storageAccessGroup) { SearchBuilder sb = createSearchBuilder(); sb.select(null, SearchCriteria.Func.DISTINCT, sb.entity().getId()); // select distinct // ids @@ -760,6 +933,18 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase sb.and("status", sb.entity().getStatus(), SearchCriteria.Op.EQ); sb.and("parent", sb.entity().getParent(), SearchCriteria.Op.EQ); + if (hostId != null) { + SearchBuilder hostJoin = _hostDao.createSearchBuilder(); + hostJoin.and("hostId", hostJoin.entity().getHostId(), SearchCriteria.Op.EQ); + sb.join("poolHostJoin", hostJoin, sb.entity().getId(), hostJoin.entity().getPoolId(), JoinBuilder.JoinType.INNER); + } + + if (storageAccessGroup != null) { + SearchBuilder storageAccessGroupJoin = _storagePoolAccessGroupMapDao.createSearchBuilder(); + storageAccessGroupJoin.and("storageAccessGroup", storageAccessGroupJoin.entity().getStorageAccessGroup(), SearchCriteria.Op.EQ); + sb.join("poolStorageAccessGroupJoin", storageAccessGroupJoin, sb.entity().getId(), 
storageAccessGroupJoin.entity().getPoolId(), JoinBuilder.JoinType.INNER); + } + SearchCriteria sc = sb.create(); if (keyword != null) { @@ -808,6 +993,15 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase sc.setParameters("status", status.toString()); } sc.setParameters("parent", 0); + + if (hostId != null) { + sc.setJoinParameters("poolHostJoin", "hostId", hostId); + } + + if (storageAccessGroup != null) { + sc.setJoinParameters("poolStorageAccessGroupJoin", "storageAccessGroup", storageAccessGroup); + } + return sc; } } diff --git a/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-common-daos-between-management-and-usage-context.xml b/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-common-daos-between-management-and-usage-context.xml index d6d72f9228e..96579b26516 100644 --- a/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-common-daos-between-management-and-usage-context.xml +++ b/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-common-daos-between-management-and-usage-context.xml @@ -62,6 +62,7 @@ + diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql b/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql index 292da4a466b..b6747ca6071 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql @@ -65,3 +65,18 @@ CREATE TABLE IF NOT EXISTS `cloud`.`reconcile_commands` ( CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.snapshot_store_ref', 'kvm_checkpoint_path', 'varchar(255)'); CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.snapshot_store_ref', 'end_of_chain', 'int(1) unsigned'); + +-- Create table storage_pool_and_access_group_map +CREATE TABLE IF NOT EXISTS `cloud`.`storage_pool_and_access_group_map` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `pool_id` bigint(20) 
unsigned NOT NULL COMMENT "pool id", + `storage_access_group` varchar(255) NOT NULL, + PRIMARY KEY (`id`), + KEY `fk_storage_pool_and_access_group_map__pool_id` (`pool_id`), + CONSTRAINT `fk_storage_pool_and_access_group_map__pool_id` FOREIGN KEY (`pool_id`) REFERENCES `storage_pool` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8; + +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.host', 'storage_access_groups', 'varchar(255) DEFAULT NULL COMMENT "storage access groups for the host"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.cluster', 'storage_access_groups', 'varchar(255) DEFAULT NULL COMMENT "storage access groups for the hosts in the cluster"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.host_pod_ref', 'storage_access_groups', 'varchar(255) DEFAULT NULL COMMENT "storage access groups for the hosts in the pod"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.data_center', 'storage_access_groups', 'varchar(255) DEFAULT NULL COMMENT "storage access groups for the hosts in the zone"'); diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.data_center_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.data_center_view.sql index c34df4f1cbf..46aea863fc5 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.data_center_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.data_center_view.sql @@ -42,6 +42,7 @@ select data_center.type, data_center.removed, data_center.sort_key, + data_center.storage_access_groups, domain.id domain_id, domain.uuid domain_uuid, domain.name domain_name, diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.host_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.host_view.sql index 6fc8fb80386..d9f4e267159 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.host_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.host_view.sql @@ -42,17 +42,21 @@ SELECT host.speed, 
host.ram, host.arch, + host.storage_access_groups, cluster.id cluster_id, cluster.uuid cluster_uuid, cluster.name cluster_name, cluster.cluster_type, + cluster.storage_access_groups AS cluster_storage_access_groups, data_center.id data_center_id, data_center.uuid data_center_uuid, data_center.name data_center_name, + data_center.storage_access_groups AS zone_storage_access_groups, data_center.networktype data_center_type, host_pod_ref.id pod_id, host_pod_ref.uuid pod_uuid, host_pod_ref.name pod_name, + host_pod_ref.storage_access_groups AS pod_storage_access_groups, GROUP_CONCAT(DISTINCT(host_tags.tag)) AS tag, GROUP_CONCAT(DISTINCT(explicit_host_tags.tag)) AS explicit_tag, GROUP_CONCAT(DISTINCT(implicit_host_tags.tag)) AS implicit_tag, diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.storage_pool_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.storage_pool_view.sql index 5d7585baa3b..641017bdd5b 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.storage_pool_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.storage_pool_view.sql @@ -51,6 +51,7 @@ SELECT `host_pod_ref`.`name` AS `pod_name`, `storage_pool_tags`.`tag` AS `tag`, `storage_pool_tags`.`is_tag_a_rule` AS `is_tag_a_rule`, + `storage_pool_and_access_group_map`.`storage_access_group` AS `storage_access_group`, `op_host_capacity`.`used_capacity` AS `disk_used_capacity`, `op_host_capacity`.`reserved_capacity` AS `disk_reserved_capacity`, `async_job`.`id` AS `job_id`, @@ -58,13 +59,16 @@ SELECT `async_job`.`job_status` AS `job_status`, `async_job`.`account_id` AS `job_account_id` FROM - ((((((`cloud`.`storage_pool` - LEFT JOIN `cloud`.`cluster` ON ((`storage_pool`.`cluster_id` = `cluster`.`id`))) - LEFT JOIN `cloud`.`data_center` ON ((`storage_pool`.`data_center_id` = `data_center`.`id`))) - LEFT JOIN `cloud`.`host_pod_ref` ON ((`storage_pool`.`pod_id` = `host_pod_ref`.`id`))) - LEFT JOIN `cloud`.`storage_pool_tags` ON 
(((`storage_pool_tags`.`pool_id` = `storage_pool`.`id`)))) - LEFT JOIN `cloud`.`op_host_capacity` ON (((`storage_pool`.`id` = `op_host_capacity`.`host_id`) - AND (`op_host_capacity`.`capacity_type` IN (3 , 9))))) - LEFT JOIN `cloud`.`async_job` ON (((`async_job`.`instance_id` = `storage_pool`.`id`) - AND (`async_job`.`instance_type` = 'StoragePool') - AND (`async_job`.`job_status` = 0)))); + `cloud`.`storage_pool` + LEFT JOIN `cloud`.`cluster` ON `storage_pool`.`cluster_id` = `cluster`.`id` + LEFT JOIN `cloud`.`data_center` ON `storage_pool`.`data_center_id` = `data_center`.`id` + LEFT JOIN `cloud`.`host_pod_ref` ON `storage_pool`.`pod_id` = `host_pod_ref`.`id` + LEFT JOIN `cloud`.`storage_pool_tags` ON `storage_pool_tags`.`pool_id` = `storage_pool`.`id` + LEFT JOIN `cloud`.`storage_pool_and_access_group_map` ON `storage_pool_and_access_group_map`.`pool_id` = `storage_pool`.`id` + LEFT JOIN `cloud`.`op_host_capacity` + ON `storage_pool`.`id` = `op_host_capacity`.`host_id` + AND `op_host_capacity`.`capacity_type` IN (3, 9) + LEFT JOIN `cloud`.`async_job` + ON `async_job`.`instance_id` = `storage_pool`.`id` + AND `async_job`.`instance_type` = 'StoragePool' + AND `async_job`.`job_status` = 0; diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index 2e13080494f..0a211ab1934 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -35,6 +35,7 @@ import javax.inject.Inject; import com.cloud.agent.api.CheckVirtualMachineAnswer; import com.cloud.agent.api.CheckVirtualMachineCommand; import com.cloud.agent.api.PrepareForMigrationAnswer; +import com.cloud.resource.ResourceManager; import 
org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; @@ -51,6 +52,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreState import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.StorageAction; @@ -199,6 +201,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { VMTemplatePoolDao templatePoolDao; @Inject private VolumeDataFactory _volFactory; + @Inject + ResourceManager resourceManager; @Override public StrategyPriority canHandle(DataObject srcData, DataObject destData) { @@ -485,10 +489,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { HostVO hostVO; if (srcStoragePoolVO.getClusterId() != null) { - hostVO = getHostInCluster(srcStoragePoolVO.getClusterId()); + hostVO = getHostInCluster(srcStoragePoolVO); } else { - hostVO = getHost(srcVolumeInfo.getDataCenterId(), hypervisorType, false); + hostVO = getHost(srcVolumeInfo, hypervisorType, false); } volumePath = copyManagedVolumeToSecondaryStorage(srcVolumeInfo, destVolumeInfo, hostVO, @@ -556,10 +560,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { HostVO hostVO; if (destStoragePoolVO.getClusterId() != null) { - hostVO = getHostInCluster(destStoragePoolVO.getClusterId()); + hostVO = getHostInCluster(destStoragePoolVO); } else { - hostVO = 
getHost(destVolumeInfo.getDataCenterId(), hypervisorType, false); + hostVO = getHost(destVolumeInfo, hypervisorType, false); } setCertainVolumeValuesNull(destVolumeInfo.getId()); @@ -933,9 +937,9 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { hostVO = _hostDao.findById(destVolumeInfo.getDataStore().getScope().getScopeId()); } else { if (srcStoragePoolVO.getClusterId() != null) { - hostVO = getHostInCluster(srcStoragePoolVO.getClusterId()); + hostVO = getHostInCluster(srcStoragePoolVO); } else { - hostVO = getHost(destVolumeInfo.getDataCenterId(), HypervisorType.KVM, false); + hostVO = getHost(destVolumeInfo, HypervisorType.KVM, false); } } @@ -1337,7 +1341,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { createVolumeFromSnapshot(snapshotInfo); - HostVO hostVO = getHost(snapshotInfo.getDataCenterId(), HypervisorType.XenServer, true); + HostVO hostVO = getHost(snapshotInfo, HypervisorType.XenServer, true); copyCmdAnswer = performResignature(snapshotInfo, hostVO, null, true); @@ -1349,7 +1353,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { CopyCommand copyCommand = new CopyCommand(snapshotInfo.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); - HostVO hostVO = getHostInCluster(volumeStoragePoolVO.getClusterId()); + HostVO hostVO = getHostInCluster(volumeStoragePoolVO); if (!usingBackendSnapshot) { long snapshotStoragePoolId = snapshotInfo.getDataStore().getId(); @@ -1379,7 +1383,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { } finally { try { - HostVO hostVO = getHostInCluster(volumeStoragePoolVO.getClusterId()); + HostVO hostVO = getHostInCluster(volumeStoragePoolVO); long snapshotStoragePoolId = snapshotInfo.getDataStore().getId(); DataStore snapshotDataStore = dataStoreMgr.getDataStore(snapshotStoragePoolId, DataStoreRole.Primary); @@ -1473,7 +1477,7 @@ public class 
StorageSystemDataMotionStrategy implements DataMotionStrategy { handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION); - hostVO = getHost(snapshotInfo.getDataCenterId(), snapshotInfo.getHypervisorType(), false); + hostVO = getHost(snapshotInfo, snapshotInfo.getHypervisorType(), false); // copy the volume from secondary via the hypervisor if (HypervisorType.XenServer.equals(snapshotInfo.getHypervisorType())) { @@ -1554,7 +1558,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { // only XenServer, VMware, and KVM are currently supported // Leave host equal to null for KVM since we don't need to perform a resignature when using that hypervisor type. if (volumeInfo.getFormat() == ImageFormat.VHD) { - hostVO = getHost(volumeInfo.getDataCenterId(), HypervisorType.XenServer, true); + hostVO = getHost(volumeInfo, HypervisorType.XenServer, true); if (hostVO == null) { throw new CloudRuntimeException("Unable to locate a host capable of resigning in the zone with the following ID: " + @@ -1574,7 +1578,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { } else if (volumeInfo.getFormat() == ImageFormat.OVA) { // all VMware hosts support resigning - hostVO = getHost(volumeInfo.getDataCenterId(), HypervisorType.VMware, false); + hostVO = getHost(volumeInfo, HypervisorType.VMware, false); if (hostVO == null) { throw new CloudRuntimeException("Unable to locate a host capable of resigning in the zone with the following ID: " + @@ -1757,7 +1761,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { } else { // asking for a XenServer host here so we don't always prefer to use XenServer hosts that support resigning // even when we don't need those hosts to do this kind of copy work - hostVO = getHost(snapshotInfo.getDataCenterId(), snapshotInfo.getHypervisorType(), false); + hostVO = getHost(snapshotInfo, snapshotInfo.getHypervisorType(), false); 
handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION); @@ -1814,7 +1818,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); - HostVO hostVO = getHost(dataCenterId, hypervisorType, false); + HostVO hostVO = getHost(destVolumeInfo, hypervisorType, false); handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION); @@ -2606,7 +2610,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { volumeInfo.processEvent(Event.MigrationRequested); - HostVO hostVO = getHost(volumeInfo.getDataCenterId(), HypervisorType.KVM, false); + HostVO hostVO = getHost(volumeInfo, HypervisorType.KVM, false); DataStore srcDataStore = volumeInfo.getDataStore(); int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); @@ -2764,10 +2768,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { HypervisorType hypervisorType = snapshotInfo.getHypervisorType(); if (HypervisorType.XenServer.equals(hypervisorType)) { - HostVO hostVO = getHost(snapshotInfo.getDataCenterId(), hypervisorType, true); + HostVO hostVO = getHost(snapshotInfo, hypervisorType, true); if (hostVO == null) { - hostVO = getHost(snapshotInfo.getDataCenterId(), hypervisorType, false); + hostVO = getHost(snapshotInfo, hypervisorType, false); if (hostVO == null) { throw new CloudRuntimeException("Unable to locate an applicable host in data center with ID = " + snapshotInfo.getDataCenterId()); @@ -2778,14 +2782,15 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { } if (HypervisorType.VMware.equals(hypervisorType) || HypervisorType.KVM.equals(hypervisorType)) { - return getHost(snapshotInfo.getDataCenterId(), hypervisorType, false); + return getHost(snapshotInfo, hypervisorType, false); } 
throw new CloudRuntimeException("Unsupported hypervisor type"); } - private HostVO getHostInCluster(long clusterId) { - List hosts = _hostDao.findByClusterId(clusterId); + private HostVO getHostInCluster(StoragePoolVO storagePool) { + DataStore store = dataStoreMgr.getDataStore(storagePool.getId(), DataStoreRole.Primary); + List hosts = resourceManager.getEligibleUpAndEnabledHostsInClusterForStorageConnection((PrimaryDataStoreInfo) store); if (hosts != null && hosts.size() > 0) { Collections.shuffle(hosts, RANDOM); @@ -2800,12 +2805,37 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { throw new CloudRuntimeException("Unable to locate a host"); } - private HostVO getHost(Long zoneId, HypervisorType hypervisorType, boolean computeClusterMustSupportResign) { + private HostVO getHost(SnapshotInfo snapshotInfo, HypervisorType hypervisorType, boolean computeClusterMustSupportResign) { + Long zoneId = snapshotInfo.getDataCenterId(); Preconditions.checkArgument(zoneId != null, "Zone ID cannot be null."); Preconditions.checkArgument(hypervisorType != null, "Hypervisor type cannot be null."); - List hosts = _hostDao.listByDataCenterIdAndHypervisorType(zoneId, hypervisorType); + List hosts; + if (DataStoreRole.Primary.equals(snapshotInfo.getDataStore().getRole())) { + hosts = resourceManager.getEligibleUpAndEnabledHostsInZoneForStorageConnection(snapshotInfo.getDataStore(), zoneId, hypervisorType); + } else { + hosts = _hostDao.listByDataCenterIdAndHypervisorType(zoneId, hypervisorType); + } + return getHost(hosts, computeClusterMustSupportResign); + } + + private HostVO getHost(VolumeInfo volumeInfo, HypervisorType hypervisorType, boolean computeClusterMustSupportResign) { + Long zoneId = volumeInfo.getDataCenterId(); + Preconditions.checkArgument(zoneId != null, "Zone ID cannot be null."); + Preconditions.checkArgument(hypervisorType != null, "Hypervisor type cannot be null."); + + List hosts; + if 
(DataStoreRole.Primary.equals(volumeInfo.getDataStore().getRole())) { + hosts = resourceManager.getEligibleUpAndEnabledHostsInZoneForStorageConnection(volumeInfo.getDataStore(), zoneId, hypervisorType); + } else { + hosts = _hostDao.listByDataCenterIdAndHypervisorType(zoneId, hypervisorType); + } + + return getHost(hosts, computeClusterMustSupportResign); + } + + private HostVO getHost(List hosts, boolean computeClusterMustSupportResign) { if (hosts == null) { return null; } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java index cde635b8049..4b259760915 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java @@ -17,41 +17,45 @@ package org.apache.cloudstack.storage.allocator; import com.cloud.api.query.dao.StoragePoolJoinDao; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.exception.StorageUnavailableException; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.storage.ScopeType; +import com.cloud.storage.StoragePoolStatus; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.commons.lang3.StringUtils; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; +import org.apache.commons.collections.CollectionUtils; + +import com.cloud.utils.Pair; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import 
org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; + import com.cloud.capacity.Capacity; import com.cloud.capacity.dao.CapacityDao; import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.deploy.DeploymentPlan; import com.cloud.deploy.DeploymentPlanner.ExcludeList; -import com.cloud.exception.StorageUnavailableException; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.ScopeType; import com.cloud.storage.Storage; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; -import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.StorageUtil; import com.cloud.storage.Volume; import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.utils.NumbersUtil; -import com.cloud.utils.Pair; -import com.cloud.utils.StringUtils; import com.cloud.utils.component.AdapterBase; import com.cloud.vm.DiskProfile; import com.cloud.vm.VirtualMachineProfile; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; - -import org.apache.commons.collections.CollectionUtils; import javax.inject.Inject; import javax.naming.ConfigurationException; @@ -77,11 +81,15 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement @Inject protected PrimaryDataStoreDao storagePoolDao; @Inject protected VolumeDao 
volumeDao; @Inject protected ConfigurationDao configDao; + @Inject protected ClusterDao clusterDao; @Inject protected CapacityDao capacityDao; - @Inject private ClusterDao clusterDao; @Inject private StorageManager storageMgr; @Inject private StorageUtil storageUtil; @Inject private StoragePoolDetailsDao storagePoolDetailsDao; + @Inject + protected HostDao hostDao; + @Inject + protected HostPodDao podDao; /** * make sure shuffled lists of Pools are really shuffled @@ -320,6 +328,16 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement return false; } + if (plan.getHostId() != null) { + HostVO plannedHost = hostDao.findById(plan.getHostId()); + if (!storageMgr.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(plannedHost, pool)) { + if (logger.isDebugEnabled()) { + logger.debug(String.format("StoragePool %s and host %s does not have matching storage access groups", pool, plannedHost)); + } + return false; + } + } + Volume volume = null; boolean isTempVolume = dskCh.getVolumeId() == Volume.DISK_OFFERING_SUITABILITY_CHECK_VOLUME_ID; if (!isTempVolume) { diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java index a80e003a139..25e4608e58f 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java @@ -77,12 +77,12 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat logDisabledStoragePools(dcId, podId, clusterId, ScopeType.CLUSTER); } - List pools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags(), true, VolumeApiServiceImpl.storageTagRuleExecutionTimeout.value()); + List pools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, 
ScopeType.CLUSTER, dskCh.getTags(), true, VolumeApiServiceImpl.storageTagRuleExecutionTimeout.value()); pools.addAll(storagePoolJoinDao.findStoragePoolByScopeAndRuleTags(dcId, podId, clusterId, ScopeType.CLUSTER, List.of(dskCh.getTags()))); logger.debug(String.format("Found pools [%s] that match with tags [%s].", pools, Arrays.toString(dskCh.getTags()))); // add remaining pools in cluster, that did not match tags, to avoid set - List allPools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, null, false, 0); + List allPools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, ScopeType.CLUSTER, null, false, 0); allPools.removeAll(pools); for (StoragePoolVO pool : allPools) { logger.trace(String.format("Adding pool [%s] to the 'avoid' set since it did not match any tags.", pool)); @@ -100,7 +100,7 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat } StoragePool storagePool = (StoragePool)dataStoreMgr.getPrimaryDataStore(pool.getId()); if (filter(avoid, storagePool, dskCh, plan)) { - logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh)); + logger.debug(String.format("Found suitable cluster storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh)); suitablePools.add(storagePool); } else { logger.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", pool, dskCh)); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java index f6712ce46b1..13b5f8e4814 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java @@ -96,7 +96,7 @@ public class ZoneWideStoragePoolAllocator 
extends AbstractStoragePoolAllocator { } StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(storage.getId()); if (filter(avoid, storagePool, dskCh, plan)) { - logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", storagePool, dskCh)); + logger.debug(String.format("Found suitable zone wide storage pool [%s] to allocate disk [%s] to it, adding to list.", storagePool, dskCh)); suitablePools.add(storagePool); } else { if (canAddStoragePoolToAvoidSet(storage)) { diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java index 66adce76172..5e9891ef989 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java @@ -159,7 +159,23 @@ public class PrimaryDataStoreHelper { } } - dataStoreVO = dataStoreDao.persist(dataStoreVO, details, storageTags, params.isTagARule(), displayDetails); + String storageAccessGroupsParams = params.getStorageAccessGroups(); + List storageAccessGroupsList = new ArrayList(); + + if (storageAccessGroupsParams != null) { + String[] storageAccessGroups = storageAccessGroupsParams.split(","); + + for (String storageAccessGroup : storageAccessGroups) { + storageAccessGroup = storageAccessGroup.trim(); + if (storageAccessGroup.length() == 0) { + continue; + } + storageAccessGroupsList.add(storageAccessGroup); + } + } + + dataStoreVO = dataStoreDao.persist(dataStoreVO, details, storageTags, params.isTagARule(), displayDetails, storageAccessGroupsList); + return dataStoreMgr.getDataStore(dataStoreVO.getId(), DataStoreRole.Primary); } @@ -278,6 +294,7 @@ public class PrimaryDataStoreHelper { this.dataStoreDao.update(poolVO.getId(), poolVO); 
dataStoreDao.remove(poolVO.getId()); dataStoreDao.deletePoolTags(poolVO.getId()); + dataStoreDao.deleteStoragePoolAccessGroups(poolVO.getId()); annotationDao.removeByEntityType(AnnotationService.EntityType.PRIMARY_STORAGE.name(), poolVO.getUuid()); deletePoolStats(poolVO.getId()); // Delete op_host_capacity entries diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java index 331b1f3ce5b..7de9000782e 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java @@ -21,6 +21,7 @@ package org.apache.cloudstack.storage.datastore.provider; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CleanupPersistentNetworkResourceCommand; +import com.cloud.agent.api.DeleteStoragePoolCommand; import com.cloud.agent.api.ModifyStoragePoolAnswer; import com.cloud.agent.api.ModifyStoragePoolCommand; import com.cloud.agent.api.SetupPersistentNetworkCommand; @@ -45,6 +46,7 @@ import com.cloud.storage.StorageService; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -207,8 +209,41 @@ public class DefaultHostListener implements HypervisorHostListener { @Override public boolean hostDisconnected(long hostId, long poolId) { - // TODO Auto-generated method stub - return false; + HostVO host = hostDao.findById(hostId); + if (host == null) { + 
logger.error("Failed to disconnect host by HostListener as host was not found with id : " + hostId); + return false; + } + + DataStore dataStore = dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); + StoragePool storagePool = (StoragePool) dataStore; + DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand(storagePool); + Answer answer = sendDeleteStoragePoolCommand(cmd, storagePool, host); + if (!answer.getResult()) { + logger.error("Failed to disconnect storage pool: " + storagePool + " and host: " + host); + return false; + } + + StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId); + if (storagePoolHost != null) { + storagePoolHostDao.deleteStoragePoolHostDetails(hostId, poolId); + } + logger.info("Connection removed between storage pool: " + storagePool + " and host: " + host); + return true; + } + + private Answer sendDeleteStoragePoolCommand(DeleteStoragePoolCommand cmd, StoragePool storagePool, HostVO host) { + Answer answer = agentMgr.easySend(host.getId(), cmd); + if (answer == null) { + throw new CloudRuntimeException(String.format("Unable to get an answer to the delete storage pool command for storage pool %s, sent to host %s", storagePool, host)); + } + + if (!answer.getResult()) { + String msg = "Unable to detach storage pool " + storagePool + " from the host " + host; + alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg); + } + + return answer; } @Override diff --git a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/ManagementServerMock.java b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/ManagementServerMock.java index c630f0bf6b9..15f546db0f0 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/ManagementServerMock.java +++ 
b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/ManagementServerMock.java @@ -373,7 +373,7 @@ public class ManagementServerMock { ConfigurationManager mgr = (ConfigurationManager)_configService; _zone = mgr.createZone(User.UID_SYSTEM, "default", "8.8.8.8", null, "8.8.4.4", null, null /* cidr */, "ROOT", Domain.ROOT_DOMAIN, NetworkType.Advanced, null, - null /* networkDomain */, false, false, null, null, false); + null /* networkDomain */, false, false, null, null, false, null); } } diff --git a/plugins/storage-allocators/random/src/main/java/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java b/plugins/storage-allocators/random/src/main/java/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java index dd8f2e78b73..831e5d2a260 100644 --- a/plugins/storage-allocators/random/src/main/java/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java +++ b/plugins/storage-allocators/random/src/main/java/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java @@ -66,7 +66,7 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator { StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); if (filter(avoid, pol, dskCh, plan)) { - logger.trace(String.format("Found suitable local storage pool [%s], adding to list.", pool)); + logger.trace(String.format("Found suitable storage pool [%s], adding to list.", pool)); suitablePools.add(pol); } } diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java index 3ad08428e9d..7f3a36d6538 100644 --- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java 
+++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java @@ -26,6 +26,8 @@ import java.util.StringTokenizer; import javax.inject.Inject; +import com.cloud.host.Host; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; @@ -50,7 +52,6 @@ import com.cloud.agent.api.DeleteStoragePoolCommand; import com.cloud.agent.api.StoragePoolInfo; import com.cloud.capacity.CapacityManager; import com.cloud.dc.dao.DataCenterDao; -import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -90,6 +91,8 @@ public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLif DataCenterDao _zoneDao; @Inject CapacityManager _capacityMgr; + @Inject + private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao; @Override public DataStore initialize(Map dsInfos) { @@ -356,17 +359,13 @@ public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLif StoragePoolVO dataStoreVO = _storagePoolDao.findById(store.getId()); PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store; - // Check if there is host up in this cluster - List allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId()); - if (allHosts.isEmpty()) { - primaryDataStoreDao.expunge(primarystore.getId()); - throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primarystore.getClusterId()); - } + List hostsToConnect = _resourceMgr.getEligibleUpHostsInClusterForStorageConnection(primarystore); + logger.debug(String.format("Attaching the pool to each of the hosts %s in the 
cluster: %s", hostsToConnect, primarystore.getClusterId())); if (!dataStoreVO.isManaged()) { boolean success = false; - for (HostVO host : allHosts) { - success = createStoragePool(host, primarystore); + for (HostVO h : hostsToConnect) { + success = createStoragePool(h, primarystore); if (success) { break; } @@ -375,7 +374,7 @@ public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLif logger.debug("In createPool Adding the pool to each of the hosts"); List poolHosts = new ArrayList(); - for (HostVO h : allHosts) { + for (HostVO h : hostsToConnect) { try { storageMgr.connectHostToSharedPool(h, primarystore.getId()); poolHosts.add(h); @@ -428,10 +427,11 @@ public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLif @Override public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { - List hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId()); - logger.debug("In createPool. Attaching the pool to each of the hosts."); + List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), hypervisorType); + + logger.debug(String.format("In createPool. 
Attaching the pool to each of the hosts in %s.", hostsToConnect)); List poolHosts = new ArrayList(); - for (HostVO host : hosts) { + for (HostVO host : hostsToConnect) { try { storageMgr.connectHostToSharedPool(host, dataStore.getId()); poolHosts.add(host); diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java index 04ea3141423..537243dd321 100644 --- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java @@ -25,7 +25,6 @@ import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; -import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -38,8 +37,10 @@ import com.cloud.storage.StoragePoolAutomation; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.SnapshotDetailsDao; import com.cloud.storage.dao.SnapshotDetailsVO; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; @@ -84,6 +85,8 @@ public class DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCyc private StoragePoolHostDao _storagePoolHostDao; @Inject private StoragePoolAutomation storagePoolAutomation; + 
@Inject + private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao; @Override public DataStore initialize(Map dsInfos) { @@ -97,6 +100,7 @@ public class DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCyc Long capacityBytes = (Long) dsInfos.get("capacityBytes"); Long capacityIops = (Long) dsInfos.get("capacityIops"); String tags = (String) dsInfos.get("tags"); + String storageAccessGroups = (String)dsInfos.get(ApiConstants.STORAGE_ACCESS_GROUPS); boolean isTagARule = (Boolean)dsInfos.get("isTagARule"); @SuppressWarnings("unchecked") Map details = (Map) dsInfos.get("details"); @@ -179,6 +183,7 @@ public class DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCyc parameters.setCapacityIops(capacityIops); parameters.setHypervisorType(HypervisorType.Any); parameters.setTags(tags); + parameters.setStorageAccessGroups(storageAccessGroups); parameters.setIsTagARule(isTagARule); parameters.setDetails(details); @@ -243,22 +248,13 @@ public class DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCyc @Override public boolean attachCluster(DataStore datastore, ClusterScope scope) { PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) datastore; + List hostsToConnect = _resourceMgr.getEligibleUpHostsInClusterForStorageConnection(primaryDataStoreInfo); - // check if there is at least one host up in this cluster - List allHosts = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, - primaryDataStoreInfo.getClusterId(), primaryDataStoreInfo.getPodId(), - primaryDataStoreInfo.getDataCenterId()); - - if (allHosts.isEmpty()) { - storagePoolDao.expunge(primaryDataStoreInfo.getId()); - - throw new CloudRuntimeException( - "No host up to associate a storage pool with in cluster " + primaryDataStoreInfo.getClusterId()); - } + logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primaryDataStoreInfo.getClusterId())); List poolHosts = new ArrayList(); - for 
(HostVO host : allHosts) { + for (HostVO host : hostsToConnect) { try { _storageMgr.connectHostToSharedPool(host, primaryDataStoreInfo.getId()); @@ -288,19 +284,15 @@ public class DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCyc public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { dataStoreHelper.attachZone(dataStore); - List xenServerHosts = _resourceMgr - .listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.XenServer, scope.getScopeId()); - List vmWareServerHosts = _resourceMgr - .listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.VMware, scope.getScopeId()); - List kvmHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, - scope.getScopeId()); - List hosts = new ArrayList(); + List hostsToConnect = new ArrayList<>(); + HypervisorType[] hypervisorTypes = {HypervisorType.XenServer, HypervisorType.VMware, HypervisorType.KVM}; - hosts.addAll(xenServerHosts); - hosts.addAll(vmWareServerHosts); - hosts.addAll(kvmHosts); + for (HypervisorType type : hypervisorTypes) { + hostsToConnect.addAll(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), type)); + } - for (HostVO host : hosts) { + logger.debug(String.format("In createPool. 
Attaching the pool to each of the hosts in %s.", hostsToConnect)); + for (HostVO host : hostsToConnect) { try { _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { diff --git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java index 351d59f6b03..fbcc51e2e8c 100644 --- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java @@ -18,7 +18,6 @@ */ package org.apache.cloudstack.storage.datastore.lifecycle; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.UUID; @@ -26,6 +25,7 @@ import java.util.stream.Collectors; import javax.inject.Inject; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -51,7 +51,6 @@ import com.cloud.dc.dao.HostPodDao; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.StorageConflictException; import com.cloud.exception.StorageUnavailableException; -import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -63,6 +62,7 @@ import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolAutomation; import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import 
com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.StoragePoolWorkDao; import com.cloud.storage.dao.VolumeDao; @@ -129,6 +129,8 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor StoragePoolAutomation storagePoolAutmation; @Inject protected HostDao _hostDao; + @Inject + private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao; @SuppressWarnings("unchecked") @Override @@ -146,9 +148,11 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters(); String tags = (String)dsInfos.get("tags"); + String storageAccessGroups = (String)dsInfos.get(ApiConstants.STORAGE_ACCESS_GROUPS); Map details = (Map)dsInfos.get("details"); parameters.setTags(tags); + parameters.setStorageAccessGroups(storageAccessGroups); parameters.setIsTagARule((Boolean)dsInfos.get("isTagARule")); parameters.setDetails(details); @@ -386,17 +390,15 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor } private Pair, Boolean> prepareOcfs2NodesIfNeeded(PrimaryDataStoreInfo primaryStore) { + List hostsToConnect = _resourceMgr.getEligibleUpHostsInClusterForStorageConnection(primaryStore); + logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primaryStore.getClusterId())); + List hostIds = hostsToConnect.stream().map(HostVO::getId).collect(Collectors.toList()); + if (!StoragePoolType.OCFS2.equals(primaryStore.getPoolType())) { - return new Pair<>(_hostDao.listIdsForUpRouting(primaryStore.getDataCenterId(), - primaryStore.getPodId(), primaryStore.getClusterId()), true); + return new Pair<>(hostIds, true); } - List allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, primaryStore.getClusterId(), - primaryStore.getPodId(), primaryStore.getDataCenterId()); - if (allHosts.isEmpty()) { - return new Pair<>(Collections.emptyList(), true); - } - List hostIds = 
allHosts.stream().map(HostVO::getId).collect(Collectors.toList()); - if (!_ocfs2Mgr.prepareNodes(allHosts, primaryStore)) { + + if (!_ocfs2Mgr.prepareNodes(hostsToConnect, primaryStore)) { return new Pair<>(hostIds, false); } return new Pair<>(hostIds, true); @@ -432,8 +434,9 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor @Override public boolean attachZone(DataStore store, ZoneScope scope, HypervisorType hypervisorType) { - List hostIds = _hostDao.listIdsForUpEnabledByZoneAndHypervisor(scope.getScopeId(), hypervisorType); - logger.debug("In createPool. Attaching the pool to each of the hosts."); + List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(store, scope.getScopeId(), hypervisorType); + logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect)); + List hostIds = hostsToConnect.stream().map(HostVO::getId).collect(Collectors.toList()); storageMgr.connectHostsToPool(store, hostIds, scope, true, true); dataStoreHelper.attachZone(store, hypervisorType); return true; diff --git a/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java b/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java index 24c036d443d..3b533d588d3 100644 --- a/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java +++ b/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java @@ -25,7 +25,7 @@ import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import java.util.List; +import java.util.Arrays; import 
org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -143,9 +143,15 @@ public class CloudStackPrimaryDataStoreLifeCycleImplTest extends TestCase { storageMgr.registerHostListener("default", hostListener); + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + + Mockito.when(host1.getId()).thenReturn(1L); + Mockito.when(host2.getId()).thenReturn(2L); + + when(_resourceMgr.getEligibleUpHostsInClusterForStorageConnection(store)) + .thenReturn(Arrays.asList(host1, host2)); - when(hostDao.listIdsForUpRouting(anyLong(), anyLong(), anyLong())) - .thenReturn(List.of(1L, 2L)); when(hostDao.findById(anyLong())).thenReturn(mock(HostVO.class)); when(agentMgr.easySend(anyLong(), Mockito.any(ModifyStoragePoolCommand.class))).thenReturn(answer); when(answer.getResult()).thenReturn(true); diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java index e36eacf24c2..fa9c1b71ff3 100644 --- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java @@ -39,6 +39,7 @@ import com.cloud.storage.Storage; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolAutomation; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -68,6 +69,8 @@ public class 
LinstorPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLi @Inject private CapacityManager _capacityMgr; @Inject + private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao; + @Inject AgentManager _agentMgr; public LinstorPrimaryDataStoreLifeCycleImpl() @@ -204,20 +207,12 @@ public class LinstorPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLi throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type."); } - // check if there is at least one host up in this cluster - List allHosts = resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, - primaryDataStoreInfo.getClusterId(), primaryDataStoreInfo.getPodId(), - primaryDataStoreInfo.getDataCenterId()); - - if (allHosts.isEmpty()) { - _primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); - - throw new CloudRuntimeException( - "No host up to associate a storage pool with in cluster " + primaryDataStoreInfo.getClusterId()); - } + PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) dataStore; + List hostsToConnect = resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primarystore); + logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId())); List poolHosts = new ArrayList<>(); - for (HostVO host : allHosts) { + for (HostVO host : hostsToConnect) { try { createStoragePool(host, primaryDataStoreInfo); @@ -249,10 +244,11 @@ public class LinstorPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLi throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type."); } - List hosts = resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, - scope.getScopeId()); + List hostsToConnect = resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), hypervisorType); - for (HostVO host : hosts) { + logger.debug(String.format("In createPool. 
Attaching the pool to each of the hosts in %s.", hostsToConnect)); + + for (HostVO host : hostsToConnect) { try { _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { diff --git a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java index 79f771721f5..43134610552 100644 --- a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java @@ -24,6 +24,7 @@ import java.util.Map; import javax.inject.Inject; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; @@ -58,6 +59,8 @@ public class NexentaPrimaryDataStoreLifeCycle StorageManager _storageMgr; @Inject private StoragePoolAutomation storagePoolAutomation; + @Inject + private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao; @Override public DataStore initialize(Map dsInfos) { @@ -130,16 +133,14 @@ public class NexentaPrimaryDataStoreLifeCycle public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) { dataStoreHelper.attachZone(dataStore); - List xenServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.XenServer, scope.getScopeId()); - List vmWareServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.VMware, scope.getScopeId()); - List kvmHosts = 
_resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.KVM, scope.getScopeId()); - List hosts = new ArrayList(); + List hostsToConnect = new ArrayList<>(); + Hypervisor.HypervisorType[] hypervisorTypes = {Hypervisor.HypervisorType.XenServer, Hypervisor.HypervisorType.VMware, Hypervisor.HypervisorType.KVM}; - hosts.addAll(xenServerHosts); - hosts.addAll(vmWareServerHosts); - hosts.addAll(kvmHosts); - - for (HostVO host : hosts) { + for (Hypervisor.HypervisorType type : hypervisorTypes) { + hostsToConnect.addAll(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), type)); + } + logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect)); + for (HostVO host : hostsToConnect) { try { _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java index a538cdb49e4..461992be102 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java @@ -18,6 +18,39 @@ */ package org.apache.cloudstack.storage.datastore.lifecycle; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URLDecoder; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.stream.Collectors; + +import javax.inject.Inject; + +import com.cloud.host.HostVO; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; 
+import org.apache.cloudstack.api.ApiConstants; +import com.cloud.utils.StringUtils; +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; +import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; +import org.apache.commons.collections.CollectionUtils; + import com.cloud.agent.AgentManager; import com.cloud.agent.api.StoragePoolInfo; import com.cloud.capacity.CapacityManager; @@ -34,41 +67,15 @@ import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolAutomation; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.template.TemplateManager; -import com.cloud.utils.StringUtils; import com.cloud.utils.UriUtils; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import 
org.apache.cloudstack.engine.subsystem.api.storage.HostScope; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; -import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; -import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics; -import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; -import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.manager.ScaleIOSDCManager; import org.apache.cloudstack.storage.datastore.manager.ScaleIOSDCManagerImpl; -import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; -import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; -import org.apache.commons.collections.CollectionUtils; -import javax.inject.Inject; -import java.net.URI; -import java.net.URISyntaxException; -import java.net.URLDecoder; -import java.security.KeyManagementException; -import java.security.NoSuchAlgorithmException; import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle { @Inject @@ -98,6 +105,8 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy @Inject private AgentManager agentMgr; private ScaleIOSDCManager sdcManager; + @Inject + private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao; public 
ScaleIOPrimaryDataStoreLifeCycle() { sdcManager = new ScaleIOSDCManagerImpl(); @@ -141,6 +150,7 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy Long capacityBytes = (Long)dsInfos.get("capacityBytes"); Long capacityIops = (Long)dsInfos.get("capacityIops"); String tags = (String)dsInfos.get("tags"); + String storageAccessGroups = (String)dsInfos.get(ApiConstants.STORAGE_ACCESS_GROUPS); Boolean isTagARule = (Boolean) dsInfos.get("isTagARule"); Map details = (Map) dsInfos.get("details"); @@ -223,6 +233,7 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy parameters.setHypervisorType(Hypervisor.HypervisorType.KVM); parameters.setUuid(UUID.randomUUID().toString()); parameters.setTags(tags); + parameters.setStorageAccessGroups(storageAccessGroups); parameters.setIsTagARule(isTagARule); StoragePoolStatistics poolStatistics = scaleIOPool.getStatistics(); @@ -260,14 +271,10 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy } PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) dataStore; - List hostIds = hostDao.listIdsForUpRouting(primaryDataStoreInfo.getDataCenterId(), - primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getClusterId()); - if (hostIds.isEmpty()) { - primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); - throw new CloudRuntimeException("No hosts are Up to associate a storage pool with in cluster: " + cluster); - } + List hostsToConnect = resourceManager.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryDataStoreInfo); + logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, cluster)); + List hostIds = hostsToConnect.stream().map(HostVO::getId).collect(Collectors.toList()); - logger.debug("Attaching the pool to each of the hosts in the {}", cluster); storageMgr.connectHostsToPool(dataStore, hostIds, scope, false, false); dataStoreHelper.attachCluster(dataStore); @@ 
-287,7 +294,10 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy logger.debug("Attaching the pool to each of the hosts in the {}", dataCenterDao.findById(scope.getScopeId())); - List hostIds = hostDao.listIdsForUpEnabledByZoneAndHypervisor(scope.getScopeId(), hypervisorType); + List hostsToConnect = resourceManager.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), hypervisorType); + logger.debug(String.format("Attaching the pool to each of the hosts %s in the zone: %s", hostsToConnect, scope.getScopeId())); + List hostIds = hostsToConnect.stream().map(HostVO::getId).collect(Collectors.toList()); + storageMgr.connectHostsToPool(dataStore, hostIds, scope, false, false); dataStoreHelper.attachZone(dataStore); diff --git a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java index bf0b443c18a..324f3c08cb8 100644 --- a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java +++ b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java @@ -30,8 +30,11 @@ import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.when; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; +import com.cloud.host.HostVO; +import com.cloud.resource.ResourceManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; @@ -106,6 +109,9 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest { @Mock private HypervisorHostListener 
hostListener; + @Mock + private ResourceManager resourceManager; + @InjectMocks private ScaleIOPrimaryDataStoreLifeCycle scaleIOPrimaryDataStoreLifeCycleTest; private AutoCloseable closeable; @@ -137,8 +143,14 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest { final ZoneScope scope = new ZoneScope(1L); - when(hostDao.listIdsForUpEnabledByZoneAndHypervisor(scope.getScopeId(), Hypervisor.HypervisorType.KVM)) - .thenReturn(List.of(1L, 2L)); + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + + Mockito.when(host1.getId()).thenReturn(1L); + Mockito.when(host2.getId()).thenReturn(2L); + + when(resourceManager.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM)) + .thenReturn(Arrays.asList(host1, host2)); when(dataStoreMgr.getDataStore(anyLong(), eq(DataStoreRole.Primary))).thenReturn(store); when(store.isShared()).thenReturn(true); diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java index 1dbbf458b48..f23698bc97b 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java @@ -25,6 +25,7 @@ import java.util.UUID; import javax.inject.Inject; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; @@ -43,7 +44,6 @@ import com.cloud.capacity.CapacityManager; import com.cloud.dc.ClusterVO; 
import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; -import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.resource.ResourceManager; @@ -74,6 +74,8 @@ public class SolidFirePrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLife @Inject private StoragePoolAutomation _storagePoolAutomation; @Inject private StoragePoolDetailsDao _storagePoolDetailsDao; @Inject private VMTemplatePoolDao _tmpltPoolDao; + @Inject + private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao; // invoked to add primary storage that is based on the SolidFire plug-in @Override @@ -235,11 +237,10 @@ public class SolidFirePrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLife @Override public boolean attachCluster(DataStore dataStore, ClusterScope scope) { PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo)dataStore; + List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primarystore); - List hosts = - _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId()); - - for (HostVO host : hosts) { + logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId())); + for (HostVO host : hostsToConnect) { try { _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { @@ -254,16 +255,15 @@ public class SolidFirePrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLife @Override public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { - List xenServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.XenServer, scope.getScopeId()); - List vmWareServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.VMware, scope.getScopeId()); - List kvmHosts = 
_resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId()); - List hosts = new ArrayList<>(); + List hostsToConnect = new ArrayList<>(); + HypervisorType[] hypervisorTypes = {HypervisorType.XenServer, HypervisorType.VMware, HypervisorType.KVM}; - hosts.addAll(xenServerHosts); - hosts.addAll(vmWareServerHosts); - hosts.addAll(kvmHosts); + for (HypervisorType type : hypervisorTypes) { + hostsToConnect.addAll(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), type)); + } - for (HostVO host : hosts) { + logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect)); + for (HostVO host : hostsToConnect) { try { _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java index 482fa23096a..b05046cf496 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java @@ -26,6 +26,8 @@ import java.util.Map; import javax.inject.Inject; +import com.cloud.host.Host; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -50,7 +52,6 @@ import com.cloud.agent.api.StoragePoolInfo; import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; -import 
com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -85,6 +86,8 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle extends BasePrimaryDataSto @Inject private StoragePoolDetailsDao storagePoolDetailsDao; @Inject private StoragePoolHostDao storagePoolHostDao; @Inject private TemplateManager tmpltMgr; + @Inject + private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao; // invoked to add primary storage that is based on the SolidFire plug-in @Override @@ -382,19 +385,12 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle extends BasePrimaryDataSto public boolean attachCluster(DataStore store, ClusterScope scope) { PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo)store; - // check if there is at least one host up in this cluster - List allHosts = resourceMgr.listAllUpHosts(Host.Type.Routing, primaryDataStoreInfo.getClusterId(), - primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getDataCenterId()); - - if (allHosts.isEmpty()) { - primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); - - throw new CloudRuntimeException(String.format("No host up to associate a storage pool with in cluster %s", clusterDao.findById(primaryDataStoreInfo.getClusterId()))); - } + List hostsToConnect = resourceMgr.getEligibleUpHostsInClusterForStorageConnection(primaryDataStoreInfo); boolean success = false; + logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, clusterDao.findById(primaryDataStoreInfo.getClusterId()))); - for (HostVO host : allHosts) { + for (HostVO host : hostsToConnect) { success = createStoragePool(host, primaryDataStoreInfo); if (success) { @@ -408,7 +404,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle extends BasePrimaryDataSto List poolHosts = new ArrayList<>(); - for (HostVO host : allHosts) { + for (HostVO host : hostsToConnect) { try { 
storageMgr.connectHostToSharedPool(host, primaryDataStoreInfo.getId()); diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java index 60427e65ea6..d299fe34ffc 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java @@ -24,6 +24,7 @@ import java.util.UUID; import javax.inject.Inject; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; @@ -80,6 +81,8 @@ public class StorPoolPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeC private VMTemplateDetailsDao vmTemplateDetailsDao; @Inject private StoragePoolDetailsDao storagePoolDetailsDao; + @Inject + private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao; @Override public DataStore initialize(Map dsInfos) { @@ -208,8 +211,11 @@ public class StorPoolPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeC if (hypervisorType != HypervisorType.KVM) { throw new UnsupportedOperationException("Only KVM hypervisors supported!"); } - List kvmHosts = resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId()); - for (HostVO host : kvmHosts) { + List kvmHostsToConnect = resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), HypervisorType.KVM); + + logger.debug(String.format("In createPool. 
Attaching the pool to each of the hosts in %s.", kvmHostsToConnect)); + + for (HostVO host : kvmHostsToConnect) { try { storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { diff --git a/server/src/main/java/com/cloud/api/ApiDBUtils.java b/server/src/main/java/com/cloud/api/ApiDBUtils.java index 4ef1b28b9c0..4783815a9e5 100644 --- a/server/src/main/java/com/cloud/api/ApiDBUtils.java +++ b/server/src/main/java/com/cloud/api/ApiDBUtils.java @@ -2007,6 +2007,10 @@ public class ApiDBUtils { return s_projectInvitationJoinDao.newProjectInvitationView(proj); } + public static HostResponse newMinimalHostResponse(HostJoinVO vr) { + return s_hostJoinDao.newMinimalHostResponse(vr); + } + public static HostResponse newHostResponse(HostJoinVO vr, EnumSet details) { return s_hostJoinDao.newHostResponse(vr, details); } @@ -2035,6 +2039,10 @@ public class ApiDBUtils { return s_poolJoinDao.newStoragePoolResponse(vr, customStats); } + public static StoragePoolResponse newMinimalStoragePoolResponse(StoragePoolJoinVO vr) { + return s_poolJoinDao.newMinimalStoragePoolResponse(vr); + } + public static StorageTagResponse newStorageTagResponse(StoragePoolTagVO vr) { return s_tagDao.newStorageTagResponse(vr); } @@ -2164,6 +2172,10 @@ public class ApiDBUtils { return s_dcJoinDao.newDataCenterResponse(view, dc, showCapacities, showResourceImage); } + public static ZoneResponse newMinimalDataCenterResponse(ResponseView view, DataCenterJoinVO dc) { + return s_dcJoinDao.newMinimalDataCenterResponse(view, dc); + } + public static DataCenterJoinVO newDataCenterView(DataCenter dc) { return s_dcJoinDao.newDataCenterView(dc); } diff --git a/server/src/main/java/com/cloud/api/ApiResponseHelper.java b/server/src/main/java/com/cloud/api/ApiResponseHelper.java index 474dfc09626..443bba8e05b 100644 --- a/server/src/main/java/com/cloud/api/ApiResponseHelper.java +++ b/server/src/main/java/com/cloud/api/ApiResponseHelper.java @@ -1301,6 +1301,15 @@ public class 
ApiResponseHelper implements ResponseGenerator { return response; } + @Override + public PodResponse createMinimalPodResponse(Pod pod) { + PodResponse podResponse = new PodResponse(); + podResponse.setId(pod.getUuid()); + podResponse.setName(pod.getName()); + podResponse.setObjectName("pod"); + return podResponse; + } + @Override public PodResponse createPodResponse(Pod pod, Boolean showCapacities) { String[] ipRange = new String[2]; @@ -1344,7 +1353,7 @@ public class ApiResponseHelper implements ResponseGenerator { PodResponse podResponse = new PodResponse(); podResponse.setId(pod.getUuid()); podResponse.setName(pod.getName()); - DataCenter zone = ApiDBUtils.findZoneById(pod.getDataCenterId()); + DataCenterVO zone = ApiDBUtils.findZoneById(pod.getDataCenterId()); if (zone != null) { podResponse.setZoneId(zone.getUuid()); podResponse.setZoneName(zone.getName()); @@ -1357,6 +1366,8 @@ public class ApiResponseHelper implements ResponseGenerator { podResponse.setVlanId(vlanIds); podResponse.setGateway(pod.getGateway()); podResponse.setAllocationState(pod.getAllocationState().toString()); + podResponse.setStorageAccessGroups(pod.getStorageAccessGroups()); + podResponse.setZoneStorageAccessGroups(zone.getStorageAccessGroups()); if (showCapacities != null && showCapacities) { List capacities = ApiDBUtils.getCapacityByClusterPodZone(null, pod.getId(), null); Set capacityResponses = new HashSet(); @@ -1506,6 +1517,15 @@ public class ApiResponseHelper implements ResponseGenerator { return listPools.get(0); } + @Override + public ClusterResponse createMinimalClusterResponse(Cluster cluster) { + ClusterResponse clusterResponse = new ClusterResponse(); + clusterResponse.setId(cluster.getUuid()); + clusterResponse.setName(cluster.getName()); + clusterResponse.setObjectName("cluster"); + return clusterResponse; + } + @Override public ClusterResponse createClusterResponse(Cluster cluster, Boolean showCapacities) { ClusterResponse clusterResponse = new ClusterResponse(); @@ 
-1516,7 +1536,7 @@ public class ApiResponseHelper implements ResponseGenerator { clusterResponse.setPodId(pod.getUuid()); clusterResponse.setPodName(pod.getName()); } - DataCenter dc = ApiDBUtils.findZoneById(cluster.getDataCenterId()); + DataCenterVO dc = ApiDBUtils.findZoneById(cluster.getDataCenterId()); if (dc != null) { clusterResponse.setZoneId(dc.getUuid()); clusterResponse.setZoneName(dc.getName()); @@ -1534,6 +1554,10 @@ public class ApiResponseHelper implements ResponseGenerator { clusterResponse.setArch(cluster.getArch().getType()); } + clusterResponse.setStorageAccessGroups(cluster.getStorageAccessGroups()); + clusterResponse.setPodStorageAccessGroups(pod.getStorageAccessGroups()); + clusterResponse.setZoneStorageAccessGroups(dc.getStorageAccessGroups()); + if (showCapacities != null && showCapacities) { List capacities = ApiDBUtils.getCapacityByClusterPodZone(null, null, cluster.getId()); Set capacityResponses = new HashSet(); diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index 9290d2aa701..288465b0e60 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -36,6 +36,12 @@ import java.util.stream.Stream; import javax.inject.Inject; +import com.cloud.dc.Pod; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.org.Cluster; +import com.cloud.server.ManagementService; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.SecurityChecker; @@ -52,6 +58,7 @@ import org.apache.cloudstack.api.ResourceDetail; import org.apache.cloudstack.api.ResponseGenerator; import org.apache.cloudstack.api.ResponseObject.ResponseView; import 
org.apache.cloudstack.api.command.admin.account.ListAccountsCmdByAdmin; +import org.apache.cloudstack.api.command.admin.cluster.ListClustersCmd; import org.apache.cloudstack.api.command.admin.domain.ListDomainsCmd; import org.apache.cloudstack.api.command.admin.domain.ListDomainsCmdByAdmin; import org.apache.cloudstack.api.command.admin.host.ListHostTagsCmd; @@ -59,6 +66,7 @@ import org.apache.cloudstack.api.command.admin.host.ListHostsCmd; import org.apache.cloudstack.api.command.admin.internallb.ListInternalLBVMsCmd; import org.apache.cloudstack.api.command.admin.iso.ListIsosCmdByAdmin; import org.apache.cloudstack.api.command.admin.management.ListMgmtsCmd; +import org.apache.cloudstack.api.command.admin.pod.ListPodsByCmd; import org.apache.cloudstack.api.command.admin.resource.icon.ListResourceIconCmd; import org.apache.cloudstack.api.command.admin.router.GetRouterHealthCheckResultsCmd; import org.apache.cloudstack.api.command.admin.router.ListRoutersCmd; @@ -66,6 +74,7 @@ import org.apache.cloudstack.api.command.admin.snapshot.ListSnapshotsCmdByAdmin; import org.apache.cloudstack.api.command.admin.storage.ListImageStoresCmd; import org.apache.cloudstack.api.command.admin.storage.ListObjectStoragePoolsCmd; import org.apache.cloudstack.api.command.admin.storage.ListSecondaryStagingStoresCmd; +import org.apache.cloudstack.api.command.admin.storage.ListStorageAccessGroupsCmd; import org.apache.cloudstack.api.command.admin.storage.ListStoragePoolsCmd; import org.apache.cloudstack.api.command.admin.storage.ListStorageTagsCmd; import org.apache.cloudstack.api.command.admin.storage.heuristics.ListSecondaryStorageSelectorsCmd; @@ -100,6 +109,7 @@ import org.apache.cloudstack.api.command.user.zone.ListZonesCmd; import org.apache.cloudstack.api.response.AccountResponse; import org.apache.cloudstack.api.response.AsyncJobResponse; import org.apache.cloudstack.api.response.BucketResponse; +import org.apache.cloudstack.api.response.ClusterResponse; import 
org.apache.cloudstack.api.response.DetailOptionsResponse; import org.apache.cloudstack.api.response.DiskOfferingResponse; import org.apache.cloudstack.api.response.DomainResponse; @@ -114,6 +124,7 @@ import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.ManagementServerResponse; import org.apache.cloudstack.api.response.ObjectStoreResponse; import org.apache.cloudstack.api.response.PeerManagementServerNodeResponse; +import org.apache.cloudstack.api.response.PodResponse; import org.apache.cloudstack.api.response.ProjectAccountResponse; import org.apache.cloudstack.api.response.ProjectInvitationResponse; import org.apache.cloudstack.api.response.ProjectResponse; @@ -125,6 +136,7 @@ import org.apache.cloudstack.api.response.SecondaryStorageHeuristicsResponse; import org.apache.cloudstack.api.response.SecurityGroupResponse; import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.cloudstack.api.response.SnapshotResponse; +import org.apache.cloudstack.api.response.StorageAccessGroupResponse; import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.StorageTagResponse; import org.apache.cloudstack.api.response.TemplateResponse; @@ -618,6 +630,18 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q @Inject private AsyncJobManager jobManager; + @Inject + private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao; + + @Inject + public ManagementService managementService; + + @Inject + DataCenterDao dataCenterDao; + + @Inject + HostPodDao podDao; + private SearchCriteria getMinimumCpuServiceOfferingJoinSearchCriteria(int cpu) { SearchCriteria sc = _srvOfferingJoinDao.createSearchCriteria(); SearchCriteria sc1 = _srvOfferingJoinDao.createSearchCriteria(); @@ -2342,6 +2366,16 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q return response; } + private ListResponse 
searchForServersWithMinimalResponse(ListHostsCmd cmd) { + logger.debug(">>>Searching for hosts>>>"); + Pair, Integer> hosts = searchForServersInternal(cmd); + ListResponse response = new ListResponse(); + logger.debug(">>>Generating Response>>>"); + List hostResponses = ViewResponseHelper.createMinimalHostResponse(hosts.first().toArray(new HostJoinVO[hosts.first().size()])); + response.setResponses(hostResponses, hosts.second()); + return response; + } + public Pair, Integer> searchForServersInternal(ListHostsCmd cmd) { Pair, Integer> serverIdPage = searchForServerIdsAndCount(cmd); @@ -2373,6 +2407,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q Hypervisor.HypervisorType hypervisorType = cmd.getHypervisor(); Long msId = cmd.getManagementServerId(); final CPU.CPUArch arch = cmd.getArch(); + String storageAccessGroup = cmd.getStorageAccessGroup(); Filter searchFilter = new Filter(HostVO.class, "id", Boolean.TRUE, startIndex, pageSize); @@ -2390,6 +2425,13 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q hostSearchBuilder.and("hypervisor_type", hostSearchBuilder.entity().getHypervisorType(), SearchCriteria.Op.EQ); hostSearchBuilder.and("mgmt_server_id", hostSearchBuilder.entity().getManagementServerId(), SearchCriteria.Op.EQ); hostSearchBuilder.and("arch", hostSearchBuilder.entity().getArch(), SearchCriteria.Op.EQ); + if (storageAccessGroup != null) { + hostSearchBuilder.and().op("storageAccessGroupExact", hostSearchBuilder.entity().getStorageAccessGroups(), Op.EQ); + hostSearchBuilder.or("storageAccessGroupPrefix", hostSearchBuilder.entity().getStorageAccessGroups(), Op.LIKE); + hostSearchBuilder.or("storageAccessGroupSuffix", hostSearchBuilder.entity().getStorageAccessGroups(), Op.LIKE); + hostSearchBuilder.or("storageAccessGroupMiddle", hostSearchBuilder.entity().getStorageAccessGroups(), Op.LIKE); + hostSearchBuilder.cp(); + } if (keyword != null) { hostSearchBuilder.and().op("keywordName", 
hostSearchBuilder.entity().getName(), SearchCriteria.Op.LIKE); @@ -2481,6 +2523,13 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q sc.setParameters("arch", arch); } + if (storageAccessGroup != null) { + sc.setParameters("storageAccessGroupExact", storageAccessGroup); + sc.setParameters("storageAccessGroupPrefix", storageAccessGroup + ",%"); + sc.setParameters("storageAccessGroupSuffix", "%," + storageAccessGroup); + sc.setParameters("storageAccessGroupMiddle", "%," + storageAccessGroup + ",%"); + } + Pair, Integer> uniqueHostPair = hostDao.searchAndCount(sc, searchFilter); Integer count = uniqueHostPair.second(); List hostIds = uniqueHostPair.first().stream().map(HostVO::getId).collect(Collectors.toList()); @@ -3204,7 +3253,14 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q poolResponse.setCaps(caps); } + private ListResponse searchForStoragePoolsWithMinimalResponse(ListStoragePoolsCmd cmd) { + Pair, Integer> result = searchForStoragePoolsInternal(cmd); + ListResponse response = new ListResponse<>(); + List poolResponses = ViewResponseHelper.createMinimalStoragePoolResponse(result.first().toArray(new StoragePoolJoinVO[result.first().size()])); + response.setResponses(poolResponses, result.second()); + return response; + } private Pair, Integer> searchForStoragePoolsInternal(ListStoragePoolsCmd cmd) { ScopeType scopeType = ScopeType.validateAndGetScopeType(cmd.getScope()); @@ -3216,16 +3272,18 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q String path = cmd.getPath(); Long pod = cmd.getPodId(); Long cluster = cmd.getClusterId(); + Long host = cmd.getHostId(); String address = cmd.getIpAddress(); String keyword = cmd.getKeyword(); Long startIndex = cmd.getStartIndex(); Long pageSize = cmd.getPageSizeVal(); + String storageAccessGroup = cmd.getStorageAccessGroup(); Filter searchFilter = new Filter(StoragePoolVO.class, "id", Boolean.TRUE, startIndex, pageSize); 
Pair, Integer> uniquePoolPair = storagePoolDao.searchForIdsAndCount(id, name, zoneId, path, pod, - cluster, address, scopeType, status, keyword, searchFilter); + cluster, host, address, scopeType, status, keyword, storageAccessGroup, searchFilter); List storagePools = _poolJoinDao.searchByIds(uniquePoolPair.first().toArray(new Long[0])); @@ -3243,6 +3301,99 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q return response; } + @Override + public ListResponse searchForStorageAccessGroups(ListStorageAccessGroupsCmd cmd) { + String name = cmd.getName(); + String keyword = cmd.getKeyword(); + Set storageAccessGroups = new HashSet<>(); + + addStorageAccessGroups(storageAccessGroups, storagePoolAndAccessGroupMapDao.listDistinctStorageAccessGroups(name, keyword)); + addStorageAccessGroups(storageAccessGroups, hostDao.listDistinctStorageAccessGroups(name, keyword)); + addStorageAccessGroups(storageAccessGroups, clusterDao.listDistinctStorageAccessGroups(name, keyword)); + addStorageAccessGroups(storageAccessGroups, podDao.listDistinctStorageAccessGroups(name, keyword)); + addStorageAccessGroups(storageAccessGroups, dataCenterDao.listDistinctStorageAccessGroups(name, keyword)); + + if (StringUtils.isNotEmpty(name) && storageAccessGroups.contains(name)) { + storageAccessGroups = Collections.singleton(name); + } + + if (StringUtils.isNotEmpty(keyword)) { + storageAccessGroups = storageAccessGroups.stream() + .filter(group -> group.contains(keyword)) + .collect(Collectors.toSet()); + } + + List responseList = buildStorageAccessGroupResponses(storageAccessGroups, name); + + ListResponse response = new ListResponse<>(); + response.setResponses(responseList, storageAccessGroups.size()); + return response; + } + + private void addStorageAccessGroups(Set storageAccessGroups, List groups) { + for (String group : groups) { + if (group != null && !group.isEmpty()) { + storageAccessGroups.addAll(Arrays.asList(group.split(","))); + } + } + } + + private 
List buildStorageAccessGroupResponses( + Set storageAccessGroups, String name) { + List responseList = new ArrayList<>(); + + for (String sag : storageAccessGroups) { + StorageAccessGroupResponse sagResponse = new StorageAccessGroupResponse(); + sagResponse.setName(sag); + sagResponse.setObjectName(ApiConstants.STORAGE_ACCESS_GROUP); + + if (StringUtils.isNotBlank(name)) { + fetchStorageAccessGroupResponse(sagResponse, name); + } + + responseList.add(sagResponse); + } + return responseList; + } + + private void fetchStorageAccessGroupResponse(StorageAccessGroupResponse sagResponse, String name) { + sagResponse.setHostResponseList(searchForServersWithMinimalResponse(new ListHostsCmd(name))); + sagResponse.setZoneResponseList(listDataCentersWithMinimalResponse(new ListZonesCmd(name))); + sagResponse.setPodResponseList(fetchPodsByStorageAccessGroup(name)); + sagResponse.setClusterResponseList(fetchClustersByStorageAccessGroup(name)); + sagResponse.setStoragePoolResponseList(searchForStoragePoolsWithMinimalResponse(new ListStoragePoolsCmd(name))); + } + + private ListResponse fetchPodsByStorageAccessGroup(String name) { + ListPodsByCmd listPodsByCmd = new ListPodsByCmd(name); + Pair, Integer> podResponsePair = managementService.searchForPods(listPodsByCmd); + List podResponses = podResponsePair.first().stream() + .map(pod -> { + PodResponse podResponse = responseGenerator.createMinimalPodResponse(pod); + podResponse.setObjectName("pod"); + return podResponse; + }).collect(Collectors.toList()); + + ListResponse podResponse = new ListResponse<>(); + podResponse.setResponses(podResponses, podResponsePair.second()); + return podResponse; + } + + private ListResponse fetchClustersByStorageAccessGroup(String name) { + ListClustersCmd listClustersCmd = new ListClustersCmd(name); + Pair, Integer> clusterResponsePair = managementService.searchForClusters(listClustersCmd); + List clusterResponses = clusterResponsePair.first().stream() + .map(cluster -> { + ClusterResponse 
clusterResponse = responseGenerator.createMinimalClusterResponse(cluster); + clusterResponse.setObjectName("cluster"); + return clusterResponse; + }).collect(Collectors.toList()); + + ListResponse clusterResponse = new ListResponse<>(); + clusterResponse.setResponses(clusterResponses, clusterResponsePair.second()); + return clusterResponse; + } + private Pair, Integer> searchForStorageTagsInternal(ListStorageTagsCmd cmd) { Filter searchFilter = new Filter(StoragePoolTagVO.class, "id", Boolean.TRUE, null, null); @@ -4309,6 +4460,20 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q return response; } + private ListResponse listDataCentersWithMinimalResponse(ListZonesCmd cmd) { + Pair, Integer> result = listDataCentersInternal(cmd); + ListResponse response = new ListResponse(); + + ResponseView respView = ResponseView.Restricted; + if (cmd instanceof ListZonesCmdByAdmin || CallContext.current().getCallingAccount().getType() == Account.Type.ADMIN) { + respView = ResponseView.Full; + } + + List dcResponses = ViewResponseHelper.createMinimalDataCenterResponse(respView, result.first().toArray(new DataCenterJoinVO[result.first().size()])); + response.setResponses(dcResponses, result.second()); + return response; + } + private Pair, Integer> listDataCentersInternal(ListZonesCmd cmd) { Account account = CallContext.current().getCallingAccount(); Long domainId = cmd.getDomainId(); @@ -4318,6 +4483,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q String name = cmd.getName(); String networkType = cmd.getNetworkType(); Map resourceTags = cmd.getTags(); + String storageAccessGroup = cmd.getStorageAccessGroup(); SearchBuilder sb = _dcJoinDao.createSearchBuilder(); if (resourceTags != null && !resourceTags.isEmpty()) { @@ -4331,6 +4497,13 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q sb.groupBy(sb.entity().getId()); sb.join("tagSearch", tagSearch, sb.entity().getId(), 
tagSearch.entity().getResourceId(), JoinBuilder.JoinType.INNER); } + if (storageAccessGroup != null) { + sb.and().op("storageAccessGroupExact", sb.entity().getStorageAccessGroups(), Op.EQ); + sb.or("storageAccessGroupPrefix", sb.entity().getStorageAccessGroups(), Op.LIKE); + sb.or("storageAccessGroupSuffix", sb.entity().getStorageAccessGroups(), Op.LIKE); + sb.or("storageAccessGroupMiddle", sb.entity().getStorageAccessGroups(), Op.LIKE); + sb.cp(); + } Filter searchFilter = new Filter(DataCenterJoinVO.class, "sortKey", SortKeyAscending.value(), cmd.getStartIndex(), cmd.getPageSizeVal()); searchFilter.addOrderBy(DataCenterJoinVO.class, "id", true); @@ -4492,6 +4665,13 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q } } + if (storageAccessGroup != null) { + sc.setParameters("storageAccessGroupExact", storageAccessGroup); + sc.setParameters("storageAccessGroupPrefix", storageAccessGroup + ",%"); + sc.setParameters("storageAccessGroupSuffix", "%," + storageAccessGroup); + sc.setParameters("storageAccessGroupMiddle", "%," + storageAccessGroup + ",%"); + } + return _dcJoinDao.searchAndCount(sc, searchFilter); } diff --git a/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java b/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java index bf5c4666984..2779e3a22e6 100644 --- a/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java +++ b/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java @@ -262,6 +262,15 @@ public class ViewResponseHelper { return new ArrayList(vrDataList.values()); } + public static List createMinimalHostResponse(HostJoinVO... hosts) { + LinkedHashMap vrDataList = new LinkedHashMap<>(); + for (HostJoinVO vr : hosts) { + HostResponse vrData = ApiDBUtils.newMinimalHostResponse(vr); + vrDataList.put(vr.getId(), vrData); + } + return new ArrayList(vrDataList.values()); + } + public static List createHostForMigrationResponse(EnumSet details, HostJoinVO... 
hosts) { LinkedHashMap vrDataList = new LinkedHashMap<>(); // Initialise the vrdatalist with the input data @@ -330,6 +339,18 @@ public class ViewResponseHelper { return new ArrayList(vrDataList.values()); } + public static List createMinimalStoragePoolResponse(StoragePoolJoinVO... pools) { + LinkedHashMap vrDataList = new LinkedHashMap<>(); + for (StoragePoolJoinVO vr : pools) { + StoragePoolResponse vrData = vrDataList.get(vr.getId()); + if (vrData == null) { + vrData = ApiDBUtils.newMinimalStoragePoolResponse(vr); + } + vrDataList.put(vr.getId(), vrData); + } + return new ArrayList(vrDataList.values()); + } + public static List createStorageTagResponse(StoragePoolTagVO... storageTags) { ArrayList list = new ArrayList(); @@ -596,12 +617,20 @@ public class ViewResponseHelper { public static List createDataCenterResponse(ResponseView view, Boolean showCapacities, Boolean showResourceImage, DataCenterJoinVO... dcs) { List respList = new ArrayList(); - for (DataCenterJoinVO vt : dcs){ + for (DataCenterJoinVO vt : dcs) { respList.add(ApiDBUtils.newDataCenterResponse(view, vt, showCapacities, showResourceImage)); } return respList; } + public static List createMinimalDataCenterResponse(ResponseView view, DataCenterJoinVO... dcs) { + List respList = new ArrayList(); + for (DataCenterJoinVO vt : dcs) { + respList.add(ApiDBUtils.newMinimalDataCenterResponse(view, vt)); + } + return respList; + } + public static List createTemplateResponse(EnumSet detailsView, ResponseView view, TemplateJoinVO... 
templates) { LinkedHashMap vrDataList = new LinkedHashMap<>(); for (TemplateJoinVO vr : templates) { diff --git a/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDao.java b/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDao.java index a53f86495e7..8e32cd01074 100644 --- a/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDao.java +++ b/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDao.java @@ -25,6 +25,8 @@ import com.cloud.utils.db.GenericDao; public interface DataCenterJoinDao extends GenericDao { + ZoneResponse newMinimalDataCenterResponse(ResponseView view, DataCenterJoinVO dataCenter); + ZoneResponse newDataCenterResponse(ResponseView view, DataCenterJoinVO dof, Boolean showCapacities, Boolean showResourceImage); DataCenterJoinVO newDataCenterView(DataCenter dof); diff --git a/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java index d457f8f7931..b311beeb0cc 100644 --- a/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java @@ -74,6 +74,15 @@ public class DataCenterJoinDaoImpl extends GenericDaoBase { + HostResponse newMinimalHostResponse(HostJoinVO host); + HostResponse newHostResponse(HostJoinVO host, EnumSet details); HostForMigrationResponse newHostForMigrationResponse(HostJoinVO host, EnumSet details); diff --git a/server/src/main/java/com/cloud/api/query/dao/HostJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/HostJoinDaoImpl.java index feee12dcb20..0b7ecf49509 100644 --- a/server/src/main/java/com/cloud/api/query/dao/HostJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/HostJoinDaoImpl.java @@ -221,6 +221,11 @@ public class HostJoinDaoImpl extends GenericDaoBase implements hostResponse.setArch(host.getArch().getType()); } + hostResponse.setStorageAccessGroups(host.getStorageAccessGroups()); 
+ hostResponse.setClusterStorageAccessGroups(host.getClusterStorageAccessGroups()); + hostResponse.setPodStorageAccessGroups(host.getPodStorageAccessGroups()); + hostResponse.setZoneStorageAccessGroups(host.getZoneStorageAccessGroups()); + float cpuWithOverprovisioning = host.getCpus() * host.getSpeed() * cpuOverprovisioningFactor; hostResponse.setCpuAllocatedValue(cpu); String cpuAllocated = calculateResourceAllocatedPercentage(cpu, cpuWithOverprovisioning); @@ -308,6 +313,16 @@ public class HostJoinDaoImpl extends GenericDaoBase implements hostResponse.setObjectName("host"); } + @Override + public HostResponse newMinimalHostResponse(HostJoinVO host) { + HostResponse hostResponse = new HostResponse(); + hostResponse.setId(host.getUuid()); + hostResponse.setName(host.getName()); + hostResponse.setObjectName("host"); + + return hostResponse; + } + @Override public HostResponse newHostResponse(HostJoinVO host, EnumSet details) { HostResponse hostResponse = new HostResponse(); diff --git a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDao.java b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDao.java index 6e0b59492c0..bc19e089205 100644 --- a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDao.java +++ b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDao.java @@ -28,6 +28,8 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; public interface StoragePoolJoinDao extends GenericDao { + StoragePoolResponse newMinimalStoragePoolResponse(StoragePoolJoinVO pool); + StoragePoolResponse newStoragePoolResponse(StoragePoolJoinVO host, boolean customStats); StoragePoolResponse setStoragePoolResponse(StoragePoolResponse response, StoragePoolJoinVO host); diff --git a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java index 89bfaf24766..ce38727e42e 100644 --- 
a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java @@ -17,6 +17,7 @@ package com.cloud.api.query.dao; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import javax.inject.Inject; @@ -102,6 +103,16 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase 0) { - response.setTags(response.getTags() + "," + tag); + if (response.getTags() != null && !response.getTags().isEmpty()) { + List tagsList = new ArrayList<>(Arrays.asList(response.getTags().split(","))); + if (!tagsList.contains(tag)) { + tagsList.add(tag); + } + response.setTags(String.join(",", tagsList)); } else { response.setTags(tag); } } + String storageAccessGroup = sp.getStorageAccessGroup(); + if (storageAccessGroup != null) { + if (response.getStorageAccessGroups() != null && !response.getStorageAccessGroups().isEmpty()) { + List groupList = new ArrayList<>(Arrays.asList(response.getStorageAccessGroups().split(","))); + if (!groupList.contains(storageAccessGroup)) { + groupList.add(storageAccessGroup); + } + response.setStorageAccessGroups(String.join(",", groupList)); + } else { + response.setStorageAccessGroups(storageAccessGroup); + } + } if (response.hasAnnotation() == null) { response.setHasAnnotation(annotationDao.hasAnnotations(sp.getUuid(), AnnotationService.EntityType.PRIMARY_STORAGE.name(), accountManager.isRootAdmin(CallContext.current().getCallingAccount().getId()))); @@ -251,6 +279,7 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase 0) { + response.setStorageAccessGroups(response.getStorageAccessGroups() + "," + storageAccessGroup); + } else { + response.setStorageAccessGroups(storageAccessGroup); + } + } return response; } diff --git a/server/src/main/java/com/cloud/api/query/vo/DataCenterJoinVO.java b/server/src/main/java/com/cloud/api/query/vo/DataCenterJoinVO.java index 23e8766e677..e04577e5eb6 100644 --- 
a/server/src/main/java/com/cloud/api/query/vo/DataCenterJoinVO.java +++ b/server/src/main/java/com/cloud/api/query/vo/DataCenterJoinVO.java @@ -125,6 +125,9 @@ public class DataCenterJoinVO extends BaseViewVO implements InternalIdentity, Id @Enumerated(value = EnumType.STRING) private DataCenter.Type type; + @Column(name = "storage_access_groups") + private String storageAccessGroups; + public DataCenterJoinVO() { } @@ -234,7 +237,11 @@ public class DataCenterJoinVO extends BaseViewVO implements InternalIdentity, Id return sortKey; } - public DataCenter.Type getType() { + public DataCenter.Type getType() { return type; } + + public String getStorageAccessGroups() { + return storageAccessGroups; + } } diff --git a/server/src/main/java/com/cloud/api/query/vo/HostJoinVO.java b/server/src/main/java/com/cloud/api/query/vo/HostJoinVO.java index 72918c3fa27..83cfcc8375f 100644 --- a/server/src/main/java/com/cloud/api/query/vo/HostJoinVO.java +++ b/server/src/main/java/com/cloud/api/query/vo/HostJoinVO.java @@ -185,6 +185,18 @@ public class HostJoinVO extends BaseViewVO implements InternalIdentity, Identity @Column(name = "is_tag_a_rule") private Boolean isTagARule; + @Column(name = "storage_access_groups") + private String storageAccessGroups; + + @Column(name = "cluster_storage_access_groups") + private String clusterStorageAccessGroups; + + @Column(name = "pod_storage_access_groups") + private String podStorageAccessGroups; + + @Column(name = "zone_storage_access_groups") + private String zoneStorageAccessGroups; + @Column(name = "memory_used_capacity") private long memUsedCapacity; @@ -417,6 +429,22 @@ public class HostJoinVO extends BaseViewVO implements InternalIdentity, Identity return isTagARule; } + public String getStorageAccessGroups() { + return storageAccessGroups; + } + + public String getClusterStorageAccessGroups() { + return clusterStorageAccessGroups; + } + + public String getPodStorageAccessGroups() { + return podStorageAccessGroups; + } + + public 
String getZoneStorageAccessGroups() { + return zoneStorageAccessGroups; + } + public String getAnnotation() { return annotation; } diff --git a/server/src/main/java/com/cloud/api/query/vo/StoragePoolJoinVO.java b/server/src/main/java/com/cloud/api/query/vo/StoragePoolJoinVO.java index 41a30fd40d3..0767e468f73 100644 --- a/server/src/main/java/com/cloud/api/query/vo/StoragePoolJoinVO.java +++ b/server/src/main/java/com/cloud/api/query/vo/StoragePoolJoinVO.java @@ -119,6 +119,9 @@ public class StoragePoolJoinVO extends BaseViewVO implements InternalIdentity, I @Column(name = "is_tag_a_rule") private boolean isTagARule; + @Column(name = "storage_access_group") + private String storageAccessGroup; + @Column(name = "disk_used_capacity") private long usedCapacity; @@ -271,6 +274,10 @@ public class StoragePoolJoinVO extends BaseViewVO implements InternalIdentity, I return usedCapacity; } + public String getStorageAccessGroup() { + return storageAccessGroup; + } + public long getReservedCapacity() { return reservedCapacity; } diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index 56a86e65da0..9fc7fc589e5 100644 --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -50,6 +50,7 @@ import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.resource.ResourceManager; import org.apache.cloudstack.acl.SecurityChecker; import org.apache.cloudstack.affinity.AffinityGroup; import org.apache.cloudstack.affinity.AffinityGroupService; @@ -473,6 +474,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati Ipv6Service ipv6Service; @Inject NsxProviderDao nsxProviderDao; + @Inject + ResourceManager resourceManager; // FIXME - why don't we have interface for 
DataCenterLinkLocalIpAddressDao? @Inject @@ -2430,7 +2433,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @Override @ActionEvent(eventType = EventTypes.EVENT_POD_CREATE, eventDescription = "creating pod", async = false) - public Pod createPod(final long zoneId, final String name, final String startIp, final String endIp, final String gateway, final String netmask, String allocationState) { + public Pod createPod(final long zoneId, final String name, final String startIp, final String endIp, final String gateway, final String netmask, String allocationState, List storageAccessGroups) { final DataCenterVO zone = _zoneDao.findById(zoneId); if (zone == null) { throw new InvalidParameterValueException("Please specify a valid zone."); @@ -2456,13 +2459,13 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (allocationState == null) { allocationState = Grouping.AllocationState.Enabled.toString(); } - return createPod(userId.longValue(), name, zone, gateway, cidr, startIp, endIp, allocationState, false); + return createPod(userId.longValue(), name, zone, gateway, cidr, startIp, endIp, allocationState, false, storageAccessGroups); } @Override @DB public HostPodVO createPod(final long userId, final String podName, final DataCenter zone, final String gateway, final String cidr, String startIp, String endIp, final String allocationStateStr, - final boolean skipGatewayOverlapCheck) { + final boolean skipGatewayOverlapCheck, List storageAccessGroups) { final String cidrAddress = DataCenter.Type.Edge.equals(zone.getType()) ? "" : getCidrAddress(cidr); final int cidrSize = DataCenter.Type.Edge.equals(zone.getType()) ? 
0 : getCidrSize(cidr); if (DataCenter.Type.Edge.equals(zone.getType())) { @@ -2495,6 +2498,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati podFinal.setAllocationState(allocationState); } + if (CollectionUtils.isNotEmpty(storageAccessGroups)) { + podFinal.setStorageAccessGroups(String.join(",", storageAccessGroups)); + } + final String startIpFinal = startIp; final String endIpFinal = endIp; HostPodVO hostPodVO = Transaction.execute((TransactionCallback) status -> { @@ -2955,8 +2962,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @Override @DB public DataCenterVO createZone(final long userId, final String zoneName, final String dns1, final String dns2, final String internalDns1, final String internalDns2, final String guestCidr, final String domain, - final Long domainId, final NetworkType zoneType, final String allocationStateStr, final String networkDomain, final boolean isSecurityGroupEnabled, final boolean isLocalStorageEnabled, - final String ip6Dns1, final String ip6Dns2, final boolean isEdge) { + final Long domainId, final NetworkType zoneType, final String allocationStateStr, final String networkDomain, final boolean isSecurityGroupEnabled, final boolean isLocalStorageEnabled, + final String ip6Dns1, final String ip6Dns2, final boolean isEdge, List storageAccessGroups) { // checking the following params outside checkzoneparams method as we do // not use these params for updatezone @@ -2991,6 +2998,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati zoneFinal.setAllocationState(Grouping.AllocationState.Disabled); } zoneFinal.setType(isEdge ? 
DataCenter.Type.Edge : DataCenter.Type.Core); + if (CollectionUtils.isNotEmpty(storageAccessGroups)) { + zoneFinal.setStorageAccessGroups(String.join(",", storageAccessGroups)); + } return Transaction.execute(new TransactionCallback() { @Override @@ -3102,6 +3112,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati boolean isSecurityGroupEnabled = cmd.getSecuritygroupenabled(); final boolean isLocalStorageEnabled = cmd.getLocalStorageEnabled(); final boolean isEdge = cmd.isEdge(); + final List storageAccessGroups = cmd.getStorageAccessGroups(); if (allocationState == null) { allocationState = Grouping.AllocationState.Disabled.toString(); @@ -3135,7 +3146,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } return createZone(userId, zoneName, dns1, dns2, internalDns1, internalDns2, guestCidr, domainVO != null ? domainVO.getName() : null, domainId, zoneType, allocationState, - networkDomain, isSecurityGroupEnabled, isLocalStorageEnabled, ip6Dns1, ip6Dns2, isEdge); + networkDomain, isSecurityGroupEnabled, isLocalStorageEnabled, ip6Dns1, ip6Dns2, isEdge, storageAccessGroups); } @Override diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index e7b926eb4e4..feb7e66159d 100644 --- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -1411,7 +1411,7 @@ StateListener, Configurable { if (vmRequiresSharedStorage) { // check shared pools - List allPoolsInCluster = _storagePoolDao.findPoolsByTags(clusterVO.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, false, 0); + List allPoolsInCluster = _storagePoolDao.findPoolsByTags(clusterVO.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), ScopeType.CLUSTER, null, false, 0); for (StoragePoolVO pool : allPoolsInCluster) { if 
(!allocatorAvoidOutput.shouldAvoid(pool)) { // there's some pool in the cluster that is not yet in avoid set @@ -1658,6 +1658,13 @@ StateListener, Configurable { } protected boolean hostCanAccessSPool(Host host, StoragePool pool) { + if (!_storageMgr.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(host, pool)) { + if (logger.isDebugEnabled()) { + logger.debug(String.format("StoragePool %s and host %s does not have matching storage access groups", pool, host)); + } + return false; + } + boolean hostCanAccessSPool = false; StoragePoolHostVO hostPoolLinkage = _poolHostDao.findByPoolHost(pool.getId(), host.getId()); diff --git a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java index 7c997cc49bc..5c8b09e28ab 100755 --- a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java @@ -32,11 +32,17 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Random; +import java.util.Set; import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.storage.ScopeType; +import com.cloud.storage.StoragePoolAndAccessGroupMapVO; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; +import com.cloud.storage.dao.StoragePoolTagsDao; +import com.cloud.utils.StringUtils; import org.apache.cloudstack.alert.AlertService; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; @@ -54,6 +60,8 @@ import org.apache.cloudstack.api.command.admin.host.ReconnectHostCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd; import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import 
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -61,6 +69,8 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.ObjectUtils; +import org.apache.commons.lang3.ArrayUtils; + import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -172,7 +182,6 @@ import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.user.AccountManager; -import com.cloud.utils.StringUtils; import com.cloud.utils.Ternary; import com.cloud.utils.UriUtils; import com.cloud.utils.component.Manager; @@ -250,6 +259,10 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, @Inject private PrimaryDataStoreDao _storagePoolDao; @Inject + private StoragePoolTagsDao _storagePoolTagsDao; + @Inject + private StoragePoolAndAccessGroupMapDao _storagePoolAccessGroupMapDao; + @Inject private DataCenterIpAddressDao _privateIPAddressDao; @Inject private IPAddressDao _publicIPAddressDao; @@ -513,6 +526,11 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, cluster.setClusterType(clusterType); cluster.setAllocationState(allocationState); cluster.setArch(arch.getType()); + List storageAccessGroups = cmd.getStorageAccessGroups(); + if (CollectionUtils.isNotEmpty(storageAccessGroups)) { + cluster.setStorageAccessGroups(String.join(",", storageAccessGroups)); + } + try { cluster = _clusterDao.persist(cluster); } catch (final Exception e) { @@ -572,7 +590,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, for (final Map.Entry> entry : 
resources.entrySet()) { final ServerResource resource = entry.getKey(); - final HostVO host = (HostVO)createHostAndAgent(resource, entry.getValue(), true, null, false); + final HostVO host = (HostVO)createHostAndAgent(resource, entry.getValue(), true, null, null, false); if (host != null) { hosts.add(host); } @@ -614,6 +632,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final String username = cmd.getUsername(); final String password = cmd.getPassword(); final List hostTags = cmd.getHostTags(); + final List storageAccessGroups = cmd.getStorageAccessGroups(); dcId = _accountMgr.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), dcId); @@ -643,18 +662,18 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, String hypervisorType = cmd.getHypervisor().equalsIgnoreCase(HypervisorGuru.HypervisorCustomDisplayName.value()) ? "Custom" : cmd.getHypervisor(); - return discoverHostsFull(dcId, podId, clusterId, clusterName, url, username, password, hypervisorType, hostTags, cmd.getFullUrlParams(), false); + return discoverHostsFull(dcId, podId, clusterId, clusterName, url, username, password, hypervisorType, hostTags, storageAccessGroups, cmd.getFullUrlParams(), false); } @Override public List discoverHosts(final AddSecondaryStorageCmd cmd) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException { final Long dcId = cmd.getZoneId(); final String url = cmd.getUrl(); - return discoverHostsFull(dcId, null, null, null, url, null, null, "SecondaryStorage", null, null, false); + return discoverHostsFull(dcId, null, null, null, url, null, null, "SecondaryStorage", null, null, null, false); } private List discoverHostsFull(final Long dcId, final Long podId, Long clusterId, final String clusterName, String url, String username, String password, - final String hypervisorType, final List hostTags, final Map params, final boolean deferAgentCreation) throws 
IllegalArgumentException, DiscoveryException, + final String hypervisorType, final List hostTags, List storageAccessGroups, final Map params, final boolean deferAgentCreation) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException { URI uri; @@ -860,9 +879,9 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, HostVO host; if (deferAgentCreation) { - host = (HostVO)createHostAndAgentDeferred(resource, entry.getValue(), true, hostTags, false); + host = (HostVO)createHostAndAgentDeferred(resource, entry.getValue(), true, hostTags, storageAccessGroups, false); } else { - host = (HostVO)createHostAndAgent(resource, entry.getValue(), true, hostTags, false); + host = (HostVO)createHostAndAgent(resource, entry.getValue(), true, hostTags, storageAccessGroups, false); } if (host != null) { hosts.add(host); @@ -1270,7 +1289,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } - return cluster; + return _clusterDao.findById(cluster.getId()); } @Override @@ -1917,6 +1936,741 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } } + private void removeStorageAccessGroupsOnPodsInZone(long zoneId, List newStoragePoolTags, List tagsToDeleteOnZone) { + List pods = _podDao.listByDataCenterId(zoneId); + for (HostPodVO pod : pods) { + removeStorageAccessGroupsOnClustersInPod(pod.getId(), newStoragePoolTags, tagsToDeleteOnZone); + updateStorageAccessGroupsToBeAddedOnPodInZone(pod.getId(), newStoragePoolTags); + } + } + + private void removeStorageAccessGroupsOnClustersInPod(long podId, List newStoragePoolTags, List tagsToDeleteOnPod) { + List clusters = _clusterDao.listByPodId(podId); + for (ClusterVO cluster : clusters) { + updateStorageAccessGroupsToBeDeletedOnHostsInCluster(cluster.getId(), tagsToDeleteOnPod); + updateStorageAccessGroupsToBeAddedOnHostsInCluster(cluster.getId(), newStoragePoolTags); + 
updateStorageAccessGroupsToBeAddedOnClustersInPod(cluster.getId(), newStoragePoolTags); + } + } + + private void updateStorageAccessGroupsToBeDeletedOnHostsInCluster(long clusterId, List storageAccessGroupsToDeleteOnCluster) { + if (CollectionUtils.isEmpty(storageAccessGroupsToDeleteOnCluster)) { + return; + } + + List hosts = _hostDao.findByClusterId(clusterId); + List hostIdsUsingStorageAccessGroups = listOfHostIdsUsingTheStorageAccessGroups(storageAccessGroupsToDeleteOnCluster, clusterId, null, null); + for (HostVO host : hosts) { + String hostStorageAccessGroups = host.getStorageAccessGroups(); + if (hostIdsUsingStorageAccessGroups != null && hostIdsUsingStorageAccessGroups.contains(host.getId())) { + Set mergedSet = hostStorageAccessGroups != null + ? new HashSet<>(Arrays.asList(hostStorageAccessGroups.split(","))) + : new HashSet<>(); + mergedSet.addAll(storageAccessGroupsToDeleteOnCluster); + host.setStorageAccessGroups(String.join(",", mergedSet)); + _hostDao.update(host.getId(), host); + } else { + if (hostStorageAccessGroups != null) { + List hostTagsList = new ArrayList<>(Arrays.asList(hostStorageAccessGroups.split(","))); + hostTagsList.removeAll(storageAccessGroupsToDeleteOnCluster); + String updatedClusterStoragePoolTags = hostTagsList.isEmpty() ? null : String.join(",", hostTagsList); + host.setStorageAccessGroups(updatedClusterStoragePoolTags); + _hostDao.update(host.getId(), host); + } + } + } + } + + private void updateStorageAccessGroupsToBeAddedOnHostsInCluster(long clusterId, List tagsAddedOnCluster) { + if (CollectionUtils.isEmpty(tagsAddedOnCluster)) { + return; + } + + List hosts = _hostDao.findByClusterId(clusterId); + for (HostVO host : hosts) { + String hostStoragePoolTags = host.getStorageAccessGroups(); + Set hostStoragePoolTagsSet = hostStoragePoolTags != null + ? 
new HashSet<>(Arrays.asList(hostStoragePoolTags.split(","))) + : new HashSet<>(); + + hostStoragePoolTagsSet.removeIf(tagsAddedOnCluster::contains); + host.setStorageAccessGroups(hostStoragePoolTagsSet.isEmpty() ? null : String.join(",", hostStoragePoolTagsSet)); + _hostDao.update(host.getId(), host); + } + } + + private void updateStorageAccessGroupsToBeAddedOnClustersInPod(long clusterId, List tagsAddedOnPod) { + if (CollectionUtils.isEmpty(tagsAddedOnPod)) { + return; + } + + ClusterVO cluster = _clusterDao.findById(clusterId); + String clusterStoragePoolTags = cluster.getStorageAccessGroups(); + if (clusterStoragePoolTags != null) { + List clusterTagsList = new ArrayList<>(Arrays.asList(clusterStoragePoolTags.split(","))); + clusterTagsList.removeAll(tagsAddedOnPod); + String updatedClusterStoragePoolTags = clusterTagsList.isEmpty() ? null : String.join(",", clusterTagsList); + cluster.setStorageAccessGroups(updatedClusterStoragePoolTags); + _clusterDao.update(cluster.getId(), cluster); + } + } + + private void updateStorageAccessGroupsToBeAddedOnPodInZone(long podId, List tagsAddedOnZone) { + if (CollectionUtils.isEmpty(tagsAddedOnZone)) { + return; + } + + HostPodVO pod = _podDao.findById(podId); + String podStoragePoolTags = pod.getStorageAccessGroups(); + if (podStoragePoolTags != null) { + List podTagsList = new ArrayList<>(Arrays.asList(podStoragePoolTags.split(","))); + podTagsList.removeAll(tagsAddedOnZone); + String updatedClusterStoragePoolTags = podTagsList.isEmpty() ? 
null : String.join(",", podTagsList); + pod.setStorageAccessGroups(updatedClusterStoragePoolTags); + _podDao.update(pod.getId(), pod); + } + } + + public List listOfHostIdsUsingTheStorageAccessGroups(List storageAccessGroups, Long clusterId, Long podId, Long datacenterId) { + GenericSearchBuilder vmInstanceSearch = _vmDao.createSearchBuilder(Long.class); + vmInstanceSearch.select(null, Func.DISTINCT, vmInstanceSearch.entity().getHostId()); + vmInstanceSearch.and("hostId", vmInstanceSearch.entity().getHostId(), Op.NNULL); + vmInstanceSearch.and("removed", vmInstanceSearch.entity().getRemoved(), Op.NULL); + + GenericSearchBuilder volumeSearch = volumeDao.createSearchBuilder(Long.class); + volumeSearch.selectFields(volumeSearch.entity().getInstanceId()); + volumeSearch.and("state", volumeSearch.entity().getState(), Op.NIN); + + GenericSearchBuilder storagePoolSearch = _storagePoolDao.createSearchBuilder(Long.class); + storagePoolSearch.and("clusterId", storagePoolSearch.entity().getClusterId(), Op.EQ); + storagePoolSearch.and("podId", storagePoolSearch.entity().getPodId(), Op.EQ); + storagePoolSearch.and("datacenterId", storagePoolSearch.entity().getDataCenterId(), Op.EQ); + storagePoolSearch.selectFields(storagePoolSearch.entity().getId()); + + GenericSearchBuilder storageAccessGroupSearch = _storagePoolAccessGroupMapDao.createSearchBuilder(Long.class); + storageAccessGroupSearch.and("sag", storageAccessGroupSearch.entity().getStorageAccessGroup(), Op.IN); + + storagePoolSearch.join("storageAccessGroupSearch", storageAccessGroupSearch, storagePoolSearch.entity().getId(), storageAccessGroupSearch.entity().getPoolId(), JoinBuilder.JoinType.INNER); + storageAccessGroupSearch.done(); + + volumeSearch.join("storagePoolSearch", storagePoolSearch, volumeSearch.entity().getPoolId(), storagePoolSearch.entity().getId(), JoinBuilder.JoinType.INNER); + storagePoolSearch.done(); + + vmInstanceSearch.join("volumeSearch", volumeSearch, vmInstanceSearch.entity().getId(), 
volumeSearch.entity().getInstanceId(), JoinBuilder.JoinType.INNER); + volumeSearch.done(); + + vmInstanceSearch.done(); + + SearchCriteria sc = vmInstanceSearch.create(); + sc.setJoinParameters("storageAccessGroupSearch", "sag", storageAccessGroups.toArray()); + sc.setJoinParameters("volumeSearch", "state", new String[]{"Destroy", "Error", "Expunging", "Expunged"}); + if (clusterId != null) { + sc.setParameters("storagePoolSearch", "clusterId", clusterId); + } + if (podId != null) { + sc.setParameters("storagePoolSearch", "podId", podId); + } + if (datacenterId != null) { + sc.setParameters("storagePoolSearch", "datacenterId", datacenterId); + } + + return _vmDao.customSearch(sc, null); + } + + public List listOfHostIdsUsingTheStoragePool(Long storagePoolId) { + GenericSearchBuilder vmInstanceSearch = _vmDao.createSearchBuilder(Long.class); + vmInstanceSearch.select(null, Func.DISTINCT, vmInstanceSearch.entity().getHostId()); + vmInstanceSearch.and("hostId", vmInstanceSearch.entity().getHostId(), Op.NNULL); + vmInstanceSearch.and("removed", vmInstanceSearch.entity().getRemoved(), Op.NULL); + + GenericSearchBuilder volumeSearch = volumeDao.createSearchBuilder(Long.class); + volumeSearch.selectFields(volumeSearch.entity().getInstanceId()); + volumeSearch.and("state", volumeSearch.entity().getState(), Op.NIN); + + GenericSearchBuilder storagePoolSearch = _storagePoolDao.createSearchBuilder(Long.class); + storagePoolSearch.selectFields(storagePoolSearch.entity().getId()); + storagePoolSearch.and("poolId", storagePoolSearch.entity().getId(), Op.EQ); + + volumeSearch.join("storagePoolSearch", storagePoolSearch, volumeSearch.entity().getPoolId(), storagePoolSearch.entity().getId(), JoinBuilder.JoinType.INNER); + storagePoolSearch.done(); + + vmInstanceSearch.join("volumeSearch", volumeSearch, vmInstanceSearch.entity().getId(), volumeSearch.entity().getInstanceId(), JoinBuilder.JoinType.INNER); + volumeSearch.done(); + + vmInstanceSearch.done(); + + SearchCriteria sc = 
vmInstanceSearch.create(); + sc.setJoinParameters("storagePoolSearch", "poolId", storagePoolId); + sc.setJoinParameters("volumeSearch", "state", new String[]{"Destroy", "Error", "Expunging", "Expunged"}); + + return _vmDao.customSearch(sc, null); + } + + public List listOfVolumesUsingTheStorageAccessGroups(List storageAccessGroups, Long hostId, Long clusterId, Long podId, Long datacenterId) { + SearchBuilder volumeSearch = volumeDao.createSearchBuilder(); + volumeSearch.and("state", volumeSearch.entity().getState(), Op.NIN); + + GenericSearchBuilder vmInstanceSearch = _vmDao.createSearchBuilder(Long.class); + vmInstanceSearch.selectFields(vmInstanceSearch.entity().getId()); + vmInstanceSearch.and("hostId", vmInstanceSearch.entity().getHostId(), Op.EQ); + vmInstanceSearch.and("removed", vmInstanceSearch.entity().getRemoved(), Op.NULL); + + GenericSearchBuilder storagePoolSearch = _storagePoolDao.createSearchBuilder(Long.class); + storagePoolSearch.and("clusterId", storagePoolSearch.entity().getClusterId(), Op.EQ); + storagePoolSearch.and("podId", storagePoolSearch.entity().getPodId(), Op.EQ); + storagePoolSearch.and("datacenterId", storagePoolSearch.entity().getDataCenterId(), Op.EQ); + storagePoolSearch.selectFields(storagePoolSearch.entity().getId()); + + GenericSearchBuilder storageAccessGroupSearch = _storagePoolAccessGroupMapDao.createSearchBuilder(Long.class); + storageAccessGroupSearch.and("sag", storageAccessGroupSearch.entity().getStorageAccessGroup(), Op.IN); + + storagePoolSearch.join("storageAccessGroupSearch", storageAccessGroupSearch, storagePoolSearch.entity().getId(), storageAccessGroupSearch.entity().getPoolId(), JoinBuilder.JoinType.INNER); + + volumeSearch.join("storagePoolSearch", storagePoolSearch, volumeSearch.entity().getPoolId(), storagePoolSearch.entity().getId(), JoinBuilder.JoinType.INNER); + + volumeSearch.join("vmInstanceSearch", vmInstanceSearch, volumeSearch.entity().getInstanceId(), vmInstanceSearch.entity().getId(), 
JoinBuilder.JoinType.INNER); + + storageAccessGroupSearch.done(); + storagePoolSearch.done(); + vmInstanceSearch.done(); + volumeSearch.done(); + + SearchCriteria sc = volumeSearch.create(); + sc.setParameters( "state", new String[]{"Destroy", "Error", "Expunging", "Expunged"}); + sc.setJoinParameters("storageAccessGroupSearch", "sag", storageAccessGroups.toArray()); + if (hostId != null) { + sc.setJoinParameters("vmInstanceSearch", "hostId", hostId); + } + if (clusterId != null) { + sc.setJoinParameters("storagePoolSearch", "clusterId", clusterId); + } + if (podId != null) { + sc.setJoinParameters("storagePoolSearch", "podId", podId); + } + if (datacenterId != null) { + sc.setJoinParameters("storagePoolSearch", "datacenterId", datacenterId); + } + + return volumeDao.customSearch(sc, null); + } + + private List listOfStoragePoolIDsUsedByHost(long hostId) { + GenericSearchBuilder vmInstanceSearch = _vmDao.createSearchBuilder(Long.class); + vmInstanceSearch.selectFields(vmInstanceSearch.entity().getId()); + vmInstanceSearch.and("hostId", vmInstanceSearch.entity().getHostId(), Op.EQ); + + GenericSearchBuilder volumeSearch = volumeDao.createSearchBuilder(Long.class); + volumeSearch.selectFields(volumeSearch.entity().getPoolId()); + volumeSearch.and("state", volumeSearch.entity().getState(), Op.EQ); + + volumeSearch.join("vmInstanceSearch", vmInstanceSearch, volumeSearch.entity().getInstanceId(), vmInstanceSearch.entity().getId(), JoinBuilder.JoinType.INNER); + vmInstanceSearch.done(); + + GenericSearchBuilder storagePoolSearch = _storagePoolDao.createSearchBuilder(Long.class); + storagePoolSearch.select(null, Func.DISTINCT, storagePoolSearch.entity().getId()); + + storagePoolSearch.join("volumeSearch", volumeSearch, storagePoolSearch.entity().getId(), volumeSearch.entity().getPoolId(), JoinBuilder.JoinType.INNER); + volumeSearch.done(); + + storagePoolSearch.done(); + + SearchCriteria sc = storagePoolSearch.create(); + sc.setJoinParameters("vmInstanceSearch", "hostId", 
hostId); + sc.setJoinParameters("volumeSearch", "state", "Ready"); + + List storagePoolsInUse = _storagePoolDao.customSearch(sc, null); + return storagePoolsInUse; + } + + @Override + public void updateStoragePoolConnectionsOnHosts(Long poolId, List storageAccessGroups) { + StoragePoolVO storagePool = _storagePoolDao.findById(poolId); + List hosts = new ArrayList<>(); + + if (storagePool.getScope().equals(ScopeType.CLUSTER)) { + List hostsInCluster = listAllUpHosts(Host.Type.Routing, storagePool.getClusterId(), storagePool.getPodId(), storagePool.getDataCenterId()); + hosts.addAll(hostsInCluster); + } else if (storagePool.getScope().equals(ScopeType.ZONE)) { + List hostsInZone = listAllUpHosts(Host.Type.Routing, null, null, storagePool.getDataCenterId()); + hosts.addAll(hostsInZone); + } + + List hostsToConnect = new ArrayList<>(); + List hostsToDisconnect = new ArrayList<>(); + boolean storagePoolHasAccessGroups = CollectionUtils.isNotEmpty(storageAccessGroups); + + for (HostVO host : hosts) { + String[] storageAccessGroupsOnHost = _storageMgr.getStorageAccessGroups(null, null, null, host.getId()); + List listOfStorageAccessGroupsOnHost = Arrays.asList(storageAccessGroupsOnHost); + StoragePoolHostVO hostPoolRecord = _storagePoolHostDao.findByPoolHost(storagePool.getId(), host.getId()); + + if (storagePoolHasAccessGroups) { + List intersection = new ArrayList<>(listOfStorageAccessGroupsOnHost); + intersection.retainAll(storageAccessGroups); + if (CollectionUtils.isNotEmpty(intersection)) { + if (hostPoolRecord == null) { + hostsToConnect.add(host); + } + } else { + hostsToDisconnect.add(host); + } + } else { + if (hostPoolRecord == null) { + hostsToConnect.add(host); + } + } + } + + if (CollectionUtils.isNotEmpty(hostsToDisconnect)) { + List hostIdsUsingTheStoragePool = listOfHostIdsUsingTheStoragePool(poolId); + List hostIdsToDisconnect = hostsToDisconnect.stream() + .map(HostVO::getId) + .collect(Collectors.toList()); + List conflictingHostIds = new 
ArrayList<>(CollectionUtils.intersection(hostIdsToDisconnect, hostIdsUsingTheStoragePool)); + if (CollectionUtils.isNotEmpty(conflictingHostIds)) { + Map> hostVolumeMap = new HashMap<>(); + List volumesInPool = volumeDao.findByPoolId(poolId); + Map vmInstanceCache = new HashMap<>(); + + for (Long hostId : conflictingHostIds) { + HostVO host = _hostDao.findById(hostId); + List matchingVolumes = volumesInPool.stream() + .filter(volume -> { + Long vmId = volume.getInstanceId(); + if (vmId == null) return false; + + VMInstanceVO vmInstance = vmInstanceCache.computeIfAbsent(vmId, _vmDao::findById); + return vmInstance != null && hostId.equals(vmInstance.getHostId()); + }) + .collect(Collectors.toList()); + if (!matchingVolumes.isEmpty()) { + hostVolumeMap.put(host, matchingVolumes); + } + } + + logger.error(String.format("Conflict detected: Hosts using the storage pool that need to be disconnected or " + + "connected to the pool: Host IDs and volumes: %s", hostVolumeMap)); + throw new CloudRuntimeException("Storage access groups cannot be updated as they are currently in use by some hosts. 
Please check the logs."); + } + } + + if (!hostsToConnect.isEmpty()) { + for (HostVO host : hostsToConnect) { + logger.debug(String.format("Connecting [%s] to [%s]", host, storagePool)); + connectHostToStoragePool(host, storagePool); + } + } + + if (!hostsToDisconnect.isEmpty()) { + for (HostVO host : hostsToDisconnect) { + logger.debug(String.format("Disconnecting [%s] from [%s]", host, storagePool)); + disconnectHostFromStoragePool(host, storagePool); + } + } + } + + protected List filterHostsBasedOnStorageAccessGroups(List allHosts, List storageAccessGroups) { + List hostsToConnect = new ArrayList<>(); + for (HostVO host : allHosts) { + String[] storageAccessGroupsOnHost = _storageMgr.getStorageAccessGroups(null, null, null, host.getId()); + List listOfStorageAccessGroupsOnHost = Arrays.asList(storageAccessGroupsOnHost); + if (CollectionUtils.isNotEmpty(storageAccessGroups)) { + List intersection = new ArrayList<>(listOfStorageAccessGroupsOnHost); + intersection.retainAll(storageAccessGroups); + if (CollectionUtils.isNotEmpty(intersection)) { + hostsToConnect.add(host); + } + } else { + hostsToConnect.add(host); + } + } + return hostsToConnect; + } + + @Override + public List getEligibleUpHostsInClusterForStorageConnection(PrimaryDataStoreInfo primaryStore) { + List allHosts = listAllUpHosts(Host.Type.Routing, primaryStore.getClusterId(), primaryStore.getPodId(), primaryStore.getDataCenterId()); + if (CollectionUtils.isEmpty(allHosts)) { + _storagePoolDao.expunge(primaryStore.getId()); + throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primaryStore.getClusterId()); + } + + List storageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(primaryStore.getId()); + return filterHostsBasedOnStorageAccessGroups(allHosts, storageAccessGroups); + } + + @Override + public List getEligibleUpAndEnabledHostsInClusterForStorageConnection(PrimaryDataStoreInfo primaryStore) { + List allHosts = 
listAllUpAndEnabledHosts(Host.Type.Routing, primaryStore.getClusterId(), primaryStore.getPodId(), primaryStore.getDataCenterId()); + if (CollectionUtils.isEmpty(allHosts)) { + _storagePoolDao.expunge(primaryStore.getId()); + throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primaryStore.getClusterId()); + } + + List storageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(primaryStore.getId()); + return filterHostsBasedOnStorageAccessGroups(allHosts, storageAccessGroups); + } + + @Override + public List getEligibleUpAndEnabledHostsInZoneForStorageConnection(DataStore dataStore, long zoneId, HypervisorType hypervisorType) { + List allHosts = listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, zoneId); + + List storageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(dataStore.getId()); + return filterHostsBasedOnStorageAccessGroups(allHosts, storageAccessGroups); + } + + protected void checkIfAllHostsInUse(List sagsToDelete, Long clusterId, Long podId, Long zoneId) { + if (CollectionUtils.isEmpty(sagsToDelete)) { + return; + } + + List hostIdsUsingStorageAccessGroups = listOfHostIdsUsingTheStorageAccessGroups(sagsToDelete, clusterId, podId, zoneId); + + // Check for zone level hosts + if (zoneId != null) { + List hostsInZone = _hostDao.findByDataCenterId(zoneId); + Set hostIdsInUseSet = hostIdsUsingStorageAccessGroups.stream().collect(Collectors.toSet()); + + boolean allInUseZone = hostsInZone.stream() + .map(HostVO::getId) + .allMatch(hostIdsInUseSet::contains); + + if (allInUseZone) { + throw new CloudRuntimeException("All hosts in the zone are using the storage access groups"); + } + } + + // Check for cluster level hosts + if (clusterId != null) { + List hostsInCluster = _hostDao.findByClusterId(clusterId, Type.Routing); + Set hostIdsInUseSet = hostIdsUsingStorageAccessGroups.stream().collect(Collectors.toSet()); + + boolean allInUseCluster = hostsInCluster.stream() + 
.map(HostVO::getId) + .allMatch(hostIdsInUseSet::contains); + + if (allInUseCluster) { + throw new CloudRuntimeException("All hosts in the cluster are using the storage access groups"); + } + } + + // Check for pod level hosts + if (podId != null) { + List hostsInPod = _hostDao.findByPodId(podId, Type.Routing); + Set hostIdsInUseSet = hostIdsUsingStorageAccessGroups.stream().collect(Collectors.toSet()); + + boolean allInUsePod = hostsInPod.stream() + .map(HostVO::getId) + .allMatch(hostIdsInUseSet::contains); + + if (allInUsePod) { + throw new CloudRuntimeException("All hosts in the pod are using the storage access groups"); + } + } + } + + @Override + public void updateZoneStorageAccessGroups(long zoneId, List newStorageAccessGroups) { + DataCenterVO zoneVO = _dcDao.findById(zoneId); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Updating storage access groups %s to the zone %s", newStorageAccessGroups, zoneVO)); + } + + List sagsToAdd = new ArrayList<>(newStorageAccessGroups); + String sagsOnPod = zoneVO.getStorageAccessGroups(); + List sagsToDelete; + if (sagsOnPod == null || sagsOnPod.trim().isEmpty()) { + sagsToDelete = new ArrayList<>(); + } else { + sagsToDelete = new ArrayList<>(Arrays.asList(sagsOnPod.split(","))); + } + sagsToDelete.removeAll(newStorageAccessGroups); + checkIfAllHostsInUse(sagsToDelete, null, null, zoneId); + + Map> hostsAndStorageAccessGroupsMap = new HashMap<>(); + List pods = _podDao.listByDataCenterId(zoneId); + for (HostPodVO pod : pods) { + List hostsInPod = _hostDao.findHypervisorHostInPod(pod.getId()); + for (HostVO host : hostsInPod) { + String[] existingSAGs = _storageMgr.getStorageAccessGroups(null, null, null, host.getId()); + List existingSAGsList = new ArrayList<>(Arrays.asList(existingSAGs)); + existingSAGsList.removeAll(sagsToDelete); + List combinedSAGs = new ArrayList<>(sagsToAdd); + combinedSAGs.addAll(existingSAGsList); + hostsAndStorageAccessGroupsMap.put(host, combinedSAGs); + } + 
updateConnectionsBetweenHostsAndStoragePools(hostsAndStorageAccessGroupsMap); + } + + removeStorageAccessGroupsOnPodsInZone(zoneVO.getId(), newStorageAccessGroups, sagsToDelete); + } + + @Override + public void updatePodStorageAccessGroups(long podId, List newStorageAccessGroups) { + HostPodVO podVO = _podDao.findById(podId); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Updating storage access groups %s to the pod %s", newStorageAccessGroups, podVO)); + } + + List sagsToAdd = new ArrayList<>(newStorageAccessGroups); + + String sagsOnPod = podVO.getStorageAccessGroups(); + List sagsToDelete; + if (sagsOnPod == null || sagsOnPod.trim().isEmpty()) { + sagsToDelete = new ArrayList<>(); + } else { + sagsToDelete = new ArrayList<>(Arrays.asList(sagsOnPod.split(","))); + } + sagsToDelete.removeAll(newStorageAccessGroups); + + checkIfAllHostsInUse(sagsToDelete, null, podId, null); + + Map> hostsAndStorageAccessGroupsMap = new HashMap<>(); + List hostsInPod = _hostDao.findHypervisorHostInPod(podId); + for (HostVO host : hostsInPod) { + String[] existingSAGs = _storageMgr.getStorageAccessGroups(null, null, null, host.getId()); + List existingSAGsList = new ArrayList<>(Arrays.asList(existingSAGs)); + existingSAGsList.removeAll(sagsToDelete); + List combinedSAGs = new ArrayList<>(sagsToAdd); + combinedSAGs.addAll(existingSAGsList); + hostsAndStorageAccessGroupsMap.put(host, combinedSAGs); + } + + updateConnectionsBetweenHostsAndStoragePools(hostsAndStorageAccessGroupsMap); + removeStorageAccessGroupsOnClustersInPod(podId, newStorageAccessGroups, sagsToDelete); + } + + @Override + public void updateClusterStorageAccessGroups(Long clusterId, List newStorageAccessGroups) { + ClusterVO cluster = (ClusterVO) getCluster(clusterId); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Updating storage access groups %s to the cluster %s", newStorageAccessGroups, cluster)); + } + + List sagsToAdd = new ArrayList<>(newStorageAccessGroups); + + String 
existingClusterStorageAccessGroups = cluster.getStorageAccessGroups(); + List sagsToDelete; + if (existingClusterStorageAccessGroups == null || existingClusterStorageAccessGroups.trim().isEmpty()) { + sagsToDelete = new ArrayList<>(); + } else { + sagsToDelete = new ArrayList<>(Arrays.asList(existingClusterStorageAccessGroups.split(","))); + } + sagsToDelete.removeAll(newStorageAccessGroups); + + checkIfAllHostsInUse(sagsToDelete, clusterId, null, null); + + List hostsInCluster = _hostDao.findHypervisorHostInCluster(cluster.getId()); + Map> hostsAndStorageAccessGroupsMap = new HashMap<>(); + for (HostVO host : hostsInCluster) { + String[] existingSAGs = _storageMgr.getStorageAccessGroups(null, null, null, host.getId()); + Set existingSAGsSet = new HashSet<>(Arrays.asList(existingSAGs)); + existingSAGsSet.removeAll(sagsToDelete); + List existingSAGsList = new ArrayList<>(existingSAGsSet); + Set combinedSAGsSet = new HashSet<>(sagsToAdd); + combinedSAGsSet.addAll(existingSAGsList); + + hostsAndStorageAccessGroupsMap.put(host, new ArrayList<>(combinedSAGsSet)); + } + + updateConnectionsBetweenHostsAndStoragePools(hostsAndStorageAccessGroupsMap); + + updateStorageAccessGroupsToBeDeletedOnHostsInCluster(cluster.getId(), sagsToDelete); + updateStorageAccessGroupsToBeAddedOnHostsInCluster(cluster.getId(), newStorageAccessGroups); + } + + @Override + public void updateHostStorageAccessGroups(Long hostId, List newStorageAccessGroups) { + HostVO host = _hostDao.findById(hostId); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Updating storage access groups %s to the host %s", newStorageAccessGroups, host)); + } + + List sagsToAdd = new ArrayList<>(newStorageAccessGroups); + String[] sagsOnCluster = _storageMgr.getStorageAccessGroups(null, null, host.getClusterId(), null); + if (ArrayUtils.isNotEmpty(sagsOnCluster)) { + sagsToAdd.addAll(Arrays.asList(sagsOnCluster)); + } + + String sagsOnHost = host.getStorageAccessGroups(); + List sagsToDelete; + if 
(sagsOnHost == null || sagsOnHost.trim().isEmpty()) { + sagsToDelete = new ArrayList<>(); + } else { + sagsToDelete = new ArrayList<>(Arrays.asList(sagsOnHost.split(","))); + } + sagsToDelete.removeAll(newStorageAccessGroups); + + checkIfAnyVolumesInUse(sagsToAdd, sagsToDelete, host); + + updateConnectionsBetweenHostsAndStoragePools(Collections.singletonMap(host, sagsToAdd)); + + host.setStorageAccessGroups(CollectionUtils.isEmpty(newStorageAccessGroups) ? null : String.join(",", newStorageAccessGroups)); + _hostDao.update(host.getId(), host); + } + + protected void checkIfAnyVolumesInUse(List sagsToAdd, List sagsToDelete, HostVO host) { + if (CollectionUtils.isNotEmpty(sagsToDelete)) { + List volumesUsingTheStoragePoolAccessGroups = listOfVolumesUsingTheStorageAccessGroups(sagsToDelete, host.getId(), null, null, null); + if (CollectionUtils.isNotEmpty(volumesUsingTheStoragePoolAccessGroups)) { + List poolsToAdd; + if (CollectionUtils.isNotEmpty(sagsToAdd)) { + poolsToAdd = getStoragePoolsByAccessGroups(host.getDataCenterId(), host.getPodId(), host.getClusterId(), sagsToAdd.toArray(new String[0]), true); + } else { + poolsToAdd = getStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), host.getPodId(), host.getClusterId()); + } + if (CollectionUtils.isNotEmpty(poolsToAdd)) { + Set poolIdsToAdd = poolsToAdd.stream() + .map(StoragePoolVO::getId) + .collect(Collectors.toSet()); + volumesUsingTheStoragePoolAccessGroups.removeIf(volume -> poolIdsToAdd.contains(volume.getPoolId())); + } + if (CollectionUtils.isNotEmpty(volumesUsingTheStoragePoolAccessGroups)) { + logger.error(String.format("There are volumes in storage pools with the Storage Access Groups that need to be deleted or " + + "in the storage pools which are already connected to the host. 
Those volume IDs are %s", volumesUsingTheStoragePoolAccessGroups)); + throw new CloudRuntimeException("There are volumes in storage pools with the Storage Access Groups that need to be deleted or " + + "in the storage pools which are already connected to the host"); + } + } + } + } + + protected void updateConnectionsBetweenHostsAndStoragePools(Map> hostsAndStorageAccessGroupsMap) { + List hostsList = new ArrayList<>(hostsAndStorageAccessGroupsMap.keySet()); + Map> hostStoragePoolsMapBefore = getHostStoragePoolsBefore(hostsList); + + Map> hostPoolsToAddMapAfter = getHostPoolsToAddAfter(hostsAndStorageAccessGroupsMap); + + disconnectPoolsNotInAccessGroups(hostStoragePoolsMapBefore, hostPoolsToAddMapAfter); + } + + private Map> getHostStoragePoolsBefore(List hostsList) { + Map> hostStoragePoolsMapBefore = new HashMap<>(); + for (HostVO host : hostsList) { + List storagePoolsConnectedToHost = _storageMgr.findStoragePoolsConnectedToHost(host.getId()); + List storagePoolsConnectedBefore = new ArrayList<>(); + if (CollectionUtils.isNotEmpty(storagePoolsConnectedToHost)) { + for (StoragePoolHostVO poolHost : storagePoolsConnectedToHost) { + StoragePoolVO pool = _storagePoolDao.findById(poolHost.getPoolId()); + if (pool != null) { + storagePoolsConnectedBefore.add(pool); + } + } + } + hostStoragePoolsMapBefore.put(host, storagePoolsConnectedBefore); + } + return hostStoragePoolsMapBefore; + } + + private Map> getHostPoolsToAddAfter(Map> hostsAndStorageAccessGroupsMap) { + Map> hostPoolsToAddMapAfter = new HashMap<>(); + for (Map.Entry> entry : hostsAndStorageAccessGroupsMap.entrySet()) { + HostVO host = entry.getKey(); + List sagsToAdd = entry.getValue(); + List poolsToAdd; + if (CollectionUtils.isNotEmpty(sagsToAdd)) { + poolsToAdd = getStoragePoolsByAccessGroups(host.getDataCenterId(), host.getPodId(), host.getClusterId(), sagsToAdd.toArray(new String[0]), true); + } else { + poolsToAdd = getStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), host.getPodId(), 
host.getClusterId()); + } + hostPoolsToAddMapAfter.put(host, poolsToAdd); + connectHostToStoragePools(host, poolsToAdd); + } + return hostPoolsToAddMapAfter; + } + + private void disconnectPoolsNotInAccessGroups(Map> hostStoragePoolsMapBefore, Map> hostPoolsToAddMapAfter) { + for (Map.Entry> entry : hostStoragePoolsMapBefore.entrySet()) { + HostVO host = entry.getKey(); + List storagePoolsConnectedBefore = entry.getValue(); + List poolsToAdd = hostPoolsToAddMapAfter.get(host); + List poolsToDelete = new ArrayList<>(); + + for (StoragePoolVO pool : storagePoolsConnectedBefore) { + if (poolsToAdd == null || !poolsToAdd.contains(pool)) { + poolsToDelete.add(pool); + } + } + + if (CollectionUtils.isNotEmpty(poolsToDelete)) { + disconnectHostFromStoragePools(host, poolsToDelete); + } + } + } + + protected List getStoragePoolsByAccessGroups(Long dcId, Long podId, Long clusterId, String[] storageAccessGroups, boolean includeEmptyTags) { + List allPoolsByTags = new ArrayList<>(); + allPoolsByTags.addAll(_storagePoolDao.findPoolsByAccessGroupsForHostConnection(dcId, podId, clusterId, ScopeType.CLUSTER, storageAccessGroups)); + allPoolsByTags.addAll(_storagePoolDao.findZoneWideStoragePoolsByAccessGroupsForHostConnection(dcId, storageAccessGroups)); + if (includeEmptyTags) { + allPoolsByTags.addAll(_storagePoolDao.findStoragePoolsByEmptyStorageAccessGroups(dcId, podId, clusterId, ScopeType.CLUSTER, null)); + allPoolsByTags.addAll(_storagePoolDao.findStoragePoolsByEmptyStorageAccessGroups(dcId, null, null, ScopeType.ZONE, null)); + } + + return allPoolsByTags; + } + + private List getStoragePoolsByEmptyStorageAccessGroups(Long dcId, Long podId, Long clusterId) { + List allPoolsByTags = new ArrayList<>(); + allPoolsByTags.addAll(_storagePoolDao.findStoragePoolsByEmptyStorageAccessGroups(dcId, podId, clusterId, ScopeType.CLUSTER, null)); + allPoolsByTags.addAll(_storagePoolDao.findStoragePoolsByEmptyStorageAccessGroups(dcId, null, null, ScopeType.ZONE, null)); + + return 
allPoolsByTags; + } + + private void connectHostToStoragePools(HostVO host, List poolsToAdd) { + List storagePoolsConnectedToHost = _storageMgr.findStoragePoolsConnectedToHost(host.getId()); + for (StoragePoolVO storagePool : poolsToAdd) { + if (CollectionUtils.isNotEmpty(storagePoolsConnectedToHost)) { + boolean isPresent = storagePoolsConnectedToHost.stream() + .anyMatch(poolHost -> poolHost.getPoolId() == storagePool.getId()); + if (isPresent) { + continue; + } + } + try { + _storageMgr.connectHostToSharedPool(host, storagePool.getId()); + } catch (StorageConflictException se) { + throw new CloudRuntimeException(String.format("Unable to establish a connection between pool %s and the host %s", storagePool, host)); + } catch (Exception e) { + logger.warn(String.format("Unable to establish a connection between pool %s and the host %s", storagePool, host), e); + } + } + } + + protected void connectHostToStoragePool(HostVO host, StoragePoolVO storagePool) { + try { + _storageMgr.connectHostToSharedPool(host, storagePool.getId()); + } catch (StorageConflictException se) { + throw new CloudRuntimeException(String.format("Unable to establish a connection between pool %s and the host %s", storagePool, host)); + } catch (Exception e) { + logger.warn(String.format("Unable to establish a connection between pool %s and the host %s", storagePool, host), e); + } + } + + private void disconnectHostFromStoragePools(HostVO host, List poolsToDelete) { + List usedStoragePoolIDs = listOfStoragePoolIDsUsedByHost(host.getId()); + if (usedStoragePoolIDs != null) { + poolsToDelete.removeIf(poolToDelete -> + usedStoragePoolIDs.stream().anyMatch(usedPoolId -> usedPoolId == poolToDelete.getId()) + ); + } + for (StoragePoolVO storagePool : poolsToDelete) { + disconnectHostFromStoragePool(host, storagePool); + } + } + + protected void disconnectHostFromStoragePool(HostVO host, StoragePoolVO storagePool) { + try { + _storageMgr.disconnectHostFromSharedPool(host, storagePool); + 
_storagePoolHostDao.deleteStoragePoolHostDetails(host.getId(), storagePool.getId()); + } catch (StorageConflictException se) { + throw new CloudRuntimeException(String.format("Unable to disconnect the pool %s and the host %s", storagePool, host)); + } catch (Exception e) { + logger.warn(String.format("Unable to disconnect the pool %s and the host %s", storagePool, host), e); + } + } + private void updateHostTags(HostVO host, Long hostId, List hostTags, Boolean isTagARule) { List activeVMs = _vmDao.listByHostId(hostId); logger.warn(String.format("The following active VMs [%s] are using the host [%s]. " + @@ -2261,7 +3015,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } protected HostVO createHostVO(final StartupCommand[] cmds, final ServerResource resource, final Map details, List hostTags, - final ResourceStateAdapter.Event stateEvent) { + List storageAccessGroups, final ResourceStateAdapter.Event stateEvent) { boolean newHost = false; StartupCommand startup = cmds[0]; @@ -2353,6 +3107,9 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, host.setStorageUrl(startup.getIqn()); host.setLastPinged(System.currentTimeMillis() >> 10); host.setHostTags(hostTags, false); + if ((CollectionUtils.isNotEmpty(storageAccessGroups))) { + host.setStorageAccessGroups(String.join(",", storageAccessGroups)); + } host.setDetails(details); host.setArch(CPU.CPUArch.fromType(startup.getArch())); if (startup.getStorageIpAddressDeux() != null) { @@ -2495,11 +3252,11 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } } - private Host createHostAndAgent(final ServerResource resource, final Map details, final boolean old, final List hostTags, final boolean forRebalance) { - return createHostAndAgent(resource, details, old, hostTags, forRebalance, false); + private Host createHostAndAgent(final ServerResource resource, final Map details, final boolean old, final List hostTags, List 
storageAccessGroups, final boolean forRebalance) { + return createHostAndAgent(resource, details, old, hostTags, storageAccessGroups, forRebalance, false); } - private Host createHostAndAgent(final ServerResource resource, final Map details, final boolean old, final List hostTags, final boolean forRebalance, final boolean isTransferredConnection) { + private Host createHostAndAgent(final ServerResource resource, final Map details, final boolean old, final List hostTags, List storageAccessGroups, final boolean forRebalance, final boolean isTransferredConnection) { HostVO host = null; StartupCommand[] cmds = null; boolean hostExists = false; @@ -2541,7 +3298,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, // find out if the host we want to connect to is new (so we can send an event) boolean newHost = getNewHost(cmds) == null; - host = createHostVO(cmds, resource, details, hostTags, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_DIRECT_CONNECT); + host = createHostVO(cmds, resource, details, hostTags, storageAccessGroups, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_DIRECT_CONNECT); if (host != null) { created = _agentMgr.handleDirectConnectAgent(host, cmds, resource, forRebalance, newHost); @@ -2568,7 +3325,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, return host; } - private Host createHostAndAgentDeferred(final ServerResource resource, final Map details, final boolean old, final List hostTags, final boolean forRebalance) { + private Host createHostAndAgentDeferred(final ServerResource resource, final Map details, final boolean old, final List hostTags, List storageAccessGroups, final boolean forRebalance) { HostVO host = null; StartupCommand[] cmds = null; boolean hostExists = false; @@ -2625,7 +3382,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, // find out if the host we want to connect to is new (so we can send an event) newHost = 
getNewHost(cmds) == null; - host = createHostVO(cmds, resource, details, hostTags, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_DIRECT_CONNECT); + host = createHostVO(cmds, resource, details, hostTags, storageAccessGroups, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_DIRECT_CONNECT); if (host != null) { // if first host in cluster no need to defer agent creation @@ -2682,7 +3439,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, @Override public Host createHostAndAgent(final Long hostId, final ServerResource resource, final Map details, final boolean old, final List hostTags, final boolean forRebalance, boolean isTransferredConnection) { - final Host host = createHostAndAgent(resource, details, old, hostTags, forRebalance, isTransferredConnection); + final Host host = createHostAndAgent(resource, details, old, hostTags, null, forRebalance, isTransferredConnection); return host; } @@ -2701,12 +3458,12 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } } - return createHostAndAgent(resource, hostDetails, true, null, false); + return createHostAndAgent(resource, hostDetails, true, null, null, false); } @Override public HostVO createHostVOForConnectedAgent(final StartupCommand[] cmds) { - return createHostVO(cmds, null, null, null, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_CONNECTED); + return createHostVO(cmds, null, null, null, null, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_CONNECTED); } private void checkIPConflicts(final HostPodVO pod, final DataCenterVO dc, final String serverPrivateIP, final String serverPublicIP) { diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index d2ddbddcb48..9d734d4fd3b 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -213,6 +213,7 @@ import 
org.apache.cloudstack.api.command.admin.storage.AddImageStoreS3CMD; import org.apache.cloudstack.api.command.admin.storage.AddObjectStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd; import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd; +import org.apache.cloudstack.api.command.admin.storage.ConfigureStorageAccessCmd; import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd; @@ -223,6 +224,7 @@ import org.apache.cloudstack.api.command.admin.storage.FindStoragePoolsForMigrat import org.apache.cloudstack.api.command.admin.storage.ListImageStoresCmd; import org.apache.cloudstack.api.command.admin.storage.ListObjectStoragePoolsCmd; import org.apache.cloudstack.api.command.admin.storage.ListSecondaryStagingStoresCmd; +import org.apache.cloudstack.api.command.admin.storage.ListStorageAccessGroupsCmd; import org.apache.cloudstack.api.command.admin.storage.ListStoragePoolsCmd; import org.apache.cloudstack.api.command.admin.storage.ListStorageProvidersCmd; import org.apache.cloudstack.api.command.admin.storage.ListStorageTagsCmd; @@ -1275,6 +1277,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe final Object allocationState = cmd.getAllocationState(); final String keyword = cmd.getKeyword(); final CPU.CPUArch arch = cmd.getArch(); + final String storageAccessGroup = cmd.getStorageAccessGroup(); zoneId = _accountMgr.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), zoneId); final Filter searchFilter = new Filter(ClusterVO.class, "id", true, cmd.getStartIndex(), cmd.getPageSizeVal()); @@ -1288,6 +1291,13 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe sb.and("clusterType", sb.entity().getClusterType(), 
SearchCriteria.Op.EQ); sb.and("allocationState", sb.entity().getAllocationState(), SearchCriteria.Op.EQ); sb.and("arch", sb.entity().getArch(), SearchCriteria.Op.EQ); + if (storageAccessGroup != null) { + sb.and().op("storageAccessGroupExact", sb.entity().getStorageAccessGroups(), SearchCriteria.Op.EQ); + sb.or("storageAccessGroupPrefix", sb.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE); + sb.or("storageAccessGroupSuffix", sb.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE); + sb.or("storageAccessGroupMiddle", sb.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE); + sb.cp(); + } final SearchCriteria sc = sb.create(); if (id != null) { @@ -1331,6 +1341,13 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe sc.setParameters("arch", arch); } + if (storageAccessGroup != null) { + sc.setParameters("storageAccessGroupExact", storageAccessGroup); + sc.setParameters("storageAccessGroupPrefix", storageAccessGroup + ",%"); + sc.setParameters("storageAccessGroupSuffix", "%," + storageAccessGroup); + sc.setParameters("storageAccessGroupMiddle", "%," + storageAccessGroup + ",%"); + } + final Pair, Integer> result = _clusterDao.searchAndCount(sc, searchFilter); return new Pair<>(result.first(), result.second()); } @@ -2014,6 +2031,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe Long zoneId = cmd.getZoneId(); final Object keyword = cmd.getKeyword(); final Object allocationState = cmd.getAllocationState(); + final String storageAccessGroup = cmd.getStorageAccessGroup(); + zoneId = _accountMgr.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), zoneId); final Filter searchFilter = new Filter(HostPodVO.class, "dataCenterId", true, cmd.getStartIndex(), cmd.getPageSizeVal()); @@ -2022,6 +2041,13 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe sb.and("name", sb.entity().getName(), SearchCriteria.Op.EQ); sb.and("dataCenterId", 
sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); sb.and("allocationState", sb.entity().getAllocationState(), SearchCriteria.Op.EQ); + if (storageAccessGroup != null) { + sb.and().op("storageAccessGroupExact", sb.entity().getStorageAccessGroups(), SearchCriteria.Op.EQ); + sb.or("storageAccessGroupPrefix", sb.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE); + sb.or("storageAccessGroupSuffix", sb.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE); + sb.or("storageAccessGroupMiddle", sb.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE); + sb.cp(); + } final SearchCriteria sc = sb.create(); if (keyword != null) { @@ -2048,6 +2074,13 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe sc.setParameters("allocationState", allocationState); } + if (storageAccessGroup != null) { + sc.setParameters("storageAccessGroupExact", storageAccessGroup); + sc.setParameters("storageAccessGroupPrefix", storageAccessGroup + ",%"); + sc.setParameters("storageAccessGroupSuffix", "%," + storageAccessGroup); + sc.setParameters("storageAccessGroupMiddle", "%," + storageAccessGroup + ",%"); + } + final Pair, Integer> result = _hostPodDao.searchAndCount(sc, searchFilter); return new Pair<>(result.first(), result.second()); } @@ -3587,12 +3620,14 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe cmdList.add(ListSwiftsCmd.class); cmdList.add(ListStoragePoolsCmd.class); cmdList.add(ListStorageTagsCmd.class); + cmdList.add(ListStorageAccessGroupsCmd.class); cmdList.add(FindStoragePoolsForMigrationCmd.class); cmdList.add(PreparePrimaryStorageForMaintenanceCmd.class); cmdList.add(UpdateStoragePoolCmd.class); cmdList.add(SyncStoragePoolCmd.class); cmdList.add(UpdateStorageCapabilitiesCmd.class); cmdList.add(UpdateImageStoreCmd.class); + cmdList.add(ConfigureStorageAccessCmd.class); cmdList.add(DestroySystemVmCmd.class); cmdList.add(ListSystemVMsCmd.class); cmdList.add(MigrateSystemVMCmd.class); diff 
--git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 54299f55d02..5de7ade696a 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -39,6 +39,7 @@ import java.util.Iterator; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Random; import java.util.Set; import java.util.UUID; @@ -51,14 +52,20 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; +import java.util.stream.Stream; import javax.inject.Inject; +import com.cloud.dc.HostPodVO; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd; import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd; +import org.apache.cloudstack.api.command.admin.storage.ConfigureStorageAccessCmd; import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd; @@ -144,6 +151,7 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang.time.DateUtils; +import org.apache.commons.lang3.ArrayUtils; import org.apache.commons.lang3.EnumUtils; import org.springframework.stereotype.Component; @@ -368,6 
+376,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Inject StoragePoolTagsDao _storagePoolTagsDao; @Inject + StoragePoolAndAccessGroupMapDao _storagePoolAccessGroupMapDao; + @Inject PrimaryDataStoreDao primaryStoreDao; @Inject DiskOfferingDetailsDao _diskOfferingDetailsDao; @@ -397,6 +407,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C ConfigurationDao configurationDao; @Inject private ImageStoreDetailsUtil imageStoreDetailsUtil; + @Inject + protected HostPodDao _podDao; + @Inject + ResourceManager _resourceMgr; + @Inject + StorageManager storageManager; protected List _discoverers; @@ -673,7 +689,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C _storagePoolAcquisitionWaitSeconds = NumbersUtil.parseInt(configs.get("pool.acquisition.wait.seconds"), 1800); logger.info("pool.acquisition.wait.seconds is configured as " + _storagePoolAcquisitionWaitSeconds + " seconds"); - _agentMgr.registerForHostEvents(new StoragePoolMonitor(this, _storagePoolDao, _dataStoreProviderMgr), true, false, true); + _agentMgr.registerForHostEvents(new StoragePoolMonitor(this, _storagePoolDao, _storagePoolHostDao, _dataStoreProviderMgr), true, false, true); logger.info("Storage cleanup enabled: " + StorageCleanupEnabled.value() + ", interval: " + StorageCleanupInterval.value() + ", delay: " + StorageCleanupDelay.value() + ", template cleanup enabled: " + TemplateCleanupEnabled.value()); @@ -1021,6 +1037,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C params.put("hypervisorType", hypervisorType); params.put("url", cmd.getUrl()); params.put("tags", cmd.getTags()); + params.put(ApiConstants.STORAGE_ACCESS_GROUPS, cmd.getStorageAccessGroups()); params.put("isTagARule", cmd.isTagARule()); params.put("name", cmd.getStoragePoolName()); params.put("details", details); @@ -1388,6 +1405,232 @@ public class StorageManagerImpl extends ManagerBase 
implements StorageManager, C } } + @Override + @ActionEvent(eventType = EventTypes.EVENT_CONFIGURE_STORAGE_ACCESS, eventDescription = "configuring storage groups", async = true) + public boolean configureStorageAccess(ConfigureStorageAccessCmd cmd) { + Long zoneId = cmd.getZoneId(); + Long podId = cmd.getPodId(); + Long clusterId = cmd.getClusterId(); + Long hostId = cmd.getHostId(); + Long storagePoolId = cmd.getStorageId(); + + long nonNullCount = Stream.of(zoneId, podId, clusterId, hostId, storagePoolId) + .filter(Objects::nonNull) + .count(); + + if (nonNullCount != 1) { + throw new IllegalArgumentException("Exactly one of zoneid, podid, clusterid, hostid or storagepoolid is required"); + } + + // SAG -> Storage Access Group + List storageAccessGroups = cmd.getStorageAccessGroups(); + if (storageAccessGroups == null) { + throw new InvalidParameterValueException("storageaccessgroups parameter is required"); + } + + if (zoneId != null) { + DataCenterVO zone = _dcDao.findById(zoneId); + Set existingSAGsSet = (zone.getStorageAccessGroups() == null || zone.getStorageAccessGroups().isEmpty()) + ? Collections.emptySet() + : new HashSet<>(Arrays.asList(zone.getStorageAccessGroups().split(","))); + + Set storagePoolSAGsSet = new HashSet<>(storageAccessGroups); + if (!existingSAGsSet.equals(storagePoolSAGsSet)) { + _resourceMgr.updateZoneStorageAccessGroups(zone.getId(), storageAccessGroups); + String preparedStoragePoolTags = CollectionUtils.isEmpty(storageAccessGroups) ? null : String.join(",", storageAccessGroups); + zone.setStorageAccessGroups(preparedStoragePoolTags); + + if (!_dcDao.update(zoneId, zone)) { + throw new CloudRuntimeException("Failed to update zone with the storage access groups."); + } + } + } + + if (podId != null) { + HostPodVO pod = _podDao.findById(podId); + Set existingTagsSet = (pod.getStorageAccessGroups() == null || pod.getStorageAccessGroups().isEmpty()) + ? 
Collections.emptySet() + : new HashSet<>(Arrays.asList(pod.getStorageAccessGroups().split(","))); + + if (CollectionUtils.isNotEmpty(storageAccessGroups)) { + checkIfStorageAccessGroupsExistsOnZone(pod.getDataCenterId(), storageAccessGroups); + } + + Set storagePoolTagsSet = new HashSet<>(storageAccessGroups); + if (!existingTagsSet.equals(storagePoolTagsSet)) { + _resourceMgr.updatePodStorageAccessGroups(podId, storageAccessGroups); + String preparedStoragePoolTags = CollectionUtils.isEmpty(storageAccessGroups) ? null : String.join(",", storageAccessGroups); + pod.setStorageAccessGroups(preparedStoragePoolTags); + + if (!_podDao.update(podId, pod)) { + throw new CloudRuntimeException("Failed to update pod with the storage access groups."); + } + } + } + + if (clusterId != null) { + ClusterVO cluster = _clusterDao.findById(clusterId); + Set existingTagsSet = (cluster.getStorageAccessGroups() == null || cluster.getStorageAccessGroups().isEmpty()) + ? Collections.emptySet() + : new HashSet<>(Arrays.asList(cluster.getStorageAccessGroups().split(","))); + + if (CollectionUtils.isNotEmpty(storageAccessGroups)) { + checkIfStorageAccessGroupsExistsOnPod(cluster.getPodId(), storageAccessGroups); + } + + Set storagePoolTagsSet = new HashSet<>(storageAccessGroups); + if (!existingTagsSet.equals(storagePoolTagsSet)) { + _resourceMgr.updateClusterStorageAccessGroups(cluster.getId(), storageAccessGroups); + String preparedStoragePoolTags = CollectionUtils.isEmpty(storageAccessGroups) ? null : String.join(",", storageAccessGroups); + cluster.setStorageAccessGroups(preparedStoragePoolTags); + + if (!_clusterDao.update(clusterId, cluster)) { + throw new CloudRuntimeException("Failed to update cluster with the storage access groups."); + } + } + } + + if (hostId != null) { + HostVO host = _hostDao.findById(hostId); + Set existingTagsSet = (host.getStorageAccessGroups() == null || host.getStorageAccessGroups().isEmpty()) + ? 
Collections.emptySet() + : new HashSet<>(Arrays.asList(host.getStorageAccessGroups().split(","))); + + if (CollectionUtils.isNotEmpty(storageAccessGroups)) { + checkIfStorageAccessGroupsExistsOnCluster(host.getClusterId(), storageAccessGroups); + } + + Set storageAccessGroupsSet = new HashSet<>(storageAccessGroups); + if (!existingTagsSet.equals(storageAccessGroupsSet)) { + _resourceMgr.updateHostStorageAccessGroups(hostId, storageAccessGroups); + String preparedStoragePoolTags = CollectionUtils.isEmpty(storageAccessGroups) ? null : String.join(",", storageAccessGroups); + host.setStorageAccessGroups(preparedStoragePoolTags); + + if (!_hostDao.update(hostId, host)) { + throw new CloudRuntimeException("Failed to update host with the storage access groups."); + } + } + } + + if (storagePoolId != null) { + StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId); + if (ScopeType.HOST.equals(storagePool.getScope())) { + throw new CloudRuntimeException("Storage Access Groups are not suitable for local storage"); + } + + if (logger.isDebugEnabled()) { + logger.debug("Updating Storage Pool Access Group Maps to :" + storageAccessGroups); + } + + if (storagePool.getPoolType() == StoragePoolType.DatastoreCluster) { + List childStoragePools = _storagePoolDao.listChildStoragePoolsInDatastoreCluster(storagePool.getId()); + for (StoragePoolVO childPool : childStoragePools) { + _resourceMgr.updateStoragePoolConnectionsOnHosts(childPool.getId(), storageAccessGroups); + _storagePoolAccessGroupMapDao.persist(childPool.getId(), storageAccessGroups); + } + } else { + _resourceMgr.updateStoragePoolConnectionsOnHosts(storagePool.getId(), storageAccessGroups); + } + + _storagePoolAccessGroupMapDao.persist(storagePool.getId(), storageAccessGroups); + } + + return true; + } + + protected void checkIfStorageAccessGroupsExistsOnZone(long zoneId, List storageAccessGroups) { + DataCenterVO zoneVO = _dcDao.findById(zoneId); + + String storageAccessGroupsOnZone = 
zoneVO.getStorageAccessGroups(); + List zoneTagsList = parseTags(storageAccessGroupsOnZone); + List newTags = storageAccessGroups; + + List existingTagsOnZone = (List) CollectionUtils.intersection(newTags, zoneTagsList); + + if (CollectionUtils.isNotEmpty(existingTagsOnZone)) { + throw new CloudRuntimeException(String.format("access groups already exist on the zone: %s", existingTagsOnZone)); + } + } + + protected void checkIfStorageAccessGroupsExistsOnPod(long podId, List storageAccessGroups) { + HostPodVO podVO = _podDao.findById(podId); + DataCenterVO zoneVO = _dcDao.findById(podVO.getDataCenterId()); + + String storageAccessGroupsOnPod = podVO.getStorageAccessGroups(); + String storageAccessGroupsOnZone = zoneVO.getStorageAccessGroups(); + + List podTagsList = parseTags(storageAccessGroupsOnPod); + List zoneTagsList = parseTags(storageAccessGroupsOnZone); + List newTags = storageAccessGroups; + + List existingTagsOnPod = (List) CollectionUtils.intersection(newTags, podTagsList); + List existingTagsOnZone = (List) CollectionUtils.intersection(newTags, zoneTagsList); + + if (CollectionUtils.isNotEmpty(existingTagsOnPod) || CollectionUtils.isNotEmpty(existingTagsOnZone)) { + String message = "access groups already exist "; + + if (CollectionUtils.isNotEmpty(existingTagsOnPod)) { + message += String.format("on the pod: %s", existingTagsOnPod); + } + if (CollectionUtils.isNotEmpty(existingTagsOnZone)) { + if (CollectionUtils.isNotEmpty(existingTagsOnPod)) { + message += ", "; + } + message += String.format("on the zone: %s", existingTagsOnZone); + } + + throw new CloudRuntimeException(message); + } + } + + protected void checkIfStorageAccessGroupsExistsOnCluster(long clusterId, List storageAccessGroups) { + ClusterVO clusterVO = _clusterDao.findById(clusterId); + HostPodVO podVO = _podDao.findById(clusterVO.getPodId()); + DataCenterVO zoneVO = _dcDao.findById(podVO.getDataCenterId()); + + String storageAccessGroupsOnCluster = clusterVO.getStorageAccessGroups(); + 
String storageAccessGroupsOnPod = podVO.getStorageAccessGroups(); + String storageAccessGroupsOnZone = zoneVO.getStorageAccessGroups(); + + List podTagsList = parseTags(storageAccessGroupsOnPod); + List zoneTagsList = parseTags(storageAccessGroupsOnZone); + List clusterTagsList = parseTags(storageAccessGroupsOnCluster); + List newTags = storageAccessGroups; + + List existingTagsOnCluster = (List) CollectionUtils.intersection(newTags, clusterTagsList); + List existingTagsOnPod = (List) CollectionUtils.intersection(newTags, podTagsList); + List existingTagsOnZone = (List) CollectionUtils.intersection(newTags, zoneTagsList); + + if (CollectionUtils.isNotEmpty(existingTagsOnCluster) || CollectionUtils.isNotEmpty(existingTagsOnPod) || CollectionUtils.isNotEmpty(existingTagsOnZone)) { + String message = "access groups already exist "; + + if (CollectionUtils.isNotEmpty(existingTagsOnCluster)) { + message += String.format("on the cluster: %s", existingTagsOnCluster); + } + if (CollectionUtils.isNotEmpty(existingTagsOnPod)) { + if (CollectionUtils.isNotEmpty(existingTagsOnCluster)) { + message += ", "; + } + message += String.format("on the pod: %s", existingTagsOnPod); + } + if (CollectionUtils.isNotEmpty(existingTagsOnZone)) { + if (CollectionUtils.isNotEmpty(existingTagsOnCluster) || CollectionUtils.isNotEmpty(existingTagsOnPod)) { + message += ", "; + } + message += String.format("on the zone: %s", existingTagsOnZone); + } + + throw new CloudRuntimeException(message); + } + } + + private List parseTags(String tags) { + if (tags == null || tags.trim().isEmpty()) { + return Collections.emptyList(); + } + return Arrays.asList(tags.split(",")); + } + @Override public void removeStoragePoolFromCluster(long hostId, String iScsiName, StoragePool storagePool) { final Map details = new HashMap<>(); @@ -2609,11 +2852,152 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C storagePoolTags = 
storagePoolTagVOList.parallelStream().map(StoragePoolTagVO::getTag).collect(Collectors.toList()); isTagARule = storagePoolTagVOList.get(0).isTagARule(); } + List storageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(datastoreClusterPool.getId()); - _storagePoolDao.persist(dataStoreVO, details, storagePoolTags, isTagARule); + _storagePoolDao.persist(dataStoreVO, details, storagePoolTags, isTagARule, storageAccessGroups); return dataStoreVO; } + @Override + public boolean checkIfHostAndStoragePoolHasCommonStorageAccessGroups(Host host, StoragePool pool) { + String[] hostStorageAccessGroups = getStorageAccessGroups(null, null, null, host.getId()); + List storagePoolAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(pool.getId()); + + if (CollectionUtils.isEmpty(storagePoolAccessGroups)) { + return true; + } + + if (ArrayUtils.isEmpty(hostStorageAccessGroups)) { + return false; + } + + if (ArrayUtils.isNotEmpty(hostStorageAccessGroups)) { + logger.debug(String.format("Storage access groups on the host %s are %s", host, hostStorageAccessGroups)); + } + + if (CollectionUtils.isNotEmpty(storagePoolAccessGroups)) { + logger.debug(String.format("Storage access groups on the storage pool %s are %s", host, storagePoolAccessGroups)); + } + + List hostTagList = Arrays.asList(hostStorageAccessGroups); + return CollectionUtils.containsAny(hostTagList, storagePoolAccessGroups); + } + + @Override + public Pair checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(StoragePool destPool, Volume volume) { + if (Volume.State.Ready.equals(volume.getState())) { + Long vmId = volume.getInstanceId(); + VMInstanceVO vm = null; + if (vmId != null) { + vm = _vmInstanceDao.findById(vmId); + } + + if (vm == null || State.Stopped.equals(vm.getState())) { + Long srcPoolId = volume.getPoolId(); + StoragePoolVO srcPool = _storagePoolDao.findById(srcPoolId); + List srcStorageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(srcPoolId); + 
List destStorageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(destPool.getId()); + + if (CollectionUtils.isNotEmpty(srcStorageAccessGroups) && CollectionUtils.isNotEmpty(destStorageAccessGroups)) { + logger.debug(String.format("Storage access groups on source storage %s are %s and destination storage %s are %s", + srcPool, srcStorageAccessGroups, destPool, destStorageAccessGroups)); + List intersection = new ArrayList<>(srcStorageAccessGroups); + intersection.retainAll(destStorageAccessGroups); + if (CollectionUtils.isNotEmpty(intersection)) { + return new Pair<>(true, "Success"); + } else { + List poolIds = new ArrayList<>(); + poolIds.add(srcPool.getId()); + poolIds.add(destPool.getId()); + Host hostWithPoolsAccess = findUpAndEnabledHostWithAccessToStoragePools(poolIds); + if (hostWithPoolsAccess == null) { + logger.debug("Storage access groups on source and destination storages do not match, and there is no common host connected to these storages"); + return new Pair<>(false, "No common host connected to source and destination storages"); + } + } + } + return new Pair<>(true, "Success"); + } else { + if (State.Running.equals(vm.getState())) { + Long hostId = vm.getHostId(); + String[] hostStorageAccessGroups = getStorageAccessGroups(null, null, null, hostId); + Long srcPoolId = volume.getPoolId(); + StoragePoolVO srcPool = _storagePoolDao.findById(srcPoolId); + List srcStorageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(srcPoolId); + List destStorageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(destPool.getId()); + + logger.debug(String.format("Storage access groups on source storage %s are %s and destination storage %s are %s", + srcPool, srcStorageAccessGroups, destPool, destStorageAccessGroups)); + + if (CollectionUtils.isEmpty(srcStorageAccessGroups) && CollectionUtils.isEmpty(destStorageAccessGroups)) { + return new Pair<>(true, "Success"); + } + + if 
(CollectionUtils.isNotEmpty(srcStorageAccessGroups) && CollectionUtils.isNotEmpty(destStorageAccessGroups)) { + List intersection = new ArrayList<>(srcStorageAccessGroups); + intersection.retainAll(destStorageAccessGroups); + + if (ArrayUtils.isNotEmpty(hostStorageAccessGroups)) { + boolean hasSrcCommon = srcStorageAccessGroups.stream() + .anyMatch(group -> Arrays.asList(hostStorageAccessGroups).contains(group)); + boolean hasDestCommon = destStorageAccessGroups.stream() + .anyMatch(group -> Arrays.asList(hostStorageAccessGroups).contains(group)); + if (hasSrcCommon && hasDestCommon) { + return new Pair<>(true, "Success"); + } + } + + return new Pair<>(false, "No common storage access groups between source, destination pools and host"); + } + + if (CollectionUtils.isEmpty(srcStorageAccessGroups)) { + if (ArrayUtils.isNotEmpty(hostStorageAccessGroups)) { + List hostAccessGroupList = Arrays.asList(hostStorageAccessGroups); + hostAccessGroupList.retainAll(destStorageAccessGroups); + if (CollectionUtils.isNotEmpty(hostAccessGroupList)) { + return new Pair<>(true, "Success"); + } + } + return new Pair<>(false, "Host lacks access to destination storage groups"); + } + + return new Pair<>(true, "Success"); + } + } + } + return new Pair<>(true, "Success"); + } + + @Override + public String[] getStorageAccessGroups(Long zoneId, Long podId, Long clusterId, Long hostId) { + List storageAccessGroups = new ArrayList<>(); + if (hostId != null) { + HostVO host = _hostDao.findById(hostId); + ClusterVO cluster = _clusterDao.findById(host.getClusterId()); + HostPodVO pod = _podDao.findById(cluster.getPodId()); + DataCenterVO zone = _dcDao.findById(pod.getDataCenterId()); + storageAccessGroups.addAll(List.of(com.cloud.utils.StringUtils.splitCommaSeparatedStrings(host.getStorageAccessGroups(), cluster.getStorageAccessGroups(), pod.getStorageAccessGroups(), zone.getStorageAccessGroups()))); + } else if (clusterId != null) { + ClusterVO cluster = _clusterDao.findById(clusterId); + 
HostPodVO pod = _podDao.findById(cluster.getPodId()); + DataCenterVO zone = _dcDao.findById(pod.getDataCenterId()); + storageAccessGroups.addAll(List.of(com.cloud.utils.StringUtils.splitCommaSeparatedStrings(cluster.getStorageAccessGroups(), pod.getStorageAccessGroups(), zone.getStorageAccessGroups()))); + } else if (podId != null) { + HostPodVO pod = _podDao.findById(podId); + DataCenterVO zone = _dcDao.findById(pod.getDataCenterId()); + storageAccessGroups.addAll(List.of(com.cloud.utils.StringUtils.splitCommaSeparatedStrings(pod.getStorageAccessGroups(), zone.getStorageAccessGroups()))); + } else if (zoneId != null) { + DataCenterVO zone = _dcDao.findById(zoneId); + storageAccessGroups.addAll(List.of(com.cloud.utils.StringUtils.splitCommaSeparatedStrings(zone.getStorageAccessGroups()))); + } + + storageAccessGroups.removeIf(tag -> tag == null || tag.trim().isEmpty()); + + return storageAccessGroups.isEmpty() + ? new String[0] + : storageAccessGroups.toArray(org.apache.commons.lang.ArrayUtils.EMPTY_STRING_ARRAY); + } + private void handleRemoveChildStoragePoolFromDatastoreCluster(Set childDatastoreUUIDs) { for (String childDatastoreUUID : childDatastoreUUIDs) { diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 64fed4ab4c6..2048ee4cfc9 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -63,6 +63,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; import 
org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; @@ -358,6 +359,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic private StatsCollector statsCollector; @Inject HostPodDao podDao; + @Inject + EndPointSelector _epSelector; protected Gson _gson; @@ -3408,6 +3411,18 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic destPool = _volumeMgr.findChildDataStoreInDataStoreCluster(dc, destPoolPod, destPool.getClusterId(), null, null, destPool.getId()); } + Pair checkResult = storageMgr.checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(destPool, vol); + if (!checkResult.first()) { + throw new CloudRuntimeException(checkResult.second()); + } + + if (!liveMigrateVolume && vm != null) { + DataStore primaryStore = dataStoreMgr.getPrimaryDataStore(destPool.getId()); + if (_epSelector.select(primaryStore) == null) { + throw new CloudRuntimeException("Unable to find accessible host for volume migration"); + } + } + if (!storageMgr.storagePoolCompatibleWithVolumePool(destPool, (Volume) vol)) { throw new CloudRuntimeException("Storage pool " + destPool.getName() + " is not suitable to migrate volume " + vol.getName()); } diff --git a/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java b/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java index 6f484870e72..01fcb43c4c4 100644 --- a/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java +++ b/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java @@ -16,17 +16,24 @@ // under the License. 
package com.cloud.storage.listener; +import java.util.ArrayList; import java.util.List; import javax.inject.Inject; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.exception.StorageConflictException; import com.cloud.storage.StorageManager; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.commons.lang3.ArrayUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -52,12 +59,18 @@ public class StoragePoolMonitor implements Listener { private final StorageManagerImpl _storageManager; private final PrimaryDataStoreDao _poolDao; private DataStoreProviderManager _dataStoreProviderMgr; + private final StoragePoolHostDao _storagePoolHostDao; + @Inject + ClusterDao _clusterDao; + @Inject + HostPodDao _podDao; @Inject OCFS2Manager _ocfs2Mgr; - public StoragePoolMonitor(StorageManagerImpl mgr, PrimaryDataStoreDao poolDao, DataStoreProviderManager dataStoreProviderMgr) { + public StoragePoolMonitor(StorageManagerImpl mgr, PrimaryDataStoreDao poolDao, StoragePoolHostDao storagePoolHostDao, DataStoreProviderManager dataStoreProviderMgr) { _storageManager = mgr; _poolDao = poolDao; + _storagePoolHostDao = storagePoolHostDao; _dataStoreProviderMgr = dataStoreProviderMgr; } @@ -104,13 +117,34 @@ public class StoragePoolMonitor implements Listener { scCmd.getHypervisorType() == HypervisorType.VMware || scCmd.getHypervisorType() == HypervisorType.Simulator || 
scCmd.getHypervisorType() == HypervisorType.Ovm || scCmd.getHypervisorType() == HypervisorType.Hyperv || scCmd.getHypervisorType() == HypervisorType.LXC || scCmd.getHypervisorType() == HypervisorType.Ovm3) { - List pools = _poolDao.listBy(host.getDataCenterId(), host.getPodId(), host.getClusterId(), ScopeType.CLUSTER); - List zoneStoragePoolsByTags = _poolDao.findZoneWideStoragePoolsByTags(host.getDataCenterId(), null, false); - List zoneStoragePoolsByHypervisor = _poolDao.findZoneWideStoragePoolsByHypervisor(host.getDataCenterId(), scCmd.getHypervisorType()); - zoneStoragePoolsByTags.retainAll(zoneStoragePoolsByHypervisor); - pools.addAll(zoneStoragePoolsByTags); - List zoneStoragePoolsByAnyHypervisor = _poolDao.findZoneWideStoragePoolsByHypervisor(host.getDataCenterId(), HypervisorType.Any); - pools.addAll(zoneStoragePoolsByAnyHypervisor); + String sags[] = _storageManager.getStorageAccessGroups(null, null, null, host.getId()); + + List pools = new ArrayList<>(); + // SAG -> Storage Access Group + if (ArrayUtils.isEmpty(sags)) { + List clusterStoragePoolsByEmptySAGs = _poolDao.findStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), host.getPodId(), host.getClusterId(), ScopeType.CLUSTER, null); + List storagePoolsByEmptySAGs = _poolDao.findStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), null, null, ScopeType.ZONE, null); + List zoneStoragePoolsByHypervisor = _poolDao.findStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), null, null, ScopeType.ZONE, scCmd.getHypervisorType()); + storagePoolsByEmptySAGs.retainAll(zoneStoragePoolsByHypervisor); + pools.addAll(storagePoolsByEmptySAGs); + pools.addAll(clusterStoragePoolsByEmptySAGs); + List zoneStoragePoolsByAnyHypervisor = _poolDao.findStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), null, null, ScopeType.ZONE, HypervisorType.Any); + pools.addAll(zoneStoragePoolsByAnyHypervisor); + } else { + List storagePoolsBySAGs = new ArrayList<>(); + List 
clusterStoragePoolsBySAGs = _poolDao.findPoolsByAccessGroupsForHostConnection(host.getDataCenterId(), host.getPodId(), host.getClusterId(), ScopeType.CLUSTER, sags); + List clusterStoragePoolsByEmptySAGs = _poolDao.findStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), host.getPodId(), host.getClusterId(), ScopeType.CLUSTER, null); + List zoneStoragePoolsBySAGs = _poolDao.findZoneWideStoragePoolsByAccessGroupsAndHypervisorTypeForHostConnection(host.getDataCenterId(), sags, scCmd.getHypervisorType()); + List zoneStoragePoolsByHypervisorTypeAny = _poolDao.findZoneWideStoragePoolsByAccessGroupsAndHypervisorTypeForHostConnection(host.getDataCenterId(), sags, HypervisorType.Any); + List zoneStoragePoolsByEmptySAGs = _poolDao.findStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), null, null, ScopeType.ZONE, null); + + storagePoolsBySAGs.addAll(zoneStoragePoolsBySAGs); + storagePoolsBySAGs.addAll(zoneStoragePoolsByEmptySAGs); + storagePoolsBySAGs.addAll(zoneStoragePoolsByHypervisorTypeAny); + storagePoolsBySAGs.addAll(clusterStoragePoolsBySAGs); + storagePoolsBySAGs.addAll(clusterStoragePoolsByEmptySAGs); + pools.addAll(storagePoolsBySAGs); + } // get the zone wide disabled pools list if global setting is true. 
if (StorageManager.MountDisabledStoragePool.value()) { @@ -122,6 +156,9 @@ public class StoragePoolMonitor implements Listener { pools.addAll(_poolDao.findDisabledPoolsByScope(host.getDataCenterId(), host.getPodId(), host.getClusterId(), ScopeType.CLUSTER)); } + List previouslyConnectedPools = new ArrayList<>(); + previouslyConnectedPools.addAll(_storageManager.findStoragePoolsConnectedToHost(host.getId())); + for (StoragePoolVO pool : pools) { if (!pool.isShared()) { continue; @@ -141,6 +178,21 @@ public class StoragePoolMonitor implements Listener { } catch (Exception e) { throw new ConnectionException(true, String.format("Unable to connect host %s to storage pool %s due to %s", host, pool, e.toString()), e); } + + previouslyConnectedPools.removeIf(sp -> sp.getPoolId() == pool.getId()); + } + + // Disconnect any pools which are not expected to be connected + for (StoragePoolHostVO poolToDisconnect: previouslyConnectedPools) { + StoragePoolVO pool = _poolDao.findById(poolToDisconnect.getPoolId()); + try { + _storageManager.disconnectHostFromSharedPool(host, pool); + _storagePoolHostDao.deleteStoragePoolHostDetails(host.getId(), pool.getId()); + } catch (StorageConflictException se) { + throw new CloudRuntimeException(String.format("Unable to disconnect the pool %s and the host %s", pool, host)); + } catch (Exception e) { + logger.warn(String.format("Unable to disconnect the pool %s and the host %s", pool, host), e); + } } } } diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index f90f612f330..4eaaabd028a 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -28,6 +28,7 @@ import java.io.UnsupportedEncodingException; import java.net.URLDecoder; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.HashSet; @@ 
-6647,6 +6648,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir VMInstanceVO vm = preVmStorageMigrationCheck(vmId); Map volumeToPoolIds = new HashMap<>(); checkDestinationHypervisorType(destPool, vm); + checkIfDestinationPoolHasSameStorageAccessGroups(destPool, vm); List volumes = _volsDao.findByInstance(vm.getId()); StoragePoolVO destinationPoolVo = _storagePoolDao.findById(destPool.getId()); Long destPoolPodId = ScopeType.CLUSTER.equals(destinationPoolVo.getScope()) || ScopeType.HOST.equals(destinationPoolVo.getScope()) ? @@ -6662,6 +6664,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("Storage migration of non-user VMs cannot be done between storage pools of different pods"); } } + Pair checkResult = storageManager.checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(destPool, volume); + if (!checkResult.first()) { + throw new CloudRuntimeException(String.format("Storage suitability check failed for volume %s with error, %s", volume, checkResult.second())); + } volumeToPoolIds.put(volume.getId(), destPool.getId()); } _itMgr.storageMigration(vm.getUuid(), volumeToPoolIds); @@ -6686,12 +6692,27 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir poolClusterId = pool.getClusterId(); } checkDestinationHypervisorType(pool, vm); + Pair checkResult = storageManager.checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(pool, volume); + if (!checkResult.first()) { + throw new CloudRuntimeException(String.format("Storage suitability check failed for volume %s with error %s", volume, checkResult.second())); + } + volumeToPoolIds.put(volume.getId(), pool.getId()); } _itMgr.storageMigration(vm.getUuid(), volumeToPoolIds); return findMigratedVm(vm.getId(), vm.getType()); } + private void checkIfDestinationPoolHasSameStorageAccessGroups(StoragePool destPool, VMInstanceVO vm) { + Long hostId = vm.getHostId(); + if (hostId 
!= null) { + Host host = _hostDao.findById(hostId); + if (!storageManager.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(host, destPool)) { + throw new InvalidParameterValueException(String.format("Destination pool %s does not have matching storage access groups as host %s", destPool.getName(), host.getName())); + } + } + } + private void checkDestinationHypervisorType(StoragePool destPool, VMInstanceVO vm) { HypervisorType destHypervisorType = destPool.getHypervisor(); if (destHypervisorType == null) { @@ -6811,6 +6832,26 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir return host.checkHostServiceOfferingAndTemplateTags(serviceOffering, template, strictHostTags); } + protected void validateStorageAccessGroupsOnHosts(Host srcHost, Host destinationHost) { + String[] storageAccessGroupsOnSrcHost = storageManager.getStorageAccessGroups(null, null, null, srcHost.getId()); + String[] storageAccessGroupsOnDestHost = storageManager.getStorageAccessGroups(null, null, null, destinationHost.getId()); + + List srcHostStorageAccessGroupsList = storageAccessGroupsOnSrcHost != null ? Arrays.asList(storageAccessGroupsOnSrcHost) : Collections.emptyList(); + List destHostStorageAccessGroupsList = storageAccessGroupsOnDestHost != null ? 
Arrays.asList(storageAccessGroupsOnDestHost) : Collections.emptyList(); + + if (CollectionUtils.isEmpty(srcHostStorageAccessGroupsList)) { + return; + } + + if (CollectionUtils.isEmpty(destHostStorageAccessGroupsList)) { + throw new CloudRuntimeException("Source host has storage access groups, but destination host has none."); + } + + if (!destHostStorageAccessGroupsList.containsAll(srcHostStorageAccessGroupsList)) { + throw new CloudRuntimeException("Storage access groups on the source and destination hosts did not match."); + } + } + protected void validateStrictHostTagCheck(VMInstanceVO vm, HostVO host) { ServiceOffering serviceOffering = serviceOfferingDao.findByIdIncludingRemoved(vm.getServiceOfferingId()); VirtualMachineTemplate template = _templateDao.findByIdIncludingRemoved(vm.getTemplateId()); @@ -6853,6 +6894,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir HostVO destinationHostVO = _hostDao.findById(destinationHost.getId()); _hostDao.loadHostTags(destinationHostVO); validateStrictHostTagCheck(vm, destinationHostVO); + validateStorageAccessGroupsOnHosts(srcHost, destinationHost); checkHostsDedication(vm, srcHost.getId(), destinationHost.getId()); @@ -7213,6 +7255,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir destinationHost.getName(), destinationHost.getUuid())); } + validateStorageAccessGroupsOnHosts(srcHost, destinationHost); + return new Pair<>(srcHost, destinationHost); } @@ -7250,8 +7294,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } volToPoolObjectMap.put(volume.getId(), pool.getId()); } - HypervisorType hypervisorType = _volsDao.getHypervisorType(volume.getId()); + HostVO host = _hostDao.findById(vm.getHostId()); + if (!storageManager.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(host, pool)) { + throw new InvalidParameterValueException(String.format("Destination pool %s for the volume %s does not have matching storage 
access groups as host %s", pool.getName(), volume.getName(), host.getName())); + } + HypervisorType hypervisorType = _volsDao.getHypervisorType(volume.getId()); try { snapshotHelper.checkKvmVolumeSnapshotsOnlyInPrimaryStorage(volume, hypervisorType); } catch (CloudRuntimeException ex) { diff --git a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java index c2c78402aa1..772b5590411 100644 --- a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java +++ b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java @@ -1427,7 +1427,7 @@ public class ConfigurationManagerTest { return pod; }); Mockito.doNothing().when(messageBus).publish(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()); - configurationMgr.createPod(zoneId, "TestPod", null, null, null, null, null); + configurationMgr.createPod(zoneId, "TestPod", null, null, null, null, null, null); } @Test diff --git a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java index 32acdcd4a77..587aafa1587 100755 --- a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java +++ b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java @@ -52,6 +52,8 @@ import org.apache.cloudstack.api.command.admin.host.PrepareForHostMaintenanceCmd import org.apache.cloudstack.api.command.admin.host.ReconnectHostCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.framework.config.ConfigKey; import javax.naming.ConfigurationException; @@ -568,6 +570,7 @@ public class MockResourceManagerImpl extends ManagerBase 
implements ResourceMana return null; } + /* (non-Javadoc) * @see com.cloud.utils.component.Manager#configure(java.lang.String, java.util.Map) */ @@ -628,6 +631,24 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana return false; } + @Override + public void updatePodStorageAccessGroups(long podId, List newStorageAccessGroups) { + } + + @Override + public void updateZoneStorageAccessGroups(long zoneId, List newStorageAccessGroups) { + } + + @Override + public void updateClusterStorageAccessGroups(Long clusterId, List newStorageAccessGroups) { + + } + + @Override + public void updateHostStorageAccessGroups(Long hostId, List newStorageAccessGroups) { + + } + @Override public boolean isGPUDeviceAvailable(final Host host, final String groupName, final String vgpuType) { // TODO Auto-generated method stub @@ -668,6 +689,25 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana return false; } + @Override + public void updateStoragePoolConnectionsOnHosts(Long poolId, List storageAccessGroups) { + } + + @Override + public List getEligibleUpHostsInClusterForStorageConnection(PrimaryDataStoreInfo primaryDataStoreInfo) { + return null; + } + + @Override + public List getEligibleUpAndEnabledHostsInClusterForStorageConnection(PrimaryDataStoreInfo primaryStore) { + return null; + } + + @Override + public List getEligibleUpAndEnabledHostsInZoneForStorageConnection(DataStore dataStore, long zoneId, HypervisorType hypervisorType) { + return null; + } + @Override public boolean isHostGpuEnabled(final long hostId) { // TODO Auto-generated method stub diff --git a/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java b/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java index 414d41145f7..9ee6ab4c529 100644 --- a/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java +++ b/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java @@ -21,6 +21,12 @@ import 
com.cloud.agent.AgentManager; import com.cloud.agent.api.GetVncPortAnswer; import com.cloud.agent.api.GetVncPortCommand; import com.cloud.capacity.dao.CapacityDao; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.HostPodVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.HostPodDao; import com.cloud.event.ActionEventUtils; import com.cloud.exception.InvalidParameterValueException; import com.cloud.ha.HighAvailabilityManager; @@ -29,9 +35,13 @@ import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; +import com.cloud.storage.ScopeType; import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; +import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.Ternary; import com.cloud.utils.exception.CloudRuntimeException; @@ -45,7 +55,11 @@ import com.cloud.vm.dao.VMInstanceDao; import com.trilead.ssh2.Connection; import org.apache.cloudstack.api.command.admin.host.CancelHostAsDegradedCmd; import org.apache.cloudstack.api.command.admin.host.DeclareHostAsDegradedCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -97,11 +111,21 @@ public class ResourceManagerImplTest { @Mock private HostDao hostDao; @Mock + private ClusterDao clusterDao; + @Mock + private HostPodDao podDao; + @Mock + private DataCenterDao dcDao; + 
@Mock private VMInstanceDao vmInstanceDao; @Mock private ConfigurationDao configurationDao; @Mock private VolumeDao volumeDao; + @Mock + private PrimaryDataStoreDao storagePoolDao; + @Mock + private StoragePoolHostDao storagePoolHostDao; @Spy @InjectMocks @@ -128,6 +152,9 @@ public class ResourceManagerImplTest { @Mock private Connection sshConnection; + @Mock + private StoragePoolAndAccessGroupMapDao storagePoolAccessGroupMapDao; + private static long hostId = 1L; private static final String hostUsername = "user"; private static final String hostPassword = "password"; @@ -583,4 +610,568 @@ public class ResourceManagerImplTest { resourceManager.destroyLocalStoragePoolVolumes(poolId); verify(volumeDao, never()).updateAndRemoveVolume(any(VolumeVO.class)); } + + @Test + public void testEmptyHostList() { + List allHosts = new ArrayList<>(); + List storageAccessGroups = Arrays.asList("group1", "group2"); + + List hostsToConnect = resourceManager.filterHostsBasedOnStorageAccessGroups(allHosts, storageAccessGroups); + + Assert.assertTrue("No hosts should be returned when the host list is empty.", hostsToConnect.isEmpty()); + } + + @Test + public void testEmptyStorageAccessGroups() { + List allHosts = Arrays.asList(Mockito.mock(HostVO.class), Mockito.mock(HostVO.class)); + List storageAccessGroups = new ArrayList<>(); + + for (HostVO host : allHosts) { + Mockito.when(host.getId()).thenReturn(1L); + Mockito.doReturn(new String[]{"group1", "group2"}) + .when(storageManager).getStorageAccessGroups(null, null, null, 1L); + } + + List hostsToConnect = resourceManager.filterHostsBasedOnStorageAccessGroups(allHosts, storageAccessGroups); + + Assert.assertTrue("All hosts should be returned when storage access groups are empty.", hostsToConnect.containsAll(allHosts)); + Assert.assertEquals("The number of returned hosts should match the total number of hosts.", allHosts.size(), hostsToConnect.size()); + } + + @Test + public void testHostWithMatchingStorageAccessGroups() { + HostVO 
host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + List allHosts = Arrays.asList(host1, host2); + List storageAccessGroups = Arrays.asList("group1", "group2"); + + Mockito.when(host1.getId()).thenReturn(1L); + Mockito.doReturn(new String[]{"group1"}) + .when(storageManager).getStorageAccessGroups(null, null, null, 1L); + + Mockito.when(host2.getId()).thenReturn(2L); + Mockito.doReturn(new String[]{"group3"}) + .when(storageManager).getStorageAccessGroups(null, null, null, 2L); + + List hostsToConnect = resourceManager.filterHostsBasedOnStorageAccessGroups(allHosts, storageAccessGroups); + + Assert.assertTrue("Only hosts with matching storage access groups should be included.", hostsToConnect.contains(host1)); + Assert.assertFalse("Hosts without matching storage access groups should not be included.", hostsToConnect.contains(host2)); + Assert.assertEquals("Only one host should match the storage access groups.", 1, hostsToConnect.size()); + } + + @Test + public void testHostWithoutMatchingStorageAccessGroups() { + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + List allHosts = Arrays.asList(host1, host2); + List storageAccessGroups = Arrays.asList("group1", "group2"); + + Mockito.when(host1.getId()).thenReturn(1L); + Mockito.doReturn(new String[]{"group3"}) + .when(storageManager).getStorageAccessGroups(null, null, null, 1L); + + Mockito.when(host2.getId()).thenReturn(2L); + Mockito.doReturn(new String[]{"group4"}) + .when(storageManager).getStorageAccessGroups(null, null, null, 2L); + + List hostsToConnect = resourceManager.filterHostsBasedOnStorageAccessGroups(allHosts, storageAccessGroups); + + Assert.assertTrue("No hosts should match the storage access groups.", hostsToConnect.isEmpty()); + } + + @Test + public void testMixedMatchingAndNonMatchingHosts() { + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + HostVO host3 = Mockito.mock(HostVO.class); + 
List allHosts = Arrays.asList(host1, host2, host3); + List storageAccessGroups = Arrays.asList("group1", "group2"); + + Mockito.when(host1.getId()).thenReturn(1L); + Mockito.doReturn(new String[]{"group1"}) + .when(storageManager).getStorageAccessGroups(null, null, null, 1L); + + Mockito.when(host2.getId()).thenReturn(2L); + Mockito.doReturn(new String[]{"group3"}) + .when(storageManager).getStorageAccessGroups(null, null, null, 2L); + + Mockito.when(host3.getId()).thenReturn(3L); + Mockito.doReturn(new String[]{"group2"}) + .when(storageManager).getStorageAccessGroups(null, null, null, 3L); + + List hostsToConnect = resourceManager.filterHostsBasedOnStorageAccessGroups(allHosts, storageAccessGroups); + + Assert.assertTrue("Host1 should be included as it matches 'group1'.", hostsToConnect.contains(host1)); + Assert.assertFalse("Host2 should not be included as it doesn't match any group.", hostsToConnect.contains(host2)); + Assert.assertTrue("Host3 should be included as it matches 'group2'.", hostsToConnect.contains(host3)); + } + + @Test + public void testHostsWithEmptyStorageAccessGroups() { + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + List allHosts = Arrays.asList(host1, host2); + List storageAccessGroups = Arrays.asList("group1", "group2"); + + Mockito.when(host1.getId()).thenReturn(1L); + Mockito.doReturn(new String[0]) + .when(storageManager).getStorageAccessGroups(null, null, null, 1L); + + Mockito.when(host2.getId()).thenReturn(2L); + Mockito.doReturn(new String[0]) + .when(storageManager).getStorageAccessGroups(null, null, null, 2L); + + List hostsToConnect = resourceManager.filterHostsBasedOnStorageAccessGroups(allHosts, storageAccessGroups); + + Assert.assertTrue("No hosts should be included if storage access groups are empty.", hostsToConnect.isEmpty()); + } + + @Test + public void testZoneLevelWithAllHostsUsingTags() { + List sagsToDelete = Arrays.asList("tag1", "tag2"); + Long clusterId = null; + Long 
podId = null; + Long zoneId = 3L; + + List hostIdsUsingStorageTags = Arrays.asList(1L, 2L); + Mockito.doReturn(hostIdsUsingStorageTags).when(resourceManager).listOfHostIdsUsingTheStorageAccessGroups(sagsToDelete, clusterId, podId, zoneId); + + List hostsInZone = Arrays.asList(Mockito.mock(HostVO.class), Mockito.mock(HostVO.class)); + Mockito.doReturn(hostsInZone).when(hostDao).findByDataCenterId(zoneId); + + Mockito.doReturn(1L).when(hostsInZone.get(0)).getId(); + Mockito.doReturn(2L).when(hostsInZone.get(1)).getId(); + + try { + resourceManager.checkIfAllHostsInUse(sagsToDelete, clusterId, podId, zoneId); + Assert.fail("Exception should be thrown when all hosts in the zone are using the storage access groups."); + } catch (CloudRuntimeException e) { + Assert.assertEquals("All hosts in the zone are using the storage access groups", e.getMessage()); + } + } + + @Test + public void testClusterLevelWithAllHostsUsingTags() { + List sagsToDelete = Arrays.asList("tag1", "tag2"); + Long clusterId = 1L; + Long podId = null; + Long zoneId = null; + + List hostIdsUsingStorageTags = Arrays.asList(1L, 2L); + Mockito.doReturn(hostIdsUsingStorageTags).when(resourceManager).listOfHostIdsUsingTheStorageAccessGroups(sagsToDelete, clusterId, podId, zoneId); + + List hostsInCluster = Arrays.asList(Mockito.mock(HostVO.class), Mockito.mock(HostVO.class)); + Mockito.doReturn(hostsInCluster).when(hostDao).findByClusterId(clusterId, Host.Type.Routing); + + Mockito.doReturn(1L).when(hostsInCluster.get(0)).getId(); + Mockito.doReturn(2L).when(hostsInCluster.get(1)).getId(); + + try { + resourceManager.checkIfAllHostsInUse(sagsToDelete, clusterId, podId, zoneId); + Assert.fail("Exception should be thrown when all hosts in the cluster are using the storage access groups."); + } catch (CloudRuntimeException e) { + Assert.assertEquals("All hosts in the cluster are using the storage access groups", e.getMessage()); + } + } + + @Test + public void testPodLevelWithAllHostsUsingTags() { + List 
sagsToDelete = Arrays.asList("tag1", "tag2"); + Long clusterId = null; + Long podId = 2L; + Long zoneId = null; + + List hostIdsUsingStorageTags = Arrays.asList(1L, 2L); + Mockito.doReturn(hostIdsUsingStorageTags).when(resourceManager).listOfHostIdsUsingTheStorageAccessGroups(sagsToDelete, clusterId, podId, zoneId); + + List hostsInPod = Arrays.asList(Mockito.mock(HostVO.class), Mockito.mock(HostVO.class)); + Mockito.doReturn(hostsInPod).when(hostDao).findByPodId(podId, Host.Type.Routing); + + Mockito.doReturn(1L).when(hostsInPod.get(0)).getId(); + Mockito.doReturn(2L).when(hostsInPod.get(1)).getId(); + + try { + resourceManager.checkIfAllHostsInUse(sagsToDelete, clusterId, podId, zoneId); + Assert.fail("Exception should be thrown when all hosts in the pod are using the storage access groups."); + } catch (CloudRuntimeException e) { + Assert.assertEquals("All hosts in the pod are using the storage access groups", e.getMessage()); + } + } + + @Test + public void testCheckIfAnyVolumesInUseWithPoolsToAdd() { + List sagsToAdd = Arrays.asList("sag1", "sag2"); + List sagsToDelete = Arrays.asList("sag3", "sag4"); + + HostVO host = Mockito.mock(HostVO.class); + Mockito.when(host.getId()).thenReturn(1L); + Mockito.when(host.getDataCenterId()).thenReturn(2L); + Mockito.when(host.getPodId()).thenReturn(3L); + Mockito.when(host.getClusterId()).thenReturn(4L); + + VolumeVO volume1 = Mockito.mock(VolumeVO.class); + VolumeVO volume2 = Mockito.mock(VolumeVO.class); + Mockito.when(volume1.getPoolId()).thenReturn(10L); + Mockito.when(volume2.getPoolId()).thenReturn(11L); + List volumesUsingTheStoragePoolAccessGroups = new ArrayList<>(Arrays.asList(volume1, volume2)); + Mockito.doReturn(volumesUsingTheStoragePoolAccessGroups).when(resourceManager).listOfVolumesUsingTheStorageAccessGroups(sagsToDelete, 1L, null, null, null); + + StoragePoolVO pool1 = Mockito.mock(StoragePoolVO.class); + StoragePoolVO pool2 = Mockito.mock(StoragePoolVO.class); + 
Mockito.when(pool1.getId()).thenReturn(10L); + Mockito.when(pool2.getId()).thenReturn(12L); + List poolsToAdd = Arrays.asList(pool1, pool2); + + Mockito.doReturn(poolsToAdd) + .when(resourceManager).getStoragePoolsByAccessGroups(2L, 3L, 4L, sagsToAdd.toArray(new String[0]), true); + + try { + resourceManager.checkIfAnyVolumesInUse(sagsToAdd, sagsToDelete, host); + Assert.fail("Expected a CloudRuntimeException to be thrown."); + } catch (CloudRuntimeException e) { + Assert.assertTrue("Exception message should mention volumes in use.", + e.getMessage().contains("There are volumes in storage pools with the Storage Access Groups that need to be deleted")); + } + } + + @Test + public void testUpdateStoragePoolConnectionsOnHostsConnect1AndDisconnect2() { + Long poolId = 1L; + List storageAccessGroups = Arrays.asList("sag1", "sag2"); + + StoragePoolVO storagePool = Mockito.mock(StoragePoolVO.class); + Mockito.when(storagePool.getId()).thenReturn(poolId); + Mockito.when(storagePool.getScope()).thenReturn(ScopeType.CLUSTER); + Mockito.when(storagePool.getClusterId()).thenReturn(1L); + Mockito.when(storagePool.getPodId()).thenReturn(1L); + Mockito.when(storagePool.getDataCenterId()).thenReturn(1L); + + Mockito.when(storagePoolDao.findById(poolId)).thenReturn(storagePool); + + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + Mockito.when(host1.getId()).thenReturn(2L); + Mockito.when(host2.getId()).thenReturn(3L); + + List clusterHosts = Arrays.asList(host1, host2); + Mockito.doReturn(clusterHosts).when(resourceManager).listAllUpHosts(Host.Type.Routing, 1L, 1L, 1L); + + StoragePoolHostVO hostPoolRecord = Mockito.mock(StoragePoolHostVO.class); + Mockito.when(storagePoolHostDao.findByPoolHost(poolId, 2L)).thenReturn(null); + Mockito.when(storagePoolHostDao.findByPoolHost(poolId, 3L)).thenReturn(hostPoolRecord); + + Mockito.doReturn(new String[]{"sag1", "sag2"}).when(storageManager).getStorageAccessGroups(null, null, null, 2L); + 
Mockito.doReturn(new String[]{"sag3"}).when(storageManager).getStorageAccessGroups(null, null, null, 3L); + + Mockito.doReturn(new ArrayList()).when(resourceManager).listOfHostIdsUsingTheStoragePool(poolId); + + try { + resourceManager.updateStoragePoolConnectionsOnHosts(poolId, storageAccessGroups); + + Mockito.verify(resourceManager, Mockito.times(1)).connectHostToStoragePool(host1, storagePool); + Mockito.verify(resourceManager, Mockito.never()).connectHostToStoragePool(host2, storagePool); + Mockito.verify(resourceManager, Mockito.times(1)).disconnectHostFromStoragePool(host2, storagePool); + Mockito.verify(resourceManager, Mockito.never()).disconnectHostFromStoragePool(host1, storagePool); + } catch (CloudRuntimeException e) { + Assert.fail("No exception should be thrown."); + } + } + + @Test + public void testUpdateStoragePoolConnectionsOnHosts_ZoneScope_NoAccessGroups() { + Long poolId = 1L; + List storageAccessGroups = new ArrayList<>(); + + StoragePoolVO storagePool = Mockito.mock(StoragePoolVO.class); + Mockito.when(storagePool.getId()).thenReturn(poolId); + Mockito.when(storagePool.getScope()).thenReturn(ScopeType.ZONE); + Mockito.when(storagePool.getDataCenterId()).thenReturn(1L); + + Mockito.when(storagePoolDao.findById(poolId)).thenReturn(storagePool); + + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + Mockito.when(host1.getId()).thenReturn(2L); + Mockito.when(host2.getId()).thenReturn(3L); + + List zoneHosts = Arrays.asList(host1, host2); + Mockito.doReturn(zoneHosts).when(resourceManager).listAllUpHosts(Host.Type.Routing, null, null, 1L); + + Mockito.doReturn(new String[]{"sag1", "sag2"}).when(storageManager).getStorageAccessGroups(null, null, null, 2L); + Mockito.doReturn(new String[]{""}).when(storageManager).getStorageAccessGroups(null, null, null, 3L); + + Mockito.when(storagePoolHostDao.findByPoolHost(poolId, 2L)).thenReturn(null); + Mockito.when(storagePoolHostDao.findByPoolHost(poolId, 
3L)).thenReturn(null); + + try { + resourceManager.updateStoragePoolConnectionsOnHosts(poolId, storageAccessGroups); + + Mockito.verify(resourceManager, Mockito.times(1)).connectHostToStoragePool(host1, storagePool); + Mockito.verify(resourceManager, Mockito.times(1)).connectHostToStoragePool(host2, storagePool); + Mockito.verify(resourceManager, Mockito.never()).disconnectHostFromStoragePool(Mockito.any(), Mockito.eq(storagePool)); + } catch (CloudRuntimeException e) { + Assert.fail("No exception should be thrown."); + } + } + + @Test + public void testUpdateStoragePoolConnectionsOnHosts_ConflictWithHostIdsAndVolumes() { + Long poolId = 1L; + List storageAccessGroups = Arrays.asList("sag1", "sag2"); + + StoragePoolVO storagePool = Mockito.mock(StoragePoolVO.class); + Mockito.when(storagePool.getId()).thenReturn(poolId); + Mockito.when(storagePool.getScope()).thenReturn(ScopeType.CLUSTER); + Mockito.when(storagePool.getClusterId()).thenReturn(1L); + Mockito.when(storagePool.getPodId()).thenReturn(1L); + Mockito.when(storagePool.getDataCenterId()).thenReturn(1L); + + Mockito.when(storagePoolDao.findById(poolId)).thenReturn(storagePool); + + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + Mockito.when(host1.getId()).thenReturn(2L); + Mockito.when(host2.getId()).thenReturn(3L); + + List clusterHosts = Arrays.asList(host1, host2); + Mockito.doReturn(clusterHosts).when(resourceManager).listAllUpHosts(Host.Type.Routing, 1L, 1L, 1L); + + VolumeVO volume1 = Mockito.mock(VolumeVO.class); + VolumeVO volume2 = Mockito.mock(VolumeVO.class); + + Mockito.when(volume1.getInstanceId()).thenReturn(100L); + Mockito.when(volume2.getInstanceId()).thenReturn(101L); + + List volumesInPool = Arrays.asList(volume1, volume2); + Mockito.doReturn(volumesInPool).when(volumeDao).findByPoolId(poolId); + + VMInstanceVO vmInstance1 = Mockito.mock(VMInstanceVO.class); + VMInstanceVO vmInstance2 = Mockito.mock(VMInstanceVO.class); + 
Mockito.when(vmInstance1.getHostId()).thenReturn(2L); + Mockito.when(vmInstance2.getHostId()).thenReturn(3L); + + Mockito.doReturn(vmInstance1).when(vmInstanceDao).findById(100L); + Mockito.doReturn(vmInstance2).when(vmInstanceDao).findById(101L); + + Mockito.when(storagePoolHostDao.findByPoolHost(poolId, 2L)).thenReturn(null); + Mockito.when(storagePoolHostDao.findByPoolHost(poolId, 3L)).thenReturn(null); + + Mockito.doReturn(new String[]{"sag1"}).when(storageManager).getStorageAccessGroups(null, null, null, 2L); + Mockito.doReturn(new String[]{"sag3"}).when(storageManager).getStorageAccessGroups(null, null, null, 3L); + + Mockito.doReturn(Arrays.asList(2L, 3L)).when(resourceManager).listOfHostIdsUsingTheStoragePool(poolId); + + try { + resourceManager.updateStoragePoolConnectionsOnHosts(poolId, storageAccessGroups); + Assert.fail("Expected a CloudRuntimeException to be thrown."); + } catch (CloudRuntimeException e) { + Assert.assertTrue(e.getMessage().contains("Storage access groups cannot be updated as they are currently in use by some hosts.")); + Mockito.verify(resourceManager, Mockito.never()).connectHostToStoragePool(Mockito.any(), Mockito.eq(storagePool)); + Mockito.verify(resourceManager, Mockito.never()).disconnectHostFromStoragePool(Mockito.any(), Mockito.eq(storagePool)); + } + } + + @Test(expected = CloudRuntimeException.class) + public void testNoUpHostsThrowsException() { + PrimaryDataStoreInfo primaryStore = Mockito.mock(PrimaryDataStoreInfo.class); + Mockito.when(primaryStore.getClusterId()).thenReturn(1L); + Mockito.doReturn(Collections.emptyList()).when(resourceManager).listAllUpHosts(Mockito.any(), Mockito.anyLong(), Mockito.any(), Mockito.anyLong()); + resourceManager.getEligibleUpHostsInClusterForStorageConnection(primaryStore); + } + + @Test(expected = CloudRuntimeException.class) + public void testNoUpAndEnabledHostsThrowsException() { + PrimaryDataStoreInfo primaryStore = Mockito.mock(PrimaryDataStoreInfo.class); + 
Mockito.when(primaryStore.getClusterId()).thenReturn(1L); + Mockito.doReturn(Collections.emptyList()).when(resourceManager).listAllUpAndEnabledHosts(Mockito.any(), Mockito.anyLong(), Mockito.any(), Mockito.anyLong()); + resourceManager.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryStore); + } + + @Test + public void testEligibleHostsMatchingStorageAccessGroups() { + PrimaryDataStoreInfo primaryStore = Mockito.mock(PrimaryDataStoreInfo.class); + DataStore dataStore = Mockito.mock(DataStore.class); + Mockito.when(primaryStore.getId()).thenReturn(1L); + Mockito.when(dataStore.getId()).thenReturn(1L); + Mockito.when(primaryStore.getClusterId()).thenReturn(1L); + + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + List allHosts = Arrays.asList(host1, host2); + + Mockito.when(host1.getId()).thenReturn(1L); + Mockito.when(host2.getId()).thenReturn(2L); + + Mockito.doReturn(allHosts).when(resourceManager).listAllUpHosts(Mockito.any(), Mockito.anyLong(), Mockito.any(), Mockito.anyLong()); + Mockito.doReturn(allHosts).when(resourceManager).listAllUpAndEnabledHosts(Mockito.any(), Mockito.anyLong(), Mockito.any(), Mockito.anyLong()); + Mockito.doReturn(allHosts).when(resourceManager).listAllUpAndEnabledHostsInOneZoneByHypervisor(Mockito.any(), Mockito.anyLong()); + Mockito.doReturn(Arrays.asList("group1", "group2")).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(1L); + + Mockito.doReturn(new String[]{"group1"}) + .when(storageManager).getStorageAccessGroups(null, null, null, 1L); + Mockito.doReturn(new String[]{"group3"}) + .when(storageManager).getStorageAccessGroups(null, null, null, 2L); + + List hostsToConnect = resourceManager.getEligibleUpHostsInClusterForStorageConnection(primaryStore); + + Assert.assertEquals("Only one host should match the storage access groups.", 1, hostsToConnect.size()); + Assert.assertTrue("Host1 should be included as it matches the storage access group.", 
hostsToConnect.contains(host1)); + Assert.assertFalse("Host2 should not be included as it does not match any storage access group.", hostsToConnect.contains(host2)); + + hostsToConnect = resourceManager.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryStore); + + Assert.assertEquals("Only one host should match the storage access groups.", 1, hostsToConnect.size()); + Assert.assertTrue("Host1 should be included as it matches the storage access group.", hostsToConnect.contains(host1)); + Assert.assertFalse("Host2 should not be included as it does not match any storage access group.", hostsToConnect.contains(host2)); + + hostsToConnect = resourceManager.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, 1L, Hypervisor.HypervisorType.KVM); + + Assert.assertEquals("Only one host should match the storage access groups.", 1, hostsToConnect.size()); + Assert.assertTrue("Host1 should be included as it matches the storage access group.", hostsToConnect.contains(host1)); + Assert.assertFalse("Host2 should not be included as it does not match any storage access group.", hostsToConnect.contains(host2)); + } + + @Test + public void testUpdateZoneStorageAccessGroups() { + long zoneId = 1L; + long podId = 2L; + long clusterId = 3L; + long host1Id = 1L; + long host2Id = 2L; + List newStorageAccessGroups = Arrays.asList("group1", "group2"); + + DataCenterVO zoneVO = Mockito.mock(DataCenterVO.class); + Mockito.when(dcDao.findById(zoneId)).thenReturn(zoneVO); + Mockito.when(zoneVO.getId()).thenReturn(zoneId); + Mockito.when(zoneVO.getStorageAccessGroups()).thenReturn("group1,group3"); + + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + Mockito.when(host1.getId()).thenReturn(host1Id); + Mockito.when(host2.getId()).thenReturn(host2Id); + + HostPodVO pod1 = Mockito.mock(HostPodVO.class); + ClusterVO cluster1 = Mockito.mock(ClusterVO.class); + Mockito.when(pod1.getId()).thenReturn(podId); + 
Mockito.when(cluster1.getId()).thenReturn(clusterId); + Mockito.when(podDao.findById(podId)).thenReturn(pod1); + Mockito.when(clusterDao.findById(clusterId)).thenReturn(cluster1); + Mockito.when(podDao.listByDataCenterId(zoneId)).thenReturn(Collections.singletonList(pod1)); + Mockito.when(clusterDao.listByPodId(podId)).thenReturn(Collections.singletonList(cluster1)); + Mockito.when(hostDao.findHypervisorHostInPod(podId)).thenReturn(Arrays.asList(host1, host2)); + Mockito.when(hostDao.findByDataCenterId(zoneId)).thenReturn(Arrays.asList(host1, host2)); + Mockito.when(hostDao.findByClusterId(clusterId)).thenReturn(Arrays.asList(host1, host2)); + + List hostIdsUsingStorageTags = Arrays.asList(host1Id); + Mockito.doReturn(hostIdsUsingStorageTags).when(resourceManager).listOfHostIdsUsingTheStorageAccessGroups(any(), any(), any(), any()); + + Mockito.doReturn(new String[]{"group1", "group3"}).when(storageManager).getStorageAccessGroups(null, null, null, host1Id); + Mockito.doReturn(new String[]{"group2", "group4"}).when(storageManager).getStorageAccessGroups(null, null, null, host2Id); + + resourceManager.updateZoneStorageAccessGroups(zoneId, newStorageAccessGroups); + + Mockito.verify(hostDao, Mockito.times(2)).update(host1Id, host1); + Mockito.verify(hostDao, Mockito.times(1)).update(host2Id, host2); + } + + @Test + public void testUpdatePodStorageAccessGroups() { + long podId = 2L; + long clusterId = 3L; + long host1Id = 1L; + long host2Id = 2L; + List newStorageAccessGroups = Arrays.asList("group1", "group2"); + + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + Mockito.when(host1.getId()).thenReturn(host1Id); + Mockito.when(host2.getId()).thenReturn(host2Id); + + HostPodVO pod1 = Mockito.mock(HostPodVO.class); + ClusterVO cluster1 = Mockito.mock(ClusterVO.class); + Mockito.when(pod1.getStorageAccessGroups()).thenReturn("group1,group3"); + Mockito.when(cluster1.getId()).thenReturn(clusterId); + 
Mockito.when(podDao.findById(podId)).thenReturn(pod1); + Mockito.when(clusterDao.findById(clusterId)).thenReturn(cluster1); + Mockito.when(clusterDao.listByPodId(podId)).thenReturn(Collections.singletonList(cluster1)); + Mockito.when(hostDao.findHypervisorHostInPod(podId)).thenReturn(Arrays.asList(host1, host2)); + Mockito.when(hostDao.findByPodId(podId, Host.Type.Routing)).thenReturn(Arrays.asList(host1, host2)); + Mockito.when(hostDao.findByClusterId(clusterId)).thenReturn(Arrays.asList(host1, host2)); + + List hostIdsUsingStorageTags = Arrays.asList(host1Id); + Mockito.doReturn(hostIdsUsingStorageTags).when(resourceManager).listOfHostIdsUsingTheStorageAccessGroups(any(), any(), any(), any()); + + Mockito.doReturn(new String[]{"group1", "group3"}).when(storageManager).getStorageAccessGroups(null, null, null, host1Id); + Mockito.doReturn(new String[]{"group2", "group4"}).when(storageManager).getStorageAccessGroups(null, null, null, host2Id); + + resourceManager.updatePodStorageAccessGroups(podId, newStorageAccessGroups); + + Mockito.verify(hostDao, Mockito.times(2)).update(host1Id, host1); + Mockito.verify(hostDao, Mockito.times(1)).update(host2Id, host2); + } + + @Test + public void testUpdateClusterStorageAccessGroups() { + long clusterId = 3L; + long host1Id = 1L; + long host2Id = 2L; + List newStorageAccessGroups = Arrays.asList("group1", "group2"); + + HostVO host1 = Mockito.mock(HostVO.class); + HostVO host2 = Mockito.mock(HostVO.class); + Mockito.when(host1.getId()).thenReturn(host1Id); + Mockito.when(host2.getId()).thenReturn(host2Id); + + ClusterVO cluster1 = Mockito.mock(ClusterVO.class); + Mockito.when(cluster1.getStorageAccessGroups()).thenReturn("group1,group3"); + Mockito.when(cluster1.getId()).thenReturn(clusterId); + Mockito.when(clusterDao.findById(clusterId)).thenReturn(cluster1); + Mockito.when(hostDao.findHypervisorHostInCluster(clusterId)).thenReturn(Arrays.asList(host1, host2)); + 
Mockito.when(hostDao.findByClusterId(clusterId)).thenReturn(Arrays.asList(host1, host2)); + Mockito.when(hostDao.findByClusterId(clusterId, Host.Type.Routing)).thenReturn(Arrays.asList(host1, host2)); + + List hostIdsUsingStorageTags = Arrays.asList(host1Id); + Mockito.doReturn(hostIdsUsingStorageTags).when(resourceManager).listOfHostIdsUsingTheStorageAccessGroups(any(), any(), any(), any()); + + Mockito.doReturn(new String[]{"group1", "group3"}).when(storageManager).getStorageAccessGroups(null, null, null, host1Id); + Mockito.doReturn(new String[]{"group2", "group4"}).when(storageManager).getStorageAccessGroups(null, null, null, host2Id); + + resourceManager.updateClusterStorageAccessGroups(clusterId, newStorageAccessGroups); + + Mockito.verify(hostDao, Mockito.times(2)).update(host1Id, host1); + Mockito.verify(hostDao, Mockito.times(1)).update(host2Id, host2); + } + + @Test + public void testUpdateHostStorageAccessGroups() { + long hostId = 1L; + long clusterId = 2L; + List newStorageAccessGroups = Arrays.asList("group1", "group2"); + + HostVO host = Mockito.mock(HostVO.class); + Mockito.when(host.getId()).thenReturn(hostId); + Mockito.when(host.getClusterId()).thenReturn(clusterId); + Mockito.when(host.getStorageAccessGroups()).thenReturn("group1,group3"); + + Mockito.when(hostDao.findById(hostId)).thenReturn(host); + Mockito.when(storageManager.getStorageAccessGroups(null, null, clusterId, null)) + .thenReturn(new String[]{"group3", "group4"}); + + Mockito.doNothing().when(resourceManager).checkIfAnyVolumesInUse(any(), any(), any()); + Mockito.doNothing().when(resourceManager).updateConnectionsBetweenHostsAndStoragePools(any()); + + resourceManager.updateHostStorageAccessGroups(hostId, newStorageAccessGroups); + + Mockito.verify(resourceManager).checkIfAnyVolumesInUse(eq(Arrays.asList("group1", "group2", "group3", "group4")), + eq(Arrays.asList("group3")), + eq(host)); + + Mockito.verify(resourceManager).updateConnectionsBetweenHostsAndStoragePools( + 
eq(Collections.singletonMap(host, Arrays.asList("group1", "group2", "group3", "group4"))) + ); + + Mockito.verify(host).setStorageAccessGroups("group1,group2"); + Mockito.verify(hostDao).update(hostId, host); + } } diff --git a/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java b/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java index 999bf85907b..01ab82c913d 100644 --- a/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java +++ b/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java @@ -23,8 +23,15 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import com.cloud.dc.HostPodVO; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd; +import org.apache.cloudstack.api.command.admin.storage.ConfigureStorageAccessCmd; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; @@ -75,6 +82,12 @@ import com.cloud.vm.DiskProfile; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.dao.VMInstanceDao; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.doReturn; + @RunWith(MockitoJUnitRunner.class) public class StorageManagerImplTest { @@ -125,6 +138,18 @@ public class StorageManagerImplTest { @Mock private VMInstanceVO vmInstanceVOMock; + @Mock + private HostDao hostDao; + @Mock + private HostPodDao podDao; + + @Mock + private StoragePoolAndAccessGroupMapDao storagePoolAccessGroupMapDao; + + @Mock + private 
ResourceManager resourceMgr; + + @Test public void createLocalStoragePoolName() { String hostMockName = "host1"; @@ -149,7 +174,7 @@ public class StorageManagerImplTest { Mockito.when(storagePoolInfoMock.getUuid()).thenReturn(firstBlockUuid + "-213151-df21ef333d-2d33f1"); String localStoragePoolName = storageManagerImpl.createLocalStoragePoolName(hostMock, storagePoolInfoMock); - Assert.assertEquals(expectedLocalStorageName, localStoragePoolName); + assertEquals(expectedLocalStorageName, localStoragePoolName); } private VolumeVO mockVolumeForIsVolumeSuspectedDestroyDuplicateTest() { @@ -203,7 +228,7 @@ public class StorageManagerImplTest { Mockito.when(volumeVO.getPoolId()).thenReturn(poolId); Mockito.when(volumeVO.getPath()).thenReturn(path); Mockito.when(_volumeDao.findUsableVolumesForInstance(1L)).thenReturn(List.of(volumeVO, Mockito.mock(VolumeVO.class))); - Assert.assertTrue(storageManagerImpl.isVolumeSuspectedDestroyDuplicateOfVmVolume(volume)); + assertTrue(storageManagerImpl.isVolumeSuspectedDestroyDuplicateOfVmVolume(volume)); } @Test @@ -230,7 +255,7 @@ public class StorageManagerImplTest { volume.setState(Volume.State.Allocated); PrimaryDataStoreDao storagePoolDao = Mockito.mock(PrimaryDataStoreDao.class); storageManagerImpl._storagePoolDao = storagePoolDao; - Assert.assertTrue(storageManagerImpl.storagePoolCompatibleWithVolumePool(storagePool, volume)); + assertTrue(storageManagerImpl.storagePoolCompatibleWithVolumePool(storagePool, volume)); } @@ -240,7 +265,7 @@ public class StorageManagerImplTest { "clusterAdminPassword=password;clusterDefaultMinIops=1000;" + "clusterDefaultMaxIops=2000;clusterDefaultBurstIopsPercentOfMaxIops=2"; Map uriParams = storageManagerImpl.extractUriParamsAsMap(sfUrl); - Assert.assertTrue(MapUtils.isEmpty(uriParams)); + assertTrue(MapUtils.isEmpty(uriParams)); } @Test @@ -250,7 +275,7 @@ public class StorageManagerImplTest { String path = "/PATH"; String sfUrl = String.format("%s://%s%s", scheme, host, path); Map uriParams = 
storageManagerImpl.extractUriParamsAsMap(sfUrl); - Assert.assertTrue(MapUtils.isNotEmpty(uriParams)); + assertTrue(MapUtils.isNotEmpty(uriParams)); Assert.assertEquals(scheme, uriParams.get("scheme")); Assert.assertEquals(host, uriParams.get("host")); Assert.assertEquals(path, uriParams.get("hostPath")); @@ -284,7 +309,7 @@ public class StorageManagerImplTest { StoragePool pool = Mockito.mock(StoragePool.class); Mockito.when(pool.getCapacityIops()).thenReturn(null); List> list = List.of(new Pair<>(Mockito.mock(Volume.class), Mockito.mock(DiskProfile.class))); - Assert.assertTrue(storageManagerImpl.storagePoolHasEnoughIops(100L, list, pool, false)); + assertTrue(storageManagerImpl.storagePoolHasEnoughIops(100L, list, pool, false)); } @Test @@ -295,7 +320,7 @@ public class StorageManagerImplTest { Mockito.when(storagePoolDao.findById(1L)).thenReturn(pool); Mockito.when(capacityManager.getUsedIops(pool)).thenReturn(500L); List> list = List.of(new Pair<>(Mockito.mock(Volume.class), Mockito.mock(DiskProfile.class))); - Assert.assertTrue(storageManagerImpl.storagePoolHasEnoughIops(100L, list, pool, true)); + assertTrue(storageManagerImpl.storagePoolHasEnoughIops(100L, list, pool, true)); } @Test @@ -319,7 +344,7 @@ public class StorageManagerImplTest { StoragePoolVO pool = Mockito.mock(StoragePoolVO.class); List iopsList = Arrays.asList(null, 0L); for (Long iops : iopsList) { - Assert.assertTrue(storageManagerImpl.storagePoolHasEnoughIops(iops, pool)); + assertTrue(storageManagerImpl.storagePoolHasEnoughIops(iops, pool)); } } @@ -328,7 +353,7 @@ public class StorageManagerImplTest { StoragePoolVO pool = Mockito.mock(StoragePoolVO.class); Mockito.doReturn(true).when(storageManagerImpl).storagePoolHasEnoughIops( Mockito.eq(100L), Mockito.anyList(), Mockito.eq(pool), Mockito.eq(false)); - Assert.assertTrue(storageManagerImpl.storagePoolHasEnoughIops(100L, pool)); + assertTrue(storageManagerImpl.storagePoolHasEnoughIops(100L, pool)); } @Test @@ -346,7 +371,7 @@ public class 
StorageManagerImplTest { new Pair<>(Mockito.mock(Volume.class), Mockito.mock(DiskProfile.class))); StoragePoolVO pool = Mockito.mock(StoragePoolVO.class); Mockito.when(pool.getCapacityIops()).thenReturn(null); - Assert.assertTrue(storageManagerImpl.storagePoolHasEnoughIops(list, pool)); + assertTrue(storageManagerImpl.storagePoolHasEnoughIops(list, pool)); } @Test @@ -360,7 +385,7 @@ public class StorageManagerImplTest { StoragePoolVO pool = Mockito.mock(StoragePoolVO.class); Mockito.doReturn(true).when(storageManagerImpl) .storagePoolHasEnoughIops(100L, list, pool, true); - Assert.assertTrue(storageManagerImpl.storagePoolHasEnoughIops(list, pool)); + assertTrue(storageManagerImpl.storagePoolHasEnoughIops(list, pool)); Mockito.when(profile.getDiskOfferingId()).thenReturn(2L); Mockito.when(profile.getMinIops()).thenReturn(200L); @@ -374,7 +399,7 @@ public class StorageManagerImplTest { StoragePoolVO pool = Mockito.mock(StoragePoolVO.class); List sizeList = Arrays.asList(null, 0L); for (Long size : sizeList) { - Assert.assertTrue(storageManagerImpl.storagePoolHasEnoughSpace(size, pool)); + assertTrue(storageManagerImpl.storagePoolHasEnoughSpace(size, pool)); } } @@ -390,7 +415,7 @@ public class StorageManagerImplTest { return total > asking; }).when(storageManagerImpl).checkPoolforSpace(Mockito.any(StoragePool.class), Mockito.anyLong(), Mockito.anyLong()); - Assert.assertTrue(storageManagerImpl.storagePoolHasEnoughSpace(1000L, pool)); + assertTrue(storageManagerImpl.storagePoolHasEnoughSpace(1000L, pool)); Assert.assertFalse(storageManagerImpl.storagePoolHasEnoughSpace(2200L, pool)); } @@ -402,7 +427,7 @@ public class StorageManagerImplTest { try { Mockito.doReturn(null) .when(storageManagerImpl).getCheckDatastorePolicyComplianceAnswer("policy", pool); - Assert.assertTrue(storageManagerImpl.isStoragePoolCompliantWithStoragePolicy(1L, pool)); + assertTrue(storageManagerImpl.isStoragePoolCompliantWithStoragePolicy(1L, pool)); } catch (StorageUnavailableException e) { 
Assert.fail(e.getMessage()); } @@ -410,7 +435,7 @@ public class StorageManagerImplTest { Mockito.doReturn(new com.cloud.agent.api.Answer( Mockito.mock(CheckDataStoreStoragePolicyComplainceCommand.class))) .when(storageManagerImpl).getCheckDatastorePolicyComplianceAnswer("policy", pool); - Assert.assertTrue(storageManagerImpl.isStoragePoolCompliantWithStoragePolicy(1L, pool)); + assertTrue(storageManagerImpl.isStoragePoolCompliantWithStoragePolicy(1L, pool)); } catch (StorageUnavailableException e) { Assert.fail(e.getMessage()); } @@ -496,7 +521,7 @@ public class StorageManagerImplTest { } com.cloud.agent.api.Answer answer = storageManagerImpl.getCheckDatastorePolicyComplianceAnswer("1", pool); - Assert.assertTrue(answer.getResult()); + assertTrue(answer.getResult()); } @Test @@ -625,7 +650,7 @@ public class StorageManagerImplTest { public void testCheckNFSMountOptionsForCreateNotKVM() { Map details = new HashMap<>(); details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1"); - InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class, + InvalidParameterValueException exception = assertThrows(InvalidParameterValueException.class, () -> storageManagerImpl.checkNFSMountOptionsForCreate(details, HypervisorType.XenServer, "")); Assert.assertEquals(exception.getMessage(), "NFS options can not be set for the hypervisor type " + HypervisorType.XenServer); } @@ -634,7 +659,7 @@ public class StorageManagerImplTest { public void testCheckNFSMountOptionsForCreateNotNFS() { Map details = new HashMap<>(); details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1"); - InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class, + InvalidParameterValueException exception = assertThrows(InvalidParameterValueException.class, () -> storageManagerImpl.checkNFSMountOptionsForCreate(details, HypervisorType.KVM, "")); Assert.assertEquals(exception.getMessage(), "NFS options can only be set on pool type " + 
Storage.StoragePoolType.NetworkFilesystem); } @@ -658,7 +683,7 @@ public class StorageManagerImplTest { Long accountId = 1L; details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1"); Mockito.when(accountMgr.isRootAdmin(accountId)).thenReturn(false); - PermissionDeniedException exception = Assert.assertThrows(PermissionDeniedException.class, + PermissionDeniedException exception = assertThrows(PermissionDeniedException.class, () -> storageManagerImpl.checkNFSMountOptionsForUpdate(details, pool, accountId)); Assert.assertEquals(exception.getMessage(), "Only root admin can modify nfs options"); } @@ -671,7 +696,7 @@ public class StorageManagerImplTest { details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1"); Mockito.when(accountMgr.isRootAdmin(accountId)).thenReturn(true); pool.setHypervisor(HypervisorType.XenServer); - InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class, + InvalidParameterValueException exception = assertThrows(InvalidParameterValueException.class, () -> storageManagerImpl.checkNFSMountOptionsForUpdate(details, pool, accountId)); Assert.assertEquals(exception.getMessage(), "NFS options can only be set for the hypervisor type " + HypervisorType.KVM); } @@ -685,7 +710,7 @@ public class StorageManagerImplTest { Mockito.when(accountMgr.isRootAdmin(accountId)).thenReturn(true); pool.setHypervisor(HypervisorType.KVM); pool.setPoolType(Storage.StoragePoolType.FiberChannel); - InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class, + InvalidParameterValueException exception = assertThrows(InvalidParameterValueException.class, () -> storageManagerImpl.checkNFSMountOptionsForUpdate(details, pool, accountId)); Assert.assertEquals(exception.getMessage(), "NFS options can only be set on pool type " + Storage.StoragePoolType.NetworkFilesystem); } @@ -700,7 +725,7 @@ public class StorageManagerImplTest { pool.setHypervisor(HypervisorType.KVM); 
pool.setPoolType(Storage.StoragePoolType.NetworkFilesystem); pool.setStatus(StoragePoolStatus.Up); - InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class, + InvalidParameterValueException exception = assertThrows(InvalidParameterValueException.class, () -> storageManagerImpl.checkNFSMountOptionsForUpdate(details, pool, accountId)); Assert.assertEquals(exception.getMessage(), "The storage pool should be in maintenance mode to edit nfs options"); } @@ -833,7 +858,67 @@ public class StorageManagerImplTest { overrideDefaultConfigValue(StorageManagerImpl.AllowVolumeReSizeBeyondAllocation, "_defaultValue", "true"); boolean result = storageManagerImpl.checkPoolforSpace(pool, allocatedSizeWithTemplate, totalAskingSize, true); - Assert.assertTrue(result); + assertTrue(result); + } + + @Test + public void testGetStorageAccessGroupsOnHostAllSAGsPresent() { + long hostId = 1L; + + HostVO host = Mockito.mock(HostVO.class); + ClusterVO cluster = Mockito.mock(ClusterVO.class); + HostPodVO pod = Mockito.mock(HostPodVO.class); + DataCenterVO zone = Mockito.mock(DataCenterVO.class); + + Mockito.when(hostDao.findById(hostId)).thenReturn(host); + Mockito.when(host.getClusterId()).thenReturn(2L); + Mockito.when(clusterDao.findById(2L)).thenReturn(cluster); + Mockito.when(cluster.getPodId()).thenReturn(3L); + Mockito.when(podDao.findById(3L)).thenReturn(pod); + Mockito.when(pod.getDataCenterId()).thenReturn(4L); + Mockito.when(dataCenterDao.findById(4L)).thenReturn(zone); + + Mockito.when(host.getStorageAccessGroups()).thenReturn("sag1"); + Mockito.when(cluster.getStorageAccessGroups()).thenReturn("sag2"); + Mockito.when(pod.getStorageAccessGroups()).thenReturn("sag3"); + Mockito.when(zone.getStorageAccessGroups()).thenReturn("sag4"); + + String[] sags = storageManagerImpl.getStorageAccessGroups(null, null, null, hostId); + + assertNotNull(sags); + assertEquals(4, sags.length); + assertEquals("sag1", sags[0]); + assertEquals("sag2", 
sags[1]); + assertEquals("sag3", sags[2]); + assertEquals("sag4", sags[3]); + } + + @Test + public void testGetSingleStorageAccessGroupOnHost() { + long hostId = 1L; + + HostVO host = Mockito.mock(HostVO.class); + ClusterVO cluster = Mockito.mock(ClusterVO.class); + HostPodVO pod = Mockito.mock(HostPodVO.class); + DataCenterVO zone = Mockito.mock(DataCenterVO.class); + + Mockito.when(hostDao.findById(hostId)).thenReturn(host); + Mockito.when(host.getClusterId()).thenReturn(2L); + Mockito.when(clusterDao.findById(2L)).thenReturn(cluster); + Mockito.when(cluster.getPodId()).thenReturn(3L); + Mockito.when(podDao.findById(3L)).thenReturn(pod); + Mockito.when(pod.getDataCenterId()).thenReturn(4L); + Mockito.when(dataCenterDao.findById(4L)).thenReturn(zone); + + Mockito.when(host.getStorageAccessGroups()).thenReturn(""); + Mockito.when(cluster.getStorageAccessGroups()).thenReturn("sag2"); + Mockito.when(pod.getStorageAccessGroups()).thenReturn(null); + + String[] sags = storageManagerImpl.getStorageAccessGroups(null, null, null, hostId); + + assertNotNull(sags); + assertEquals(1, sags.length); + assertEquals("sag2", sags[0]); } @Test @@ -894,4 +979,512 @@ public class StorageManagerImplTest { Assert.assertEquals("Capacity IOPS should match pool's capacity IOPS", 1000L, result.first().longValue()); Assert.assertNull("Used IOPS should be null when usedIops <= 0", result.second()); } + + + @Test + public void testNoStorageAccessGroupsOnHostAndStoragePool() { + HostVO host = Mockito.mock(HostVO.class); + StoragePoolVO storagePool = Mockito.mock(StoragePoolVO.class); + long hostId = 1L; + long poolId = 2L; + + Mockito.when(host.getId()).thenReturn(hostId); + doReturn(new String[0]).when(storageManagerImpl).getStorageAccessGroups(null, null, null, hostId); + + Mockito.when(storagePool.getId()).thenReturn(poolId); + storageManagerImpl._storagePoolAccessGroupMapDao = storagePoolAccessGroupMapDao; + Mockito.when(storagePoolAccessGroupMapDao.getStorageAccessGroups(poolId)) + 
.thenReturn(new ArrayList<>()); + + boolean result = storageManagerImpl.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(host, storagePool); + + assertTrue("Host without storage access groups should connect to a storage pool without storage access groups.", result); + } + + @Test + public void testHostWithStorageAccessGroupsAndStoragePoolWithoutStorageAccessGroups() { + HostVO host = Mockito.mock(HostVO.class); + StoragePoolVO storagePool = Mockito.mock(StoragePoolVO.class); + long hostId = 1L; + long poolId = 2L; + + Mockito.when(host.getId()).thenReturn(hostId); + doReturn(new String[]{"StorageAccessGroup1"}).when(storageManagerImpl).getStorageAccessGroups(null, null, null, hostId); + + Mockito.when(storagePool.getId()).thenReturn(poolId); + storageManagerImpl._storagePoolAccessGroupMapDao = storagePoolAccessGroupMapDao; + Mockito.when(storagePoolAccessGroupMapDao.getStorageAccessGroups(poolId)) + .thenReturn(new ArrayList<>()); + + boolean result = storageManagerImpl.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(host, storagePool); + + assertTrue("Host with storage access groups should connect to a storage pool without storage access groups.", result); + } + + @Test + public void testHostWithStorageAccessGroupsAndStoragePoolWithDifferentStorageAccessGroups() { + HostVO host = Mockito.mock(HostVO.class); + StoragePoolVO storagePool = Mockito.mock(StoragePoolVO.class); + long hostId = 1L; + long poolId = 2L; + + Mockito.when(host.getId()).thenReturn(hostId); + doReturn(new String[]{"StorageAccessGroup1"}).when(storageManagerImpl).getStorageAccessGroups(null, null, null, hostId); + + Mockito.when(storagePool.getId()).thenReturn(poolId); + storageManagerImpl._storagePoolAccessGroupMapDao = storagePoolAccessGroupMapDao; + Mockito.when(storagePoolAccessGroupMapDao.getStorageAccessGroups(poolId)) + .thenReturn(Arrays.asList("StorageAccessGroup2", "StorageAccessGroup3")); + + boolean result = 
storageManagerImpl.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(host, storagePool); + + Assert.assertFalse("Host with storage access groups should not connect to a storage pool with different storage access groups.", result); + } + + @Test + public void testHostWithStorageAccessGroupsAndStoragePoolWithMatchingStorageAccessGroups() { + HostVO host = Mockito.mock(HostVO.class); + StoragePoolVO storagePool = Mockito.mock(StoragePoolVO.class); + long hostId = 1L; + long poolId = 2L; + + Mockito.when(host.getId()).thenReturn(hostId); + doReturn(new String[]{"StorageAccessGroup1"}).when(storageManagerImpl).getStorageAccessGroups(null, null, null, hostId); + + Mockito.when(storagePool.getId()).thenReturn(poolId); + storageManagerImpl._storagePoolAccessGroupMapDao = storagePoolAccessGroupMapDao; + Mockito.when(storagePoolAccessGroupMapDao.getStorageAccessGroups(poolId)) + .thenReturn(Arrays.asList("StorageAccessGroup1", "StorageAccessGroup2")); + + boolean result = storageManagerImpl.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(host, storagePool); + + assertTrue("Host with matching storage access groups should connect to a storage pool with matching storage access groups.", result); + } + + @Test + public void testHostWithEmptySAGsOnHost() { + HostVO host = Mockito.mock(HostVO.class); + StoragePoolVO storagePool = Mockito.mock(StoragePoolVO.class); + long hostId = 1L; + long poolId = 2L; + + Mockito.when(host.getId()).thenReturn(hostId); + doReturn(new String[0]).when(storageManagerImpl).getStorageAccessGroups(null, null, null, hostId); + + Mockito.when(storagePool.getId()).thenReturn(poolId); + storageManagerImpl._storagePoolAccessGroupMapDao = storagePoolAccessGroupMapDao; + Mockito.when(storagePoolAccessGroupMapDao.getStorageAccessGroups(poolId)) + .thenReturn(Arrays.asList("StorageAccessGroup1", "StorageAccessGroup2")); + + boolean result = storageManagerImpl.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(host, storagePool); + + 
Assert.assertFalse("Host without storage access groups should not connect to a storage pool that has storage access groups.", result); + } + + @Test + public void testVolumeReadyNoVMOrVMStoppedAndPoolsWithMatchingStorageAccessGroups() { + StoragePoolVO destPool = Mockito.mock(StoragePoolVO.class); + Volume volume = Mockito.mock(Volume.class); + long srcPoolId = 2L; + long destPoolId = 3L; + + Mockito.when(volume.getState()).thenReturn(Volume.State.Ready); + Mockito.when(volume.getInstanceId()).thenReturn(null); + Mockito.when(volume.getPoolId()).thenReturn(srcPoolId); + + Mockito.when(destPool.getId()).thenReturn(destPoolId); + + List srcStorageAccessGroups = Arrays.asList("StorageAccessGroup1", "StorageAccessGroup2"); + List destStorageAccessGroups = Arrays.asList("StorageAccessGroup1", "StorageAccessGroup2"); + + doReturn(srcStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(srcPoolId); + doReturn(destStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(destPoolId); + + Pair result = storageManagerImpl.checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(destPool, volume); + + assertTrue("Volume in Ready state and no VM or VM stopped should migrate if both pools have matching storage access groups.", result.first()); + } + + @Test + public void testVolumeReadyNoVMOrVMStoppedAndPoolsWithEmptyStorageAccessGroups() { + StoragePoolVO destPool = Mockito.mock(StoragePoolVO.class); + Volume volume = Mockito.mock(Volume.class); + long srcPoolId = 2L; + long destPoolId = 3L; + + Mockito.when(volume.getState()).thenReturn(Volume.State.Ready); + Mockito.when(volume.getInstanceId()).thenReturn(null); + Mockito.when(volume.getPoolId()).thenReturn(srcPoolId); + + Mockito.when(destPool.getId()).thenReturn(destPoolId); + + List srcStorageAccessGroups = new ArrayList<>(); + List destStorageAccessGroups = new ArrayList<>(); + + 
doReturn(srcStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(srcPoolId); + doReturn(destStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(destPoolId); + + Pair result = storageManagerImpl.checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(destPool, volume); + + assertTrue("Volume with empty storage access groups should be able to fit in the destination pool.", result.first()); + } + + @Test + public void testVolumeReadyVMRunningAndHostHasCommonSAGsForBothPools() { + StoragePoolVO destPool = Mockito.mock(StoragePoolVO.class); + Volume volume = Mockito.mock(Volume.class); + long vmId = 10L; + long srcPoolId = 2L; + long destPoolId = 3L; + + Mockito.when(volume.getState()).thenReturn(Volume.State.Ready); + Mockito.when(volume.getInstanceId()).thenReturn(vmId); + Mockito.when(volume.getPoolId()).thenReturn(srcPoolId); + + Mockito.when(destPool.getId()).thenReturn(destPoolId); + + List srcStorageAccessGroups = Arrays.asList("StorageAccessGroup1", "StorageAccessGroup2"); + List destStorageAccessGroups = Arrays.asList("StorageAccessGroup2", "StorageAccessGroup3"); + + doReturn(srcStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(srcPoolId); + doReturn(destStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(destPoolId); + + Pair result = storageManagerImpl.checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(destPool, volume); + + assertTrue("Volume with host having common storage access groups should fit in both source and destination pools.", result.first()); + } + + @Test + public void testVolumeReadyVMRunningAndHostHasCommonSAGForSourcePoolButNotDestinationPool() { + StoragePoolVO destPool = Mockito.mock(StoragePoolVO.class); + Volume volume = Mockito.mock(Volume.class); + StoragePoolVO srcPool = Mockito.mock(StoragePoolVO.class); + long vmId = 10L; + long srcPoolId = 2L; + long destPoolId = 3L; + + 
Mockito.when(volume.getState()).thenReturn(Volume.State.Ready); + Mockito.when(volume.getInstanceId()).thenReturn(vmId); + Mockito.when(volume.getPoolId()).thenReturn(srcPoolId); + + Mockito.when(destPool.getId()).thenReturn(destPoolId); + Mockito.when(srcPool.getId()).thenReturn(srcPoolId); + Mockito.doReturn(srcPool).when(storagePoolDao).findById(srcPoolId); + + List srcStorageAccessGroups = Arrays.asList("StorageAccessGroup1", "StorageAccessGroup2"); + List destStorageAccessGroups = Arrays.asList("StorageAccessGroup3", "StorageAccessGroup4"); + + doReturn(srcStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(srcPoolId); + doReturn(destStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(destPoolId); + List poolIds = new ArrayList<>(); + poolIds.add(srcPool.getId()); + poolIds.add(destPool.getId()); + Mockito.doReturn(null).when(storageManagerImpl).findUpAndEnabledHostWithAccessToStoragePools(poolIds); + + Pair result = storageManagerImpl.checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(destPool, volume); + + Assert.assertFalse("Volume with host having common storage access group for source pool but not destination pool should not fit.", result.first()); + } + + @Test + public void testNoCommonHostConnected() { + StoragePoolVO destPool = Mockito.mock(StoragePoolVO.class); + StoragePoolVO srcPool = Mockito.mock(StoragePoolVO.class); + Volume volume = Mockito.mock(Volume.class); + long vmId = 10L; + long srcPoolId = 2L; + long destPoolId = 3L; + + Mockito.when(volume.getState()).thenReturn(Volume.State.Ready); + Mockito.when(volume.getInstanceId()).thenReturn(vmId); + Mockito.when(volume.getPoolId()).thenReturn(srcPoolId); + + Mockito.when(destPool.getId()).thenReturn(destPoolId); + Mockito.when(srcPool.getId()).thenReturn(srcPoolId); + Mockito.doReturn(srcPool).when(storagePoolDao).findById(srcPoolId); + List srcStorageAccessGroups = Arrays.asList("StorageAccessGroup3", "StorageAccessGroup4"); + 
List destStorageAccessGroups = Arrays.asList("StorageAccessGroup1", "StorageAccessGroup2"); + + Mockito.doReturn(srcStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(srcPoolId); + Mockito.doReturn(destStorageAccessGroups).when(storagePoolAccessGroupMapDao).getStorageAccessGroups(destPoolId); + List poolIds = new ArrayList<>(); + poolIds.add(srcPool.getId()); + poolIds.add(destPool.getId()); + Mockito.doReturn(null).when(storageManagerImpl).findUpAndEnabledHostWithAccessToStoragePools(poolIds); + Pair result = storageManagerImpl.checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(destPool, volume); + + Assert.assertFalse("Volume with host having common storage access group for destination pool but not source pool should not fit.", result.first()); + Assert.assertEquals("No common host connected to source and destination storages", result.second()); + } + + @Test + public void testConfigureStorageAccess_SkipUpdateForZone() { + Long zoneId = 1L; + List storageAccessGroups = Arrays.asList("sag1", "sag2"); + + ConfigureStorageAccessCmd cmd = Mockito.mock(ConfigureStorageAccessCmd.class); + Mockito.when(cmd.getZoneId()).thenReturn(zoneId); + Mockito.when(cmd.getPodId()).thenReturn(null); + Mockito.when(cmd.getClusterId()).thenReturn(null); + Mockito.when(cmd.getHostId()).thenReturn(null); + Mockito.when(cmd.getStorageId()).thenReturn(null); + Mockito.when(cmd.getStorageAccessGroups()).thenReturn(storageAccessGroups); + + DataCenterVO zone = Mockito.mock(DataCenterVO.class); + Mockito.when(zone.getStorageAccessGroups()).thenReturn("sag2,sag1"); + Mockito.when(dataCenterDao.findById(zoneId)).thenReturn(zone); + + boolean result = storageManagerImpl.configureStorageAccess(cmd); + + Mockito.verify(resourceMgr, Mockito.never()).updateZoneStorageAccessGroups(Mockito.anyLong(), Mockito.anyList()); + Mockito.verify(dataCenterDao, Mockito.never()).update(Mockito.eq(zoneId), Mockito.any(DataCenterVO.class)); + + assertTrue(result); + } + + @Test 
+ public void testConfigureStorageAccess_SkipUpdateForPod() { + Long podId = 1L; + List storageAccessGroups = Arrays.asList("sag1", "sag2"); + + ConfigureStorageAccessCmd cmd = Mockito.mock(ConfigureStorageAccessCmd.class); + Mockito.when(cmd.getZoneId()).thenReturn(null); + Mockito.when(cmd.getPodId()).thenReturn(podId); + Mockito.when(cmd.getClusterId()).thenReturn(null); + Mockito.when(cmd.getHostId()).thenReturn(null); + Mockito.when(cmd.getStorageId()).thenReturn(null); + Mockito.when(cmd.getStorageAccessGroups()).thenReturn(storageAccessGroups); + + HostPodVO pod = Mockito.mock(HostPodVO.class); + Mockito.when(pod.getDataCenterId()).thenReturn(1L); + Mockito.when(pod.getStorageAccessGroups()).thenReturn("sag1,sag2"); + Mockito.when(podDao.findById(podId)).thenReturn(pod); + Mockito.doNothing().when(storageManagerImpl).checkIfStorageAccessGroupsExistsOnZone(1L, storageAccessGroups); + + boolean result = storageManagerImpl.configureStorageAccess(cmd); + + Mockito.verify(resourceMgr, Mockito.never()).updatePodStorageAccessGroups(Mockito.anyLong(), Mockito.anyList()); + Mockito.verify(podDao, Mockito.never()).update(Mockito.eq(podId), Mockito.any(HostPodVO.class)); + + assertTrue(result); + } + + @Test + public void testConfigureStorageAccess_SkipUpdateForCluster() { + Long clusterId = 1L; + List storageAccessGroups = Arrays.asList("sag1", "sag2"); + + ConfigureStorageAccessCmd cmd = Mockito.mock(ConfigureStorageAccessCmd.class); + Mockito.when(cmd.getZoneId()).thenReturn(null); + Mockito.when(cmd.getPodId()).thenReturn(null); + Mockito.when(cmd.getClusterId()).thenReturn(clusterId); + Mockito.when(cmd.getHostId()).thenReturn(null); + Mockito.when(cmd.getStorageId()).thenReturn(null); + Mockito.when(cmd.getStorageAccessGroups()).thenReturn(storageAccessGroups); + + ClusterVO cluster = Mockito.mock(ClusterVO.class); + Mockito.when(cluster.getPodId()).thenReturn(1L); + Mockito.when(cluster.getStorageAccessGroups()).thenReturn("sag1,sag2"); + 
Mockito.when(clusterDao.findById(clusterId)).thenReturn(cluster); + Mockito.doNothing().when(storageManagerImpl).checkIfStorageAccessGroupsExistsOnPod(1L, storageAccessGroups); + + boolean result = storageManagerImpl.configureStorageAccess(cmd); + + Mockito.verify(resourceMgr, Mockito.never()).updateClusterStorageAccessGroups(Mockito.anyLong(), Mockito.anyList()); + Mockito.verify(clusterDao, Mockito.never()).update(Mockito.eq(clusterId), Mockito.any(ClusterVO.class)); + + assertTrue(result); + } + + @Test + public void testConfigureStorageAccess_SkipUpdateForHost() { + Long hostId = 1L; + List storageAccessGroups = Arrays.asList("sag1", "sag2"); + + ConfigureStorageAccessCmd cmd = Mockito.mock(ConfigureStorageAccessCmd.class); + Mockito.when(cmd.getZoneId()).thenReturn(null); + Mockito.when(cmd.getPodId()).thenReturn(null); + Mockito.when(cmd.getClusterId()).thenReturn(null); + Mockito.when(cmd.getHostId()).thenReturn(hostId); + Mockito.when(cmd.getStorageId()).thenReturn(null); + Mockito.when(cmd.getStorageAccessGroups()).thenReturn(storageAccessGroups); + + HostVO host = Mockito.mock(HostVO.class); + Mockito.when(host.getClusterId()).thenReturn(1L); + Mockito.when(host.getStorageAccessGroups()).thenReturn("sag1,sag2"); + Mockito.when(hostDao.findById(hostId)).thenReturn(host); + Mockito.doNothing().when(storageManagerImpl).checkIfStorageAccessGroupsExistsOnCluster(1L, storageAccessGroups); + + boolean result = storageManagerImpl.configureStorageAccess(cmd); + + Mockito.verify(resourceMgr, Mockito.never()).updateHostStorageAccessGroups(Mockito.anyLong(), Mockito.anyList()); + Mockito.verify(hostDao, Mockito.never()).update(Mockito.eq(hostId), Mockito.any(HostVO.class)); + + assertTrue(result); + } + + @Test + public void testConfigureStorageAccess_InvalidNonNullCount() { + ConfigureStorageAccessCmd cmd = Mockito.mock(ConfigureStorageAccessCmd.class); + + Mockito.when(cmd.getZoneId()).thenReturn(1L); + Mockito.when(cmd.getPodId()).thenReturn(1L); + 
Mockito.when(cmd.getClusterId()).thenReturn(null); + Mockito.when(cmd.getHostId()).thenReturn(null); + Mockito.when(cmd.getStorageId()).thenReturn(null); + + try { + storageManagerImpl.configureStorageAccess(cmd); + Assert.fail("Expected IllegalArgumentException to be thrown due to nonNullCount validation"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("Exactly one of zoneid, podid, clusterid, hostid or storagepoolid is required")); + } + } + + @Test + public void testConfigureStorageAccess_MissingStorageAccessGroups() { + ConfigureStorageAccessCmd cmd = Mockito.mock(ConfigureStorageAccessCmd.class); + + Mockito.when(cmd.getZoneId()).thenReturn(1L); + Mockito.when(cmd.getPodId()).thenReturn(null); + Mockito.when(cmd.getClusterId()).thenReturn(null); + Mockito.when(cmd.getHostId()).thenReturn(null); + Mockito.when(cmd.getStorageId()).thenReturn(null); + Mockito.when(cmd.getStorageAccessGroups()).thenReturn(null); + + try { + storageManagerImpl.configureStorageAccess(cmd); + Assert.fail("Expected InvalidParameterValueException to be thrown due to missing storageAccessGroups"); + } catch (InvalidParameterValueException e) { + assertTrue(e.getMessage().contains("storageaccessgroups parameter is required")); + } + } + + @Test + public void testCheckIfStorageAccessGroupsExistsOnZone_NoException() { + long zoneId = 1L; + List newStorageAccessGroups = Arrays.asList("group1", "group2"); + + DataCenterVO zoneVO = Mockito.mock(DataCenterVO.class); + Mockito.when(dataCenterDao.findById(zoneId)).thenReturn(zoneVO); + Mockito.when(zoneVO.getStorageAccessGroups()).thenReturn("group3,group4"); + + storageManagerImpl.checkIfStorageAccessGroupsExistsOnZone(zoneId, newStorageAccessGroups); + } + + @Test + public void testCheckIfStorageAccessGroupsExistsOnZone_ThrowsException() { + long zoneId = 1L; + List newStorageAccessGroups = Arrays.asList("group1", "group2", "group3"); + + DataCenterVO zoneVO = Mockito.mock(DataCenterVO.class); + 
Mockito.when(dataCenterDao.findById(zoneId)).thenReturn(zoneVO); + Mockito.when(zoneVO.getStorageAccessGroups()).thenReturn("group3,group4"); + + CloudRuntimeException thrownException = assertThrows(CloudRuntimeException.class, () -> { + storageManagerImpl.checkIfStorageAccessGroupsExistsOnZone(zoneId, newStorageAccessGroups); + }); + + assertTrue(thrownException.getMessage().contains("access groups already exist on the zone: [group3]")); + } + + @Test + public void testCheckIfStorageAccessGroupsExistsOnPod_NoException() { + long podId = 1L; + long zoneId = 2L; + List newStorageAccessGroups = Arrays.asList("group1", "group2"); + + HostPodVO podVO = Mockito.mock(HostPodVO.class); + DataCenterVO zoneVO = Mockito.mock(DataCenterVO.class); + + Mockito.when(podVO.getDataCenterId()).thenReturn(zoneId); + Mockito.when(podVO.getStorageAccessGroups()).thenReturn("group3,group4"); + + Mockito.when(zoneVO.getStorageAccessGroups()).thenReturn("group5,group6"); + + Mockito.when(podDao.findById(podId)).thenReturn(podVO); + Mockito.when(dataCenterDao.findById(zoneId)).thenReturn(zoneVO); + + storageManagerImpl.checkIfStorageAccessGroupsExistsOnPod(podId, newStorageAccessGroups); + } + + @Test + public void testCheckIfStorageAccessGroupsExistsOnPod_ThrowsException() { + long podId = 1L; + long zoneId = 2L; + List newStorageAccessGroups = Arrays.asList("group1", "group2", "group3"); + + HostPodVO podVO = Mockito.mock(HostPodVO.class); + DataCenterVO zoneVO = Mockito.mock(DataCenterVO.class); + + Mockito.when(podVO.getDataCenterId()).thenReturn(zoneId); + Mockito.when(podVO.getStorageAccessGroups()).thenReturn("group3,group4"); + + Mockito.when(zoneVO.getStorageAccessGroups()).thenReturn("group5,group6"); + + Mockito.when(podDao.findById(podId)).thenReturn(podVO); + Mockito.when(dataCenterDao.findById(zoneId)).thenReturn(zoneVO); + + CloudRuntimeException thrownException = assertThrows(CloudRuntimeException.class, () -> { + 
storageManagerImpl.checkIfStorageAccessGroupsExistsOnPod(podId, newStorageAccessGroups); + }); + + assertTrue(thrownException.getMessage().contains("access groups already exist on the pod: [group3]")); + } + + @Test + public void testCheckIfStorageAccessGroupsExistsOnCluster_NoException() { + long clusterId = 1L; + long podId = 2L; + long zoneId = 3L; + List newStorageAccessGroups = Arrays.asList("group1", "group2"); + + ClusterVO clusterVO = Mockito.mock(ClusterVO.class); + HostPodVO podVO = Mockito.mock(HostPodVO.class); + DataCenterVO zoneVO = Mockito.mock(DataCenterVO.class); + + Mockito.when(clusterVO.getPodId()).thenReturn(podId); + Mockito.when(clusterVO.getStorageAccessGroups()).thenReturn("group4,group5"); + + Mockito.when(podVO.getDataCenterId()).thenReturn(zoneId); + Mockito.when(podVO.getStorageAccessGroups()).thenReturn("group6,group7"); + + Mockito.when(zoneVO.getStorageAccessGroups()).thenReturn("group8,group9"); + + Mockito.when(clusterDao.findById(clusterId)).thenReturn(clusterVO); + Mockito.when(podDao.findById(podId)).thenReturn(podVO); + Mockito.when(dataCenterDao.findById(zoneId)).thenReturn(zoneVO); + + storageManagerImpl.checkIfStorageAccessGroupsExistsOnCluster(clusterId, newStorageAccessGroups); + } + + @Test + public void testCheckIfStorageAccessGroupsExistsOnCluster_ThrowsException() { + long clusterId = 1L; + long podId = 2L; + long zoneId = 3L; + List newStorageAccessGroups = Arrays.asList("group1", "group2", "group4"); + + ClusterVO clusterVO = Mockito.mock(ClusterVO.class); + HostPodVO podVO = Mockito.mock(HostPodVO.class); + DataCenterVO zoneVO = Mockito.mock(DataCenterVO.class); + + Mockito.when(clusterVO.getPodId()).thenReturn(podId); + Mockito.when(clusterVO.getStorageAccessGroups()).thenReturn("group4,group5"); + + Mockito.when(podVO.getDataCenterId()).thenReturn(zoneId); + Mockito.when(podVO.getStorageAccessGroups()).thenReturn("group6,group7"); + + Mockito.when(zoneVO.getStorageAccessGroups()).thenReturn("group8,group9"); + + 
Mockito.when(clusterDao.findById(clusterId)).thenReturn(clusterVO); + Mockito.when(podDao.findById(podId)).thenReturn(podVO); + Mockito.when(dataCenterDao.findById(zoneId)).thenReturn(zoneVO); + + CloudRuntimeException thrownException = assertThrows(CloudRuntimeException.class, () -> { + storageManagerImpl.checkIfStorageAccessGroupsExistsOnCluster(clusterId, newStorageAccessGroups); + }); + + assertTrue(thrownException.getMessage().contains("access groups already exist on the cluster: [group4]")); + } } diff --git a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java index 7dcf30c55e4..7b24451f066 100644 --- a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java +++ b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java @@ -1679,6 +1679,8 @@ public class VolumeApiServiceImplTest { Mockito.when(primaryDataStoreDaoMock.findById(1L)).thenReturn(srcStoragePoolVOMock); Mockito.when(srcStoragePoolVOMock.getPoolType()).thenReturn(Storage.StoragePoolType.PowerFlex); Mockito.when(dataStoreMgr.getDataStore(2L, DataStoreRole.Primary)).thenReturn( dataStore); + Pair checkResult = new Pair<>(true, "success"); + Mockito.doReturn(checkResult).when(storageMgr).checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(any(), any()); volumeApiServiceImpl.migrateVolume(migrateVolumeCmd); } catch (InvalidParameterValueException e) { diff --git a/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java b/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java index c700188a599..7421eb7ae2d 100644 --- a/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java +++ b/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java @@ -24,6 +24,7 @@ import com.cloud.storage.ScopeType; import com.cloud.storage.Storage; import com.cloud.storage.StorageManagerImpl; import 
com.cloud.storage.StoragePoolStatus; +import com.cloud.storage.dao.StoragePoolHostDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.junit.Before; @@ -32,12 +33,11 @@ import org.mockito.Mockito; import java.util.Collections; -import static org.mockito.ArgumentMatchers.nullable; - public class StoragePoolMonitorTest { private StorageManagerImpl storageManager; private PrimaryDataStoreDao poolDao; + private StoragePoolHostDao storagePoolHostDao; private StoragePoolMonitor storagePoolMonitor; private HostVO host; private StoragePoolVO pool; @@ -47,8 +47,9 @@ public class StoragePoolMonitorTest { public void setUp() throws Exception { storageManager = Mockito.mock(StorageManagerImpl.class); poolDao = Mockito.mock(PrimaryDataStoreDao.class); + storagePoolHostDao = Mockito.mock(StoragePoolHostDao.class); - storagePoolMonitor = new StoragePoolMonitor(storageManager, poolDao, null); + storagePoolMonitor = new StoragePoolMonitor(storageManager, poolDao, storagePoolHostDao, null); host = new HostVO("some-uuid"); pool = new StoragePoolVO(); pool.setScope(ScopeType.CLUSTER); @@ -61,14 +62,26 @@ public class StoragePoolMonitorTest { @Test public void testProcessConnectStoragePoolNormal() throws Exception { - Mockito.when(poolDao.listBy(nullable(Long.class), nullable(Long.class), nullable(Long.class), Mockito.any(ScopeType.class))).thenReturn(Collections.singletonList(pool)); - Mockito.when(poolDao.findZoneWideStoragePoolsByTags(Mockito.anyLong(), Mockito.any(String[].class), Mockito.anyBoolean())).thenReturn(Collections.emptyList()); - Mockito.when(poolDao.findZoneWideStoragePoolsByHypervisor(Mockito.anyLong(), Mockito.any(Hypervisor.HypervisorType.class))).thenReturn(Collections.emptyList()); - Mockito.doReturn(true).when(storageManager).connectHostToSharedPool(host, pool.getId()); + HostVO hostMock = Mockito.mock(HostVO.class); + StartupRoutingCommand startupRoutingCommand = 
Mockito.mock(StartupRoutingCommand.class); + StoragePoolVO poolMock = Mockito.mock(StoragePoolVO.class); + Mockito.when(poolMock.getScope()).thenReturn(ScopeType.CLUSTER); + Mockito.when(poolMock.getStatus()).thenReturn(StoragePoolStatus.Up); + Mockito.when(poolMock.getId()).thenReturn(123L); + Mockito.when(poolMock.getPoolType()).thenReturn(Storage.StoragePoolType.Filesystem); + Mockito.when(hostMock.getDataCenterId()).thenReturn(1L); + Mockito.when(hostMock.getPodId()).thenReturn(1L); + Mockito.when(hostMock.getClusterId()).thenReturn(1L); + Mockito.when(startupRoutingCommand.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM); + Mockito.when(poolDao.findStoragePoolsByEmptyStorageAccessGroups(1L, 1L, 1L, ScopeType.CLUSTER, null)).thenReturn(Collections.singletonList(pool)); + Mockito.when(poolDao.findStoragePoolsByEmptyStorageAccessGroups(1L, null, null, ScopeType.ZONE, null)).thenReturn(Collections.emptyList()); + Mockito.when(poolDao.findStoragePoolsByEmptyStorageAccessGroups(1L, null, null, ScopeType.ZONE, Hypervisor.HypervisorType.KVM)).thenReturn(Collections.emptyList()); + Mockito.when(poolDao.findStoragePoolsByEmptyStorageAccessGroups(1L, null, null, ScopeType.ZONE, Hypervisor.HypervisorType.Any)).thenReturn(Collections.emptyList()); + Mockito.doReturn(true).when(storageManager).connectHostToSharedPool(hostMock, 123L); - storagePoolMonitor.processConnect(host, cmd, false); + storagePoolMonitor.processConnect(hostMock, startupRoutingCommand, false); - Mockito.verify(storageManager, Mockito.times(1)).connectHostToSharedPool(Mockito.eq(host), Mockito.eq(pool.getId())); + Mockito.verify(storageManager, Mockito.times(1)).connectHostToSharedPool(Mockito.eq(hostMock), Mockito.eq(pool.getId())); Mockito.verify(storageManager, Mockito.times(1)).createCapacityEntry(Mockito.eq(pool.getId())); } diff --git a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java index 
f07d2af21af..7c61fa04a72 100644 --- a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java +++ b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java @@ -33,6 +33,7 @@ import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.lenient; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.when; import java.util.ArrayList; @@ -40,6 +41,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import com.cloud.storage.StorageManager; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.SecurityChecker; import org.apache.cloudstack.api.BaseCmd.HTTPMethod; @@ -365,6 +367,9 @@ public class UserVmManagerImplTest { @Mock private VMInstanceVO vmInstanceMock; + @Mock + StorageManager storageManager; + private static final long vmId = 1l; private static final long zoneId = 2L; private static final long accountId = 3L; @@ -483,7 +488,7 @@ public class UserVmManagerImplTest { verifyMethodsThatAreAlwaysExecuted(); Mockito.verify(userVmManagerImpl).updateDisplayVmFlag(false, vmId, userVmVoMock); - Mockito.verify(userVmDetailsDao, Mockito.times(0)).removeDetail(anyLong(), anyString()); + Mockito.verify(userVmDetailsDao, times(0)).removeDetail(anyLong(), anyString()); } @Test @@ -501,8 +506,8 @@ public class UserVmManagerImplTest { userVmManagerImpl.updateVirtualMachine(updateVmCommand); verifyMethodsThatAreAlwaysExecuted(); Mockito.verify(userVmDetailsDao).removeDetail(vmId, "userdetail"); - Mockito.verify(userVmDetailsDao, Mockito.times(0)).removeDetail(vmId, "systemdetail"); - Mockito.verify(userVmManagerImpl, Mockito.times(0)).updateDisplayVmFlag(false, vmId, userVmVoMock); + Mockito.verify(userVmDetailsDao, times(0)).removeDetail(vmId, "systemdetail"); + Mockito.verify(userVmManagerImpl, times(0)).updateDisplayVmFlag(false, vmId, userVmVoMock); } @Test @@ -566,11 +571,11 @@ public class 
UserVmManagerImplTest { userVmManagerImpl.updateVirtualMachine(updateVmCommand); verifyMethodsThatAreAlwaysExecuted(); - Mockito.verify(userVmVoMock, Mockito.times(cleanUpDetails || isDetailsEmpty ? 0 : 1)).setDetails(details); - Mockito.verify(userVmDetailsDao, Mockito.times(cleanUpDetails ? 1 : 0)).removeDetail(vmId, "existingdetail"); - Mockito.verify(userVmDetailsDao, Mockito.times(0)).removeDetail(vmId, "systemdetail"); - Mockito.verify(userVmDao, Mockito.times(cleanUpDetails || isDetailsEmpty ? 0 : 1)).saveDetails(userVmVoMock); - Mockito.verify(userVmManagerImpl, Mockito.times(0)).updateDisplayVmFlag(false, vmId, userVmVoMock); + Mockito.verify(userVmVoMock, times(cleanUpDetails || isDetailsEmpty ? 0 : 1)).setDetails(details); + Mockito.verify(userVmDetailsDao, times(cleanUpDetails ? 1 : 0)).removeDetail(vmId, "existingdetail"); + Mockito.verify(userVmDetailsDao, times(0)).removeDetail(vmId, "systemdetail"); + Mockito.verify(userVmDao, times(cleanUpDetails || isDetailsEmpty ? 0 : 1)).saveDetails(userVmVoMock); + Mockito.verify(userVmManagerImpl, times(0)).updateDisplayVmFlag(false, vmId, userVmVoMock); } private void configureDoNothingForDetailsMethod() { @@ -659,7 +664,7 @@ public class UserVmManagerImplTest { String returnedMacAddress = userVmManagerImpl.validateOrReplaceMacAddress(macAddress, networkMock); - Mockito.verify(networkModel, Mockito.times(times)).getNextAvailableMacAddressInNetwork(Mockito.anyLong()); + Mockito.verify(networkModel, times(times)).getNextAvailableMacAddressInNetwork(Mockito.anyLong()); assertEquals(expectedMacAddress, returnedMacAddress); } @@ -736,7 +741,7 @@ public class UserVmManagerImplTest { long rootDiskSize = userVmManagerImpl.configureCustomRootDiskSize(customParameters, template, Hypervisor.HypervisorType.KVM, diskfferingVo); Assert.assertEquals(expectedRootDiskSize, rootDiskSize); - Mockito.verify(userVmManagerImpl, 
Mockito.times(timesVerifyIfHypervisorSupports)).verifyIfHypervisorSupportsRootdiskSizeOverride(Mockito.any()); + Mockito.verify(userVmManagerImpl, times(timesVerifyIfHypervisorSupports)).verifyIfHypervisorSupportsRootdiskSizeOverride(Mockito.any()); } @Test @@ -1613,18 +1618,18 @@ public class UserVmManagerImplTest { Long size = volumes.stream().filter(VolumeVO::isDisplay).mapToLong(VolumeVO::getSize).sum(); try { userVmManagerImpl.checkVolumesLimits(account, volumes); - Mockito.verify(resourceLimitMgr, Mockito.times(1)) + Mockito.verify(resourceLimitMgr, times(1)) .checkResourceLimit(account, Resource.ResourceType.volume, 4); - Mockito.verify(resourceLimitMgr, Mockito.times(1)) + Mockito.verify(resourceLimitMgr, times(1)) .checkResourceLimit(account, Resource.ResourceType.primary_storage, size); - Mockito.verify(resourceLimitMgr, Mockito.times(1)) + Mockito.verify(resourceLimitMgr, times(1)) .checkResourceLimitWithTag(account, Resource.ResourceType.volume, "tag1", 2); - Mockito.verify(resourceLimitMgr, Mockito.times(1)) + Mockito.verify(resourceLimitMgr, times(1)) .checkResourceLimitWithTag(account, Resource.ResourceType.volume, "tag2", 3); - Mockito.verify(resourceLimitMgr, Mockito.times(1)) + Mockito.verify(resourceLimitMgr, times(1)) .checkResourceLimitWithTag(account, Resource.ResourceType.primary_storage, "tag1", vol1.getSize() + vol5.getSize()); - Mockito.verify(resourceLimitMgr, Mockito.times(1)) + Mockito.verify(resourceLimitMgr, times(1)) .checkResourceLimitWithTag(account, Resource.ResourceType.primary_storage, "tag2", vol1.getSize() + vol3.getSize() + vol5.getSize()); } catch (ResourceAllocationException e) { @@ -1651,7 +1656,7 @@ public class UserVmManagerImplTest { userVmManagerImpl.validateStrictHostTagCheck(vm, destinationHostVO); Mockito.verify( - destinationHostVO, Mockito.times(1) + destinationHostVO, times(1) ).checkHostServiceOfferingAndTemplateTags(Mockito.any(ServiceOffering.class), Mockito.any(VirtualMachineTemplate.class), 
Mockito.anySet()); } @@ -2857,7 +2862,7 @@ public class UserVmManagerImplTest { NetworkVO defaultNetwork = userVmManagerImpl.addNicsToApplicableNetworksAndReturnDefaultNetwork(applicableNetworks, requestedIPv4ForNics, requestedIPv6ForNics, networks); - Mockito.verify(networks, Mockito.times(2)).put(Mockito.any(), Mockito.any()); + Mockito.verify(networks, times(2)).put(Mockito.any(), Mockito.any()); Assert.assertEquals(defaultNetwork, networkMock); } @@ -3125,4 +3130,94 @@ public class UserVmManagerImplTest { Mockito.verify(userVmManagerImpl, Mockito.never()).resourceCountIncrement(Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.any()); } } + + @Test + public void validateStorageAccessGroupsOnHostsMatchingSAGsNoException() { + Host srcHost = Mockito.mock(Host.class); + Host destHost = Mockito.mock(Host.class); + + Mockito.when(srcHost.getId()).thenReturn(1L); + Mockito.when(destHost.getId()).thenReturn(2L); + when(storageManager.getStorageAccessGroups(null, null, null, srcHost.getId())).thenReturn(new String[]{"sag1", "sag2"}); + when(storageManager.getStorageAccessGroups(null, null, null, destHost.getId())).thenReturn(new String[]{"sag1", "sag2", "sag3"}); + + userVmManagerImpl.validateStorageAccessGroupsOnHosts(srcHost, destHost); + + Mockito.verify(storageManager, times(1)).getStorageAccessGroups(null, null, null, srcHost.getId()); + Mockito.verify(storageManager, times(1)).getStorageAccessGroups(null, null, null, destHost.getId()); + } + + @Test(expected = CloudRuntimeException.class) + public void validateSAGsOnHostsNonMatchingSAGsThrowsException() { + Host srcHost = Mockito.mock(Host.class); + Host destHost = Mockito.mock(Host.class); + + Mockito.when(srcHost.getId()).thenReturn(1L); + Mockito.when(destHost.getId()).thenReturn(2L); + when(storageManager.getStorageAccessGroups(null, null, null, srcHost.getId())).thenReturn(new String[]{"sag1", "sag2"}); + when(storageManager.getStorageAccessGroups(null, null, null, destHost.getId())).thenReturn(new 
String[]{"sag1", "sag3"}); + + userVmManagerImpl.validateStorageAccessGroupsOnHosts(srcHost, destHost); + } + + @Test + public void validateEmptyStorageAccessGroupOnHosts() { + Host srcHost = Mockito.mock(Host.class); + Host destHost = Mockito.mock(Host.class); + + Mockito.when(srcHost.getId()).thenReturn(1L); + Mockito.when(destHost.getId()).thenReturn(2L); + when(storageManager.getStorageAccessGroups(null, null, null, srcHost.getId())).thenReturn(new String[]{}); + when(storageManager.getStorageAccessGroups(null, null, null, destHost.getId())).thenReturn(new String[]{}); + + userVmManagerImpl.validateStorageAccessGroupsOnHosts(srcHost, destHost); + + Mockito.verify(storageManager, times(1)).getStorageAccessGroups(null, null, null, srcHost.getId()); + Mockito.verify(storageManager, times(1)).getStorageAccessGroups(null, null, null, destHost.getId()); + } + + @Test + public void validateSAGsOnHostsNullStorageAccessGroups() { + Host srcHost = Mockito.mock(Host.class); + Host destHost = Mockito.mock(Host.class); + + Mockito.when(srcHost.getId()).thenReturn(1L); + Mockito.when(destHost.getId()).thenReturn(2L); + when(storageManager.getStorageAccessGroups(null, null, null, srcHost.getId())).thenReturn(null); + when(storageManager.getStorageAccessGroups(null, null, null, destHost.getId())).thenReturn(null); + + userVmManagerImpl.validateStorageAccessGroupsOnHosts(srcHost, destHost); + + Mockito.verify(storageManager, times(1)).getStorageAccessGroups(null, null, null, srcHost.getId()); + Mockito.verify(storageManager, times(1)).getStorageAccessGroups(null, null, null, destHost.getId()); + } + + @Test(expected = CloudRuntimeException.class) + public void validateSAGsOnDestHostNullStorageAccessGroups() { + Host srcHost = Mockito.mock(Host.class); + Host destHost = Mockito.mock(Host.class); + + Mockito.when(srcHost.getId()).thenReturn(1L); + Mockito.when(destHost.getId()).thenReturn(2L); + when(storageManager.getStorageAccessGroups(null, null, null, 
srcHost.getId())).thenReturn(new String[]{"sag1", "sag2"}); + when(storageManager.getStorageAccessGroups(null, null, null, destHost.getId())).thenReturn(null); + + userVmManagerImpl.validateStorageAccessGroupsOnHosts(srcHost, destHost); + } + + @Test + public void validateNullStorageAccessGroupsOnSrcHost() { + Host srcHost = Mockito.mock(Host.class); + Host destHost = Mockito.mock(Host.class); + + Mockito.when(srcHost.getId()).thenReturn(1L); + Mockito.when(destHost.getId()).thenReturn(2L); + when(storageManager.getStorageAccessGroups(null, null, null, srcHost.getId())).thenReturn(null); + when(storageManager.getStorageAccessGroups(null, null, null, destHost.getId())).thenReturn(new String[]{"sag1", "sag2"}); + + userVmManagerImpl.validateStorageAccessGroupsOnHosts(srcHost, destHost); + + Mockito.verify(storageManager, times(1)).getStorageAccessGroups(null, null, null, srcHost.getId()); + Mockito.verify(storageManager, times(1)).getStorageAccessGroups(null, null, null, destHost.getId()); + } } diff --git a/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java b/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java index cdd23b0ccc2..d4f3569cb57 100644 --- a/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java +++ b/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java @@ -189,7 +189,7 @@ public class MockConfigurationManagerImpl extends ManagerBase implements Configu * @see com.cloud.configuration.ConfigurationService#createPod(long, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String) */ @Override - public Pod createPod(long zoneId, String name, String startIp, String endIp, String gateway, String netmask, String allocationState) { + public Pod createPod(long zoneId, String name, String startIp, String endIp, String gateway, String netmask, String allocationState, List storageAccessGroups) { // TODO Auto-generated method stub return null; } @@ -507,7 
+507,7 @@ public class MockConfigurationManagerImpl extends ManagerBase implements Configu */ @Override public HostPodVO createPod(long userId, String podName, DataCenter zone, String gateway, String cidr, String startIp, String endIp, String allocationState, - boolean skipGatewayOverlapCheck) { + boolean skipGatewayOverlapCheck, List storageAccessGroups) { // TODO Auto-generated method stub return null; } @@ -632,8 +632,8 @@ public class MockConfigurationManagerImpl extends ManagerBase implements Configu */ @Override public DataCenterVO createZone(long userId, String zoneName, String dns1, String dns2, String internalDns1, String internalDns2, String guestCidr, String domain, - Long domainId, NetworkType zoneType, String allocationState, String networkDomain, boolean isSecurityGroupEnabled, boolean isLocalStorageEnabled, String ip6Dns1, - String ip6Dns2, boolean isEdge) { + Long domainId, NetworkType zoneType, String allocationState, String networkDomain, boolean isSecurityGroupEnabled, boolean isLocalStorageEnabled, String ip6Dns1, + String ip6Dns2, boolean isEdge, List storageAccessGroups) { // TODO Auto-generated method stub return null; } diff --git a/server/src/test/java/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java b/server/src/test/java/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java index dac6674109a..d2f2cc1d184 100644 --- a/server/src/test/java/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java +++ b/server/src/test/java/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java @@ -75,6 +75,7 @@ import com.cloud.network.vpn.RemoteAccessVpnService; import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; import com.cloud.projects.ProjectManager; +import com.cloud.resource.ResourceManager; import com.cloud.server.ConfigurationServer; import com.cloud.server.ManagementService; import com.cloud.service.dao.ServiceOfferingDaoImpl; @@ -254,6 
+255,11 @@ public class return Mockito.mock(Ipv6AddressManager.class); } + @Bean + public ResourceManager resourceManager() { + return Mockito.mock(ResourceManager.class); + } + @Bean public ConfigurationDao configDao() { return Mockito.mock(ConfigurationDao.class); diff --git a/server/src/test/resources/createNetworkOffering.xml b/server/src/test/resources/createNetworkOffering.xml index a3f43407c61..e62c4c0660f 100644 --- a/server/src/test/resources/createNetworkOffering.xml +++ b/server/src/test/resources/createNetworkOffering.xml @@ -57,6 +57,7 @@ + diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index c05b8fe2798..3248016528a 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -257,6 +257,8 @@ known_categories = { 'deleteASNRange': 'AS Number Range', 'listASNumbers': 'AS Number', 'releaseASNumber': 'AS Number', + 'configureStorageAccess': 'Storage Access Groups', + 'listStorageAccessGroups': 'Storage Access Groups' } diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index ebfe6bda1b2..0c7c64b3183 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -72,6 +72,7 @@ "label.action.delete.webhook.deliveries": "Delete deliveries", "label.action.change.primary.storage.scope": "Change primary storage scope", "label.action.configure.stickiness": "Stickiness", +"label.action.configure.storage.access.group": "Update storage access group", "label.action.copy.iso": "Copy ISO", "label.action.copy.snapshot": "Copy Snapshot", "label.action.copy.template": "Copy Template", @@ -220,6 +221,10 @@ "label.action.unmanage.instance": "Unmanage Instance", "label.action.unmanage.instances": "Unmanage Instances", "label.action.unmanage.virtualmachine": "Unmanage Instance", +"label.action.update.cluster": "Update cluster", +"label.action.update.pod": "Update pod", +"label.action.update.zone": "Update zone", +"label.action.update.storage.pool": "Update storage pool", "label.action.unmanage.volume": "Unmanage Volume", 
"label.action.unmanage.volumes": "Unmanage Volumes", "label.action.update.host": "Update host", @@ -2165,6 +2170,10 @@ "label.srx": "SRX", "label.srx.firewall": "Juniper SRX firewall", "label.ssh.key.pairs": "SSH key pairs", +"label.storageaccessgroups": "Storage Access Groups", +"label.clusterstorageaccessgroups": "Cluster Storage Access Groups", +"label.podstorageaccessgroups": "Pod Storage Access Groups", +"label.zonestorageaccessgroups": "Zone Storage Access Groups", "label.uefi.supported": "UEFI supported", "label.usediops": "IOPS used", "label.userdataid": "Userdata ID", @@ -2883,6 +2892,7 @@ "message.configuring.guest.traffic": "Configuring guest traffic", "message.configuring.physical.networks": "Configuring physical Networks", "message.configuring.public.traffic": "Configuring public traffic", +"message.configuring.storage.access.failed": "Configuring storage access failed", "message.configuring.nsx.public.traffic": "Configuring NSX public traffic", "message.configuring.storage.traffic": "Configuring storage traffic", "message.confirm.action.force.reconnect": "Please confirm that you want to force reconnect this host.", diff --git a/ui/src/config/section/infra/clusters.js b/ui/src/config/section/infra/clusters.js index 883efd463c3..c03a1716a8d 100644 --- a/ui/src/config/section/infra/clusters.js +++ b/ui/src/config/section/infra/clusters.js @@ -35,7 +35,7 @@ export default { fields.push('zonename') return fields }, - details: ['name', 'id', 'allocationstate', 'clustertype', 'managedstate', 'arch', 'hypervisortype', 'podname', 'zonename', 'drsimbalance'], + details: ['name', 'id', 'allocationstate', 'clustertype', 'managedstate', 'arch', 'hypervisortype', 'podname', 'zonename', 'drsimbalance', 'storageaccessgroups', 'podstorageaccessgroups', 'zonestorageaccessgroups'], related: [{ name: 'host', title: 'label.hosts', @@ -83,12 +83,8 @@ export default { icon: 'edit-outlined', label: 'label.edit', dataView: true, - args: ['clustername', 'arch'], - mapping: { - 
arch: { - options: ['x86_64', 'aarch64'] - } - } + popup: true, + component: shallowRef(defineAsyncComponent(() => import('@/views/infra/ClusterUpdate.vue'))) }, { api: 'updateCluster', diff --git a/ui/src/config/section/infra/hosts.js b/ui/src/config/section/infra/hosts.js index 501283984b8..474177918e4 100644 --- a/ui/src/config/section/infra/hosts.js +++ b/ui/src/config/section/infra/hosts.js @@ -45,7 +45,7 @@ export default { fields.push('managementservername') return fields }, - details: ['name', 'id', 'resourcestate', 'ipaddress', 'hypervisor', 'arch', 'type', 'clustername', 'podname', 'zonename', 'managementservername', 'disconnected', 'created'], + details: ['name', 'id', 'resourcestate', 'ipaddress', 'hypervisor', 'arch', 'type', 'clustername', 'podname', 'zonename', 'storageaccessgroups', 'clusterstorageaccessgroups', 'podstorageaccessgroups', 'zonestorageaccessgroups', 'managementservername', 'disconnected', 'created'], tabs: [{ name: 'details', component: shallowRef(defineAsyncComponent(() => import('@/components/view/DetailsTab.vue'))) diff --git a/ui/src/config/section/infra/pods.js b/ui/src/config/section/infra/pods.js index 595b35f4fb9..66d38c08896 100644 --- a/ui/src/config/section/infra/pods.js +++ b/ui/src/config/section/infra/pods.js @@ -26,7 +26,7 @@ export default { permission: ['listPods'], searchFilters: ['name', 'zoneid'], columns: ['name', 'allocationstate', 'gateway', 'netmask', 'zonename'], - details: ['name', 'id', 'allocationstate', 'netmask', 'gateway', 'zonename'], + details: ['name', 'id', 'allocationstate', 'netmask', 'gateway', 'zonename', 'storageaccessgroups', 'zonestorageaccessgroups'], related: [{ name: 'cluster', title: 'label.clusters', @@ -71,7 +71,8 @@ export default { icon: 'edit-outlined', label: 'label.edit', dataView: true, - args: ['name', 'netmask', 'gateway'] + popup: true, + component: shallowRef(defineAsyncComponent(() => import('@/views/infra/PodUpdate.vue'))) }, { api: 'updatePod', diff --git 
a/ui/src/config/section/infra/primaryStorages.js b/ui/src/config/section/infra/primaryStorages.js index 1b0e5ef1634..f127a0853b9 100644 --- a/ui/src/config/section/infra/primaryStorages.js +++ b/ui/src/config/section/infra/primaryStorages.js @@ -35,7 +35,7 @@ export default { fields.push('zonename') return fields }, - details: ['name', 'id', 'ipaddress', 'type', 'details', 'nfsmountopts', 'scope', 'tags', 'path', 'provider', 'hypervisor', 'overprovisionfactor', 'disksizetotal', 'disksizeallocated', 'disksizeused', 'capacityiops', 'usediops', 'clustername', 'podname', 'zonename', 'created'], + details: ['name', 'id', 'ipaddress', 'type', 'details', 'nfsmountopts', 'scope', 'tags', 'storageaccessgroups', 'path', 'provider', 'hypervisor', 'overprovisionfactor', 'disksizetotal', 'disksizeallocated', 'disksizeused', 'capacityiops', 'usediops', 'clustername', 'podname', 'zonename', 'created'], related: [{ name: 'volume', title: 'label.volumes', diff --git a/ui/src/config/section/infra/zones.js b/ui/src/config/section/infra/zones.js index cb95bce8f75..de971858ab4 100644 --- a/ui/src/config/section/infra/zones.js +++ b/ui/src/config/section/infra/zones.js @@ -34,7 +34,7 @@ export default { fields.push('order') return fields }, - details: ['name', 'id', 'allocationstate', 'type', 'networktype', 'guestcidraddress', 'localstorageenabled', 'securitygroupsenabled', 'dns1', 'dns2', 'internaldns1', 'internaldns2', 'asnrange'], + details: ['name', 'id', 'allocationstate', 'type', 'networktype', 'guestcidraddress', 'localstorageenabled', 'securitygroupsenabled', 'dns1', 'dns2', 'internaldns1', 'internaldns2', 'asnrange', 'storageaccessgroups'], related: [{ name: 'pod', title: 'label.pods', @@ -118,8 +118,8 @@ export default { icon: 'edit-outlined', label: 'label.action.edit.zone', dataView: true, - args: ['name', 'dns1', 'dns2', 'ip6dns1', 'ip6dns2', 'internaldns1', 'internaldns2', 'guestcidraddress', 'domain', 'localstorageenabled'], - show: (record) => { return record.networktype 
=== 'Advanced' } + popup: true, + component: shallowRef(defineAsyncComponent(() => import('@/views/infra/ZoneUpdate.vue'))) }, { api: 'updateZone', diff --git a/ui/src/config/section/offering.js b/ui/src/config/section/offering.js index f83daaea763..8da45a63699 100644 --- a/ui/src/config/section/offering.js +++ b/ui/src/config/section/offering.js @@ -40,7 +40,7 @@ export default { filters: ['active', 'inactive'], columns: ['name', 'displaytext', 'state', 'cpunumber', 'cpuspeed', 'memory', 'domain', 'zone', 'order'], details: () => { - var fields = ['name', 'id', 'displaytext', 'offerha', 'provisioningtype', 'storagetype', 'iscustomized', 'iscustomizediops', 'limitcpuuse', 'cpunumber', 'cpuspeed', 'memory', 'hosttags', 'tags', 'storagetags', 'domain', 'zone', 'created', 'dynamicscalingenabled', 'diskofferingstrictness', 'encryptroot', 'purgeresources'] + var fields = ['name', 'id', 'displaytext', 'offerha', 'provisioningtype', 'storagetype', 'iscustomized', 'iscustomizediops', 'limitcpuuse', 'cpunumber', 'cpuspeed', 'memory', 'hosttags', 'tags', 'storageaccessgroups', 'storagetags', 'domain', 'zone', 'created', 'dynamicscalingenabled', 'diskofferingstrictness', 'encryptroot', 'purgeresources'] if (store.getters.apis.createServiceOffering && store.getters.apis.createServiceOffering.params.filter(x => x.name === 'storagepolicy').length > 0) { fields.splice(6, 0, 'vspherestoragepolicy') @@ -95,7 +95,7 @@ export default { label: 'label.edit', docHelp: 'adminguide/service_offerings.html#modifying-or-deleting-a-service-offering', dataView: true, - args: ['name', 'displaytext', 'storagetags', 'hosttags'] + args: ['name', 'displaytext', 'storageaccessgroups', 'hosttags'] }, { api: 'updateServiceOffering', icon: 'lock-outlined', diff --git a/ui/src/views/AutogenView.vue b/ui/src/views/AutogenView.vue index 17f3460614c..7d7d5ddb6b7 100644 --- a/ui/src/views/AutogenView.vue +++ b/ui/src/views/AutogenView.vue @@ -1191,7 +1191,7 @@ export default { this.showAction = true const 
listIconForFillValues = ['copy-outlined', 'CopyOutlined', 'edit-outlined', 'EditOutlined', 'share-alt-outlined', 'ShareAltOutlined'] for (const param of this.currentAction.paramFields) { - if (param.type === 'list' && ['tags', 'hosttags', 'storagetags', 'files'].includes(param.name)) { + if (param.type === 'list' && ['tags', 'hosttags', 'storagetags', 'storageaccessgroups', 'files'].includes(param.name)) { param.type = 'string' } this.setRules(param) @@ -1586,7 +1586,7 @@ export default { } break } - if (input === '' && !['tags', 'hosttags', 'storagetags', 'dns2', 'ip6dns1', + if (input === '' && !['tags', 'hosttags', 'storagetags', 'storageaccessgroups', 'dns2', 'ip6dns1', 'ip6dns2', 'internaldns2', 'networkdomain', 'secretkey'].includes(key)) { break } diff --git a/ui/src/views/infra/ClusterUpdate.vue b/ui/src/views/infra/ClusterUpdate.vue new file mode 100644 index 00000000000..c2f295c008d --- /dev/null +++ b/ui/src/views/infra/ClusterUpdate.vue @@ -0,0 +1,234 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ + + + + + diff --git a/ui/src/views/infra/HostInfo.vue b/ui/src/views/infra/HostInfo.vue index 259445154a0..994c6f18b59 100644 --- a/ui/src/views/infra/HostInfo.vue +++ b/ui/src/views/infra/HostInfo.vue @@ -70,6 +70,14 @@ + +
+ {{ $t('label.storageaccessgroups') }} +
+ {{ host.storageaccessgroups }} +
+
+
{{ $t('label.oscategoryid') }} diff --git a/ui/src/views/infra/HostUpdate.vue b/ui/src/views/infra/HostUpdate.vue index aeb2a3c92a6..bb3cfea3bd9 100644 --- a/ui/src/views/infra/HostUpdate.vue +++ b/ui/src/views/infra/HostUpdate.vue @@ -45,6 +45,25 @@ + + + + + {{ opt }} + + +