Introducing Storage Access Groups for better management of host and storage connections (#10381)

* Introducing Storage Access Groups to define the host and storage pool connections

In CloudStack, when a primary storage is added at the Zone or Cluster scope, it is by default connected to all hosts within that scope. This default behavior can be refined using storage access groups, which allow operators to control and limit which hosts can access specific storage pools.

Storage access groups can be assigned to hosts, clusters, pods, zones, and primary storage pools. When a storage access group is set on a cluster/pod/zone, all hosts within that scope inherit the group. Connectivity between a host and a storage pool is then governed by whether they share the same storage access group.

A storage pool with a storage access group will connect only to hosts that have the same storage access group. A storage pool without a storage access group will connect to all hosts, whether or not they have a storage access group assigned.
This commit is contained in:
Harikrishna 2025-05-19 11:33:29 +05:30 committed by GitHub
parent d5ba23c848
commit b17808bfba
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
127 changed files with 5703 additions and 357 deletions

View File

@ -201,11 +201,12 @@ public interface ConfigurationService {
* TODO
* @param allocationState
* TODO
* @param storageAccessGroups
* @return the new pod if successful, null otherwise
* @throws
* @throws
*/
Pod createPod(long zoneId, String name, String startIp, String endIp, String gateway, String netmask, String allocationState);
Pod createPod(long zoneId, String name, String startIp, String endIp, String gateway, String netmask, String allocationState, List<String> storageAccessGroups);
/**
* Creates a mutual exclusive IP range in the pod with same gateway, netmask.

View File

@ -43,4 +43,6 @@ public interface Pod extends InfrastructureEntity, Grouping, Identity, InternalI
AllocationState getAllocationState();
boolean getExternalDhcp();
String getStorageAccessGroups();
}

View File

@ -465,6 +465,7 @@ public class EventTypes {
public static final String EVENT_ENABLE_PRIMARY_STORAGE = "ENABLE.PS";
public static final String EVENT_DISABLE_PRIMARY_STORAGE = "DISABLE.PS";
public static final String EVENT_SYNC_STORAGE_POOL = "SYNC.STORAGE.POOL";
public static final String EVENT_CONFIGURE_STORAGE_ACCESS = "CONFIGURE.STORAGE.ACCESS";
public static final String EVENT_CHANGE_STORAGE_POOL_SCOPE = "CHANGE.STORAGE.POOL.SCOPE";
// VPN

View File

@ -213,4 +213,6 @@ public interface Host extends StateObject<Status>, Identity, Partition, HAResour
ResourceState getResourceState();
CPU.CPUArch getArch();
String getStorageAccessGroups();
}

View File

@ -41,4 +41,6 @@ public interface Cluster extends Grouping, Partition {
ManagedState getManagedState();
CPU.CPUArch getArch();
String getStorageAccessGroups();
}

View File

@ -95,4 +95,11 @@ public interface ResourceService {
boolean releaseHostReservation(Long hostId);
void updatePodStorageAccessGroups(long podId, List<String> newStorageAccessGroups);
void updateZoneStorageAccessGroups(long zoneId, List<String> newStorageAccessGroups);
void updateClusterStorageAccessGroups(Long clusterId, List<String> newStorageAccessGroups);
void updateHostStorageAccessGroups(Long hostId, List<String> newStorageAccessGroups);
}

View File

@ -22,6 +22,7 @@ import java.util.Map;
import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd;
import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd;
import org.apache.cloudstack.api.command.admin.storage.ConfigureStorageAccessCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd;
@ -99,6 +100,8 @@ public interface StorageService {
StoragePool disablePrimaryStoragePool(Long id);
boolean configureStorageAccess(ConfigureStorageAccessCmd cmd);
StoragePool getStoragePool(long id);
boolean deleteImageStore(DeleteImageStoreCmd cmd);

View File

@ -496,6 +496,11 @@ public class ApiConstants {
public static final String SYSTEM_VM_TYPE = "systemvmtype";
public static final String TAGS = "tags";
public static final String STORAGE_TAGS = "storagetags";
public static final String STORAGE_ACCESS_GROUPS = "storageaccessgroups";
public static final String STORAGE_ACCESS_GROUP = "storageaccessgroup";
public static final String CLUSTER_STORAGE_ACCESS_GROUPS = "clusterstorageaccessgroups";
public static final String POD_STORAGE_ACCESS_GROUPS = "podstorageaccessgroups";
public static final String ZONE_STORAGE_ACCESS_GROUPS = "zonestorageaccessgroups";
public static final String SUCCESS = "success";
public static final String SUITABLE_FOR_VM = "suitableforvirtualmachine";
public static final String SUPPORTS_STORAGE_SNAPSHOT = "supportsstoragesnapshot";

View File

@ -310,6 +310,8 @@ public interface ResponseGenerator {
PodResponse createPodResponse(Pod pod, Boolean showCapacities);
PodResponse createMinimalPodResponse(Pod pod);
ZoneResponse createZoneResponse(ResponseView view, DataCenter dataCenter, Boolean showCapacities, Boolean showResourceIcon);
DataCenterGuestIpv6PrefixResponse createDataCenterGuestIpv6PrefixResponse(DataCenterGuestIpv6Prefix prefix);
@ -324,6 +326,8 @@ public interface ResponseGenerator {
ClusterResponse createClusterResponse(Cluster cluster, Boolean showCapacities);
ClusterResponse createMinimalClusterResponse(Cluster cluster);
FirewallRuleResponse createPortForwardingRuleResponse(PortForwardingRule fwRule);
IpForwardingRuleResponse createIpForwardingRuleResponse(StaticNatRule fwRule);

View File

@ -118,6 +118,12 @@ public class AddClusterCmd extends BaseCmd {
private String ovm3cluster;
@Parameter(name = ApiConstants.OVM3_VIP, type = CommandType.STRING, required = false, description = "Ovm3 vip to use for pool (and cluster)")
private String ovm3vip;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS,
type = CommandType.LIST, collectionType = CommandType.STRING,
description = "comma separated list of storage access groups for the hosts in the cluster",
since = "4.21.0")
private List<String> storageAccessGroups;
public String getOvm3Pool() {
return ovm3pool;
}
@ -192,6 +198,10 @@ public class AddClusterCmd extends BaseCmd {
this.clusterType = type;
}
public List<String> getStorageAccessGroups() {
return storageAccessGroups;
}
@Override
public long getEntityOwnerId() {
return Account.ACCOUNT_ID_SYSTEM;

View File

@ -74,6 +74,11 @@ public class ListClustersCmd extends BaseListCmd {
since = "4.20.1")
private String arch;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUP, type = CommandType.STRING,
description = "the name of the storage access group",
since = "4.21.0")
private String storageAccessGroup;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -122,6 +127,18 @@ public class ListClustersCmd extends BaseListCmd {
return StringUtils.isBlank(arch) ? null : CPU.CPUArch.fromType(arch);
}
public String getStorageAccessGroup() {
return storageAccessGroup;
}
public ListClustersCmd() {
}
public ListClustersCmd(String storageAccessGroup) {
this.storageAccessGroup = storageAccessGroup;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@ -130,7 +130,7 @@ public class UpdateClusterCmd extends BaseCmd {
}
Cluster result = _resourceService.updateCluster(this);
if (result != null) {
ClusterResponse clusterResponse = _responseGenerator.createClusterResponse(cluster, false);
ClusterResponse clusterResponse = _responseGenerator.createClusterResponse(result, false);
clusterResponse.setResponseName(getCommandName());
this.setResponseObject(clusterResponse);
} else {

View File

@ -75,6 +75,12 @@ public class AddHostCmd extends BaseCmd {
@Parameter(name = ApiConstants.HOST_TAGS, type = CommandType.LIST, collectionType = CommandType.STRING, description = "list of tags to be added to the host")
private List<String> hostTags;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS,
type = CommandType.LIST, collectionType = CommandType.STRING,
description = "comma separated list of storage access groups for the host",
since = "4.21.0")
private List<String> storageAccessGroups;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -115,6 +121,10 @@ public class AddHostCmd extends BaseCmd {
return hostTags;
}
public List<String> getStorageAccessGroups() {
return storageAccessGroups;
}
public String getAllocationState() {
return allocationState;
}

View File

@ -113,6 +113,11 @@ public class ListHostsCmd extends BaseListCmd {
@Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, description = "CPU Arch of the host", since = "4.20.1")
private String arch;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUP, type = CommandType.STRING,
description = "the name of the storage access group",
since = "4.21.0")
private String storageAccessGroup;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -205,6 +210,18 @@ public class ListHostsCmd extends BaseListCmd {
return StringUtils.isBlank(arch) ? null : CPU.CPUArch.fromType(arch);
}
public String getStorageAccessGroup() {
return storageAccessGroup;
}
public ListHostsCmd() {
}
public ListHostsCmd(String storageAccessGroup) {
this.storageAccessGroup = storageAccessGroup;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@ -30,6 +30,8 @@ import org.apache.cloudstack.api.response.ZoneResponse;
import com.cloud.dc.Pod;
import com.cloud.user.Account;
import java.util.List;
@APICommand(name = "createPod", description = "Creates a new Pod.", responseObject = PodResponse.class,
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class CreatePodCmd extends BaseCmd {
@ -63,6 +65,12 @@ public class CreatePodCmd extends BaseCmd {
@Parameter(name = ApiConstants.ALLOCATION_STATE, type = CommandType.STRING, description = "Allocation state of this Pod for allocation of new resources")
private String allocationState;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS,
type = CommandType.LIST, collectionType = CommandType.STRING,
description = "comma separated list of storage access groups for the hosts in the pod",
since = "4.21.0")
private List<String> storageAccessGroups;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -95,6 +103,10 @@ public class CreatePodCmd extends BaseCmd {
return allocationState;
}
public List<String> getStorageAccessGroups() {
return storageAccessGroups;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@ -111,7 +123,7 @@ public class CreatePodCmd extends BaseCmd {
@Override
public void execute() {
Pod result = _configService.createPod(getZoneId(), getPodName(), getStartIp(), getEndIp(), getGateway(), getNetmask(), getAllocationState());
Pod result = _configService.createPod(getZoneId(), getPodName(), getStartIp(), getEndIp(), getGateway(), getNetmask(), getAllocationState(), getStorageAccessGroups());
if (result != null) {
PodResponse response = _responseGenerator.createPodResponse(result, false);
response.setResponseName(getCommandName());

View File

@ -55,6 +55,11 @@ public class ListPodsByCmd extends BaseListCmd {
@Parameter(name = ApiConstants.SHOW_CAPACITIES, type = CommandType.BOOLEAN, description = "flag to display the capacity of the pods")
private Boolean showCapacities;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUP, type = CommandType.STRING,
description = "the name of the storage access group",
since = "4.21.0")
private String storageAccessGroup;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -79,6 +84,18 @@ public class ListPodsByCmd extends BaseListCmd {
return showCapacities;
}
public String getStorageAccessGroup() {
return storageAccessGroup;
}
public ListPodsByCmd() {
}
public ListPodsByCmd(String storageAccessGroup) {
this.storageAccessGroup = storageAccessGroup;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@ -0,0 +1,135 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.storage;
import java.util.List;
import com.cloud.event.EventTypes;
import org.apache.cloudstack.api.ApiCommandResourceType;
import org.apache.cloudstack.api.BaseAsyncCmd;
import org.apache.cloudstack.api.response.ClusterResponse;
import org.apache.cloudstack.api.response.HostResponse;
import org.apache.cloudstack.api.response.PodResponse;
import org.apache.cloudstack.api.response.StoragePoolResponse;
import org.apache.cloudstack.api.response.SuccessResponse;
import org.apache.cloudstack.api.response.ZoneResponse;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import com.cloud.user.Account;
/**
 * API command to configure storage access groups on a zone, pod, cluster, host,
 * or primary storage pool. Which entity is targeted depends on which of the
 * mutually optional id parameters is supplied; the server-side implementation
 * ({@code StorageService#configureStorageAccess}) resolves the target and
 * adjusts storage-pool/host connections accordingly.
 */
@APICommand(name = "configureStorageAccess", description = "Configure the storage access groups on zone/pod/cluster/host and storage, accordingly connections to the storage pools", responseObject = SuccessResponse.class, since = "4.21.0",
        requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class ConfigureStorageAccessCmd extends BaseAsyncCmd {

    /////////////////////////////////////////////////////
    //////////////// API parameters /////////////////////
    /////////////////////////////////////////////////////

    @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, description = "UUID of the zone")
    private Long zoneId;

    @Parameter(name = ApiConstants.POD_ID, type = CommandType.UUID, entityType = PodResponse.class, description = "UUID of the pod")
    private Long podId;

    @Parameter(name = ApiConstants.CLUSTER_ID, type = CommandType.UUID, entityType = ClusterResponse.class, description = "UUID of the cluster")
    private Long clusterId;

    @Parameter(name = ApiConstants.HOST_ID, type = CommandType.UUID, entityType = HostResponse.class, description = "UUID of the host")
    private Long hostId;

    @Parameter(name = ApiConstants.STORAGE_ID, type = CommandType.UUID, entityType = StoragePoolResponse.class, description = "UUID of the Storage Pool")
    private Long storageId;

    @Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS, type = CommandType.LIST, collectionType = CommandType.STRING,
            description = "comma separated list of storage access groups for connecting the storage pools and the hosts",
            since = "4.21.0")
    private List<String> storageAccessGroups;

    /////////////////////////////////////////////////////
    /////////////////// Accessors ///////////////////////
    /////////////////////////////////////////////////////

    public Long getZoneId() {
        return zoneId;
    }

    public Long getPodId() {
        return podId;
    }

    public Long getClusterId() {
        return clusterId;
    }

    public Long getHostId() {
        return hostId;
    }

    public Long getStorageId() {
        return storageId;
    }

    public List<String> getStorageAccessGroups() {
        return storageAccessGroups;
    }

    /////////////////////////////////////////////////////
    /////////////// API Implementation///////////////////
    /////////////////////////////////////////////////////

    @Override
    public long getEntityOwnerId() {
        return Account.ACCOUNT_ID_SYSTEM;
    }

    @Override
    public ApiCommandResourceType getApiResourceType() {
        return ApiCommandResourceType.StoragePool;
    }

    @Override
    public void execute() {
        boolean result;
        try {
            result = _storageService.configureStorageAccess(this);
        } catch (Exception e) {
            // Log at error level so a failed reconfiguration is visible in the
            // management server log even when debug logging is disabled.
            logger.error("Failed to configure storage access ", e);
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to configure storage access, " + e.getMessage());
        }
        // The "false" result is handled outside the try block so the
        // ServerApiException below is not caught and re-wrapped (which would
        // duplicate the error message in the API response).
        if (result) {
            SuccessResponse response = new SuccessResponse(getCommandName());
            setResponseObject(response);
        } else {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to configure storage access");
        }
    }

    @Override
    public String getEventType() {
        return EventTypes.EVENT_CONFIGURE_STORAGE_ACCESS;
    }

    @Override
    public String getEventDescription() {
        return "configuring storage access groups";
    }
}

View File

@ -61,6 +61,10 @@ public class CreateStoragePoolCmd extends BaseCmd {
@Parameter(name = ApiConstants.TAGS, type = CommandType.STRING, description = "the tags for the storage pool")
private String tags;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS, type = CommandType.STRING,
description = "comma separated list of storage access groups for connecting to hosts having those specific groups", since = "4.21.0")
private String storageAccessGroups;
@Parameter(name = ApiConstants.URL, type = CommandType.STRING, required = true, description = "the URL of the storage pool")
private String url;
@ -115,6 +119,10 @@ public class CreateStoragePoolCmd extends BaseCmd {
return tags;
}
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public String getUrl() {
return url;
}

View File

@ -0,0 +1,65 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.api.command.admin.storage;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.response.StorageAccessGroupResponse;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiCommandResourceType;
import org.apache.cloudstack.api.BaseListCmd;
import org.apache.cloudstack.api.response.ListResponse;
/**
 * API command to list storage access groups, optionally filtered by name.
 * Delegates the search to {@code _queryService.searchForStorageAccessGroups}.
 */
@APICommand(name = "listStorageAccessGroups", description = "Lists storage access groups", responseObject = StorageAccessGroupResponse.class, since = "4.21.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class ListStorageAccessGroupsCmd extends BaseListCmd {
// ///////////////////////////////////////////////////
// ////////////// API parameters /////////////////////
// ///////////////////////////////////////////////////
// Optional exact-name filter; when absent all groups are listed.
@Parameter(name = ApiConstants.NAME, type = BaseCmd.CommandType.STRING, description = "Name of the Storage access group")
private String name;
// ///////////////////////////////////////////////////
// ///////////////// Accessors ///////////////////////
// ///////////////////////////////////////////////////
public String getName() {
return name;
}
// ///////////////////////////////////////////////////
// ///////////// API Implementation///////////////////
// ///////////////////////////////////////////////////
@Override
public ApiCommandResourceType getApiResourceType() {
// Storage access groups are a storage-pool-level concept; reuse the
// StoragePool resource type for event/ACL accounting.
return ApiCommandResourceType.StoragePool;
}
@Override
public void execute() {
// Query service performs the actual lookup; this command only wires the
// response name and hands the result back to the API layer.
ListResponse<StorageAccessGroupResponse> response = _queryService.searchForStorageAccessGroups(this);
response.setResponseName(getCommandName());
setResponseObject(response);
}
}

View File

@ -41,7 +41,7 @@ public class ListStoragePoolsCmd extends BaseListCmd {
@Parameter(name = ApiConstants.CLUSTER_ID,
type = CommandType.UUID,
entityType = ClusterResponse.class,
description = "list storage pools belongig to the specific cluster")
description = "list storage pools belonging to the specific cluster")
private Long clusterId;
@Parameter(name = ApiConstants.IP_ADDRESS, type = CommandType.STRING, description = "the IP address for the storage pool")
@ -74,6 +74,10 @@ public class ListStoragePoolsCmd extends BaseListCmd {
@Parameter(name = ApiConstants.STORAGE_CUSTOM_STATS, type = CommandType.BOOLEAN, description = "If true, lists the custom stats of the storage pool", since = "4.18.1")
private Boolean customStats;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUP, type = CommandType.STRING, description = "the name of the storage access group", since = "4.21.0")
private String storageAccessGroup;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -134,6 +138,17 @@ public class ListStoragePoolsCmd extends BaseListCmd {
return customStats != null && customStats;
}
public String getStorageAccessGroup() {
return storageAccessGroup;
}
public ListStoragePoolsCmd() {
}
public ListStoragePoolsCmd(String storageAccessGroup) {
this.storageAccessGroup = storageAccessGroup;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@ -31,6 +31,8 @@ import org.apache.cloudstack.context.CallContext;
import com.cloud.dc.DataCenter;
import com.cloud.user.Account;
import java.util.List;
@APICommand(name = "createZone", description = "Creates a Zone.", responseObject = ZoneResponse.class,
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class CreateZoneCmd extends BaseCmd {
@ -88,6 +90,11 @@ public class CreateZoneCmd extends BaseCmd {
@Parameter(name = ApiConstants.IS_EDGE, type = CommandType.BOOLEAN, description = "true if the zone is an edge zone, false otherwise", since = "4.18.0")
private Boolean isEdge;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUPS,
type = CommandType.LIST, collectionType = CommandType.STRING,
description = "comma separated list of storage access groups for the hosts in the zone",
since = "4.21.0")
private List<String> storageAccessGroups;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
@ -162,6 +169,10 @@ public class CreateZoneCmd extends BaseCmd {
return isEdge;
}
public List<String> getStorageAccessGroups() {
return storageAccessGroups;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
@Override

View File

@ -69,6 +69,11 @@ public class ListZonesCmd extends BaseListCmd implements UserCmd {
@Parameter(name = ApiConstants.SHOW_RESOURCE_ICON, type = CommandType.BOOLEAN, description = "flag to display the resource image for the zones")
private Boolean showIcon;
@Parameter(name = ApiConstants.STORAGE_ACCESS_GROUP, type = CommandType.STRING,
description = "the name of the storage access group",
since = "4.21.0")
private String storageAccessGroup;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -109,6 +114,18 @@ public class ListZonesCmd extends BaseListCmd implements UserCmd {
return showIcon != null ? showIcon : false;
}
public String getStorageAccessGroup() {
return storageAccessGroup;
}
public ListZonesCmd() {
}
public ListZonesCmd(String storageAccessGroup) {
this.storageAccessGroup = storageAccessGroup;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@ -95,6 +95,18 @@ public class ClusterResponse extends BaseResponseWithAnnotations {
@Param(description = "CPU Arch of the hosts in the cluster", since = "4.20")
private String arch;
@SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups for the host", since = "4.21.0")
private String storageAccessGroups;
@SerializedName(ApiConstants.POD_STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups on the pod", since = "4.21.0")
private String podStorageAccessGroups;
@SerializedName(ApiConstants.ZONE_STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups on the zone", since = "4.21.0")
private String zoneStorageAccessGroups;
public String getId() {
return id;
}
@ -259,4 +271,28 @@ public class ClusterResponse extends BaseResponseWithAnnotations {
public String getArch() {
return arch;
}
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
public String getPodStorageAccessGroups() {
return podStorageAccessGroups;
}
public void setPodStorageAccessGroups(String podStorageAccessGroups) {
this.podStorageAccessGroups = podStorageAccessGroups;
}
public String getZoneStorageAccessGroups() {
return zoneStorageAccessGroups;
}
public void setZoneStorageAccessGroups(String zoneStorageAccessGroups) {
this.zoneStorageAccessGroups = zoneStorageAccessGroups;
}
}

View File

@ -302,6 +302,22 @@ public class HostResponse extends BaseResponseWithAnnotations {
@Param(description = "CPU Arch of the host", since = "4.20")
private String arch;
@SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups for the host", since = "4.21.0")
private String storageAccessGroups;
@SerializedName(ApiConstants.CLUSTER_STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups on the cluster", since = "4.21.0")
private String clusterStorageAccessGroups;
@SerializedName(ApiConstants.POD_STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups on the pod", since = "4.21.0")
private String podStorageAccessGroups;
@SerializedName(ApiConstants.ZONE_STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups on the zone", since = "4.21.0")
private String zoneStorageAccessGroups;
@Override
public String getObjectId() {
return this.getId();
@ -491,6 +507,38 @@ public class HostResponse extends BaseResponseWithAnnotations {
this.hostTags = hostTags;
}
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
public String getClusterStorageAccessGroups() {
return clusterStorageAccessGroups;
}
public void setClusterStorageAccessGroups(String clusterStorageAccessGroups) {
this.clusterStorageAccessGroups = clusterStorageAccessGroups;
}
public String getPodStorageAccessGroups() {
return podStorageAccessGroups;
}
public void setPodStorageAccessGroups(String podStorageAccessGroups) {
this.podStorageAccessGroups = podStorageAccessGroups;
}
public String getZoneStorageAccessGroups() {
return zoneStorageAccessGroups;
}
public void setZoneStorageAccessGroups(String zoneStorageAccessGroups) {
this.zoneStorageAccessGroups = zoneStorageAccessGroups;
}
public String getExplicitHostTags() {
return explicitHostTags;
}

View File

@ -85,6 +85,14 @@ public class PodResponse extends BaseResponseWithAnnotations {
@Param(description = "the capacity of the Pod", responseObject = CapacityResponse.class)
private List<CapacityResponse> capacities;
@SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups for the pod", since = "4.21.0")
private String storageAccessGroups;
@SerializedName(ApiConstants.ZONE_STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups on the zone", since = "4.21.0")
private String zoneStorageAccessGroups;
public String getId() {
return id;
}
@ -184,4 +192,20 @@ public class PodResponse extends BaseResponseWithAnnotations {
public void setCapacities(List<CapacityResponse> capacities) {
this.capacities = capacities;
}
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
public String getZoneStorageAccessGroups() {
return zoneStorageAccessGroups;
}
public void setZoneStorageAccessGroups(String zoneStorageAccessGroups) {
this.zoneStorageAccessGroups = zoneStorageAccessGroups;
}
}

View File

@ -80,7 +80,7 @@ public class ServiceOfferingResponse extends BaseResponseWithAnnotations {
@Param(description = "true if the vm needs to be volatile, i.e., on every reboot of vm from API root disk is discarded and creates a new root disk")
private Boolean isVolatile;
@SerializedName("storagetags")
@SerializedName(ApiConstants.STORAGE_TAGS)
@Param(description = "the tags for the service offering")
private String tags;

View File

@ -0,0 +1,108 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.response;
import com.google.gson.annotations.SerializedName;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseResponse;
import com.cloud.serializer.Param;
/**
 * API response object for a storage access group: its id and name plus the
 * hosts, clusters, pods, zones, and storage pools that carry the group.
 * Pure data-transfer object; field names are serialized via the
 * {@code @SerializedName} keys below.
 */
public class StorageAccessGroupResponse extends BaseResponse {
@SerializedName(ApiConstants.ID)
@Param(description = "the ID of the storage access group")
private String id;
@SerializedName(ApiConstants.NAME)
@Param(description = "the name of the storage access group")
private String name;
@SerializedName("hosts")
@Param(description = "List of Hosts in the Storage Access Group")
private ListResponse<HostResponse> hostResponseList;
@SerializedName("clusters")
@Param(description = "List of Clusters in the Storage Access Group")
private ListResponse<ClusterResponse> clusterResponseList;
@SerializedName("pods")
@Param(description = "List of Pods in the Storage Access Group")
private ListResponse<PodResponse> podResponseList;
@SerializedName("zones")
@Param(description = "List of Zones in the Storage Access Group")
private ListResponse<ZoneResponse> zoneResponseList;
@SerializedName("storagepools")
@Param(description = "List of Storage Pools in the Storage Access Group")
private ListResponse<StoragePoolResponse> storagePoolResponseList;
// Plain accessors below; no logic beyond get/set.
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public ListResponse<HostResponse> getHostResponseList() {
return hostResponseList;
}
public void setHostResponseList(ListResponse<HostResponse> hostResponseList) {
this.hostResponseList = hostResponseList;
}
public ListResponse<ClusterResponse> getClusterResponseList() {
return clusterResponseList;
}
public void setClusterResponseList(ListResponse<ClusterResponse> clusterResponseList) {
this.clusterResponseList = clusterResponseList;
}
public ListResponse<PodResponse> getPodResponseList() {
return podResponseList;
}
public void setPodResponseList(ListResponse<PodResponse> podResponseList) {
this.podResponseList = podResponseList;
}
public ListResponse<ZoneResponse> getZoneResponseList() {
return zoneResponseList;
}
public void setZoneResponseList(ListResponse<ZoneResponse> zoneResponseList) {
this.zoneResponseList = zoneResponseList;
}
public ListResponse<StoragePoolResponse> getStoragePoolResponseList() {
return storagePoolResponseList;
}
public void setStoragePoolResponseList(ListResponse<StoragePoolResponse> storagePoolResponseList) {
this.storagePoolResponseList = storagePoolResponseList;
}
}

View File

@ -109,6 +109,10 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations {
@Param(description = "the tags for the storage pool")
private String tags;
@SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS)
@Param(description = "the storage access groups for the storage pool", since = "4.21.0")
private String storageAccessGroups;
@SerializedName(ApiConstants.NFS_MOUNT_OPTIONS)
@Param(description = "the nfs mount options for the storage pool", since = "4.19.1")
private String nfsMountOpts;
@ -344,6 +348,14 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations {
this.tags = tags;
}
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
public Boolean getIsTagARule() {
return isTagARule;
}

View File

@ -95,7 +95,7 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso
@SerializedName("securitygroupsenabled")
@Param(description = "true if security groups support is enabled, false otherwise")
private boolean securityGroupsEnabled;
private Boolean securityGroupsEnabled;
@SerializedName("allocationstate")
@Param(description = "the allocation state of the cluster")
@ -115,7 +115,7 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso
@SerializedName(ApiConstants.LOCAL_STORAGE_ENABLED)
@Param(description = "true if local storage offering enabled, false otherwise")
private boolean localStorageEnabled;
private Boolean localStorageEnabled;
@SerializedName(ApiConstants.TAGS)
@Param(description = "the list of resource tags associated with zone.", responseObject = ResourceTagResponse.class, since = "4.3")
@ -161,11 +161,19 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso
@Param(description = "true, if routed network/vpc is enabled", since = "4.20.1")
private boolean routedModeEnabled = false;
@SerializedName(ApiConstants.STORAGE_ACCESS_GROUPS)
@Param(description = "comma-separated list of storage access groups for the zone", since = "4.21.0")
private String storageAccessGroups;
public ZoneResponse() {
tags = new LinkedHashSet<ResourceTagResponse>();
}
public ZoneResponse(Set<ResourceTagResponse> tags) {
this.tags = tags;
}
public void setId(String id) {
this.id = id;
}
@ -402,6 +410,14 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso
return type;
}
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
public void setNsxEnabled(boolean nsxEnabled) {
this.nsxEnabled = nsxEnabled;
}

View File

@ -32,6 +32,7 @@ import org.apache.cloudstack.api.command.admin.router.ListRoutersCmd;
import org.apache.cloudstack.api.command.admin.storage.ListImageStoresCmd;
import org.apache.cloudstack.api.command.admin.storage.ListObjectStoragePoolsCmd;
import org.apache.cloudstack.api.command.admin.storage.ListSecondaryStagingStoresCmd;
import org.apache.cloudstack.api.command.admin.storage.ListStorageAccessGroupsCmd;
import org.apache.cloudstack.api.command.admin.storage.ListStoragePoolsCmd;
import org.apache.cloudstack.api.command.admin.storage.ListStorageTagsCmd;
import org.apache.cloudstack.api.command.admin.storage.heuristics.ListSecondaryStorageSelectorsCmd;
@ -87,6 +88,7 @@ import org.apache.cloudstack.api.response.SecondaryStorageHeuristicsResponse;
import org.apache.cloudstack.api.response.SecurityGroupResponse;
import org.apache.cloudstack.api.response.ServiceOfferingResponse;
import org.apache.cloudstack.api.response.SnapshotResponse;
import org.apache.cloudstack.api.response.StorageAccessGroupResponse;
import org.apache.cloudstack.api.response.StoragePoolResponse;
import org.apache.cloudstack.api.response.StorageTagResponse;
import org.apache.cloudstack.api.response.TemplateResponse;
@ -197,6 +199,8 @@ public interface QueryService {
ListResponse<StorageTagResponse> searchForStorageTags(ListStorageTagsCmd cmd);
ListResponse<StorageAccessGroupResponse> searchForStorageAccessGroups(ListStorageAccessGroupsCmd cmd);
ListResponse<HostTagResponse> searchForHostTags(ListHostTagsCmd cmd);
ListResponse<ManagementServerResponse> listManagementServers(ListMgmtsCmd cmd);

View File

@ -284,6 +284,11 @@ public class CheckOnHostCommandTest {
public CPU.CPUArch getArch() {
return CPU.CPUArch.amd64;
}
@Override
public String getStorageAccessGroups() {
return null;
}
};
CheckOnHostCommand cohc = new CheckOnHostCommand(host);

View File

@ -30,6 +30,7 @@ public class PrimaryDataStoreParameters {
private String providerName;
private Map<String, String> details;
private String tags;
private String storageAccessGroups;
private StoragePoolType type;
private HypervisorType hypervisorType;
private String host;
@ -165,6 +166,21 @@ public class PrimaryDataStoreParameters {
this.tags = tags;
}
/**
* @return the storageAccessGroups
*/
public String getStorageAccessGroups() {
return storageAccessGroups;
}
/**
* @param storageAccessGroups
* the storageAccessGroups to set
*/
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
/**
* @return the details
*/

View File

@ -64,4 +64,5 @@ public interface StoragePoolAllocator extends Adapter {
static int RETURN_UPTO_ALL = -1;
List<StoragePool> reorderPools(List<StoragePool> pools, VirtualMachineProfile vmProfile, DeploymentPlan plan, DiskProfile dskCh);
}

View File

@ -147,12 +147,12 @@ public interface ConfigurationManager {
* @param startIp
* @param endIp
* @param allocationState
* @param skipGatewayOverlapCheck
* (true if it is ok to not validate that gateway IP address overlap with Start/End IP of the POD)
* @param skipGatewayOverlapCheck (true if it is ok to not validate that gateway IP address overlap with Start/End IP of the POD)
* @param storageAccessGroups
* @return Pod
*/
HostPodVO createPod(long userId, String podName, DataCenter zone, String gateway, String cidr, String startIp, String endIp, String allocationState,
boolean skipGatewayOverlapCheck);
boolean skipGatewayOverlapCheck, List<String> storageAccessGroups);
/**
* Creates a new zone
@ -170,13 +170,14 @@ public interface ConfigurationManager {
* @param isSecurityGroupEnabled
* @param ip6Dns1
* @param ip6Dns2
* @param storageAccessGroups
* @return
* @throws
* @throws
*/
DataCenterVO createZone(long userId, String zoneName, String dns1, String dns2, String internalDns1, String internalDns2, String guestCidr, String domain,
Long domainId, NetworkType zoneType, String allocationState, String networkDomain, boolean isSecurityGroupEnabled, boolean isLocalStorageEnabled, String ip6Dns1,
String ip6Dns2, boolean isEdge);
String ip6Dns2, boolean isEdge, List<String> storageAccessGroups);
/**
* Deletes a VLAN from the database, along with all of its IP addresses. Will not delete VLANs that have allocated

View File

@ -21,6 +21,8 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
@ -236,4 +238,12 @@ public interface ResourceManager extends ResourceService, Configurable {
HostVO findOneRandomRunningHostByHypervisor(HypervisorType type, Long dcId);
boolean cancelMaintenance(final long hostId);
void updateStoragePoolConnectionsOnHosts(Long poolId, List<String> storageAccessGroups);
List<HostVO> getEligibleUpHostsInClusterForStorageConnection(PrimaryDataStoreInfo primaryStore);
List<HostVO> getEligibleUpAndEnabledHostsInClusterForStorageConnection(PrimaryDataStoreInfo primaryStore);
List<HostVO> getEligibleUpAndEnabledHostsInZoneForStorageConnection(DataStore dataStore, long zoneId, HypervisorType hypervisorType);
}

View File

@ -410,4 +410,9 @@ public interface StorageManager extends StorageService {
void validateChildDatastoresToBeAddedInUpState(StoragePoolVO datastoreClusterPool, List<ModifyStoragePoolAnswer> childDatastoreAnswerList);
boolean checkIfHostAndStoragePoolHasCommonStorageAccessGroups(Host host, StoragePool pool);
Pair<Boolean, String> checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(StoragePool destPool, Volume volume);
String[] getStorageAccessGroups(Long zoneId, Long podId, Long clusterId, Long hostId);
}

View File

@ -114,6 +114,9 @@ public class EngineClusterVO implements EngineCluster, Identity {
@Column(name = "engine_state", updatable = true, nullable = false, length = 32)
protected State state = null;
@Column(name = "storage_access_groups")
String storageAccessGroups;
public EngineClusterVO() {
clusterType = Cluster.ClusterType.CloudManaged;
allocationState = Grouping.AllocationState.Enabled;
@ -176,6 +179,11 @@ public class EngineClusterVO implements EngineCluster, Identity {
return managedState;
}
@Override
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setManagedState(ManagedState managedState) {
this.managedState = managedState;
}

View File

@ -89,6 +89,9 @@ public class EngineHostPodVO implements EnginePod, Identity {
@Temporal(value = TemporalType.TIMESTAMP)
protected Date lastUpdated;
@Column(name = "storage_access_groups")
String storageAccessGroups;
/**
* Note that state is intentionally missing the setter. Any updates to
* the state machine needs to go through the DAO object because someone
@ -202,6 +205,11 @@ public class EngineHostPodVO implements EnginePod, Identity {
return externalDhcp;
}
@Override
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setExternalDhcp(boolean use) {
externalDhcp = use;
}

View File

@ -405,6 +405,9 @@ public class EngineHostVO implements EngineHost, Identity {
@Column(name = "engine_state", updatable = true, nullable = false, length = 32)
protected State orchestrationState = null;
@Column(name = "storage_access_groups")
private String storageAccessGroups = null;
public EngineHostVO(String guid) {
this.guid = guid;
this.status = Status.Creating;
@ -807,4 +810,13 @@ public class EngineHostVO implements EngineHost, Identity {
public PartitionType partitionType() {
return PartitionType.Host;
}
@Override
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
}

View File

@ -85,6 +85,10 @@ public class ClusterVO implements Cluster {
@Column(name = "uuid")
String uuid;
@Column(name = "storage_access_groups")
String storageAccessGroups;
public ClusterVO() {
clusterType = Cluster.ClusterType.CloudManaged;
allocationState = Grouping.AllocationState.Enabled;
@ -215,6 +219,14 @@ public class ClusterVO implements Cluster {
this.arch = arch;
}
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
@Override
public String toString() {
return String.format("Cluster {id: \"%s\", name: \"%s\", uuid: \"%s\"}", id, name, uuid);

View File

@ -142,6 +142,9 @@ public class DataCenterVO implements DataCenter {
@Enumerated(value = EnumType.STRING)
private DataCenter.Type type;
@Column(name = "storage_access_groups")
String storageAccessGroups;
@Override
public String getDnsProvider() {
return dnsProvider;
@ -485,6 +488,14 @@ public class DataCenterVO implements DataCenter {
this.type = type;
}
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
@Override
public String toString() {
return String.format("Zone {\"id\": \"%s\", \"name\": \"%s\", \"uuid\": \"%s\"}", id, name, uuid);

View File

@ -71,6 +71,9 @@ public class HostPodVO implements Pod {
@Column(name = "uuid")
private String uuid;
@Column(name = "storage_access_groups")
String storageAccessGroups;
public HostPodVO(String name, long dcId, String gateway, String cidrAddress, int cidrSize, String description) {
this.name = name;
this.dataCenterId = dcId;
@ -199,6 +202,14 @@ public class HostPodVO implements Pod {
this.uuid = uuid;
}
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
@Override
public String toString() {
return String.format("HostPod %s",

View File

@ -57,4 +57,6 @@ public interface ClusterDao extends GenericDao<ClusterVO, Long> {
List<CPU.CPUArch> getClustersArchsByZone(long zoneId);
List<ClusterVO> listClustersByArchAndZoneId(long zoneId, CPU.CPUArch arch);
List<String> listDistinctStorageAccessGroups(String name, String keyword);
}

View File

@ -346,4 +346,36 @@ public class ClusterDaoImpl extends GenericDaoBase<ClusterVO, Long> implements C
sc.setParameters("arch", arch);
return listBy(sc);
}
/**
 * Lists the distinct values of the clusters' comma-separated
 * storage_access_groups column.
 *
 * When {@code name} is given, matches rows whose comma-separated list contains
 * it as an element: exact match, prefix ("name,%"), suffix ("%,name") or
 * middle ("%,name,%"). When {@code keyword} is given, matches any row whose
 * column contains the keyword as a substring.
 *
 * NOTE(review): DISTINCT is applied to the whole column value, so the result
 * contains raw comma-separated strings, not individual group names — callers
 * presumably split/aggregate them; verify at the call site.
 */
@Override
public List<String> listDistinctStorageAccessGroups(String name, String keyword) {
GenericSearchBuilder<ClusterVO, String> searchBuilder = createSearchBuilder(String.class);
searchBuilder.select(null, SearchCriteria.Func.DISTINCT, searchBuilder.entity().getStorageAccessGroups());
if (name != null) {
// Open an OR-group covering the four list-membership positions.
searchBuilder.and().op("storageAccessGroupExact", searchBuilder.entity().getStorageAccessGroups(), Op.EQ);
searchBuilder.or("storageAccessGroupPrefix", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
searchBuilder.or("storageAccessGroupSuffix", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
searchBuilder.or("storageAccessGroupMiddle", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
searchBuilder.cp();
}
if (keyword != null) {
searchBuilder.and("keyword", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
}
searchBuilder.done();
SearchCriteria<String> sc = searchBuilder.create();
if (name != null) {
sc.setParameters("storageAccessGroupExact", name);
sc.setParameters("storageAccessGroupPrefix", name + ",%");
sc.setParameters("storageAccessGroupSuffix", "%," + name);
sc.setParameters("storageAccessGroupMiddle", "%," + name + ",%");
}
if (keyword != null) {
sc.setParameters("keyword", "%" + keyword + "%");
}
return customSearch(sc, null);
}
}

View File

@ -117,4 +117,6 @@ public interface DataCenterDao extends GenericDao<DataCenterVO, Long> {
List<DataCenterVO> listAllZones();
List<DataCenterVO> listByIds(List<Long> ids);
List<String> listDistinctStorageAccessGroups(String name, String keyword);
}

View File

@ -25,6 +25,7 @@ import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.utils.db.GenericSearchBuilder;
import org.apache.commons.collections.CollectionUtils;
import org.springframework.stereotype.Component;
@ -441,4 +442,36 @@ public class DataCenterDaoImpl extends GenericDaoBase<DataCenterVO, Long> implem
sc.setParameters("ids", ids.toArray());
return listBy(sc);
}
/**
 * Lists the distinct values of the zones' comma-separated
 * storage_access_groups column.
 *
 * When {@code name} is given, matches rows whose comma-separated list contains
 * it as an element: exact match, prefix ("name,%"), suffix ("%,name") or
 * middle ("%,name,%"). When {@code keyword} is given, matches any row whose
 * column contains the keyword as a substring.
 *
 * NOTE(review): DISTINCT is applied to the whole column value, so the result
 * contains raw comma-separated strings, not individual group names — callers
 * presumably split/aggregate them; verify at the call site.
 */
@Override
public List<String> listDistinctStorageAccessGroups(String name, String keyword) {
GenericSearchBuilder<DataCenterVO, String> searchBuilder = createSearchBuilder(String.class);
searchBuilder.select(null, SearchCriteria.Func.DISTINCT, searchBuilder.entity().getStorageAccessGroups());
if (name != null) {
// Open an OR-group covering the four list-membership positions.
searchBuilder.and().op("storageAccessGroupExact", searchBuilder.entity().getStorageAccessGroups(), SearchCriteria.Op.EQ);
searchBuilder.or("storageAccessGroupPrefix", searchBuilder.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE);
searchBuilder.or("storageAccessGroupSuffix", searchBuilder.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE);
searchBuilder.or("storageAccessGroupMiddle", searchBuilder.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE);
searchBuilder.cp();
}
if (keyword != null) {
searchBuilder.and("keyword", searchBuilder.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE);
}
searchBuilder.done();
SearchCriteria<String> sc = searchBuilder.create();
if (name != null) {
sc.setParameters("storageAccessGroupExact", name);
sc.setParameters("storageAccessGroupPrefix", name + ",%");
sc.setParameters("storageAccessGroupSuffix", "%," + name);
sc.setParameters("storageAccessGroupMiddle", "%," + name + ",%");
}
if (keyword != null) {
sc.setParameters("keyword", "%" + keyword + "%");
}
return customSearch(sc, null);
}
}

View File

@ -34,4 +34,6 @@ public interface HostPodDao extends GenericDao<HostPodVO, Long> {
public List<Long> listAllPods(Long zoneId);
public List<HostPodVO> listAllPodsByCidr(long zoneId, String cidr);
List<String> listDistinctStorageAccessGroups(String name, String keyword);
}

View File

@ -143,4 +143,36 @@ public class HostPodDaoImpl extends GenericDaoBase<HostPodVO, Long> implements H
return listBy(sc);
}
/**
 * Lists the distinct values of the pods' comma-separated
 * storage_access_groups column.
 *
 * When {@code name} is given, matches rows whose comma-separated list contains
 * it as an element: exact match, prefix ("name,%"), suffix ("%,name") or
 * middle ("%,name,%"). When {@code keyword} is given, matches any row whose
 * column contains the keyword as a substring.
 *
 * NOTE(review): DISTINCT is applied to the whole column value, so the result
 * contains raw comma-separated strings, not individual group names — callers
 * presumably split/aggregate them; verify at the call site.
 */
@Override
public List<String> listDistinctStorageAccessGroups(String name, String keyword) {
GenericSearchBuilder<HostPodVO, String> searchBuilder = createSearchBuilder(String.class);
searchBuilder.select(null, SearchCriteria.Func.DISTINCT, searchBuilder.entity().getStorageAccessGroups());
if (name != null) {
// Open an OR-group covering the four list-membership positions.
searchBuilder.and().op("storageAccessGroupExact", searchBuilder.entity().getStorageAccessGroups(), Op.EQ);
searchBuilder.or("storageAccessGroupPrefix", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
searchBuilder.or("storageAccessGroupSuffix", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
searchBuilder.or("storageAccessGroupMiddle", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
searchBuilder.cp();
}
if (keyword != null) {
searchBuilder.and("keyword", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
}
searchBuilder.done();
SearchCriteria<String> sc = searchBuilder.create();
if (name != null) {
sc.setParameters("storageAccessGroupExact", name);
sc.setParameters("storageAccessGroupPrefix", name + ",%");
sc.setParameters("storageAccessGroupSuffix", "%," + name);
sc.setParameters("storageAccessGroupMiddle", "%," + name + ",%");
}
if (keyword != null) {
sc.setParameters("keyword", "%" + keyword + "%");
}
return customSearch(sc, null);
}
}

View File

@ -165,6 +165,9 @@ public class HostVO implements Host {
@Column(name = "uuid")
private String uuid;
@Column(name = "storage_access_groups")
String storageAccessGroups;
// This is a delayed load value. If the value is null,
// then this field has not been loaded yet.
// Call host dao to load it.
@ -357,6 +360,15 @@ public class HostVO implements Host {
return isTagARule;
}
@Override
public String getStorageAccessGroups() {
return storageAccessGroups;
}
public void setStorageAccessGroups(String storageAccessGroups) {
this.storageAccessGroups = storageAccessGroups;
}
public HashMap<String, HashMap<String, VgpuTypesInfo>> getGpuGroupDetails() {
return groupDetails;
}

View File

@ -84,6 +84,10 @@ public interface HostDao extends GenericDao<HostVO, Long>, StateDao<Status, Stat
List<HostVO> findHypervisorHostInCluster(long clusterId);
List<HostVO> findHypervisorHostInPod(long podId);
List<HostVO> findHypervisorHostInZone(long zoneId);
HostVO findAnyStateHypervisorHostInCluster(long clusterId);
HostVO findOldestExistentHypervisorHostInCluster(long clusterId);
@ -96,10 +100,14 @@ public interface HostDao extends GenericDao<HostVO, Long>, StateDao<Status, Stat
List<HostVO> findByPodId(Long podId);
List<HostVO> findByPodId(Long podId, Type type);
List<Long> listIdsByPodId(Long podId);
List<HostVO> findByClusterId(Long clusterId);
List<HostVO> findByClusterId(Long clusterId, Type type);
List<Long> listIdsByClusterId(Long clusterId);
List<Long> listIdsForUpRouting(Long zoneId, Long podId, Long clusterId);
@ -221,4 +229,6 @@ public interface HostDao extends GenericDao<HostVO, Long>, StateDao<Status, Stat
List<HostVO> listByIds(final List<Long> ids);
Long findClusterIdByVolumeInfo(VolumeInfo volumeInfo);
List<String> listDistinctStorageAccessGroups(String name, String keyword);
}

View File

@ -107,7 +107,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
protected SearchBuilder<HostVO> IdStatusSearch;
protected SearchBuilder<HostVO> TypeDcSearch;
protected SearchBuilder<HostVO> TypeDcStatusSearch;
protected SearchBuilder<HostVO> TypeClusterStatusSearch;
protected SearchBuilder<HostVO> TypeStatusStateSearch;
protected SearchBuilder<HostVO> MsStatusSearch;
protected SearchBuilder<HostVO> DcPrivateIpAddressSearch;
protected SearchBuilder<HostVO> DcStorageIpAddressSearch;
@ -266,12 +266,14 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
TypeDcStatusSearch.and("resourceState", TypeDcStatusSearch.entity().getResourceState(), SearchCriteria.Op.EQ);
TypeDcStatusSearch.done();
TypeClusterStatusSearch = createSearchBuilder();
TypeClusterStatusSearch.and("type", TypeClusterStatusSearch.entity().getType(), SearchCriteria.Op.EQ);
TypeClusterStatusSearch.and("cluster", TypeClusterStatusSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
TypeClusterStatusSearch.and("status", TypeClusterStatusSearch.entity().getStatus(), SearchCriteria.Op.EQ);
TypeClusterStatusSearch.and("resourceState", TypeClusterStatusSearch.entity().getResourceState(), SearchCriteria.Op.EQ);
TypeClusterStatusSearch.done();
TypeStatusStateSearch = createSearchBuilder();
TypeStatusStateSearch.and("type", TypeStatusStateSearch.entity().getType(), SearchCriteria.Op.EQ);
TypeStatusStateSearch.and("cluster", TypeStatusStateSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
TypeStatusStateSearch.and("pod", TypeStatusStateSearch.entity().getPodId(), SearchCriteria.Op.EQ);
TypeStatusStateSearch.and("zone", TypeStatusStateSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
TypeStatusStateSearch.and("status", TypeStatusStateSearch.entity().getStatus(), SearchCriteria.Op.EQ);
TypeStatusStateSearch.and("resourceState", TypeStatusStateSearch.entity().getResourceState(), SearchCriteria.Op.EQ);
TypeStatusStateSearch.done();
IdsSearch = createSearchBuilder();
IdsSearch.and("id", IdsSearch.entity().getId(), SearchCriteria.Op.IN);
@ -328,10 +330,12 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
PodSearch = createSearchBuilder();
PodSearch.and("podId", PodSearch.entity().getPodId(), SearchCriteria.Op.EQ);
PodSearch.and("type", PodSearch.entity().getType(), Op.EQ);
PodSearch.done();
ClusterSearch = createSearchBuilder();
ClusterSearch.and("clusterId", ClusterSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
ClusterSearch.and("type", ClusterSearch.entity().getType(), Op.EQ);
ClusterSearch.done();
TypeSearch = createSearchBuilder();
@ -1238,8 +1242,16 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
@Override
public List<HostVO> findByPodId(Long podId) {
return findByPodId(podId, null);
}
/**
 * Lists hosts in the given pod, optionally restricted to a single host type.
 *
 * @param podId the pod to search in
 * @param type  the host type to filter on; {@code null} returns hosts of all types
 * @return the matching hosts (possibly empty)
 */
@Override
public List<HostVO> findByPodId(Long podId, Type type) {
    SearchCriteria<HostVO> sc = PodSearch.create();
    sc.setParameters("podId", podId);
    if (type != null) {
        // Bug fix: was hard-coded to Type.Routing, silently ignoring the
        // caller-supplied type and returning wrong results for any other type.
        sc.setParameters("type", type);
    }
    return listBy(sc);
}
@ -1250,8 +1262,16 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
@Override
public List<HostVO> findByClusterId(Long clusterId) {
return findByClusterId(clusterId, null);
}
/**
 * Lists hosts in the given cluster, optionally restricted to a single host type.
 *
 * @param clusterId the cluster to search in
 * @param type      the host type to filter on; {@code null} returns hosts of all types
 * @return the matching hosts (possibly empty)
 */
@Override
public List<HostVO> findByClusterId(Long clusterId, Type type) {
    SearchCriteria<HostVO> sc = ClusterSearch.create();
    sc.setParameters("clusterId", clusterId);
    if (type != null) {
        // Bug fix: was hard-coded to Type.Routing, silently ignoring the
        // caller-supplied type and returning wrong results for any other type.
        sc.setParameters("type", type);
    }
    return listBy(sc);
}
@ -1355,7 +1375,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
@Override
public List<HostVO> findHypervisorHostInCluster(long clusterId) {
SearchCriteria<HostVO> sc = TypeClusterStatusSearch.create();
SearchCriteria<HostVO> sc = TypeStatusStateSearch.create();
sc.setParameters("type", Host.Type.Routing);
sc.setParameters("cluster", clusterId);
sc.setParameters("status", Status.Up);
@ -1364,9 +1384,31 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
return listBy(sc);
}
/**
 * Lists the Routing (hypervisor) hosts in the given zone that are currently
 * Up and in the Enabled resource state.
 *
 * @param zoneId the zone to search in
 * @return the matching hosts (possibly empty)
 */
@Override
public List<HostVO> findHypervisorHostInZone(long zoneId) {
    SearchCriteria<HostVO> criteria = TypeStatusStateSearch.create();
    criteria.setParameters("zone", zoneId);
    criteria.setParameters("type", Host.Type.Routing);
    criteria.setParameters("status", Status.Up);
    criteria.setParameters("resourceState", ResourceState.Enabled);
    return listBy(criteria);
}
/**
 * Lists the Routing (hypervisor) hosts in the given pod that are currently
 * Up and in the Enabled resource state.
 *
 * @param podId the pod to search in
 * @return the matching hosts (possibly empty)
 */
@Override
public List<HostVO> findHypervisorHostInPod(long podId) {
    SearchCriteria<HostVO> criteria = TypeStatusStateSearch.create();
    criteria.setParameters("pod", podId);
    criteria.setParameters("type", Host.Type.Routing);
    criteria.setParameters("status", Status.Up);
    criteria.setParameters("resourceState", ResourceState.Enabled);
    return listBy(criteria);
}
@Override
public HostVO findAnyStateHypervisorHostInCluster(long clusterId) {
SearchCriteria<HostVO> sc = TypeClusterStatusSearch.create();
SearchCriteria<HostVO> sc = TypeStatusStateSearch.create();
sc.setParameters("type", Host.Type.Routing);
sc.setParameters("cluster", clusterId);
List<HostVO> list = listBy(sc, new Filter(1));
@ -1375,7 +1417,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
@Override
public HostVO findOldestExistentHypervisorHostInCluster(long clusterId) {
SearchCriteria<HostVO> sc = TypeClusterStatusSearch.create();
SearchCriteria<HostVO> sc = TypeStatusStateSearch.create();
sc.setParameters("type", Host.Type.Routing);
sc.setParameters("cluster", clusterId);
sc.setParameters("status", Status.Up);
@ -1876,4 +1918,36 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
return host.getClusterId();
}
/**
 * Lists the distinct values of the hosts' comma-separated
 * storage_access_groups column.
 *
 * When {@code name} is given, matches rows whose comma-separated list contains
 * it as an element: exact match, prefix ("name,%"), suffix ("%,name") or
 * middle ("%,name,%"). When {@code keyword} is given, matches any row whose
 * column contains the keyword as a substring.
 *
 * NOTE(review): DISTINCT is applied to the whole column value, so the result
 * contains raw comma-separated strings, not individual group names — callers
 * presumably split/aggregate them; verify at the call site.
 */
@Override
public List<String> listDistinctStorageAccessGroups(String name, String keyword) {
GenericSearchBuilder<HostVO, String> searchBuilder = createSearchBuilder(String.class);
searchBuilder.select(null, SearchCriteria.Func.DISTINCT, searchBuilder.entity().getStorageAccessGroups());
if (name != null) {
// Open an OR-group covering the four list-membership positions.
searchBuilder.and().op("storageAccessGroupExact", searchBuilder.entity().getStorageAccessGroups(), Op.EQ);
searchBuilder.or("storageAccessGroupPrefix", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
searchBuilder.or("storageAccessGroupSuffix", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
searchBuilder.or("storageAccessGroupMiddle", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
searchBuilder.cp();
}
if (keyword != null) {
searchBuilder.and("keyword", searchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
}
searchBuilder.done();
SearchCriteria<String> sc = searchBuilder.create();
if (name != null) {
sc.setParameters("storageAccessGroupExact", name);
sc.setParameters("storageAccessGroupPrefix", name + ",%");
sc.setParameters("storageAccessGroupSuffix", "%," + name);
sc.setParameters("storageAccessGroupMiddle", "%," + name + ",%");
}
if (keyword != null) {
sc.setParameters("keyword", "%" + keyword + "%");
}
return customSearch(sc, null);
}
}

View File

@ -0,0 +1,64 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
import org.apache.cloudstack.api.InternalIdentity;
/**
 * Entity mapping a primary storage pool to one storage access group. A pool
 * with several groups has one row per group in
 * storage_pool_and_access_group_map.
 */
@Entity
@Table(name = "storage_pool_and_access_group_map")
public class StoragePoolAndAccessGroupMapVO implements InternalIdentity {
// No-arg constructor required by JPA; not for application use.
protected StoragePoolAndAccessGroupMapVO() {
}
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Column(name = "id")
private long id;
@Column(name = "pool_id")
private long poolId;
@Column(name = "storage_access_group")
private String storageAccessGroup;
public StoragePoolAndAccessGroupMapVO(long poolId, String storageAccessGroup) {
this.poolId = poolId;
this.storageAccessGroup = storageAccessGroup;
}
@Override
public long getId() {
return this.id;
}
public long getPoolId() {
return poolId;
}
public String getStorageAccessGroup() {
return storageAccessGroup;
}
}

View File

@ -0,0 +1,31 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage.dao;
import java.util.List;
import com.cloud.storage.StoragePoolAndAccessGroupMapVO;
import com.cloud.utils.db.GenericDao;
/**
 * DAO for the storage-pool-to-storage-access-group mapping table
 * (one row per pool/group pair).
 */
public interface StoragePoolAndAccessGroupMapDao extends GenericDao<StoragePoolAndAccessGroupMapVO, Long> {
// Replaces the pool's existing group rows with the given list.
void persist(long poolId, List<String> storageAccessGroups);
// Returns the group names currently mapped to the pool.
List<String> getStorageAccessGroups(long poolId);
// Removes all group rows for the pool.
void deleteStorageAccessGroups(long poolId);
// Distinct group values, optionally filtered by exact name or keyword.
List<String> listDistinctStorageAccessGroups(String name, String keyword);
}

View File

@ -0,0 +1,105 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage.dao;
import java.util.ArrayList;
import java.util.List;
import com.cloud.storage.StoragePoolAndAccessGroupMapVO;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.TransactionLegacy;
/**
 * DB layer for storage_pool_and_access_group_map: one row per
 * (pool, storage access group) pair. Groups govern which hosts a primary
 * storage pool may connect to.
 */
public class StoragePoolAndAccessGroupMapDaoImpl extends GenericDaoBase<StoragePoolAndAccessGroupMapVO, Long> implements StoragePoolAndAccessGroupMapDao {
// Matches all mapping rows for a given pool id.
protected final SearchBuilder<StoragePoolAndAccessGroupMapVO> StoragePoolAccessGroupSearch;
public StoragePoolAndAccessGroupMapDaoImpl() {
StoragePoolAccessGroupSearch = createSearchBuilder();
StoragePoolAccessGroupSearch.and("poolId", StoragePoolAccessGroupSearch.entity().getPoolId(), SearchCriteria.Op.EQ);
StoragePoolAccessGroupSearch.done();
}
/**
 * Replaces the pool's access groups: existing rows are expunged and the
 * trimmed, non-blank entries of the list are inserted, all in one transaction.
 * A null list is treated as empty, i.e. it clears the pool's groups instead of
 * throwing an NPE mid-transaction (which previously left the txn uncommitted).
 */
@Override
public void persist(long poolId, List<String> storageAccessGroups) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
txn.start();
SearchCriteria<StoragePoolAndAccessGroupMapVO> sc = StoragePoolAccessGroupSearch.create();
sc.setParameters("poolId", poolId);
expunge(sc);
if (storageAccessGroups != null) {
for (String sag : storageAccessGroups) {
sag = sag.trim();
if (!sag.isEmpty()) {
persist(new StoragePoolAndAccessGroupMapVO(poolId, sag));
}
}
}
txn.commit();
}
/** Returns the access group names mapped to the pool; empty list if none. */
@Override
public List<String> getStorageAccessGroups(long poolId) {
SearchCriteria<StoragePoolAndAccessGroupMapVO> sc = StoragePoolAccessGroupSearch.create();
sc.setParameters("poolId", poolId);
List<StoragePoolAndAccessGroupMapVO> results = search(sc, null);
List<String> storagePoolAccessGroups = new ArrayList<>(results.size());
for (StoragePoolAndAccessGroupMapVO result : results) {
storagePoolAccessGroups.add(result.getStorageAccessGroup());
}
return storagePoolAccessGroups;
}
/** Removes all access group mappings of the pool in its own transaction. */
@Override
public void deleteStorageAccessGroups(long poolId) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
txn.start();
SearchCriteria<StoragePoolAndAccessGroupMapVO> sc = StoragePoolAccessGroupSearch.create();
sc.setParameters("poolId", poolId);
expunge(sc);
txn.commit();
}
/**
 * Lists distinct access group names. When both filters are supplied they are
 * ANDed: exact match on {@code name} and substring match on {@code keyword}.
 */
@Override
public List<String> listDistinctStorageAccessGroups(String name, String keyword) {
GenericSearchBuilder<StoragePoolAndAccessGroupMapVO, String> searchBuilder = createSearchBuilder(String.class);
searchBuilder.select(null, SearchCriteria.Func.DISTINCT, searchBuilder.entity().getStorageAccessGroup());
searchBuilder.and("name", searchBuilder.entity().getStorageAccessGroup(), SearchCriteria.Op.EQ);
searchBuilder.and("keyword", searchBuilder.entity().getStorageAccessGroup(), SearchCriteria.Op.LIKE);
searchBuilder.done();
SearchCriteria<String> sc = searchBuilder.create();
if (name != null) {
sc.setParameters("name", name);
}
if (keyword != null) {
sc.setParameters("keyword", "%" + keyword + "%");
}
return customSearch(sc, null);
}
}

View File

@ -58,9 +58,9 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
*/
void updateCapacityIops(long id, long capacityIops);
StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule);
StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule, List<String> storageAccessGroups);
StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule, boolean displayDetails);
StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule, boolean displayDetails, List<String> storageAccessGroups);
/**
* Find pool by name.
@ -84,7 +84,9 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
*/
List<StoragePoolVO> findPoolsByDetails(long dcId, long podId, Long clusterId, Map<String, String> details, ScopeType scope);
List<StoragePoolVO> findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, boolean validateTagRule, long ruleExecuteTimeout);
List<StoragePoolVO> findPoolsByTags(long dcId, long podId, Long clusterId, ScopeType scope, String[] tags, boolean validateTagRule, long ruleExecuteTimeout);
List<StoragePoolVO> findPoolsByAccessGroupsForHostConnection(Long dcId, Long podId, Long clusterId, ScopeType scope, String[] storageAccessGroups);
List<StoragePoolVO> findDisabledPoolsByScope(long dcId, Long podId, Long clusterId, ScopeType scope);
@ -127,6 +129,10 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
List<StoragePoolVO> findZoneWideStoragePoolsByTags(long dcId, String[] tags, boolean validateTagRule);
List<StoragePoolVO> findZoneWideStoragePoolsByAccessGroupsForHostConnection(long dcId, String[] storageAccessGroups);
List<StoragePoolVO> findZoneWideStoragePoolsByAccessGroupsAndHypervisorTypeForHostConnection(long dcId, String[] storageAccessGroups, HypervisorType type);
List<StoragePoolVO> findZoneWideStoragePoolsByHypervisor(long dataCenterId, HypervisorType hypervisorType);
List<StoragePoolVO> findZoneWideStoragePoolsByHypervisor(long dataCenterId, HypervisorType hypervisorType, String keyword);
@ -143,6 +149,8 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
void deletePoolTags(long poolId);
void deleteStoragePoolAccessGroups(long poolId);
List<StoragePoolVO> listChildStoragePoolsInDatastoreCluster(long poolId);
Integer countAll();
@ -154,8 +162,10 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
List<StoragePoolVO> listStoragePoolsWithActiveVolumesByOfferingId(long offeringid);
Pair<List<Long>, Integer> searchForIdsAndCount(Long storagePoolId, String storagePoolName, Long zoneId,
String path, Long podId, Long clusterId, String address, ScopeType scopeType, StoragePoolStatus status,
String keyword, Filter searchFilter);
String path, Long podId, Long clusterId, Long hostId, String address, ScopeType scopeType, StoragePoolStatus status,
String keyword, String storageAccessGroup, Filter searchFilter);
List<StoragePoolVO> listByIds(List<Long> ids);
List<StoragePoolVO> findStoragePoolsByEmptyStorageAccessGroups(Long dcId, Long podId, Long clusterId, ScopeType scope, HypervisorType hypervisorType);
}

View File

@ -28,6 +28,8 @@ import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.storage.StoragePoolAndAccessGroupMapVO;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import org.apache.commons.collections.CollectionUtils;
import com.cloud.host.Status;
@ -70,15 +72,25 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
private StoragePoolHostDao _hostDao;
@Inject
private StoragePoolTagsDao _tagsDao;
@Inject
StoragePoolAndAccessGroupMapDao _storagePoolAccessGroupMapDao;
protected final String DetailsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? and (";
protected final String DetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?";
protected final String DetailsForHostConnectionSqlSuffix = ") GROUP BY storage_pool_details.pool_id";
private final String ZoneWideTagsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_tags ON storage_pool.id = storage_pool_tags.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' AND storage_pool_tags.is_tag_a_rule = 0 and storage_pool.data_center_id = ? and storage_pool.scope = ? and (";
private final String ZoneWideTagsSqlSuffix = ") GROUP BY storage_pool_tags.pool_id HAVING COUNT(storage_pool_tags.tag) >= ?";
private final String ZoneWideStorageAccessGroupsForHostConnectionSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_and_access_group_map ON storage_pool.id = storage_pool_and_access_group_map.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and storage_pool.scope = ? and (";
private final String ZoneWideStorageAccessGroupsForHostConnectionSqlSuffix = ") GROUP BY storage_pool_and_access_group_map.pool_id";
private final String ZoneWideStorageAccessGroupsWithHypervisorTypeSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_and_access_group_map ON storage_pool.id = storage_pool_and_access_group_map.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.hypervisor = ? and storage_pool.data_center_id = ? and storage_pool.scope = ? and (";
private final String ZoneWideStorageAccessGroupsWithHypervisorTypeSqlSuffix = ") GROUP BY storage_pool_and_access_group_map.pool_id";
// Storage tags are now separate from storage_pool_details, leaving only details on that table
protected final String TagsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_tags ON storage_pool.id = storage_pool_tags.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' AND storage_pool_tags.is_tag_a_rule = 0 and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? and (";
protected final String TagsSqlSuffix = ") GROUP BY storage_pool_tags.pool_id HAVING COUNT(storage_pool_tags.tag) >= ?";
protected final String SAGsForHostConnectionSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_and_access_group_map ON storage_pool.id = storage_pool_and_access_group_map.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? and (";
protected final String SAGsForHostConnectionSqlSuffix = ") GROUP BY storage_pool_and_access_group_map.pool_id";
private static final String GET_STORAGE_POOLS_OF_VOLUMES_WITHOUT_OR_NOT_HAVING_TAGS = "SELECT s.* " +
"FROM volumes vol " +
@ -296,13 +308,13 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
}
@Override
public StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule) {
return persist(pool, details, tags, isTagARule, true);
public StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule, List<String> storageAccessGroups) {
return persist(pool, details, tags, isTagARule, true, storageAccessGroups);
}
@Override
@DB
public StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule, boolean displayDetails) {
public StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags, Boolean isTagARule, boolean displayDetails, List<String> storageAccessGroups) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
txn.start();
pool = super.persist(pool);
@ -315,6 +327,9 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
if (CollectionUtils.isNotEmpty(tags)) {
_tagsDao.persist(pool.getId(), tags, isTagARule);
}
if (CollectionUtils.isNotEmpty(storageAccessGroups)) {
_storagePoolAccessGroupMapDao.persist(pool.getId(), storageAccessGroups);
}
txn.commit();
return pool;
}
@ -338,6 +353,13 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
return searchStoragePoolsPreparedStatement(sql, dcId, podId, clusterId, scope, valuesLength);
}
/**
 * Host-connection variant of findPoolsByDetailsOrTagsInternal: builds the
 * pre-rendered OR predicate (sqlValues) into the details or
 * storage-access-group SQL template and runs it WITHOUT the
 * "HAVING COUNT(...) >= ?" cardinality check (valuesLength passed as null),
 * so a pool qualifies when it shares at least one value rather than all.
 */
protected List<StoragePoolVO> findPoolsByDetailsOrTagsForHostConnectionInternal(long dcId, long podId, Long clusterId, ScopeType scope, String sqlValues, ValueType valuesType) {
// DETAILS keeps the details template; any other value type uses the access-group template.
String sqlPrefix = valuesType.equals(ValueType.DETAILS) ? DetailsSqlPrefix : SAGsForHostConnectionSqlPrefix;
String sqlSuffix = valuesType.equals(ValueType.DETAILS) ? DetailsForHostConnectionSqlSuffix : SAGsForHostConnectionSqlSuffix;
String sql = getSqlPreparedStatement(sqlPrefix, sqlSuffix, sqlValues, clusterId);
return searchStoragePoolsPreparedStatement(sql, dcId, podId, clusterId, scope, null);
}
/**
* Search storage pools in a transaction
* @param sql prepared statement sql
@ -349,7 +371,50 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
* @return storage pools matching criteria
*/
@DB
protected List<StoragePoolVO> searchStoragePoolsPreparedStatement(String sql, long dcId, Long podId, Long clusterId, ScopeType scope, int valuesLength) {
/**
 * Runs a raw storage-pool query whose first placeholder is the hypervisor
 * type, followed by dcId, optional podId, scope, optional clusterId and an
 * optional values-count bound. Placeholder ORDER must match the SQL templates
 * built by getSqlPreparedStatement; null podId/clusterId/valuesLength mean the
 * corresponding placeholder is absent from the SQL.
 *
 * @return pools matching the criteria (entities hydrated without caching details)
 */
protected List<StoragePoolVO> searchStoragePoolsWithHypervisorTypesPreparedStatement(String sql, HypervisorType type, long dcId, Long podId, Long clusterId, ScopeType scope, Integer valuesLength) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
List<StoragePoolVO> pools = new ArrayList<StoragePoolVO>();
try (PreparedStatement pstmt = txn.prepareStatement(sql);) {
if (pstmt != null) {
int i = 1;
// Bind in template order: hypervisor first, then the location filters.
pstmt.setString(i++, type.toString());
pstmt.setLong(i++, dcId);
if (podId != null) {
pstmt.setLong(i++, podId);
}
pstmt.setString(i++, scope.toString());
if (clusterId != null) {
pstmt.setLong(i++, clusterId);
}
// Only tag/detail-count templates carry this trailing HAVING bound.
if (valuesLength != null) {
pstmt.setInt(i++, valuesLength);
}
try (ResultSet rs = pstmt.executeQuery();) {
while (rs.next()) {
pools.add(toEntityBean(rs, false));
}
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to execute :" + e.getMessage(), e);
}
}
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to execute :" + e.getMessage(), e);
}
return pools;
}
/**
* Search storage pools in a transaction
* @param sql prepared statement sql
* @param dcId data center id
* @param podId pod id
* @param clusterId cluster id
* @param scope scope
* @param valuesLength values length
* @return storage pools matching criteria
*/
@DB
protected List<StoragePoolVO> searchStoragePoolsPreparedStatement(String sql, long dcId, Long podId, Long clusterId, ScopeType scope, Integer valuesLength) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
List<StoragePoolVO> pools = new ArrayList<StoragePoolVO>();
try (PreparedStatement pstmt = txn.prepareStatement(sql);) {
@ -363,7 +428,9 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
if (clusterId != null) {
pstmt.setLong(i++, clusterId);
}
if (valuesLength != null) {
pstmt.setInt(i++, valuesLength);
}
try (ResultSet rs = pstmt.executeQuery();) {
while (rs.next()) {
pools.add(toEntityBean(rs, false));
@ -420,6 +487,22 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
return sqlValues.toString();
}
/**
 * Builds the OR'd predicate over storage_pool_and_access_group_map.storage_access_group
 * to be placed between the SQL prefix and suffix of an access-group query.
 * Group names are inlined as SQL string literals, so embedded single quotes
 * are doubled to keep the statement well-formed and injection-safe.
 *
 * @param storageAccessGroups storage access group names (must be non-null and non-empty)
 * @return SQL fragment of the form "(col='a') OR (col='b')"
 * @throws NullPointerException if storageAccessGroups is null
 * @throws IndexOutOfBoundsException if storageAccessGroups is empty
 */
protected String getSqlValuesFromStorageAccessGroups(String[] storageAccessGroups) throws NullPointerException, IndexOutOfBoundsException {
StringBuilder sqlValues = new StringBuilder();
for (String sag : storageAccessGroups) {
// Double any single quote so the inlined literal cannot break out of the string.
String escaped = sag.replace("'", "''");
sqlValues.append("(storage_pool_and_access_group_map.storage_access_group='").append(escaped).append("') OR ");
}
// Drop the trailing " OR " (4 chars); throws IndexOutOfBoundsException on empty input.
sqlValues.delete(sqlValues.length() - 4, sqlValues.length());
return sqlValues.toString();
}
@DB
@Override
public List<StoragePoolVO> findPoolsByDetails(long dcId, long podId, Long clusterId, Map<String, String> details, ScopeType scope) {
@ -428,10 +511,10 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
}
@Override
public List<StoragePoolVO> findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, boolean validateTagRule, long ruleExecuteTimeout) {
public List<StoragePoolVO> findPoolsByTags(long dcId, long podId, Long clusterId, ScopeType scope, String[] tags, boolean validateTagRule, long ruleExecuteTimeout) {
List<StoragePoolVO> storagePools = null;
if (tags == null || tags.length == 0) {
storagePools = listBy(dcId, podId, clusterId, ScopeType.CLUSTER);
storagePools = listBy(dcId, podId, clusterId, scope);
if (validateTagRule) {
storagePools = getPoolsWithoutTagRule(storagePools);
@ -439,7 +522,20 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
} else {
String sqlValues = getSqlValuesFromStorageTags(tags);
storagePools = findPoolsByDetailsOrTagsInternal(dcId, podId, clusterId, ScopeType.CLUSTER, sqlValues, ValueType.TAGS, tags.length);
storagePools = findPoolsByDetailsOrTagsInternal(dcId, podId, clusterId, scope, sqlValues, ValueType.TAGS, tags.length);
}
return storagePools;
}
@Override
public List<StoragePoolVO> findPoolsByAccessGroupsForHostConnection(Long dcId, Long podId, Long clusterId, ScopeType scope, String[] storageAccessGroups) {
List<StoragePoolVO> storagePools = null;
if (storageAccessGroups == null || storageAccessGroups.length == 0) {
storagePools = listBy(dcId, podId, clusterId, scope);
} else {
String sqlValues = getSqlValuesFromStorageAccessGroups(storageAccessGroups);
storagePools = findPoolsByDetailsOrTagsForHostConnectionInternal(dcId, podId, clusterId, scope, sqlValues, ValueType.TAGS);
}
return storagePools;
@ -556,6 +652,77 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
return storagePoolsToReturn;
}
@Override
public List<StoragePoolVO> findZoneWideStoragePoolsByAccessGroupsForHostConnection(long dcId, String[] storageAccessGroups) {
// Host has no access groups: every Up, zone-scoped pool in the zone is a candidate.
if (storageAccessGroups == null || storageAccessGroups.length == 0) {
QueryBuilder<StoragePoolVO> sc = QueryBuilder.create(StoragePoolVO.class);
sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId);
sc.and(sc.entity().getStatus(), Op.EQ, Status.Up);
sc.and(sc.entity().getScope(), Op.EQ, ScopeType.ZONE);
return sc.list();
} else {
// Host has groups: raw SQL join against storage_pool_and_access_group_map,
// matching pools that share at least one of the host's groups.
String sqlValues = getSqlValuesFromStorageAccessGroups(storageAccessGroups);
String sql = getSqlPreparedStatement(ZoneWideStorageAccessGroupsForHostConnectionSqlPrefix, ZoneWideStorageAccessGroupsForHostConnectionSqlSuffix, sqlValues, null);
return searchStoragePoolsPreparedStatement(sql, dcId, null, null, ScopeType.ZONE, null);
}
}
@Override
public List<StoragePoolVO> findZoneWideStoragePoolsByAccessGroupsAndHypervisorTypeForHostConnection(long dcId, String[] storageAccessGroups, HypervisorType type) {
// No access groups on the host: all Up, zone-scoped pools of this hypervisor qualify.
if (storageAccessGroups == null || storageAccessGroups.length == 0) {
QueryBuilder<StoragePoolVO> sc = QueryBuilder.create(StoragePoolVO.class);
sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId);
sc.and(sc.entity().getStatus(), Op.EQ, Status.Up);
sc.and(sc.entity().getScope(), Op.EQ, ScopeType.ZONE);
sc.and(sc.entity().getHypervisor(), Op.EQ, type);
return sc.list();
} else {
// Otherwise match pools sharing at least one group, with the hypervisor
// bound as the FIRST placeholder of the SQL template.
String sqlValues = getSqlValuesFromStorageAccessGroups(storageAccessGroups);
String sql = getSqlPreparedStatement(ZoneWideStorageAccessGroupsWithHypervisorTypeSqlPrefix, ZoneWideStorageAccessGroupsWithHypervisorTypeSqlSuffix, sqlValues, null);
return searchStoragePoolsWithHypervisorTypesPreparedStatement(sql, type, dcId, null, null, ScopeType.ZONE, null);
}
}
/**
 * Finds Up pools of the given scope/location/hypervisor that have NO storage
 * access groups assigned, via a LEFT JOIN to storage_pool_and_access_group_map
 * filtered on a NULL pool_id. Null dcId/podId/clusterId/hypervisorType means
 * that filter is skipped.
 */
@Override
public List<StoragePoolVO> findStoragePoolsByEmptyStorageAccessGroups(Long dcId, Long podId, Long clusterId, ScopeType scope, HypervisorType hypervisorType) {
SearchBuilder<StoragePoolVO> poolSearch = createSearchBuilder();
SearchBuilder<StoragePoolAndAccessGroupMapVO> storageAccessGroupsPoolSearch = _storagePoolAccessGroupMapDao.createSearchBuilder();
// Set criteria for pools
poolSearch.and("scope", poolSearch.entity().getScope(), Op.EQ);
poolSearch.and("removed", poolSearch.entity().getRemoved(), Op.NULL);
poolSearch.and("status", poolSearch.entity().getStatus(), Op.EQ);
poolSearch.and("datacenterid", poolSearch.entity().getDataCenterId(), Op.EQ);
poolSearch.and("podid", poolSearch.entity().getPodId(), Op.EQ);
poolSearch.and("clusterid", poolSearch.entity().getClusterId(), Op.EQ);
poolSearch.and("hypervisortype", poolSearch.entity().getHypervisor(), Op.EQ);
// Require StoragePoolAndAccessGroupMapVO.pool_id IS NULL on the LEFT JOIN:
// only pools WITHOUT any access group mapping survive.
// (Join alias "tagJoin" is legacy naming carried over from the tags query.)
storageAccessGroupsPoolSearch.and("poolid", storageAccessGroupsPoolSearch.entity().getPoolId(), Op.NULL);
poolSearch.join("tagJoin", storageAccessGroupsPoolSearch, poolSearch.entity().getId(), storageAccessGroupsPoolSearch.entity().getPoolId(), JoinBuilder.JoinType.LEFT);
SearchCriteria<StoragePoolVO> sc = poolSearch.create();
sc.setParameters("scope", scope.toString());
sc.setParameters("status", Status.Up.toString());
if (dcId != null) {
sc.setParameters("datacenterid", dcId);
}
if (podId != null) {
sc.setParameters("podid", podId);
}
if (clusterId != null) {
sc.setParameters("clusterid", clusterId);
}
if (hypervisorType != null) {
sc.setParameters("hypervisortype", hypervisorType);
}
return listBy(sc);
}
@Override
public List<String> searchForStoragePoolTags(long poolId) {
return _tagsDao.getStoragePoolTags(poolId);
@ -659,6 +826,11 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
_tagsDao.deleteTags(poolId);
}
/** Removes all storage access group mappings of the pool (delegates to the map DAO). */
@Override
public void deleteStoragePoolAccessGroups(long poolId) {
_storagePoolAccessGroupMapDao.deleteStorageAccessGroups(poolId);
}
@Override
public List<StoragePoolVO> listChildStoragePoolsInDatastoreCluster(long poolId) {
QueryBuilder<StoragePoolVO> sc = QueryBuilder.create(StoragePoolVO.class);
@ -725,9 +897,10 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
@Override
public Pair<List<Long>, Integer> searchForIdsAndCount(Long storagePoolId, String storagePoolName, Long zoneId,
String path, Long podId, Long clusterId, String address, ScopeType scopeType, StoragePoolStatus status,
String keyword, Filter searchFilter) {
SearchCriteria<StoragePoolVO> sc = createStoragePoolSearchCriteria(storagePoolId, storagePoolName, zoneId, path, podId, clusterId, address, scopeType, status, keyword);
String path, Long podId, Long clusterId, Long hostId, String address, ScopeType scopeType, StoragePoolStatus status,
String keyword, String storageAccessGroup, Filter searchFilter) {
SearchCriteria<StoragePoolVO> sc = createStoragePoolSearchCriteria(storagePoolId, storagePoolName, zoneId, path, podId, clusterId,
hostId, address, scopeType, status, keyword, storageAccessGroup);
Pair<List<StoragePoolVO>, Integer> uniquePair = searchAndCount(sc, searchFilter);
List<Long> idList = uniquePair.first().stream().map(StoragePoolVO::getId).collect(Collectors.toList());
return new Pair<>(idList, uniquePair.second());
@ -744,8 +917,8 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
}
private SearchCriteria<StoragePoolVO> createStoragePoolSearchCriteria(Long storagePoolId, String storagePoolName,
Long zoneId, String path, Long podId, Long clusterId, String address, ScopeType scopeType,
StoragePoolStatus status, String keyword) {
Long zoneId, String path, Long podId, Long clusterId, Long hostId, String address, ScopeType scopeType,
StoragePoolStatus status, String keyword, String storageAccessGroup) {
SearchBuilder<StoragePoolVO> sb = createSearchBuilder();
sb.select(null, SearchCriteria.Func.DISTINCT, sb.entity().getId()); // select distinct
// ids
@ -760,6 +933,18 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
sb.and("status", sb.entity().getStatus(), SearchCriteria.Op.EQ);
sb.and("parent", sb.entity().getParent(), SearchCriteria.Op.EQ);
if (hostId != null) {
SearchBuilder<StoragePoolHostVO> hostJoin = _hostDao.createSearchBuilder();
hostJoin.and("hostId", hostJoin.entity().getHostId(), SearchCriteria.Op.EQ);
sb.join("poolHostJoin", hostJoin, sb.entity().getId(), hostJoin.entity().getPoolId(), JoinBuilder.JoinType.INNER);
}
if (storageAccessGroup != null) {
SearchBuilder<StoragePoolAndAccessGroupMapVO> storageAccessGroupJoin = _storagePoolAccessGroupMapDao.createSearchBuilder();
storageAccessGroupJoin.and("storageAccessGroup", storageAccessGroupJoin.entity().getStorageAccessGroup(), SearchCriteria.Op.EQ);
sb.join("poolStorageAccessGroupJoin", storageAccessGroupJoin, sb.entity().getId(), storageAccessGroupJoin.entity().getPoolId(), JoinBuilder.JoinType.INNER);
}
SearchCriteria<StoragePoolVO> sc = sb.create();
if (keyword != null) {
@ -808,6 +993,15 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
sc.setParameters("status", status.toString());
}
sc.setParameters("parent", 0);
if (hostId != null) {
sc.setJoinParameters("poolHostJoin", "hostId", hostId);
}
if (storageAccessGroup != null) {
sc.setJoinParameters("poolStorageAccessGroupJoin", "storageAccessGroup", storageAccessGroup);
}
return sc;
}
}

View File

@ -62,6 +62,7 @@
<bean id="storagePoolDetailsDaoImpl" class="com.cloud.storage.dao.StoragePoolDetailsDaoImpl" />
<bean id="storagePoolHostDaoImpl" class="com.cloud.storage.dao.StoragePoolHostDaoImpl" />
<bean id="storagePoolTagsDaoImpl" class="com.cloud.storage.dao.StoragePoolTagsDaoImpl" />
<bean id="storagePoolAndAccessGroupMapDaoImpl" class="com.cloud.storage.dao.StoragePoolAndAccessGroupMapDaoImpl" />
<bean id="userVmDetailsDaoImpl" class="com.cloud.vm.dao.UserVmDetailsDaoImpl" />
<bean id="vGPUTypesDaoImpl" class="com.cloud.gpu.dao.VGPUTypesDaoImpl" />
<bean id="vMInstanceDaoImpl" class="com.cloud.vm.dao.VMInstanceDaoImpl" />

View File

@ -65,3 +65,18 @@ CREATE TABLE IF NOT EXISTS `cloud`.`reconcile_commands` (
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.snapshot_store_ref', 'kvm_checkpoint_path', 'varchar(255)');
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.snapshot_store_ref', 'end_of_chain', 'int(1) unsigned');
-- Create table storage_pool_and_access_group_map
-- One row per (pool, storage access group) pair; rows are cascaded away when the pool is deleted.
-- NOTE(review): AUTO_INCREMENT=2 is an unusual starting value for a new table — confirm it is intentional.
-- NOTE(review): charset utf8 is utf8mb3 on MySQL; other group-name columns below are plain varchar —
-- confirm utf8mb4 is not required for consistency with the rest of the schema.
CREATE TABLE IF NOT EXISTS `cloud`.`storage_pool_and_access_group_map` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`pool_id` bigint(20) unsigned NOT NULL COMMENT "pool id",
`storage_access_group` varchar(255) NOT NULL,
PRIMARY KEY (`id`),
KEY `fk_storage_pool_and_access_group_map__pool_id` (`pool_id`),
CONSTRAINT `fk_storage_pool_and_access_group_map__pool_id` FOREIGN KEY (`pool_id`) REFERENCES `storage_pool` (`id`) ON DELETE CASCADE
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8;
-- Comma-separated storage access groups, inherited down the zone -> pod -> cluster -> host hierarchy.
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.host', 'storage_access_groups', 'varchar(255) DEFAULT NULL COMMENT "storage access groups for the host"');
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.cluster', 'storage_access_groups', 'varchar(255) DEFAULT NULL COMMENT "storage access groups for the hosts in the cluster"');
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.host_pod_ref', 'storage_access_groups', 'varchar(255) DEFAULT NULL COMMENT "storage access groups for the hosts in the pod"');
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.data_center', 'storage_access_groups', 'varchar(255) DEFAULT NULL COMMENT "storage access groups for the hosts in the zone"');

View File

@ -42,6 +42,7 @@ select
data_center.type,
data_center.removed,
data_center.sort_key,
data_center.storage_access_groups,
domain.id domain_id,
domain.uuid domain_uuid,
domain.name domain_name,

View File

@ -42,17 +42,21 @@ SELECT
host.speed,
host.ram,
host.arch,
host.storage_access_groups,
cluster.id cluster_id,
cluster.uuid cluster_uuid,
cluster.name cluster_name,
cluster.cluster_type,
cluster.storage_access_groups AS cluster_storage_access_groups,
data_center.id data_center_id,
data_center.uuid data_center_uuid,
data_center.name data_center_name,
data_center.storage_access_groups AS zone_storage_access_groups,
data_center.networktype data_center_type,
host_pod_ref.id pod_id,
host_pod_ref.uuid pod_uuid,
host_pod_ref.name pod_name,
host_pod_ref.storage_access_groups AS pod_storage_access_groups,
GROUP_CONCAT(DISTINCT(host_tags.tag)) AS tag,
GROUP_CONCAT(DISTINCT(explicit_host_tags.tag)) AS explicit_tag,
GROUP_CONCAT(DISTINCT(implicit_host_tags.tag)) AS implicit_tag,

View File

@ -51,6 +51,7 @@ SELECT
`host_pod_ref`.`name` AS `pod_name`,
`storage_pool_tags`.`tag` AS `tag`,
`storage_pool_tags`.`is_tag_a_rule` AS `is_tag_a_rule`,
`storage_pool_and_access_group_map`.`storage_access_group` AS `storage_access_group`,
`op_host_capacity`.`used_capacity` AS `disk_used_capacity`,
`op_host_capacity`.`reserved_capacity` AS `disk_reserved_capacity`,
`async_job`.`id` AS `job_id`,
@ -58,13 +59,16 @@ SELECT
`async_job`.`job_status` AS `job_status`,
`async_job`.`account_id` AS `job_account_id`
FROM
((((((`cloud`.`storage_pool`
LEFT JOIN `cloud`.`cluster` ON ((`storage_pool`.`cluster_id` = `cluster`.`id`)))
LEFT JOIN `cloud`.`data_center` ON ((`storage_pool`.`data_center_id` = `data_center`.`id`)))
LEFT JOIN `cloud`.`host_pod_ref` ON ((`storage_pool`.`pod_id` = `host_pod_ref`.`id`)))
LEFT JOIN `cloud`.`storage_pool_tags` ON (((`storage_pool_tags`.`pool_id` = `storage_pool`.`id`))))
LEFT JOIN `cloud`.`op_host_capacity` ON (((`storage_pool`.`id` = `op_host_capacity`.`host_id`)
AND (`op_host_capacity`.`capacity_type` IN (3 , 9)))))
LEFT JOIN `cloud`.`async_job` ON (((`async_job`.`instance_id` = `storage_pool`.`id`)
AND (`async_job`.`instance_type` = 'StoragePool')
AND (`async_job`.`job_status` = 0))));
`cloud`.`storage_pool`
LEFT JOIN `cloud`.`cluster` ON `storage_pool`.`cluster_id` = `cluster`.`id`
LEFT JOIN `cloud`.`data_center` ON `storage_pool`.`data_center_id` = `data_center`.`id`
LEFT JOIN `cloud`.`host_pod_ref` ON `storage_pool`.`pod_id` = `host_pod_ref`.`id`
LEFT JOIN `cloud`.`storage_pool_tags` ON `storage_pool_tags`.`pool_id` = `storage_pool`.`id`
LEFT JOIN `cloud`.`storage_pool_and_access_group_map` ON `storage_pool_and_access_group_map`.`pool_id` = `storage_pool`.`id`
LEFT JOIN `cloud`.`op_host_capacity`
ON `storage_pool`.`id` = `op_host_capacity`.`host_id`
AND `op_host_capacity`.`capacity_type` IN (3, 9)
LEFT JOIN `cloud`.`async_job`
ON `async_job`.`instance_id` = `storage_pool`.`id`
AND `async_job`.`instance_type` = 'StoragePool'
AND `async_job`.`job_status` = 0;

View File

@ -35,6 +35,7 @@ import javax.inject.Inject;
import com.cloud.agent.api.CheckVirtualMachineAnswer;
import com.cloud.agent.api.CheckVirtualMachineCommand;
import com.cloud.agent.api.PrepareForMigrationAnswer;
import com.cloud.resource.ResourceManager;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
@ -51,6 +52,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreState
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.StorageAction;
@ -199,6 +201,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
VMTemplatePoolDao templatePoolDao;
@Inject
private VolumeDataFactory _volFactory;
@Inject
ResourceManager resourceManager;
@Override
public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
@ -485,10 +489,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
HostVO hostVO;
if (srcStoragePoolVO.getClusterId() != null) {
hostVO = getHostInCluster(srcStoragePoolVO.getClusterId());
hostVO = getHostInCluster(srcStoragePoolVO);
}
else {
hostVO = getHost(srcVolumeInfo.getDataCenterId(), hypervisorType, false);
hostVO = getHost(srcVolumeInfo, hypervisorType, false);
}
volumePath = copyManagedVolumeToSecondaryStorage(srcVolumeInfo, destVolumeInfo, hostVO,
@ -556,10 +560,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
HostVO hostVO;
if (destStoragePoolVO.getClusterId() != null) {
hostVO = getHostInCluster(destStoragePoolVO.getClusterId());
hostVO = getHostInCluster(destStoragePoolVO);
}
else {
hostVO = getHost(destVolumeInfo.getDataCenterId(), hypervisorType, false);
hostVO = getHost(destVolumeInfo, hypervisorType, false);
}
setCertainVolumeValuesNull(destVolumeInfo.getId());
@ -933,9 +937,9 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
hostVO = _hostDao.findById(destVolumeInfo.getDataStore().getScope().getScopeId());
} else {
if (srcStoragePoolVO.getClusterId() != null) {
hostVO = getHostInCluster(srcStoragePoolVO.getClusterId());
hostVO = getHostInCluster(srcStoragePoolVO);
} else {
hostVO = getHost(destVolumeInfo.getDataCenterId(), HypervisorType.KVM, false);
hostVO = getHost(destVolumeInfo, HypervisorType.KVM, false);
}
}
@ -1337,7 +1341,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
createVolumeFromSnapshot(snapshotInfo);
HostVO hostVO = getHost(snapshotInfo.getDataCenterId(), HypervisorType.XenServer, true);
HostVO hostVO = getHost(snapshotInfo, HypervisorType.XenServer, true);
copyCmdAnswer = performResignature(snapshotInfo, hostVO, null, true);
@ -1349,7 +1353,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
CopyCommand copyCommand = new CopyCommand(snapshotInfo.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait,
VirtualMachineManager.ExecuteInSequence.value());
HostVO hostVO = getHostInCluster(volumeStoragePoolVO.getClusterId());
HostVO hostVO = getHostInCluster(volumeStoragePoolVO);
if (!usingBackendSnapshot) {
long snapshotStoragePoolId = snapshotInfo.getDataStore().getId();
@ -1379,7 +1383,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
}
finally {
try {
HostVO hostVO = getHostInCluster(volumeStoragePoolVO.getClusterId());
HostVO hostVO = getHostInCluster(volumeStoragePoolVO);
long snapshotStoragePoolId = snapshotInfo.getDataStore().getId();
DataStore snapshotDataStore = dataStoreMgr.getDataStore(snapshotStoragePoolId, DataStoreRole.Primary);
@ -1473,7 +1477,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION);
hostVO = getHost(snapshotInfo.getDataCenterId(), snapshotInfo.getHypervisorType(), false);
hostVO = getHost(snapshotInfo, snapshotInfo.getHypervisorType(), false);
// copy the volume from secondary via the hypervisor
if (HypervisorType.XenServer.equals(snapshotInfo.getHypervisorType())) {
@ -1554,7 +1558,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
// only XenServer, VMware, and KVM are currently supported
// Leave host equal to null for KVM since we don't need to perform a resignature when using that hypervisor type.
if (volumeInfo.getFormat() == ImageFormat.VHD) {
hostVO = getHost(volumeInfo.getDataCenterId(), HypervisorType.XenServer, true);
hostVO = getHost(volumeInfo, HypervisorType.XenServer, true);
if (hostVO == null) {
throw new CloudRuntimeException("Unable to locate a host capable of resigning in the zone with the following ID: " +
@ -1574,7 +1578,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
}
else if (volumeInfo.getFormat() == ImageFormat.OVA) {
// all VMware hosts support resigning
hostVO = getHost(volumeInfo.getDataCenterId(), HypervisorType.VMware, false);
hostVO = getHost(volumeInfo, HypervisorType.VMware, false);
if (hostVO == null) {
throw new CloudRuntimeException("Unable to locate a host capable of resigning in the zone with the following ID: " +
@ -1757,7 +1761,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
} else {
// asking for a XenServer host here so we don't always prefer to use XenServer hosts that support resigning
// even when we don't need those hosts to do this kind of copy work
hostVO = getHost(snapshotInfo.getDataCenterId(), snapshotInfo.getHypervisorType(), false);
hostVO = getHost(snapshotInfo, snapshotInfo.getHypervisorType(), false);
handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION);
@ -1814,7 +1818,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
HostVO hostVO = getHost(dataCenterId, hypervisorType, false);
HostVO hostVO = getHost(destVolumeInfo, hypervisorType, false);
handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION);
@ -2606,7 +2610,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
volumeInfo.processEvent(Event.MigrationRequested);
HostVO hostVO = getHost(volumeInfo.getDataCenterId(), HypervisorType.KVM, false);
HostVO hostVO = getHost(volumeInfo, HypervisorType.KVM, false);
DataStore srcDataStore = volumeInfo.getDataStore();
int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value();
@ -2764,10 +2768,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
HypervisorType hypervisorType = snapshotInfo.getHypervisorType();
if (HypervisorType.XenServer.equals(hypervisorType)) {
HostVO hostVO = getHost(snapshotInfo.getDataCenterId(), hypervisorType, true);
HostVO hostVO = getHost(snapshotInfo, hypervisorType, true);
if (hostVO == null) {
hostVO = getHost(snapshotInfo.getDataCenterId(), hypervisorType, false);
hostVO = getHost(snapshotInfo, hypervisorType, false);
if (hostVO == null) {
throw new CloudRuntimeException("Unable to locate an applicable host in data center with ID = " + snapshotInfo.getDataCenterId());
@ -2778,14 +2782,15 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
}
if (HypervisorType.VMware.equals(hypervisorType) || HypervisorType.KVM.equals(hypervisorType)) {
return getHost(snapshotInfo.getDataCenterId(), hypervisorType, false);
return getHost(snapshotInfo, hypervisorType, false);
}
throw new CloudRuntimeException("Unsupported hypervisor type");
}
private HostVO getHostInCluster(long clusterId) {
List<HostVO> hosts = _hostDao.findByClusterId(clusterId);
private HostVO getHostInCluster(StoragePoolVO storagePool) {
DataStore store = dataStoreMgr.getDataStore(storagePool.getId(), DataStoreRole.Primary);
List<HostVO> hosts = resourceManager.getEligibleUpAndEnabledHostsInClusterForStorageConnection((PrimaryDataStoreInfo) store);
if (hosts != null && hosts.size() > 0) {
Collections.shuffle(hosts, RANDOM);
@ -2800,12 +2805,37 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
throw new CloudRuntimeException("Unable to locate a host");
}
private HostVO getHost(Long zoneId, HypervisorType hypervisorType, boolean computeClusterMustSupportResign) {
/**
 * Selects a host in the snapshot's zone that can serve the copy/resignature operation.
 *
 * When the snapshot lives on primary storage, host selection is delegated to the
 * resource manager so that storage-access-group restrictions between the pool and
 * hosts are honored; otherwise all up hosts in the zone for the hypervisor type
 * are considered.
 *
 * @param snapshotInfo snapshot whose data center and backing data store drive host selection
 * @param hypervisorType hypervisor the chosen host must run (must not be null)
 * @param computeClusterMustSupportResign whether the host's cluster must support UUID resignaturing
 * @return a suitable host, or null if none qualifies
 */
private HostVO getHost(SnapshotInfo snapshotInfo, HypervisorType hypervisorType, boolean computeClusterMustSupportResign) {
    Long zoneId = snapshotInfo.getDataCenterId();
    Preconditions.checkArgument(zoneId != null, "Zone ID cannot be null.");
    Preconditions.checkArgument(hypervisorType != null, "Hypervisor type cannot be null.");
    // NOTE(review): the original diff rendering carried a stale pre-change line that
    // re-declared 'hosts' via _hostDao directly; removed here so the block compiles.
    List<HostVO> hosts;
    if (DataStoreRole.Primary.equals(snapshotInfo.getDataStore().getRole())) {
        // Primary storage: filter hosts by eligibility for this store's storage access groups.
        hosts = resourceManager.getEligibleUpAndEnabledHostsInZoneForStorageConnection(snapshotInfo.getDataStore(), zoneId, hypervisorType);
    } else {
        hosts = _hostDao.listByDataCenterIdAndHypervisorType(zoneId, hypervisorType);
    }
    return getHost(hosts, computeClusterMustSupportResign);
}
/**
 * Selects a host in the volume's zone that can serve the copy/migration operation.
 *
 * For volumes backed by primary storage the candidate list is obtained from the
 * resource manager (which applies storage-access-group eligibility between the
 * data store and hosts); for any other store role, all hosts in the zone running
 * the requested hypervisor are candidates.
 *
 * @param volumeInfo volume whose data center and backing data store drive host selection
 * @param hypervisorType hypervisor the chosen host must run (must not be null)
 * @param computeClusterMustSupportResign whether the host's cluster must support UUID resignaturing
 * @return a suitable host, or null if none qualifies
 */
private HostVO getHost(VolumeInfo volumeInfo, HypervisorType hypervisorType, boolean computeClusterMustSupportResign) {
    Long dataCenterId = volumeInfo.getDataCenterId();
    Preconditions.checkArgument(dataCenterId != null, "Zone ID cannot be null.");
    Preconditions.checkArgument(hypervisorType != null, "Hypervisor type cannot be null.");

    boolean backedByPrimaryStore = DataStoreRole.Primary.equals(volumeInfo.getDataStore().getRole());
    List<HostVO> candidateHosts = backedByPrimaryStore
            ? resourceManager.getEligibleUpAndEnabledHostsInZoneForStorageConnection(volumeInfo.getDataStore(), dataCenterId, hypervisorType)
            : _hostDao.listByDataCenterIdAndHypervisorType(dataCenterId, hypervisorType);

    return getHost(candidateHosts, computeClusterMustSupportResign);
}
private HostVO getHost(List<HostVO> hosts, boolean computeClusterMustSupportResign) {
if (hosts == null) {
return null;
}

View File

@ -17,41 +17,45 @@
package org.apache.cloudstack.storage.allocator;
import com.cloud.api.query.dao.StoragePoolJoinDao;
import com.cloud.dc.dao.HostPodDao;
import com.cloud.exception.StorageUnavailableException;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.storage.ScopeType;
import com.cloud.storage.StoragePoolStatus;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.commons.lang3.StringUtils;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import org.apache.commons.collections.CollectionUtils;
import com.cloud.utils.Pair;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import com.cloud.capacity.Capacity;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.exception.StorageUnavailableException;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.storage.StorageUtil;
import com.cloud.storage.Volume;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.Account;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.Pair;
import com.cloud.utils.StringUtils;
import com.cloud.utils.component.AdapterBase;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachineProfile;
import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import org.apache.commons.collections.CollectionUtils;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
@ -77,11 +81,15 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
@Inject protected PrimaryDataStoreDao storagePoolDao;
@Inject protected VolumeDao volumeDao;
@Inject protected ConfigurationDao configDao;
@Inject protected ClusterDao clusterDao;
@Inject protected CapacityDao capacityDao;
@Inject private ClusterDao clusterDao;
@Inject private StorageManager storageMgr;
@Inject private StorageUtil storageUtil;
@Inject private StoragePoolDetailsDao storagePoolDetailsDao;
@Inject
protected HostDao hostDao;
@Inject
protected HostPodDao podDao;
/**
* make sure shuffled lists of Pools are really shuffled
@ -320,6 +328,16 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
return false;
}
if (plan.getHostId() != null) {
HostVO plannedHost = hostDao.findById(plan.getHostId());
if (!storageMgr.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(plannedHost, pool)) {
if (logger.isDebugEnabled()) {
logger.debug(String.format("StoragePool %s and host %s does not have matching storage access groups", pool, plannedHost));
}
return false;
}
}
Volume volume = null;
boolean isTempVolume = dskCh.getVolumeId() == Volume.DISK_OFFERING_SUITABILITY_CHECK_VOLUME_ID;
if (!isTempVolume) {

View File

@ -77,12 +77,12 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
logDisabledStoragePools(dcId, podId, clusterId, ScopeType.CLUSTER);
}
List<StoragePoolVO> pools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags(), true, VolumeApiServiceImpl.storageTagRuleExecutionTimeout.value());
List<StoragePoolVO> pools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, ScopeType.CLUSTER, dskCh.getTags(), true, VolumeApiServiceImpl.storageTagRuleExecutionTimeout.value());
pools.addAll(storagePoolJoinDao.findStoragePoolByScopeAndRuleTags(dcId, podId, clusterId, ScopeType.CLUSTER, List.of(dskCh.getTags())));
logger.debug(String.format("Found pools [%s] that match with tags [%s].", pools, Arrays.toString(dskCh.getTags())));
// add remaining pools in cluster, that did not match tags, to avoid set
List<StoragePoolVO> allPools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, null, false, 0);
List<StoragePoolVO> allPools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, ScopeType.CLUSTER, null, false, 0);
allPools.removeAll(pools);
for (StoragePoolVO pool : allPools) {
logger.trace(String.format("Adding pool [%s] to the 'avoid' set since it did not match any tags.", pool));
@ -100,7 +100,7 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
}
StoragePool storagePool = (StoragePool)dataStoreMgr.getPrimaryDataStore(pool.getId());
if (filter(avoid, storagePool, dskCh, plan)) {
logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh));
logger.debug(String.format("Found suitable cluster storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh));
suitablePools.add(storagePool);
} else {
logger.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", pool, dskCh));

View File

@ -96,7 +96,7 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
}
StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(storage.getId());
if (filter(avoid, storagePool, dskCh, plan)) {
logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", storagePool, dskCh));
logger.debug(String.format("Found suitable zone wide storage pool [%s] to allocate disk [%s] to it, adding to list.", storagePool, dskCh));
suitablePools.add(storagePool);
} else {
if (canAddStoragePoolToAvoidSet(storage)) {

View File

@ -159,7 +159,23 @@ public class PrimaryDataStoreHelper {
}
}
dataStoreVO = dataStoreDao.persist(dataStoreVO, details, storageTags, params.isTagARule(), displayDetails);
String storageAccessGroupsParams = params.getStorageAccessGroups();
List<String> storageAccessGroupsList = new ArrayList<String>();
if (storageAccessGroupsParams != null) {
String[] storageAccessGroups = storageAccessGroupsParams.split(",");
for (String storageAccessGroup : storageAccessGroups) {
storageAccessGroup = storageAccessGroup.trim();
if (storageAccessGroup.length() == 0) {
continue;
}
storageAccessGroupsList.add(storageAccessGroup);
}
}
dataStoreVO = dataStoreDao.persist(dataStoreVO, details, storageTags, params.isTagARule(), displayDetails, storageAccessGroupsList);
return dataStoreMgr.getDataStore(dataStoreVO.getId(), DataStoreRole.Primary);
}
@ -278,6 +294,7 @@ public class PrimaryDataStoreHelper {
this.dataStoreDao.update(poolVO.getId(), poolVO);
dataStoreDao.remove(poolVO.getId());
dataStoreDao.deletePoolTags(poolVO.getId());
dataStoreDao.deleteStoragePoolAccessGroups(poolVO.getId());
annotationDao.removeByEntityType(AnnotationService.EntityType.PRIMARY_STORAGE.name(), poolVO.getUuid());
deletePoolStats(poolVO.getId());
// Delete op_host_capacity entries

View File

@ -21,6 +21,7 @@ package org.apache.cloudstack.storage.datastore.provider;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.CleanupPersistentNetworkResourceCommand;
import com.cloud.agent.api.DeleteStoragePoolCommand;
import com.cloud.agent.api.ModifyStoragePoolAnswer;
import com.cloud.agent.api.ModifyStoragePoolCommand;
import com.cloud.agent.api.SetupPersistentNetworkCommand;
@ -45,6 +46,7 @@ import com.cloud.storage.StorageService;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
@ -207,10 +209,43 @@ public class DefaultHostListener implements HypervisorHostListener {
/**
 * Tears down the connection between a host and a primary storage pool.
 *
 * Looks up the host, instructs it (via DeleteStoragePoolCommand) to detach the
 * pool, and on success removes the storage_pool_host_ref bookkeeping row.
 *
 * @return true when the pool was detached on the host and the mapping cleaned up;
 *         false when the host no longer exists or the agent reported failure
 */
@Override
public boolean hostDisconnected(long hostId, long poolId) {
    HostVO host = hostDao.findById(hostId);
    if (host == null) {
        logger.error("Failed to disconnect host by HostListener as host was not found with id : " + hostId);
        return false;
    }
    DataStore dataStore = dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary);
    StoragePool storagePool = (StoragePool) dataStore;
    DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand(storagePool);
    // sendDeleteStoragePoolCommand throws on a null answer, so answer is non-null here.
    Answer answer = sendDeleteStoragePoolCommand(cmd, storagePool, host);
    if (!answer.getResult()) {
        logger.error("Failed to disconnect storage pool: " + storagePool + " and host: " + host);
        return false;
    }
    // Remove the host<->pool mapping only if one exists; absence is not an error.
    StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId);
    if (storagePoolHost != null) {
        storagePoolHostDao.deleteStoragePoolHostDetails(hostId, poolId);
    }
    logger.info("Connection removed between storage pool: " + storagePool + " and host: " + host);
    return true;
}
/**
 * Sends a DeleteStoragePoolCommand to the given host and returns the agent's answer.
 *
 * A null answer (agent unreachable / no response) is treated as fatal and raised as
 * a CloudRuntimeException. A negative answer is not fatal here: an operator alert is
 * emitted and the answer is returned so the caller can decide how to proceed.
 *
 * @param cmd the delete-storage-pool command to dispatch
 * @param storagePool pool being detached (used for alert/exception context)
 * @param host target host for the command
 * @return the non-null answer from the agent
 */
private Answer sendDeleteStoragePoolCommand(DeleteStoragePoolCommand cmd, StoragePool storagePool, HostVO host) {
    final Answer answer = agentMgr.easySend(host.getId(), cmd);
    if (answer == null) {
        throw new CloudRuntimeException(String.format("Unable to get an answer to the delete storage pool command for storage pool %s, sent to host %s", storagePool, host));
    }
    if (answer.getResult()) {
        return answer;
    }
    // Detach failed on the host: raise an alert but still hand the answer back.
    String msg = "Unable to detach storage pool " + storagePool + " from the host " + host;
    alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg);
    return answer;
}
@Override
public boolean hostAboutToBeRemoved(long hostId) {
// send host the cleanup persistent network resources

View File

@ -373,7 +373,7 @@ public class ManagementServerMock {
ConfigurationManager mgr = (ConfigurationManager)_configService;
_zone =
mgr.createZone(User.UID_SYSTEM, "default", "8.8.8.8", null, "8.8.4.4", null, null /* cidr */, "ROOT", Domain.ROOT_DOMAIN, NetworkType.Advanced, null,
null /* networkDomain */, false, false, null, null, false);
null /* networkDomain */, false, false, null, null, false, null);
}
}

View File

@ -66,7 +66,7 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator {
StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
if (filter(avoid, pol, dskCh, plan)) {
logger.trace(String.format("Found suitable local storage pool [%s], adding to list.", pool));
logger.trace(String.format("Found suitable storage pool [%s], adding to list.", pool));
suitablePools.add(pol);
}
}

View File

@ -26,6 +26,8 @@ import java.util.StringTokenizer;
import javax.inject.Inject;
import com.cloud.host.Host;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
@ -50,7 +52,6 @@ import com.cloud.agent.api.DeleteStoragePoolCommand;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.capacity.CapacityManager;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
@ -90,6 +91,8 @@ public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLif
DataCenterDao _zoneDao;
@Inject
CapacityManager _capacityMgr;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
@ -356,17 +359,13 @@ public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLif
StoragePoolVO dataStoreVO = _storagePoolDao.findById(store.getId());
PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store;
// Check if there is host up in this cluster
List<HostVO> allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId());
if (allHosts.isEmpty()) {
primaryDataStoreDao.expunge(primarystore.getId());
throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primarystore.getClusterId());
}
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpHostsInClusterForStorageConnection(primarystore);
logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId()));
if (!dataStoreVO.isManaged()) {
boolean success = false;
for (HostVO host : allHosts) {
success = createStoragePool(host, primarystore);
for (HostVO h : hostsToConnect) {
success = createStoragePool(h, primarystore);
if (success) {
break;
}
@ -375,7 +374,7 @@ public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLif
logger.debug("In createPool Adding the pool to each of the hosts");
List<HostVO> poolHosts = new ArrayList<HostVO>();
for (HostVO h : allHosts) {
for (HostVO h : hostsToConnect) {
try {
storageMgr.connectHostToSharedPool(h, primarystore.getId());
poolHosts.add(h);
@ -428,10 +427,11 @@ public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLif
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
List<HostVO> hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId());
logger.debug("In createPool. Attaching the pool to each of the hosts.");
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), hypervisorType);
logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));
List<HostVO> poolHosts = new ArrayList<HostVO>();
for (HostVO host : hosts) {
for (HostVO host : hostsToConnect) {
try {
storageMgr.connectHostToSharedPool(host, dataStore.getId());
poolHosts.add(host);

View File

@ -25,7 +25,6 @@ import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
@ -38,8 +37,10 @@ import com.cloud.storage.StoragePoolAutomation;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotDetailsDao;
import com.cloud.storage.dao.SnapshotDetailsVO;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
@ -84,6 +85,8 @@ public class DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCyc
private StoragePoolHostDao _storagePoolHostDao;
@Inject
private StoragePoolAutomation storagePoolAutomation;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
@ -97,6 +100,7 @@ public class DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCyc
Long capacityBytes = (Long) dsInfos.get("capacityBytes");
Long capacityIops = (Long) dsInfos.get("capacityIops");
String tags = (String) dsInfos.get("tags");
String storageAccessGroups = (String)dsInfos.get(ApiConstants.STORAGE_ACCESS_GROUPS);
boolean isTagARule = (Boolean)dsInfos.get("isTagARule");
@SuppressWarnings("unchecked")
Map<String, String> details = (Map<String, String>) dsInfos.get("details");
@ -179,6 +183,7 @@ public class DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCyc
parameters.setCapacityIops(capacityIops);
parameters.setHypervisorType(HypervisorType.Any);
parameters.setTags(tags);
parameters.setStorageAccessGroups(storageAccessGroups);
parameters.setIsTagARule(isTagARule);
parameters.setDetails(details);
@ -243,22 +248,13 @@ public class DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCyc
@Override
public boolean attachCluster(DataStore datastore, ClusterScope scope) {
PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) datastore;
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpHostsInClusterForStorageConnection(primaryDataStoreInfo);
// check if there is at least one host up in this cluster
List<HostVO> allHosts = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing,
primaryDataStoreInfo.getClusterId(), primaryDataStoreInfo.getPodId(),
primaryDataStoreInfo.getDataCenterId());
if (allHosts.isEmpty()) {
storagePoolDao.expunge(primaryDataStoreInfo.getId());
throw new CloudRuntimeException(
"No host up to associate a storage pool with in cluster " + primaryDataStoreInfo.getClusterId());
}
logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primaryDataStoreInfo.getClusterId()));
List<HostVO> poolHosts = new ArrayList<HostVO>();
for (HostVO host : allHosts) {
for (HostVO host : hostsToConnect) {
try {
_storageMgr.connectHostToSharedPool(host, primaryDataStoreInfo.getId());
@ -288,19 +284,15 @@ public class DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCyc
public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
dataStoreHelper.attachZone(dataStore);
List<HostVO> xenServerHosts = _resourceMgr
.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.XenServer, scope.getScopeId());
List<HostVO> vmWareServerHosts = _resourceMgr
.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.VMware, scope.getScopeId());
List<HostVO> kvmHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM,
scope.getScopeId());
List<HostVO> hosts = new ArrayList<HostVO>();
List<HostVO> hostsToConnect = new ArrayList<>();
HypervisorType[] hypervisorTypes = {HypervisorType.XenServer, HypervisorType.VMware, HypervisorType.KVM};
hosts.addAll(xenServerHosts);
hosts.addAll(vmWareServerHosts);
hosts.addAll(kvmHosts);
for (HypervisorType type : hypervisorTypes) {
hostsToConnect.addAll(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), type));
}
for (HostVO host : hosts) {
logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));
for (HostVO host : hostsToConnect) {
try {
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {

View File

@ -18,7 +18,6 @@
*/
package org.apache.cloudstack.storage.datastore.lifecycle;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.UUID;
@ -26,6 +25,7 @@ import java.util.stream.Collectors;
import javax.inject.Inject;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
@ -51,7 +51,6 @@ import com.cloud.dc.dao.HostPodDao;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.StorageConflictException;
import com.cloud.exception.StorageUnavailableException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
@ -63,6 +62,7 @@ import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolAutomation;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.StoragePoolWorkDao;
import com.cloud.storage.dao.VolumeDao;
@ -129,6 +129,8 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor
StoragePoolAutomation storagePoolAutmation;
@Inject
protected HostDao _hostDao;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
@SuppressWarnings("unchecked")
@Override
@ -146,9 +148,11 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor
PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
String tags = (String)dsInfos.get("tags");
String storageAccessGroups = (String)dsInfos.get(ApiConstants.STORAGE_ACCESS_GROUPS);
Map<String, String> details = (Map<String, String>)dsInfos.get("details");
parameters.setTags(tags);
parameters.setStorageAccessGroups(storageAccessGroups);
parameters.setIsTagARule((Boolean)dsInfos.get("isTagARule"));
parameters.setDetails(details);
@ -386,17 +390,15 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor
}
private Pair<List<Long>, Boolean> prepareOcfs2NodesIfNeeded(PrimaryDataStoreInfo primaryStore) {
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpHostsInClusterForStorageConnection(primaryStore);
logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primaryStore.getClusterId()));
List<Long> hostIds = hostsToConnect.stream().map(HostVO::getId).collect(Collectors.toList());
if (!StoragePoolType.OCFS2.equals(primaryStore.getPoolType())) {
return new Pair<>(_hostDao.listIdsForUpRouting(primaryStore.getDataCenterId(),
primaryStore.getPodId(), primaryStore.getClusterId()), true);
return new Pair<>(hostIds, true);
}
List<HostVO> allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, primaryStore.getClusterId(),
primaryStore.getPodId(), primaryStore.getDataCenterId());
if (allHosts.isEmpty()) {
return new Pair<>(Collections.emptyList(), true);
}
List<Long> hostIds = allHosts.stream().map(HostVO::getId).collect(Collectors.toList());
if (!_ocfs2Mgr.prepareNodes(allHosts, primaryStore)) {
if (!_ocfs2Mgr.prepareNodes(hostsToConnect, primaryStore)) {
return new Pair<>(hostIds, false);
}
return new Pair<>(hostIds, true);
@ -432,8 +434,9 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor
@Override
public boolean attachZone(DataStore store, ZoneScope scope, HypervisorType hypervisorType) {
List<Long> hostIds = _hostDao.listIdsForUpEnabledByZoneAndHypervisor(scope.getScopeId(), hypervisorType);
logger.debug("In createPool. Attaching the pool to each of the hosts.");
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(store, scope.getScopeId(), hypervisorType);
logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));
List<Long> hostIds = hostsToConnect.stream().map(HostVO::getId).collect(Collectors.toList());
storageMgr.connectHostsToPool(store, hostIds, scope, true, true);
dataStoreHelper.attachZone(store, hypervisorType);
return true;

View File

@ -25,7 +25,7 @@ import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.List;
import java.util.Arrays;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
@ -143,9 +143,15 @@ public class CloudStackPrimaryDataStoreLifeCycleImplTest extends TestCase {
storageMgr.registerHostListener("default", hostListener);
HostVO host1 = Mockito.mock(HostVO.class);
HostVO host2 = Mockito.mock(HostVO.class);
Mockito.when(host1.getId()).thenReturn(1L);
Mockito.when(host2.getId()).thenReturn(2L);
when(_resourceMgr.getEligibleUpHostsInClusterForStorageConnection(store))
.thenReturn(Arrays.asList(host1, host2));
when(hostDao.listIdsForUpRouting(anyLong(), anyLong(), anyLong()))
.thenReturn(List.of(1L, 2L));
when(hostDao.findById(anyLong())).thenReturn(mock(HostVO.class));
when(agentMgr.easySend(anyLong(), Mockito.any(ModifyStoragePoolCommand.class))).thenReturn(answer);
when(answer.getResult()).thenReturn(true);

View File

@ -39,6 +39,7 @@ import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolAutomation;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
@ -68,6 +69,8 @@ public class LinstorPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLi
@Inject
private CapacityManager _capacityMgr;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
@Inject
AgentManager _agentMgr;
public LinstorPrimaryDataStoreLifeCycleImpl()
@ -204,20 +207,12 @@ public class LinstorPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLi
throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
}
// check if there is at least one host up in this cluster
List<HostVO> allHosts = resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing,
primaryDataStoreInfo.getClusterId(), primaryDataStoreInfo.getPodId(),
primaryDataStoreInfo.getDataCenterId());
if (allHosts.isEmpty()) {
_primaryDataStoreDao.expunge(primaryDataStoreInfo.getId());
throw new CloudRuntimeException(
"No host up to associate a storage pool with in cluster " + primaryDataStoreInfo.getClusterId());
}
PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) dataStore;
List<HostVO> hostsToConnect = resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primarystore);
logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId()));
List<HostVO> poolHosts = new ArrayList<>();
for (HostVO host : allHosts) {
for (HostVO host : hostsToConnect) {
try {
createStoragePool(host, primaryDataStoreInfo);
@ -249,10 +244,11 @@ public class LinstorPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLi
throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
}
List<HostVO> hosts = resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType,
scope.getScopeId());
List<HostVO> hostsToConnect = resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), hypervisorType);
for (HostVO host : hosts) {
logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));
for (HostVO host : hostsToConnect) {
try {
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {

View File

@ -24,6 +24,7 @@ import java.util.Map;
import javax.inject.Inject;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
@ -58,6 +59,8 @@ public class NexentaPrimaryDataStoreLifeCycle
StorageManager _storageMgr;
@Inject
private StoragePoolAutomation storagePoolAutomation;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
@ -130,16 +133,14 @@ public class NexentaPrimaryDataStoreLifeCycle
public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) {
dataStoreHelper.attachZone(dataStore);
List<HostVO> xenServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.XenServer, scope.getScopeId());
List<HostVO> vmWareServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.VMware, scope.getScopeId());
List<HostVO> kvmHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.KVM, scope.getScopeId());
List<HostVO> hosts = new ArrayList<HostVO>();
List<HostVO> hostsToConnect = new ArrayList<>();
Hypervisor.HypervisorType[] hypervisorTypes = {Hypervisor.HypervisorType.XenServer, Hypervisor.HypervisorType.VMware, Hypervisor.HypervisorType.KVM};
hosts.addAll(xenServerHosts);
hosts.addAll(vmWareServerHosts);
hosts.addAll(kvmHosts);
for (HostVO host : hosts) {
for (Hypervisor.HypervisorType type : hypervisorTypes) {
hostsToConnect.addAll(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), type));
}
logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));
for (HostVO host : hostsToConnect) {
try {
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {

View File

@ -18,6 +18,39 @@
*/
package org.apache.cloudstack.storage.datastore.lifecycle;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLDecoder;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.stream.Collectors;
import javax.inject.Inject;
import com.cloud.host.HostVO;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import org.apache.cloudstack.api.ApiConstants;
import com.cloud.utils.StringUtils;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics;
import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient;
import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.apache.commons.collections.CollectionUtils;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.capacity.CapacityManager;
@ -34,41 +67,15 @@ import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolAutomation;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.template.TemplateManager;
import com.cloud.utils.StringUtils;
import com.cloud.utils.UriUtils;
import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics;
import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient;
import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.manager.ScaleIOSDCManager;
import org.apache.cloudstack.storage.datastore.manager.ScaleIOSDCManagerImpl;
import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.apache.commons.collections.CollectionUtils;
import javax.inject.Inject;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLDecoder;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
@Inject
@ -98,6 +105,8 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy
@Inject
private AgentManager agentMgr;
private ScaleIOSDCManager sdcManager;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
public ScaleIOPrimaryDataStoreLifeCycle() {
sdcManager = new ScaleIOSDCManagerImpl();
@ -141,6 +150,7 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy
Long capacityBytes = (Long)dsInfos.get("capacityBytes");
Long capacityIops = (Long)dsInfos.get("capacityIops");
String tags = (String)dsInfos.get("tags");
String storageAccessGroups = (String)dsInfos.get(ApiConstants.STORAGE_ACCESS_GROUPS);
Boolean isTagARule = (Boolean) dsInfos.get("isTagARule");
Map<String, String> details = (Map<String, String>) dsInfos.get("details");
@ -223,6 +233,7 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy
parameters.setHypervisorType(Hypervisor.HypervisorType.KVM);
parameters.setUuid(UUID.randomUUID().toString());
parameters.setTags(tags);
parameters.setStorageAccessGroups(storageAccessGroups);
parameters.setIsTagARule(isTagARule);
StoragePoolStatistics poolStatistics = scaleIOPool.getStatistics();
@ -260,14 +271,10 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy
}
PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) dataStore;
List<Long> hostIds = hostDao.listIdsForUpRouting(primaryDataStoreInfo.getDataCenterId(),
primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getClusterId());
if (hostIds.isEmpty()) {
primaryDataStoreDao.expunge(primaryDataStoreInfo.getId());
throw new CloudRuntimeException("No hosts are Up to associate a storage pool with in cluster: " + cluster);
}
List<HostVO> hostsToConnect = resourceManager.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryDataStoreInfo);
logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, cluster));
List<Long> hostIds = hostsToConnect.stream().map(HostVO::getId).collect(Collectors.toList());
logger.debug("Attaching the pool to each of the hosts in the {}", cluster);
storageMgr.connectHostsToPool(dataStore, hostIds, scope, false, false);
dataStoreHelper.attachCluster(dataStore);
@ -287,7 +294,10 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy
logger.debug("Attaching the pool to each of the hosts in the {}",
dataCenterDao.findById(scope.getScopeId()));
List<Long> hostIds = hostDao.listIdsForUpEnabledByZoneAndHypervisor(scope.getScopeId(), hypervisorType);
List<HostVO> hostsToConnect = resourceManager.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), hypervisorType);
logger.debug(String.format("Attaching the pool to each of the hosts %s in the zone: %s", hostsToConnect, scope.getScopeId()));
List<Long> hostIds = hostsToConnect.stream().map(HostVO::getId).collect(Collectors.toList());
storageMgr.connectHostsToPool(dataStore, hostIds, scope, false, false);
dataStoreHelper.attachZone(dataStore);

View File

@ -30,8 +30,11 @@ import static org.mockito.Mockito.mockStatic;
import static org.mockito.Mockito.when;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import com.cloud.host.HostVO;
import com.cloud.resource.ResourceManager;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
@ -106,6 +109,9 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest {
@Mock
private HypervisorHostListener hostListener;
@Mock
private ResourceManager resourceManager;
@InjectMocks
private ScaleIOPrimaryDataStoreLifeCycle scaleIOPrimaryDataStoreLifeCycleTest;
private AutoCloseable closeable;
@ -137,8 +143,14 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest {
final ZoneScope scope = new ZoneScope(1L);
when(hostDao.listIdsForUpEnabledByZoneAndHypervisor(scope.getScopeId(), Hypervisor.HypervisorType.KVM))
.thenReturn(List.of(1L, 2L));
HostVO host1 = Mockito.mock(HostVO.class);
HostVO host2 = Mockito.mock(HostVO.class);
Mockito.when(host1.getId()).thenReturn(1L);
Mockito.when(host2.getId()).thenReturn(2L);
when(resourceManager.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM))
.thenReturn(Arrays.asList(host1, host2));
when(dataStoreMgr.getDataStore(anyLong(), eq(DataStoreRole.Primary))).thenReturn(store);
when(store.isShared()).thenReturn(true);

View File

@ -25,6 +25,7 @@ import java.util.UUID;
import javax.inject.Inject;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
@ -43,7 +44,6 @@ import com.cloud.capacity.CapacityManager;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.resource.ResourceManager;
@ -74,6 +74,8 @@ public class SolidFirePrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLife
@Inject private StoragePoolAutomation _storagePoolAutomation;
@Inject private StoragePoolDetailsDao _storagePoolDetailsDao;
@Inject private VMTemplatePoolDao _tmpltPoolDao;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
// invoked to add primary storage that is based on the SolidFire plug-in
@Override
@ -235,11 +237,10 @@ public class SolidFirePrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLife
@Override
public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo)dataStore;
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primarystore);
List<HostVO> hosts =
_resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId());
for (HostVO host : hosts) {
logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId()));
for (HostVO host : hostsToConnect) {
try {
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {
@ -254,16 +255,15 @@ public class SolidFirePrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLife
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
List<HostVO> xenServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.XenServer, scope.getScopeId());
List<HostVO> vmWareServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.VMware, scope.getScopeId());
List<HostVO> kvmHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId());
List<HostVO> hosts = new ArrayList<>();
List<HostVO> hostsToConnect = new ArrayList<>();
HypervisorType[] hypervisorTypes = {HypervisorType.XenServer, HypervisorType.VMware, HypervisorType.KVM};
hosts.addAll(xenServerHosts);
hosts.addAll(vmWareServerHosts);
hosts.addAll(kvmHosts);
for (HypervisorType type : hypervisorTypes) {
hostsToConnect.addAll(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), type));
}
for (HostVO host : hosts) {
logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));
for (HostVO host : hostsToConnect) {
try {
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {

View File

@ -26,6 +26,8 @@ import java.util.Map;
import javax.inject.Inject;
import com.cloud.host.Host;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
@ -50,7 +52,6 @@ import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
@ -85,6 +86,8 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle extends BasePrimaryDataSto
@Inject private StoragePoolDetailsDao storagePoolDetailsDao;
@Inject private StoragePoolHostDao storagePoolHostDao;
@Inject private TemplateManager tmpltMgr;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
// invoked to add primary storage that is based on the SolidFire plug-in
@Override
@ -382,19 +385,12 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle extends BasePrimaryDataSto
public boolean attachCluster(DataStore store, ClusterScope scope) {
PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo)store;
// check if there is at least one host up in this cluster
List<HostVO> allHosts = resourceMgr.listAllUpHosts(Host.Type.Routing, primaryDataStoreInfo.getClusterId(),
primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getDataCenterId());
if (allHosts.isEmpty()) {
primaryDataStoreDao.expunge(primaryDataStoreInfo.getId());
throw new CloudRuntimeException(String.format("No host up to associate a storage pool with in cluster %s", clusterDao.findById(primaryDataStoreInfo.getClusterId())));
}
List<HostVO> hostsToConnect = resourceMgr.getEligibleUpHostsInClusterForStorageConnection(primaryDataStoreInfo);
boolean success = false;
logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, clusterDao.findById(primaryDataStoreInfo.getClusterId())));
for (HostVO host : allHosts) {
for (HostVO host : hostsToConnect) {
success = createStoragePool(host, primaryDataStoreInfo);
if (success) {
@ -408,7 +404,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle extends BasePrimaryDataSto
List<HostVO> poolHosts = new ArrayList<>();
for (HostVO host : allHosts) {
for (HostVO host : hostsToConnect) {
try {
storageMgr.connectHostToSharedPool(host, primaryDataStoreInfo.getId());

View File

@ -24,6 +24,7 @@ import java.util.UUID;
import javax.inject.Inject;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
@ -80,6 +81,8 @@ public class StorPoolPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeC
private VMTemplateDetailsDao vmTemplateDetailsDao;
@Inject
private StoragePoolDetailsDao storagePoolDetailsDao;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
@ -208,8 +211,11 @@ public class StorPoolPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeC
if (hypervisorType != HypervisorType.KVM) {
throw new UnsupportedOperationException("Only KVM hypervisors supported!");
}
List<HostVO> kvmHosts = resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId());
for (HostVO host : kvmHosts) {
List<HostVO> kvmHostsToConnect = resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), HypervisorType.KVM);
logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", kvmHostsToConnect));
for (HostVO host : kvmHostsToConnect) {
try {
storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {

View File

@ -2007,6 +2007,10 @@ public class ApiDBUtils {
return s_projectInvitationJoinDao.newProjectInvitationView(proj);
}
// Delegates to the HostJoinDao singleton to build a host response populated
// with only the minimal identifying fields (used by list APIs in brief mode).
public static HostResponse newMinimalHostResponse(HostJoinVO vr) {
return s_hostJoinDao.newMinimalHostResponse(vr);
}
// Delegates to the HostJoinDao singleton to build a full host response;
// {@code details} selects which optional HostDetails sections are included.
public static HostResponse newHostResponse(HostJoinVO vr, EnumSet<HostDetails> details) {
return s_hostJoinDao.newHostResponse(vr, details);
}
@ -2035,6 +2039,10 @@ public class ApiDBUtils {
return s_poolJoinDao.newStoragePoolResponse(vr, customStats);
}
// Delegates to the StoragePoolJoinDao singleton to build a storage pool
// response carrying only the minimal identifying fields (brief list output).
public static StoragePoolResponse newMinimalStoragePoolResponse(StoragePoolJoinVO vr) {
return s_poolJoinDao.newMinimalStoragePoolResponse(vr);
}
// Delegates to the storage pool tag DAO singleton to convert a tag record
// into its API response form.
public static StorageTagResponse newStorageTagResponse(StoragePoolTagVO vr) {
return s_tagDao.newStorageTagResponse(vr);
}
@ -2164,6 +2172,10 @@ public class ApiDBUtils {
return s_dcJoinDao.newDataCenterResponse(view, dc, showCapacities, showResourceImage);
}
// Delegates to the DataCenterJoinDao singleton to build a zone response with
// only the minimal fields; {@code view} controls the admin/user response view.
public static ZoneResponse newMinimalDataCenterResponse(ResponseView view, DataCenterJoinVO dc) {
return s_dcJoinDao.newMinimalDataCenterResponse(view, dc);
}
// Delegates to the DataCenterJoinDao singleton to wrap a DataCenter entity
// in its join-view VO form for response generation.
public static DataCenterJoinVO newDataCenterView(DataCenter dc) {
return s_dcJoinDao.newDataCenterView(dc);
}

View File

@ -1301,6 +1301,15 @@ public class ApiResponseHelper implements ResponseGenerator {
return response;
}
/**
 * Builds a lightweight {@link PodResponse} that carries only the pod's
 * uuid and name (plus the response object name), for brief list output.
 *
 * @param pod the pod to represent; only its uuid and name are read
 * @return a minimally populated pod response
 */
@Override
public PodResponse createMinimalPodResponse(Pod pod) {
    final PodResponse response = new PodResponse();
    // Object name first, then the identifying fields; the setters are independent.
    response.setObjectName("pod");
    response.setId(pod.getUuid());
    response.setName(pod.getName());
    return response;
}
@Override
public PodResponse createPodResponse(Pod pod, Boolean showCapacities) {
String[] ipRange = new String[2];
@ -1344,7 +1353,7 @@ public class ApiResponseHelper implements ResponseGenerator {
PodResponse podResponse = new PodResponse();
podResponse.setId(pod.getUuid());
podResponse.setName(pod.getName());
DataCenter zone = ApiDBUtils.findZoneById(pod.getDataCenterId());
DataCenterVO zone = ApiDBUtils.findZoneById(pod.getDataCenterId());
if (zone != null) {
podResponse.setZoneId(zone.getUuid());
podResponse.setZoneName(zone.getName());
@ -1357,6 +1366,8 @@ public class ApiResponseHelper implements ResponseGenerator {
podResponse.setVlanId(vlanIds);
podResponse.setGateway(pod.getGateway());
podResponse.setAllocationState(pod.getAllocationState().toString());
podResponse.setStorageAccessGroups(pod.getStorageAccessGroups());
podResponse.setZoneStorageAccessGroups(zone.getStorageAccessGroups());
if (showCapacities != null && showCapacities) {
List<SummedCapacity> capacities = ApiDBUtils.getCapacityByClusterPodZone(null, pod.getId(), null);
Set<CapacityResponse> capacityResponses = new HashSet<CapacityResponse>();
@ -1506,6 +1517,15 @@ public class ApiResponseHelper implements ResponseGenerator {
return listPools.get(0);
}
/**
 * Builds a lightweight {@link ClusterResponse} that carries only the
 * cluster's uuid and name, for callers that do not need the full view.
 *
 * @param cluster the cluster to represent; only its uuid and name are read
 * @return a minimally populated cluster response
 */
@Override
public ClusterResponse createMinimalClusterResponse(Cluster cluster) {
    final ClusterResponse response = new ClusterResponse();
    // Setters are independent of one another; order is immaterial.
    response.setObjectName("cluster");
    response.setId(cluster.getUuid());
    response.setName(cluster.getName());
    return response;
}
@Override
public ClusterResponse createClusterResponse(Cluster cluster, Boolean showCapacities) {
ClusterResponse clusterResponse = new ClusterResponse();
@ -1516,7 +1536,7 @@ public class ApiResponseHelper implements ResponseGenerator {
clusterResponse.setPodId(pod.getUuid());
clusterResponse.setPodName(pod.getName());
}
DataCenter dc = ApiDBUtils.findZoneById(cluster.getDataCenterId());
DataCenterVO dc = ApiDBUtils.findZoneById(cluster.getDataCenterId());
if (dc != null) {
clusterResponse.setZoneId(dc.getUuid());
clusterResponse.setZoneName(dc.getName());
@ -1534,6 +1554,10 @@ public class ApiResponseHelper implements ResponseGenerator {
clusterResponse.setArch(cluster.getArch().getType());
}
clusterResponse.setStorageAccessGroups(cluster.getStorageAccessGroups());
clusterResponse.setPodStorageAccessGroups(pod.getStorageAccessGroups());
clusterResponse.setZoneStorageAccessGroups(dc.getStorageAccessGroups());
if (showCapacities != null && showCapacities) {
List<SummedCapacity> capacities = ApiDBUtils.getCapacityByClusterPodZone(null, null, cluster.getId());
Set<CapacityResponse> capacityResponses = new HashSet<CapacityResponse>();

View File

@ -36,6 +36,12 @@ import java.util.stream.Stream;
import javax.inject.Inject;
import com.cloud.dc.Pod;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.HostPodDao;
import com.cloud.org.Cluster;
import com.cloud.server.ManagementService;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import org.apache.cloudstack.acl.ControlledEntity;
import org.apache.cloudstack.acl.ControlledEntity.ACLType;
import org.apache.cloudstack.acl.SecurityChecker;
@ -52,6 +58,7 @@ import org.apache.cloudstack.api.ResourceDetail;
import org.apache.cloudstack.api.ResponseGenerator;
import org.apache.cloudstack.api.ResponseObject.ResponseView;
import org.apache.cloudstack.api.command.admin.account.ListAccountsCmdByAdmin;
import org.apache.cloudstack.api.command.admin.cluster.ListClustersCmd;
import org.apache.cloudstack.api.command.admin.domain.ListDomainsCmd;
import org.apache.cloudstack.api.command.admin.domain.ListDomainsCmdByAdmin;
import org.apache.cloudstack.api.command.admin.host.ListHostTagsCmd;
@ -59,6 +66,7 @@ import org.apache.cloudstack.api.command.admin.host.ListHostsCmd;
import org.apache.cloudstack.api.command.admin.internallb.ListInternalLBVMsCmd;
import org.apache.cloudstack.api.command.admin.iso.ListIsosCmdByAdmin;
import org.apache.cloudstack.api.command.admin.management.ListMgmtsCmd;
import org.apache.cloudstack.api.command.admin.pod.ListPodsByCmd;
import org.apache.cloudstack.api.command.admin.resource.icon.ListResourceIconCmd;
import org.apache.cloudstack.api.command.admin.router.GetRouterHealthCheckResultsCmd;
import org.apache.cloudstack.api.command.admin.router.ListRoutersCmd;
@ -66,6 +74,7 @@ import org.apache.cloudstack.api.command.admin.snapshot.ListSnapshotsCmdByAdmin;
import org.apache.cloudstack.api.command.admin.storage.ListImageStoresCmd;
import org.apache.cloudstack.api.command.admin.storage.ListObjectStoragePoolsCmd;
import org.apache.cloudstack.api.command.admin.storage.ListSecondaryStagingStoresCmd;
import org.apache.cloudstack.api.command.admin.storage.ListStorageAccessGroupsCmd;
import org.apache.cloudstack.api.command.admin.storage.ListStoragePoolsCmd;
import org.apache.cloudstack.api.command.admin.storage.ListStorageTagsCmd;
import org.apache.cloudstack.api.command.admin.storage.heuristics.ListSecondaryStorageSelectorsCmd;
@ -100,6 +109,7 @@ import org.apache.cloudstack.api.command.user.zone.ListZonesCmd;
import org.apache.cloudstack.api.response.AccountResponse;
import org.apache.cloudstack.api.response.AsyncJobResponse;
import org.apache.cloudstack.api.response.BucketResponse;
import org.apache.cloudstack.api.response.ClusterResponse;
import org.apache.cloudstack.api.response.DetailOptionsResponse;
import org.apache.cloudstack.api.response.DiskOfferingResponse;
import org.apache.cloudstack.api.response.DomainResponse;
@ -114,6 +124,7 @@ import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.ManagementServerResponse;
import org.apache.cloudstack.api.response.ObjectStoreResponse;
import org.apache.cloudstack.api.response.PeerManagementServerNodeResponse;
import org.apache.cloudstack.api.response.PodResponse;
import org.apache.cloudstack.api.response.ProjectAccountResponse;
import org.apache.cloudstack.api.response.ProjectInvitationResponse;
import org.apache.cloudstack.api.response.ProjectResponse;
@ -125,6 +136,7 @@ import org.apache.cloudstack.api.response.SecondaryStorageHeuristicsResponse;
import org.apache.cloudstack.api.response.SecurityGroupResponse;
import org.apache.cloudstack.api.response.ServiceOfferingResponse;
import org.apache.cloudstack.api.response.SnapshotResponse;
import org.apache.cloudstack.api.response.StorageAccessGroupResponse;
import org.apache.cloudstack.api.response.StoragePoolResponse;
import org.apache.cloudstack.api.response.StorageTagResponse;
import org.apache.cloudstack.api.response.TemplateResponse;
@ -618,6 +630,18 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
@Inject
private AsyncJobManager jobManager;
@Inject
private StoragePoolAndAccessGroupMapDao storagePoolAndAccessGroupMapDao;
@Inject
public ManagementService managementService;
@Inject
DataCenterDao dataCenterDao;
@Inject
HostPodDao podDao;
private SearchCriteria<ServiceOfferingJoinVO> getMinimumCpuServiceOfferingJoinSearchCriteria(int cpu) {
SearchCriteria<ServiceOfferingJoinVO> sc = _srvOfferingJoinDao.createSearchCriteria();
SearchCriteria<ServiceOfferingJoinVO> sc1 = _srvOfferingJoinDao.createSearchCriteria();
@ -2342,6 +2366,16 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
return response;
}
/**
 * Searches for hosts matching the command's filters and wraps the matches in
 * minimal (brief) host responses instead of fully populated ones.
 *
 * @param cmd the list-hosts command carrying the search criteria
 * @return a paged list response of minimal host responses, with the total count
 */
private ListResponse<HostResponse> searchForServersWithMinimalResponse(ListHostsCmd cmd) {
    logger.debug(">>>Searching for hosts>>>");
    final Pair<List<HostJoinVO>, Integer> searchResult = searchForServersInternal(cmd);
    final ListResponse<HostResponse> listResponse = new ListResponse<HostResponse>();
    logger.debug(">>>Generating Response>>>");
    final List<HostJoinVO> hostJoins = searchResult.first();
    // Zero-length array form: the runtime allocates an array of the right size.
    final List<HostResponse> minimalResponses =
            ViewResponseHelper.createMinimalHostResponse(hostJoins.toArray(new HostJoinVO[0]));
    listResponse.setResponses(minimalResponses, searchResult.second());
    return listResponse;
}
public Pair<List<HostJoinVO>, Integer> searchForServersInternal(ListHostsCmd cmd) {
Pair<List<Long>, Integer> serverIdPage = searchForServerIdsAndCount(cmd);
@ -2373,6 +2407,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
Hypervisor.HypervisorType hypervisorType = cmd.getHypervisor();
Long msId = cmd.getManagementServerId();
final CPU.CPUArch arch = cmd.getArch();
String storageAccessGroup = cmd.getStorageAccessGroup();
Filter searchFilter = new Filter(HostVO.class, "id", Boolean.TRUE, startIndex, pageSize);
@ -2390,6 +2425,13 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
hostSearchBuilder.and("hypervisor_type", hostSearchBuilder.entity().getHypervisorType(), SearchCriteria.Op.EQ);
hostSearchBuilder.and("mgmt_server_id", hostSearchBuilder.entity().getManagementServerId(), SearchCriteria.Op.EQ);
hostSearchBuilder.and("arch", hostSearchBuilder.entity().getArch(), SearchCriteria.Op.EQ);
if (storageAccessGroup != null) {
hostSearchBuilder.and().op("storageAccessGroupExact", hostSearchBuilder.entity().getStorageAccessGroups(), Op.EQ);
hostSearchBuilder.or("storageAccessGroupPrefix", hostSearchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
hostSearchBuilder.or("storageAccessGroupSuffix", hostSearchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
hostSearchBuilder.or("storageAccessGroupMiddle", hostSearchBuilder.entity().getStorageAccessGroups(), Op.LIKE);
hostSearchBuilder.cp();
}
if (keyword != null) {
hostSearchBuilder.and().op("keywordName", hostSearchBuilder.entity().getName(), SearchCriteria.Op.LIKE);
@ -2481,6 +2523,13 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
sc.setParameters("arch", arch);
}
if (storageAccessGroup != null) {
sc.setParameters("storageAccessGroupExact", storageAccessGroup);
sc.setParameters("storageAccessGroupPrefix", storageAccessGroup + ",%");
sc.setParameters("storageAccessGroupSuffix", "%," + storageAccessGroup);
sc.setParameters("storageAccessGroupMiddle", "%," + storageAccessGroup + ",%");
}
Pair<List<HostVO>, Integer> uniqueHostPair = hostDao.searchAndCount(sc, searchFilter);
Integer count = uniqueHostPair.second();
List<Long> hostIds = uniqueHostPair.first().stream().map(HostVO::getId).collect(Collectors.toList());
@ -3204,7 +3253,14 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
poolResponse.setCaps(caps);
}
/**
 * Searches for storage pools matching the given command and returns a
 * minimal response (id, name, object name only) per pool.
 *
 * @param cmd the list-storage-pools command carrying the search criteria
 * @return a ListResponse holding one minimal StoragePoolResponse per pool
 */
private ListResponse<StoragePoolResponse> searchForStoragePoolsWithMinimalResponse(ListStoragePoolsCmd cmd) {
    Pair<List<StoragePoolJoinVO>, Integer> result = searchForStoragePoolsInternal(cmd);
    StoragePoolJoinVO[] pools = result.first().toArray(new StoragePoolJoinVO[result.first().size()]);
    List<StoragePoolResponse> poolResponses = ViewResponseHelper.createMinimalStoragePoolResponse(pools);
    ListResponse<StoragePoolResponse> response = new ListResponse<>();
    response.setResponses(poolResponses, result.second());
    return response;
}
private Pair<List<StoragePoolJoinVO>, Integer> searchForStoragePoolsInternal(ListStoragePoolsCmd cmd) {
ScopeType scopeType = ScopeType.validateAndGetScopeType(cmd.getScope());
@ -3216,16 +3272,18 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
String path = cmd.getPath();
Long pod = cmd.getPodId();
Long cluster = cmd.getClusterId();
Long host = cmd.getHostId();
String address = cmd.getIpAddress();
String keyword = cmd.getKeyword();
Long startIndex = cmd.getStartIndex();
Long pageSize = cmd.getPageSizeVal();
String storageAccessGroup = cmd.getStorageAccessGroup();
Filter searchFilter = new Filter(StoragePoolVO.class, "id", Boolean.TRUE, startIndex, pageSize);
Pair<List<Long>, Integer> uniquePoolPair = storagePoolDao.searchForIdsAndCount(id, name, zoneId, path, pod,
cluster, address, scopeType, status, keyword, searchFilter);
cluster, host, address, scopeType, status, keyword, storageAccessGroup, searchFilter);
List<StoragePoolJoinVO> storagePools = _poolJoinDao.searchByIds(uniquePoolPair.first().toArray(new Long[0]));
@ -3243,6 +3301,99 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
return response;
}
@Override
public ListResponse<StorageAccessGroupResponse> searchForStorageAccessGroups(ListStorageAccessGroupsCmd cmd) {
    final String name = cmd.getName();
    final String keyword = cmd.getKeyword();

    // Storage access groups are stored as comma-separated strings on pools,
    // hosts, clusters, pods and zones; collect distinct names from every scope.
    Set<String> groups = new HashSet<>();
    addStorageAccessGroups(groups, storagePoolAndAccessGroupMapDao.listDistinctStorageAccessGroups(name, keyword));
    addStorageAccessGroups(groups, hostDao.listDistinctStorageAccessGroups(name, keyword));
    addStorageAccessGroups(groups, clusterDao.listDistinctStorageAccessGroups(name, keyword));
    addStorageAccessGroups(groups, podDao.listDistinctStorageAccessGroups(name, keyword));
    addStorageAccessGroups(groups, dataCenterDao.listDistinctStorageAccessGroups(name, keyword));

    // An exact name match narrows the result down to that single group.
    if (StringUtils.isNotEmpty(name) && groups.contains(name)) {
        groups = Collections.singleton(name);
    }
    // A keyword keeps only the groups whose name contains it.
    if (StringUtils.isNotEmpty(keyword)) {
        groups = groups.stream()
                .filter(group -> group.contains(keyword))
                .collect(Collectors.toSet());
    }

    ListResponse<StorageAccessGroupResponse> response = new ListResponse<>();
    response.setResponses(buildStorageAccessGroupResponses(groups, name), groups.size());
    return response;
}
/**
 * Splits each non-empty comma-separated entry of {@code groups} and adds the
 * individual group names to {@code storageAccessGroups}.
 *
 * @param storageAccessGroups accumulator set receiving the split names
 * @param groups raw comma-separated group strings; null/empty entries are skipped
 */
private void addStorageAccessGroups(Set<String> storageAccessGroups, List<String> groups) {
    groups.stream()
            .filter(group -> group != null && !group.isEmpty())
            .flatMap(group -> Arrays.stream(group.split(",")))
            .forEach(storageAccessGroups::add);
}
/**
 * Creates one response entry per storage access group. When an exact group
 * name was supplied, every entry is additionally populated with the entities
 * (hosts, zones, pods, clusters, storage pools) associated with that name.
 *
 * @param storageAccessGroups the distinct group names to report
 * @param name the exact group name filter, may be blank
 * @return the built response list
 */
private List<StorageAccessGroupResponse> buildStorageAccessGroupResponses(
        Set<String> storageAccessGroups, String name) {
    final boolean includeDetails = StringUtils.isNotBlank(name);
    List<StorageAccessGroupResponse> responseList = new ArrayList<>();
    for (String sag : storageAccessGroups) {
        StorageAccessGroupResponse sagResponse = new StorageAccessGroupResponse();
        sagResponse.setName(sag);
        sagResponse.setObjectName(ApiConstants.STORAGE_ACCESS_GROUP);
        if (includeDetails) {
            fetchStorageAccessGroupResponse(sagResponse, name);
        }
        responseList.add(sagResponse);
    }
    return responseList;
}
/**
 * Populates the response with minimal listings of all infrastructure
 * entities tagged with the given storage access group name.
 *
 * @param sagResponse response object to populate
 * @param name the storage access group name used as the filter
 */
private void fetchStorageAccessGroupResponse(StorageAccessGroupResponse sagResponse, String name) {
    ListHostsCmd hostsCmd = new ListHostsCmd(name);
    ListZonesCmd zonesCmd = new ListZonesCmd(name);
    ListStoragePoolsCmd poolsCmd = new ListStoragePoolsCmd(name);
    sagResponse.setHostResponseList(searchForServersWithMinimalResponse(hostsCmd));
    sagResponse.setZoneResponseList(listDataCentersWithMinimalResponse(zonesCmd));
    sagResponse.setPodResponseList(fetchPodsByStorageAccessGroup(name));
    sagResponse.setClusterResponseList(fetchClustersByStorageAccessGroup(name));
    sagResponse.setStoragePoolResponseList(searchForStoragePoolsWithMinimalResponse(poolsCmd));
}
/**
 * Lists the pods tagged with the given storage access group as minimal
 * pod responses.
 *
 * @param name the storage access group name used as the filter
 * @return a ListResponse of minimal pod responses
 */
private ListResponse<PodResponse> fetchPodsByStorageAccessGroup(String name) {
    Pair<List<? extends Pod>, Integer> pods = managementService.searchForPods(new ListPodsByCmd(name));
    List<PodResponse> podResponses = new ArrayList<>();
    for (Pod pod : pods.first()) {
        PodResponse minimalResponse = responseGenerator.createMinimalPodResponse(pod);
        minimalResponse.setObjectName("pod");
        podResponses.add(minimalResponse);
    }
    ListResponse<PodResponse> response = new ListResponse<>();
    response.setResponses(podResponses, pods.second());
    return response;
}
/**
 * Lists the clusters tagged with the given storage access group as minimal
 * cluster responses.
 *
 * @param name the storage access group name used as the filter
 * @return a ListResponse of minimal cluster responses
 */
private ListResponse<ClusterResponse> fetchClustersByStorageAccessGroup(String name) {
    Pair<List<? extends Cluster>, Integer> clusters = managementService.searchForClusters(new ListClustersCmd(name));
    List<ClusterResponse> clusterResponses = new ArrayList<>();
    for (Cluster cluster : clusters.first()) {
        ClusterResponse minimalResponse = responseGenerator.createMinimalClusterResponse(cluster);
        minimalResponse.setObjectName("cluster");
        clusterResponses.add(minimalResponse);
    }
    ListResponse<ClusterResponse> response = new ListResponse<>();
    response.setResponses(clusterResponses, clusters.second());
    return response;
}
private Pair<List<StoragePoolTagVO>, Integer> searchForStorageTagsInternal(ListStorageTagsCmd cmd) {
Filter searchFilter = new Filter(StoragePoolTagVO.class, "id", Boolean.TRUE, null, null);
@ -4309,6 +4460,20 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
return response;
}
/**
 * Lists zones as minimal responses. Callers using the admin command, or
 * whose calling account is of type ADMIN, get the full response view;
 * everyone else gets the restricted view.
 *
 * @param cmd the list-zones command carrying the search criteria
 * @return a ListResponse of minimal zone responses
 */
private ListResponse<ZoneResponse> listDataCentersWithMinimalResponse(ListZonesCmd cmd) {
    Pair<List<DataCenterJoinVO>, Integer> result = listDataCentersInternal(cmd);
    final boolean adminView = cmd instanceof ListZonesCmdByAdmin
            || CallContext.current().getCallingAccount().getType() == Account.Type.ADMIN;
    ResponseView respView = adminView ? ResponseView.Full : ResponseView.Restricted;
    List<ZoneResponse> dcResponses = ViewResponseHelper.createMinimalDataCenterResponse(respView,
            result.first().toArray(new DataCenterJoinVO[0]));
    ListResponse<ZoneResponse> response = new ListResponse<>();
    response.setResponses(dcResponses, result.second());
    return response;
}
private Pair<List<DataCenterJoinVO>, Integer> listDataCentersInternal(ListZonesCmd cmd) {
Account account = CallContext.current().getCallingAccount();
Long domainId = cmd.getDomainId();
@ -4318,6 +4483,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
String name = cmd.getName();
String networkType = cmd.getNetworkType();
Map<String, String> resourceTags = cmd.getTags();
String storageAccessGroup = cmd.getStorageAccessGroup();
SearchBuilder<DataCenterJoinVO> sb = _dcJoinDao.createSearchBuilder();
if (resourceTags != null && !resourceTags.isEmpty()) {
@ -4331,6 +4497,13 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
sb.groupBy(sb.entity().getId());
sb.join("tagSearch", tagSearch, sb.entity().getId(), tagSearch.entity().getResourceId(), JoinBuilder.JoinType.INNER);
}
if (storageAccessGroup != null) {
sb.and().op("storageAccessGroupExact", sb.entity().getStorageAccessGroups(), Op.EQ);
sb.or("storageAccessGroupPrefix", sb.entity().getStorageAccessGroups(), Op.LIKE);
sb.or("storageAccessGroupSuffix", sb.entity().getStorageAccessGroups(), Op.LIKE);
sb.or("storageAccessGroupMiddle", sb.entity().getStorageAccessGroups(), Op.LIKE);
sb.cp();
}
Filter searchFilter = new Filter(DataCenterJoinVO.class, "sortKey", SortKeyAscending.value(), cmd.getStartIndex(), cmd.getPageSizeVal());
searchFilter.addOrderBy(DataCenterJoinVO.class, "id", true);
@ -4492,6 +4665,13 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
}
}
if (storageAccessGroup != null) {
sc.setParameters("storageAccessGroupExact", storageAccessGroup);
sc.setParameters("storageAccessGroupPrefix", storageAccessGroup + ",%");
sc.setParameters("storageAccessGroupSuffix", "%," + storageAccessGroup);
sc.setParameters("storageAccessGroupMiddle", "%," + storageAccessGroup + ",%");
}
return _dcJoinDao.searchAndCount(sc, searchFilter);
}

View File

@ -262,6 +262,15 @@ public class ViewResponseHelper {
return new ArrayList<HostResponse>(vrDataList.values());
}
/**
 * Builds minimal (id/name only) host responses, de-duplicating by host id
 * while preserving the input order.
 *
 * @param hosts the host join rows to convert
 * @return one minimal HostResponse per distinct host id
 */
public static List<HostResponse> createMinimalHostResponse(HostJoinVO... hosts) {
    LinkedHashMap<Long, HostResponse> responsesById = new LinkedHashMap<>();
    for (HostJoinVO host : hosts) {
        responsesById.put(host.getId(), ApiDBUtils.newMinimalHostResponse(host));
    }
    return new ArrayList<>(responsesById.values());
}
public static List<HostForMigrationResponse> createHostForMigrationResponse(EnumSet<HostDetails> details, HostJoinVO... hosts) {
LinkedHashMap<Long, HostForMigrationResponse> vrDataList = new LinkedHashMap<>();
// Initialise the vrdatalist with the input data
@ -330,6 +339,18 @@ public class ViewResponseHelper {
return new ArrayList<StoragePoolResponse>(vrDataList.values());
}
/**
 * Builds minimal (id/name only) storage pool responses, creating exactly one
 * response per pool id (duplicate join rows for the same pool collapse to one
 * entry) while preserving the input order.
 *
 * @param pools the storage pool join rows to convert
 * @return one minimal StoragePoolResponse per distinct pool id
 */
public static List<StoragePoolResponse> createMinimalStoragePoolResponse(StoragePoolJoinVO... pools) {
    LinkedHashMap<Long, StoragePoolResponse> responsesById = new LinkedHashMap<>();
    for (StoragePoolJoinVO pool : pools) {
        // computeIfAbsent replaces the original get-then-unconditional-put,
        // which re-inserted an already-present response on every duplicate row.
        responsesById.computeIfAbsent(pool.getId(), id -> ApiDBUtils.newMinimalStoragePoolResponse(pool));
    }
    return new ArrayList<>(responsesById.values());
}
public static List<StorageTagResponse> createStorageTagResponse(StoragePoolTagVO... storageTags) {
ArrayList<StorageTagResponse> list = new ArrayList<StorageTagResponse>();
@ -596,12 +617,20 @@ public class ViewResponseHelper {
public static List<ZoneResponse> createDataCenterResponse(ResponseView view, Boolean showCapacities, Boolean showResourceImage, DataCenterJoinVO... dcs) {
List<ZoneResponse> respList = new ArrayList<ZoneResponse>();
for (DataCenterJoinVO vt : dcs){
for (DataCenterJoinVO vt : dcs) {
respList.add(ApiDBUtils.newDataCenterResponse(view, vt, showCapacities, showResourceImage));
}
return respList;
}
/**
 * Builds minimal zone responses for the given response view and zones.
 *
 * @param view the response view (full or restricted)
 * @param dcs the zone join rows to convert
 * @return one minimal ZoneResponse per input row, in order
 */
public static List<ZoneResponse> createMinimalDataCenterResponse(ResponseView view, DataCenterJoinVO... dcs) {
    List<ZoneResponse> responses = new ArrayList<>();
    for (DataCenterJoinVO zone : dcs) {
        responses.add(ApiDBUtils.newMinimalDataCenterResponse(view, zone));
    }
    return responses;
}
public static List<TemplateResponse> createTemplateResponse(EnumSet<ApiConstants.DomainDetails> detailsView, ResponseView view, TemplateJoinVO... templates) {
LinkedHashMap<String, TemplateResponse> vrDataList = new LinkedHashMap<>();
for (TemplateJoinVO vr : templates) {

View File

@ -25,6 +25,8 @@ import com.cloud.utils.db.GenericDao;
public interface DataCenterJoinDao extends GenericDao<DataCenterJoinVO, Long> {
ZoneResponse newMinimalDataCenterResponse(ResponseView view, DataCenterJoinVO dataCenter);
ZoneResponse newDataCenterResponse(ResponseView view, DataCenterJoinVO dof, Boolean showCapacities, Boolean showResourceImage);
DataCenterJoinVO newDataCenterView(DataCenter dof);

View File

@ -74,6 +74,15 @@ public class DataCenterJoinDaoImpl extends GenericDaoBase<DataCenterJoinVO, Long
_count = "select count(distinct id) from data_center_view WHERE ";
}
@Override
public ZoneResponse newMinimalDataCenterResponse(ResponseView view, DataCenterJoinVO dataCenter) {
    // Minimal view: only uuid, name and object name are populated.
    final ZoneResponse response = new ZoneResponse(null);
    response.setId(dataCenter.getUuid());
    response.setName(dataCenter.getName());
    response.setObjectName("zone");
    return response;
}
@Override
public ZoneResponse newDataCenterResponse(ResponseView view, DataCenterJoinVO dataCenter, Boolean showCapacities, Boolean showResourceImage) {
ZoneResponse zoneResponse = new ZoneResponse();
@ -82,6 +91,7 @@ public class DataCenterJoinDaoImpl extends GenericDaoBase<DataCenterJoinVO, Long
zoneResponse.setSecurityGroupsEnabled(ApiDBUtils.isSecurityGroupEnabledInZone(dataCenter.getId()));
zoneResponse.setLocalStorageEnabled(dataCenter.isLocalStorageEnabled());
zoneResponse.setType(ObjectUtils.defaultIfNull(dataCenter.getType(), DataCenter.Type.Core).toString());
zoneResponse.setStorageAccessGroups(dataCenter.getStorageAccessGroups());
if ((dataCenter.getDescription() != null) && !dataCenter.getDescription().equalsIgnoreCase("null")) {
zoneResponse.setDescription(dataCenter.getDescription());

View File

@ -29,6 +29,8 @@ import com.cloud.utils.db.GenericDao;
public interface HostJoinDao extends GenericDao<HostJoinVO, Long> {
HostResponse newMinimalHostResponse(HostJoinVO host);
HostResponse newHostResponse(HostJoinVO host, EnumSet<HostDetails> details);
HostForMigrationResponse newHostForMigrationResponse(HostJoinVO host, EnumSet<HostDetails> details);

View File

@ -221,6 +221,11 @@ public class HostJoinDaoImpl extends GenericDaoBase<HostJoinVO, Long> implements
hostResponse.setArch(host.getArch().getType());
}
hostResponse.setStorageAccessGroups(host.getStorageAccessGroups());
hostResponse.setClusterStorageAccessGroups(host.getClusterStorageAccessGroups());
hostResponse.setPodStorageAccessGroups(host.getPodStorageAccessGroups());
hostResponse.setZoneStorageAccessGroups(host.getZoneStorageAccessGroups());
float cpuWithOverprovisioning = host.getCpus() * host.getSpeed() * cpuOverprovisioningFactor;
hostResponse.setCpuAllocatedValue(cpu);
String cpuAllocated = calculateResourceAllocatedPercentage(cpu, cpuWithOverprovisioning);
@ -308,6 +313,16 @@ public class HostJoinDaoImpl extends GenericDaoBase<HostJoinVO, Long> implements
hostResponse.setObjectName("host");
}
@Override
public HostResponse newMinimalHostResponse(HostJoinVO host) {
    // Minimal view: only uuid, name and object name are populated.
    final HostResponse response = new HostResponse();
    response.setId(host.getUuid());
    response.setName(host.getName());
    response.setObjectName("host");
    return response;
}
@Override
public HostResponse newHostResponse(HostJoinVO host, EnumSet<HostDetails> details) {
HostResponse hostResponse = new HostResponse();

View File

@ -28,6 +28,8 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
public interface StoragePoolJoinDao extends GenericDao<StoragePoolJoinVO, Long> {
StoragePoolResponse newMinimalStoragePoolResponse(StoragePoolJoinVO pool);
StoragePoolResponse newStoragePoolResponse(StoragePoolJoinVO host, boolean customStats);
StoragePoolResponse setStoragePoolResponse(StoragePoolResponse response, StoragePoolJoinVO host);

View File

@ -17,6 +17,7 @@
package com.cloud.api.query.dao;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import javax.inject.Inject;
@ -102,6 +103,16 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Lo
_count = "select count(distinct id) from storage_pool_view WHERE ";
}
@Override
public StoragePoolResponse newMinimalStoragePoolResponse(StoragePoolJoinVO pool) {
    // Minimal view: only uuid, name and object name are populated. The join VO
    // already carries these fields, so the original (unused) per-call
    // storagePoolDao.findById(...) database lookup has been removed.
    StoragePoolResponse poolResponse = new StoragePoolResponse();
    poolResponse.setId(pool.getUuid());
    poolResponse.setName(pool.getName());
    poolResponse.setObjectName("storagepool");
    return poolResponse;
}
@Override
public StoragePoolResponse newStoragePoolResponse(StoragePoolJoinVO pool, boolean customStats) {
StoragePool storagePool = storagePoolDao.findById(pool.getId());
@ -165,6 +176,7 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Lo
poolResponse.setClusterName(pool.getClusterName());
poolResponse.setProvider(pool.getStorageProviderName());
poolResponse.setTags(pool.getTag());
poolResponse.setStorageAccessGroups(pool.getStorageAccessGroup());
poolResponse.setIsTagARule(pool.getIsTagARule());
poolResponse.setOverProvisionFactor(Double.toString(CapacityManager.StorageOverprovisioningFactor.valueIn(pool.getId())));
poolResponse.setManaged(storagePool.isManaged());
@ -185,12 +197,28 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Lo
public StoragePoolResponse setStoragePoolResponse(StoragePoolResponse response, StoragePoolJoinVO sp) {
String tag = sp.getTag();
if (tag != null) {
if (response.getTags() != null && response.getTags().length() > 0) {
response.setTags(response.getTags() + "," + tag);
if (response.getTags() != null && !response.getTags().isEmpty()) {
List<String> tagsList = new ArrayList<>(Arrays.asList(response.getTags().split(",")));
if (!tagsList.contains(tag)) {
tagsList.add(tag);
}
response.setTags(String.join(",", tagsList));
} else {
response.setTags(tag);
}
}
String storageAccessGroup = sp.getStorageAccessGroup();
if (storageAccessGroup != null) {
if (response.getStorageAccessGroups() != null && !response.getStorageAccessGroups().isEmpty()) {
List<String> groupList = new ArrayList<>(Arrays.asList(response.getStorageAccessGroups().split(",")));
if (!groupList.contains(storageAccessGroup)) {
groupList.add(storageAccessGroup);
}
response.setStorageAccessGroups(String.join(",", groupList));
} else {
response.setStorageAccessGroups(storageAccessGroup);
}
}
if (response.hasAnnotation() == null) {
response.setHasAnnotation(annotationDao.hasAnnotations(sp.getUuid(), AnnotationService.EntityType.PRIMARY_STORAGE.name(),
accountManager.isRootAdmin(CallContext.current().getCallingAccount().getId())));
@ -251,6 +279,7 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Lo
poolResponse.setClusterName(pool.getClusterName());
poolResponse.setProvider(pool.getStorageProviderName());
poolResponse.setTags(pool.getTag());
poolResponse.setStorageAccessGroups(pool.getStorageAccessGroup());
poolResponse.setIsTagARule(pool.getIsTagARule());
// set async job
@ -271,6 +300,14 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Lo
response.setTags(tag);
}
}
String storageAccessGroup = sp.getStorageAccessGroup();
if (storageAccessGroup != null) {
if (response.getStorageAccessGroups() != null && response.getStorageAccessGroups().length() > 0) {
response.setStorageAccessGroups(response.getStorageAccessGroups() + "," + storageAccessGroup);
} else {
response.setStorageAccessGroups(storageAccessGroup);
}
}
return response;
}

View File

@ -125,6 +125,9 @@ public class DataCenterJoinVO extends BaseViewVO implements InternalIdentity, Id
@Enumerated(value = EnumType.STRING)
private DataCenter.Type type;
@Column(name = "storage_access_groups")
private String storageAccessGroups;
public DataCenterJoinVO() {
}
@ -237,4 +240,8 @@ public class DataCenterJoinVO extends BaseViewVO implements InternalIdentity, Id
public DataCenter.Type getType() {
return type;
}
/**
 * @return the storage access group names assigned to this zone as a
 *         comma-separated string; may be null when none are set
 */
public String getStorageAccessGroups() {
    return storageAccessGroups;
}
}

View File

@ -185,6 +185,18 @@ public class HostJoinVO extends BaseViewVO implements InternalIdentity, Identity
@Column(name = "is_tag_a_rule")
private Boolean isTagARule;
@Column(name = "storage_access_groups")
private String storageAccessGroups;
@Column(name = "cluster_storage_access_groups")
private String clusterStorageAccessGroups;
@Column(name = "pod_storage_access_groups")
private String podStorageAccessGroups;
@Column(name = "zone_storage_access_groups")
private String zoneStorageAccessGroups;
@Column(name = "memory_used_capacity")
private long memUsedCapacity;
@ -417,6 +429,22 @@ public class HostJoinVO extends BaseViewVO implements InternalIdentity, Identity
return isTagARule;
}
/**
 * @return the storage access groups set directly on this host, as a
 *         comma-separated string; may be null
 */
public String getStorageAccessGroups() {
    return storageAccessGroups;
}

/**
 * @return the storage access groups of this host's cluster, as a
 *         comma-separated string; may be null
 */
public String getClusterStorageAccessGroups() {
    return clusterStorageAccessGroups;
}

/**
 * @return the storage access groups of this host's pod, as a
 *         comma-separated string; may be null
 */
public String getPodStorageAccessGroups() {
    return podStorageAccessGroups;
}

/**
 * @return the storage access groups of this host's zone, as a
 *         comma-separated string; may be null
 */
public String getZoneStorageAccessGroups() {
    return zoneStorageAccessGroups;
}
public String getAnnotation() {
return annotation;
}

View File

@ -119,6 +119,9 @@ public class StoragePoolJoinVO extends BaseViewVO implements InternalIdentity, I
@Column(name = "is_tag_a_rule")
private boolean isTagARule;
@Column(name = "storage_access_group")
private String storageAccessGroup;
@Column(name = "disk_used_capacity")
private long usedCapacity;
@ -271,6 +274,10 @@ public class StoragePoolJoinVO extends BaseViewVO implements InternalIdentity, I
return usedCapacity;
}
/**
 * @return the storage access group associated with this storage pool view row;
 *         may be null when the pool has no group assigned
 */
public String getStorageAccessGroup() {
    return storageAccessGroup;
}
public long getReservedCapacity() {
return reservedCapacity;
}

View File

@ -50,6 +50,7 @@ import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.resource.ResourceManager;
import org.apache.cloudstack.acl.SecurityChecker;
import org.apache.cloudstack.affinity.AffinityGroup;
import org.apache.cloudstack.affinity.AffinityGroupService;
@ -473,6 +474,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
Ipv6Service ipv6Service;
@Inject
NsxProviderDao nsxProviderDao;
@Inject
ResourceManager resourceManager;
// FIXME - why don't we have interface for DataCenterLinkLocalIpAddressDao?
@Inject
@ -2430,7 +2433,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
@Override
@ActionEvent(eventType = EventTypes.EVENT_POD_CREATE, eventDescription = "creating pod", async = false)
public Pod createPod(final long zoneId, final String name, final String startIp, final String endIp, final String gateway, final String netmask, String allocationState) {
public Pod createPod(final long zoneId, final String name, final String startIp, final String endIp, final String gateway, final String netmask, String allocationState, List<String> storageAccessGroups) {
final DataCenterVO zone = _zoneDao.findById(zoneId);
if (zone == null) {
throw new InvalidParameterValueException("Please specify a valid zone.");
@ -2456,13 +2459,13 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
if (allocationState == null) {
allocationState = Grouping.AllocationState.Enabled.toString();
}
return createPod(userId.longValue(), name, zone, gateway, cidr, startIp, endIp, allocationState, false);
return createPod(userId.longValue(), name, zone, gateway, cidr, startIp, endIp, allocationState, false, storageAccessGroups);
}
@Override
@DB
public HostPodVO createPod(final long userId, final String podName, final DataCenter zone, final String gateway, final String cidr, String startIp, String endIp, final String allocationStateStr,
final boolean skipGatewayOverlapCheck) {
final boolean skipGatewayOverlapCheck, List<String> storageAccessGroups) {
final String cidrAddress = DataCenter.Type.Edge.equals(zone.getType()) ? "" : getCidrAddress(cidr);
final int cidrSize = DataCenter.Type.Edge.equals(zone.getType()) ? 0 : getCidrSize(cidr);
if (DataCenter.Type.Edge.equals(zone.getType())) {
@ -2495,6 +2498,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
podFinal.setAllocationState(allocationState);
}
if (CollectionUtils.isNotEmpty(storageAccessGroups)) {
podFinal.setStorageAccessGroups(String.join(",", storageAccessGroups));
}
final String startIpFinal = startIp;
final String endIpFinal = endIp;
HostPodVO hostPodVO = Transaction.execute((TransactionCallback<HostPodVO>) status -> {
@ -2956,7 +2963,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
@DB
public DataCenterVO createZone(final long userId, final String zoneName, final String dns1, final String dns2, final String internalDns1, final String internalDns2, final String guestCidr, final String domain,
final Long domainId, final NetworkType zoneType, final String allocationStateStr, final String networkDomain, final boolean isSecurityGroupEnabled, final boolean isLocalStorageEnabled,
final String ip6Dns1, final String ip6Dns2, final boolean isEdge) {
final String ip6Dns1, final String ip6Dns2, final boolean isEdge, List<String> storageAccessGroups) {
// checking the following params outside checkzoneparams method as we do
// not use these params for updatezone
@ -2991,6 +2998,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
zoneFinal.setAllocationState(Grouping.AllocationState.Disabled);
}
zoneFinal.setType(isEdge ? DataCenter.Type.Edge : DataCenter.Type.Core);
if (CollectionUtils.isNotEmpty(storageAccessGroups)) {
zoneFinal.setStorageAccessGroups(String.join(",", storageAccessGroups));
}
return Transaction.execute(new TransactionCallback<DataCenterVO>() {
@Override
@ -3102,6 +3112,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
boolean isSecurityGroupEnabled = cmd.getSecuritygroupenabled();
final boolean isLocalStorageEnabled = cmd.getLocalStorageEnabled();
final boolean isEdge = cmd.isEdge();
final List<String> storageAccessGroups = cmd.getStorageAccessGroups();
if (allocationState == null) {
allocationState = Grouping.AllocationState.Disabled.toString();
@ -3135,7 +3146,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
}
return createZone(userId, zoneName, dns1, dns2, internalDns1, internalDns2, guestCidr, domainVO != null ? domainVO.getName() : null, domainId, zoneType, allocationState,
networkDomain, isSecurityGroupEnabled, isLocalStorageEnabled, ip6Dns1, ip6Dns2, isEdge);
networkDomain, isSecurityGroupEnabled, isLocalStorageEnabled, ip6Dns1, ip6Dns2, isEdge, storageAccessGroups);
}
@Override

View File

@ -1411,7 +1411,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
if (vmRequiresSharedStorage) {
// check shared pools
List<StoragePoolVO> allPoolsInCluster = _storagePoolDao.findPoolsByTags(clusterVO.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, false, 0);
List<StoragePoolVO> allPoolsInCluster = _storagePoolDao.findPoolsByTags(clusterVO.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), ScopeType.CLUSTER, null, false, 0);
for (StoragePoolVO pool : allPoolsInCluster) {
if (!allocatorAvoidOutput.shouldAvoid(pool)) {
// there's some pool in the cluster that is not yet in avoid set
@ -1658,6 +1658,13 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
}
protected boolean hostCanAccessSPool(Host host, StoragePool pool) {
if (!_storageMgr.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(host, pool)) {
if (logger.isDebugEnabled()) {
logger.debug(String.format("StoragePool %s and host %s does not have matching storage access groups", pool, host));
}
return false;
}
boolean hostCanAccessSPool = false;
StoragePoolHostVO hostPoolLinkage = _poolHostDao.findByPoolHost(pool.getId(), host.getId());

View File

@ -32,11 +32,17 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.storage.ScopeType;
import com.cloud.storage.StoragePoolAndAccessGroupMapVO;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import com.cloud.storage.dao.StoragePoolTagsDao;
import com.cloud.utils.StringUtils;
import org.apache.cloudstack.alert.AlertService;
import org.apache.cloudstack.annotation.AnnotationService;
import org.apache.cloudstack.annotation.dao.AnnotationDao;
@ -54,6 +60,8 @@ import org.apache.cloudstack.api.command.admin.host.ReconnectHostCmd;
import org.apache.cloudstack.api.command.admin.host.UpdateHostCmd;
import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
@ -61,6 +69,8 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.utils.identity.ManagementServerNode;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.ObjectUtils;
import org.apache.commons.lang3.ArrayUtils;
import org.springframework.stereotype.Component;
import com.cloud.agent.AgentManager;
@ -172,7 +182,6 @@ import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.Account;
import com.cloud.user.AccountManager;
import com.cloud.utils.StringUtils;
import com.cloud.utils.Ternary;
import com.cloud.utils.UriUtils;
import com.cloud.utils.component.Manager;
@ -250,6 +259,10 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
@Inject
private PrimaryDataStoreDao _storagePoolDao;
@Inject
private StoragePoolTagsDao _storagePoolTagsDao;
@Inject
private StoragePoolAndAccessGroupMapDao _storagePoolAccessGroupMapDao;
@Inject
private DataCenterIpAddressDao _privateIPAddressDao;
@Inject
private IPAddressDao _publicIPAddressDao;
@ -513,6 +526,11 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
cluster.setClusterType(clusterType);
cluster.setAllocationState(allocationState);
cluster.setArch(arch.getType());
List<String> storageAccessGroups = cmd.getStorageAccessGroups();
if (CollectionUtils.isNotEmpty(storageAccessGroups)) {
cluster.setStorageAccessGroups(String.join(",", storageAccessGroups));
}
try {
cluster = _clusterDao.persist(cluster);
} catch (final Exception e) {
@ -572,7 +590,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
for (final Map.Entry<? extends ServerResource, Map<String, String>> entry : resources.entrySet()) {
final ServerResource resource = entry.getKey();
final HostVO host = (HostVO)createHostAndAgent(resource, entry.getValue(), true, null, false);
final HostVO host = (HostVO)createHostAndAgent(resource, entry.getValue(), true, null, null, false);
if (host != null) {
hosts.add(host);
}
@ -614,6 +632,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
final String username = cmd.getUsername();
final String password = cmd.getPassword();
final List<String> hostTags = cmd.getHostTags();
final List<String> storageAccessGroups = cmd.getStorageAccessGroups();
dcId = _accountMgr.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), dcId);
@ -643,18 +662,18 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
String hypervisorType = cmd.getHypervisor().equalsIgnoreCase(HypervisorGuru.HypervisorCustomDisplayName.value()) ?
"Custom" : cmd.getHypervisor();
return discoverHostsFull(dcId, podId, clusterId, clusterName, url, username, password, hypervisorType, hostTags, cmd.getFullUrlParams(), false);
return discoverHostsFull(dcId, podId, clusterId, clusterName, url, username, password, hypervisorType, hostTags, storageAccessGroups, cmd.getFullUrlParams(), false);
}
@Override
public List<? extends Host> discoverHosts(final AddSecondaryStorageCmd cmd) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException {
final Long dcId = cmd.getZoneId();
final String url = cmd.getUrl();
return discoverHostsFull(dcId, null, null, null, url, null, null, "SecondaryStorage", null, null, false);
return discoverHostsFull(dcId, null, null, null, url, null, null, "SecondaryStorage", null, null, null, false);
}
private List<HostVO> discoverHostsFull(final Long dcId, final Long podId, Long clusterId, final String clusterName, String url, String username, String password,
final String hypervisorType, final List<String> hostTags, final Map<String, String> params, final boolean deferAgentCreation) throws IllegalArgumentException, DiscoveryException,
final String hypervisorType, final List<String> hostTags, List<String> storageAccessGroups, final Map<String, String> params, final boolean deferAgentCreation) throws IllegalArgumentException, DiscoveryException,
InvalidParameterValueException {
URI uri;
@ -860,9 +879,9 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
HostVO host;
if (deferAgentCreation) {
host = (HostVO)createHostAndAgentDeferred(resource, entry.getValue(), true, hostTags, false);
host = (HostVO)createHostAndAgentDeferred(resource, entry.getValue(), true, hostTags, storageAccessGroups, false);
} else {
host = (HostVO)createHostAndAgent(resource, entry.getValue(), true, hostTags, false);
host = (HostVO)createHostAndAgent(resource, entry.getValue(), true, hostTags, storageAccessGroups, false);
}
if (host != null) {
hosts.add(host);
@ -1270,7 +1289,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
}
return cluster;
return _clusterDao.findById(cluster.getId());
}
@Override
@ -1917,6 +1936,741 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
}
}
/**
 * Propagates a zone-level storage access group change down to every pod in the zone:
 * each pod's clusters/hosts are reconciled first, then the pod's own group list is trimmed.
 */
private void removeStorageAccessGroupsOnPodsInZone(long zoneId, List<String> newStoragePoolTags, List<String> tagsToDeleteOnZone) {
    for (HostPodVO podInZone : _podDao.listByDataCenterId(zoneId)) {
        long podId = podInZone.getId();
        removeStorageAccessGroupsOnClustersInPod(podId, newStoragePoolTags, tagsToDeleteOnZone);
        updateStorageAccessGroupsToBeAddedOnPodInZone(podId, newStoragePoolTags);
    }
}
/**
 * Propagates a pod-level storage access group change down to every cluster in the pod:
 * host-level deletions and additions are applied first, then each cluster's own list is trimmed.
 */
private void removeStorageAccessGroupsOnClustersInPod(long podId, List<String> newStoragePoolTags, List<String> tagsToDeleteOnPod) {
    for (ClusterVO clusterInPod : _clusterDao.listByPodId(podId)) {
        long clusterId = clusterInPod.getId();
        updateStorageAccessGroupsToBeDeletedOnHostsInCluster(clusterId, tagsToDeleteOnPod);
        updateStorageAccessGroupsToBeAddedOnHostsInCluster(clusterId, newStoragePoolTags);
        updateStorageAccessGroupsToBeAddedOnClustersInPod(clusterId, newStoragePoolTags);
    }
}
/**
 * Applies a cluster-level storage access group deletion to the hosts of the cluster.
 * A host that still runs VMs with volumes on pools carrying the removed groups keeps
 * (pins) those groups directly on the host record; every other host simply drops them.
 */
private void updateStorageAccessGroupsToBeDeletedOnHostsInCluster(long clusterId, List<String> storageAccessGroupsToDeleteOnCluster) {
    if (CollectionUtils.isEmpty(storageAccessGroupsToDeleteOnCluster)) {
        return;
    }
    List<HostVO> hosts = _hostDao.findByClusterId(clusterId);
    List<Long> busyHostIds = listOfHostIdsUsingTheStorageAccessGroups(storageAccessGroupsToDeleteOnCluster, clusterId, null, null);
    for (HostVO host : hosts) {
        String currentGroups = host.getStorageAccessGroups();
        boolean hostStillUsesGroups = busyHostIds != null && busyHostIds.contains(host.getId());
        if (hostStillUsesGroups) {
            // Pin the groups being removed onto the host itself so its running volumes stay reachable.
            Set<String> pinned = new HashSet<>();
            if (currentGroups != null) {
                pinned.addAll(Arrays.asList(currentGroups.split(",")));
            }
            pinned.addAll(storageAccessGroupsToDeleteOnCluster);
            host.setStorageAccessGroups(String.join(",", pinned));
            _hostDao.update(host.getId(), host);
        } else if (currentGroups != null) {
            // Host is idle with respect to these groups: strip them from its list (null when empty).
            List<String> remaining = new ArrayList<>(Arrays.asList(currentGroups.split(",")));
            remaining.removeAll(storageAccessGroupsToDeleteOnCluster);
            host.setStorageAccessGroups(remaining.isEmpty() ? null : String.join(",", remaining));
            _hostDao.update(host.getId(), host);
        }
    }
}
/**
 * When storage access groups are added at the cluster level, removes those same groups
 * from each host's own list (the host now inherits them from the cluster, so keeping a
 * host-level copy would be redundant).
 *
 * Fix: only persist a host when its group list actually changed. The original wrote every
 * host back unconditionally, which both issued needless DB updates and could silently
 * reorder the stored comma-separated string (HashSet iteration order).
 */
private void updateStorageAccessGroupsToBeAddedOnHostsInCluster(long clusterId, List<String> tagsAddedOnCluster) {
    if (CollectionUtils.isEmpty(tagsAddedOnCluster)) {
        return;
    }
    List<HostVO> hosts = _hostDao.findByClusterId(clusterId);
    for (HostVO host : hosts) {
        String hostStoragePoolTags = host.getStorageAccessGroups();
        Set<String> hostStoragePoolTagsSet = hostStoragePoolTags != null
                ? new HashSet<>(Arrays.asList(hostStoragePoolTags.split(",")))
                : new HashSet<>();
        // removeIf reports whether anything was actually dropped.
        boolean changed = hostStoragePoolTagsSet.removeIf(tagsAddedOnCluster::contains);
        if (!changed) {
            continue;
        }
        host.setStorageAccessGroups(hostStoragePoolTagsSet.isEmpty() ? null : String.join(",", hostStoragePoolTagsSet));
        _hostDao.update(host.getId(), host);
    }
}
/**
 * When storage access groups are added at the pod level, removes those same groups from the
 * given cluster's own list — the cluster now inherits them from the pod. A cluster with no
 * group list of its own is left untouched.
 */
private void updateStorageAccessGroupsToBeAddedOnClustersInPod(long clusterId, List<String> tagsAddedOnPod) {
    if (CollectionUtils.isEmpty(tagsAddedOnPod)) {
        return;
    }
    ClusterVO cluster = _clusterDao.findById(clusterId);
    String currentGroups = cluster.getStorageAccessGroups();
    if (currentGroups == null) {
        return;
    }
    List<String> remaining = new ArrayList<>(Arrays.asList(currentGroups.split(",")));
    remaining.removeAll(tagsAddedOnPod);
    cluster.setStorageAccessGroups(remaining.isEmpty() ? null : String.join(",", remaining));
    _clusterDao.update(cluster.getId(), cluster);
}
/**
 * When storage access groups are added at the zone level, removes those same groups from the
 * given pod's own list — the pod now inherits them from the zone. A pod with no group list
 * of its own is left untouched.
 */
private void updateStorageAccessGroupsToBeAddedOnPodInZone(long podId, List<String> tagsAddedOnZone) {
    if (CollectionUtils.isEmpty(tagsAddedOnZone)) {
        return;
    }
    HostPodVO pod = _podDao.findById(podId);
    String currentGroups = pod.getStorageAccessGroups();
    if (currentGroups == null) {
        return;
    }
    List<String> remaining = new ArrayList<>(Arrays.asList(currentGroups.split(",")));
    remaining.removeAll(tagsAddedOnZone);
    pod.setStorageAccessGroups(remaining.isEmpty() ? null : String.join(",", remaining));
    _podDao.update(pod.getId(), pod);
}
/**
 * Returns the distinct IDs of hosts that currently run at least one VM whose volume sits on a
 * storage pool tagged with any of the given storage access groups. Scope is optionally narrowed
 * by cluster, pod, or datacenter (each filter applied only when non-null).
 *
 * Join chain (innermost first): storage_pool_and_access_group_map -> storage_pool -> volumes
 * -> vm_instance. Volumes in terminal states (Destroy/Error/Expunging/Expunged) are excluded,
 * as are removed VMs and VMs with no host assigned.
 *
 * @param storageAccessGroups groups to match (must be non-empty; used as an IN list)
 * @param clusterId  optional cluster filter on the pool, or null
 * @param podId      optional pod filter on the pool, or null
 * @param datacenterId optional zone filter on the pool, or null
 * @return distinct host IDs; empty list when no host qualifies
 */
public List<Long> listOfHostIdsUsingTheStorageAccessGroups(List<String> storageAccessGroups, Long clusterId, Long podId, Long datacenterId) {
    // Outer query: distinct host IDs of live (not removed), placed (hostId non-null) VMs.
    GenericSearchBuilder<VMInstanceVO, Long> vmInstanceSearch = _vmDao.createSearchBuilder(Long.class);
    vmInstanceSearch.select(null, Func.DISTINCT, vmInstanceSearch.entity().getHostId());
    vmInstanceSearch.and("hostId", vmInstanceSearch.entity().getHostId(), Op.NNULL);
    vmInstanceSearch.and("removed", vmInstanceSearch.entity().getRemoved(), Op.NULL);
    // Volumes of those VMs, excluding terminal volume states (bound below via NIN).
    GenericSearchBuilder<VolumeVO, Long> volumeSearch = volumeDao.createSearchBuilder(Long.class);
    volumeSearch.selectFields(volumeSearch.entity().getInstanceId());
    volumeSearch.and("state", volumeSearch.entity().getState(), Op.NIN);
    // Pools those volumes live on, optionally scoped to cluster/pod/zone.
    GenericSearchBuilder<StoragePoolVO, Long> storagePoolSearch = _storagePoolDao.createSearchBuilder(Long.class);
    storagePoolSearch.and("clusterId", storagePoolSearch.entity().getClusterId(), Op.EQ);
    storagePoolSearch.and("podId", storagePoolSearch.entity().getPodId(), Op.EQ);
    storagePoolSearch.and("datacenterId", storagePoolSearch.entity().getDataCenterId(), Op.EQ);
    storagePoolSearch.selectFields(storagePoolSearch.entity().getId());
    // Restrict pools to those mapped to one of the requested access groups.
    GenericSearchBuilder<StoragePoolAndAccessGroupMapVO, Long> storageAccessGroupSearch = _storagePoolAccessGroupMapDao.createSearchBuilder(Long.class);
    storageAccessGroupSearch.and("sag", storageAccessGroupSearch.entity().getStorageAccessGroup(), Op.IN);
    storagePoolSearch.join("storageAccessGroupSearch", storageAccessGroupSearch, storagePoolSearch.entity().getId(), storageAccessGroupSearch.entity().getPoolId(), JoinBuilder.JoinType.INNER);
    // NOTE: each builder's done() is called after it has been joined into its parent —
    // this ordering matters to GenericSearchBuilder and must not be rearranged.
    storageAccessGroupSearch.done();
    volumeSearch.join("storagePoolSearch", storagePoolSearch, volumeSearch.entity().getPoolId(), storagePoolSearch.entity().getId(), JoinBuilder.JoinType.INNER);
    storagePoolSearch.done();
    vmInstanceSearch.join("volumeSearch", volumeSearch, vmInstanceSearch.entity().getId(), volumeSearch.entity().getInstanceId(), JoinBuilder.JoinType.INNER);
    volumeSearch.done();
    vmInstanceSearch.done();
    SearchCriteria<Long> sc = vmInstanceSearch.create();
    sc.setJoinParameters("storageAccessGroupSearch", "sag", storageAccessGroups.toArray());
    sc.setJoinParameters("volumeSearch", "state", new String[]{"Destroy", "Error", "Expunging", "Expunged"});
    // Scope filters are optional; unset conditions are ignored by the search framework.
    if (clusterId != null) {
        sc.setParameters("storagePoolSearch", "clusterId", clusterId);
    }
    if (podId != null) {
        sc.setParameters("storagePoolSearch", "podId", podId);
    }
    if (datacenterId != null) {
        sc.setParameters("storagePoolSearch", "datacenterId", datacenterId);
    }
    return _vmDao.customSearch(sc, null);
}
/**
 * Returns the distinct IDs of hosts that currently run at least one VM whose volume resides
 * on the given storage pool. Volumes in terminal states (Destroy/Error/Expunging/Expunged)
 * are excluded, as are removed VMs and VMs with no host assigned.
 *
 * @param storagePoolId the pool to check
 * @return distinct host IDs; empty list when no host uses the pool
 */
public List<Long> listOfHostIdsUsingTheStoragePool(Long storagePoolId) {
    // Outer query: distinct host IDs of live, placed VMs.
    GenericSearchBuilder<VMInstanceVO, Long> vmInstanceSearch = _vmDao.createSearchBuilder(Long.class);
    vmInstanceSearch.select(null, Func.DISTINCT, vmInstanceSearch.entity().getHostId());
    vmInstanceSearch.and("hostId", vmInstanceSearch.entity().getHostId(), Op.NNULL);
    vmInstanceSearch.and("removed", vmInstanceSearch.entity().getRemoved(), Op.NULL);
    // Volumes of those VMs, excluding terminal volume states (bound below via NIN).
    GenericSearchBuilder<VolumeVO, Long> volumeSearch = volumeDao.createSearchBuilder(Long.class);
    volumeSearch.selectFields(volumeSearch.entity().getInstanceId());
    volumeSearch.and("state", volumeSearch.entity().getState(), Op.NIN);
    // Restrict to the single requested pool.
    GenericSearchBuilder<StoragePoolVO, Long> storagePoolSearch = _storagePoolDao.createSearchBuilder(Long.class);
    storagePoolSearch.selectFields(storagePoolSearch.entity().getId());
    storagePoolSearch.and("poolId", storagePoolSearch.entity().getId(), Op.EQ);
    // done() calls follow each join — ordering is significant to GenericSearchBuilder.
    volumeSearch.join("storagePoolSearch", storagePoolSearch, volumeSearch.entity().getPoolId(), storagePoolSearch.entity().getId(), JoinBuilder.JoinType.INNER);
    storagePoolSearch.done();
    vmInstanceSearch.join("volumeSearch", volumeSearch, vmInstanceSearch.entity().getId(), volumeSearch.entity().getInstanceId(), JoinBuilder.JoinType.INNER);
    volumeSearch.done();
    vmInstanceSearch.done();
    SearchCriteria<Long> sc = vmInstanceSearch.create();
    sc.setJoinParameters("storagePoolSearch", "poolId", storagePoolId);
    sc.setJoinParameters("volumeSearch", "state", new String[]{"Destroy", "Error", "Expunging", "Expunged"});
    return _vmDao.customSearch(sc, null);
}
/**
 * Returns the non-terminal volumes that live on storage pools tagged with any of the given
 * storage access groups and belong to VMs that are not removed. The result can be narrowed
 * to the VMs on a specific host and/or to pools in a specific cluster, pod, or datacenter
 * (each filter applied only when non-null).
 *
 * @param storageAccessGroups groups to match (must be non-empty; used as an IN list)
 * @param hostId     optional filter: only volumes of VMs placed on this host, or null
 * @param clusterId  optional cluster filter on the pool, or null
 * @param podId      optional pod filter on the pool, or null
 * @param datacenterId optional zone filter on the pool, or null
 * @return matching volumes; empty list when none qualify
 */
public List<VolumeVO> listOfVolumesUsingTheStorageAccessGroups(List<String> storageAccessGroups, Long hostId, Long clusterId, Long podId, Long datacenterId) {
    // Outer query: volumes not in a terminal state (bound below via NIN).
    SearchBuilder<VolumeVO> volumeSearch = volumeDao.createSearchBuilder();
    volumeSearch.and("state", volumeSearch.entity().getState(), Op.NIN);
    // VM subquery: live VMs, optionally restricted to one host.
    GenericSearchBuilder<VMInstanceVO, Long> vmInstanceSearch = _vmDao.createSearchBuilder(Long.class);
    vmInstanceSearch.selectFields(vmInstanceSearch.entity().getId());
    vmInstanceSearch.and("hostId", vmInstanceSearch.entity().getHostId(), Op.EQ);
    vmInstanceSearch.and("removed", vmInstanceSearch.entity().getRemoved(), Op.NULL);
    // Pool subquery: pools optionally scoped to cluster/pod/zone.
    GenericSearchBuilder<StoragePoolVO, Long> storagePoolSearch = _storagePoolDao.createSearchBuilder(Long.class);
    storagePoolSearch.and("clusterId", storagePoolSearch.entity().getClusterId(), Op.EQ);
    storagePoolSearch.and("podId", storagePoolSearch.entity().getPodId(), Op.EQ);
    storagePoolSearch.and("datacenterId", storagePoolSearch.entity().getDataCenterId(), Op.EQ);
    storagePoolSearch.selectFields(storagePoolSearch.entity().getId());
    // Restrict pools to those mapped to one of the requested access groups.
    GenericSearchBuilder<StoragePoolAndAccessGroupMapVO, Long> storageAccessGroupSearch = _storagePoolAccessGroupMapDao.createSearchBuilder(Long.class);
    storageAccessGroupSearch.and("sag", storageAccessGroupSearch.entity().getStorageAccessGroup(), Op.IN);
    storagePoolSearch.join("storageAccessGroupSearch", storageAccessGroupSearch, storagePoolSearch.entity().getId(), storageAccessGroupSearch.entity().getPoolId(), JoinBuilder.JoinType.INNER);
    volumeSearch.join("storagePoolSearch", storagePoolSearch, volumeSearch.entity().getPoolId(), storagePoolSearch.entity().getId(), JoinBuilder.JoinType.INNER);
    volumeSearch.join("vmInstanceSearch", vmInstanceSearch, volumeSearch.entity().getInstanceId(), vmInstanceSearch.entity().getId(), JoinBuilder.JoinType.INNER);
    // done() ordering is significant to GenericSearchBuilder.
    storageAccessGroupSearch.done();
    storagePoolSearch.done();
    vmInstanceSearch.done();
    volumeSearch.done();
    SearchCriteria<VolumeVO> sc = volumeSearch.create();
    sc.setParameters( "state", new String[]{"Destroy", "Error", "Expunging", "Expunged"});
    sc.setJoinParameters("storageAccessGroupSearch", "sag", storageAccessGroups.toArray());
    // Optional scope filters; unset conditions are ignored by the search framework.
    if (hostId != null) {
        sc.setJoinParameters("vmInstanceSearch", "hostId", hostId);
    }
    if (clusterId != null) {
        sc.setJoinParameters("storagePoolSearch", "clusterId", clusterId);
    }
    if (podId != null) {
        sc.setJoinParameters("storagePoolSearch", "podId", podId);
    }
    if (datacenterId != null) {
        sc.setJoinParameters("storagePoolSearch", "datacenterId", datacenterId);
    }
    return volumeDao.customSearch(sc, null);
}
/**
 * Returns the distinct IDs of storage pools that hold at least one Ready volume belonging
 * to a VM placed on the given host.
 *
 * NOTE(review): unlike the sibling queries above, this one does not filter out removed VMs
 * (no "removed IS NULL" condition) — confirm whether that is intentional.
 *
 * @param hostId host whose in-use pools are wanted
 * @return distinct pool IDs; empty list when the host uses no pool
 */
private List<Long> listOfStoragePoolIDsUsedByHost(long hostId) {
    // VM subquery: VMs placed on the given host.
    GenericSearchBuilder<VMInstanceVO, Long> vmInstanceSearch = _vmDao.createSearchBuilder(Long.class);
    vmInstanceSearch.selectFields(vmInstanceSearch.entity().getId());
    vmInstanceSearch.and("hostId", vmInstanceSearch.entity().getHostId(), Op.EQ);
    // Volume subquery: volumes of those VMs in the "Ready" state (bound below).
    GenericSearchBuilder<VolumeVO, Long> volumeSearch = volumeDao.createSearchBuilder(Long.class);
    volumeSearch.selectFields(volumeSearch.entity().getPoolId());
    volumeSearch.and("state", volumeSearch.entity().getState(), Op.EQ);
    volumeSearch.join("vmInstanceSearch", vmInstanceSearch, volumeSearch.entity().getInstanceId(), vmInstanceSearch.entity().getId(), JoinBuilder.JoinType.INNER);
    vmInstanceSearch.done();
    // Outer query: distinct pools backing those volumes.
    GenericSearchBuilder<StoragePoolVO, Long> storagePoolSearch = _storagePoolDao.createSearchBuilder(Long.class);
    storagePoolSearch.select(null, Func.DISTINCT, storagePoolSearch.entity().getId());
    storagePoolSearch.join("volumeSearch", volumeSearch, storagePoolSearch.entity().getId(), volumeSearch.entity().getPoolId(), JoinBuilder.JoinType.INNER);
    volumeSearch.done();
    storagePoolSearch.done();
    SearchCriteria<Long> sc = storagePoolSearch.create();
    sc.setJoinParameters("vmInstanceSearch", "hostId", hostId);
    sc.setJoinParameters("volumeSearch", "state", "Ready");
    List<Long> storagePoolsInUse = _storagePoolDao.customSearch(sc, null);
    return storagePoolsInUse;
}
/**
 * Reconciles host connections for a storage pool after its storage access groups change.
 *
 * Candidate hosts are all Up routing hosts in the pool's scope (cluster or zone). For a pool
 * with access groups, a host is connected when it shares at least one group with the pool and
 * disconnected otherwise; for a pool without groups, every not-yet-connected host is connected.
 *
 * Before disconnecting, any host that still runs VMs with volumes on this pool makes the whole
 * operation fail (CloudRuntimeException) — the conflicting host/volume pairs are logged first.
 *
 * @param poolId the storage pool whose connections are being reconciled
 * @param storageAccessGroups the pool's (new) access groups; null/empty means "open to all hosts"
 * @throws CloudRuntimeException when a host slated for disconnection still uses the pool
 */
@Override
public void updateStoragePoolConnectionsOnHosts(Long poolId, List<String> storageAccessGroups) {
    StoragePoolVO storagePool = _storagePoolDao.findById(poolId);
    // Collect candidate hosts according to the pool's scope.
    List<HostVO> hosts = new ArrayList<>();
    if (storagePool.getScope().equals(ScopeType.CLUSTER)) {
        List<HostVO> hostsInCluster = listAllUpHosts(Host.Type.Routing, storagePool.getClusterId(), storagePool.getPodId(), storagePool.getDataCenterId());
        hosts.addAll(hostsInCluster);
    } else if (storagePool.getScope().equals(ScopeType.ZONE)) {
        List<HostVO> hostsInZone = listAllUpHosts(Host.Type.Routing, null, null, storagePool.getDataCenterId());
        hosts.addAll(hostsInZone);
    }
    List<HostVO> hostsToConnect = new ArrayList<>();
    List<HostVO> hostsToDisconnect = new ArrayList<>();
    boolean storagePoolHasAccessGroups = CollectionUtils.isNotEmpty(storageAccessGroups);
    // Classify each host: connect when it shares a group with the pool (or the pool has no
    // groups) and is not yet connected; disconnect when it shares none.
    for (HostVO host : hosts) {
        String[] storageAccessGroupsOnHost = _storageMgr.getStorageAccessGroups(null, null, null, host.getId());
        List<String> listOfStorageAccessGroupsOnHost = Arrays.asList(storageAccessGroupsOnHost);
        StoragePoolHostVO hostPoolRecord = _storagePoolHostDao.findByPoolHost(storagePool.getId(), host.getId());
        if (storagePoolHasAccessGroups) {
            List<String> intersection = new ArrayList<>(listOfStorageAccessGroupsOnHost);
            intersection.retainAll(storageAccessGroups);
            if (CollectionUtils.isNotEmpty(intersection)) {
                if (hostPoolRecord == null) {
                    hostsToConnect.add(host);
                }
            } else {
                hostsToDisconnect.add(host);
            }
        } else {
            if (hostPoolRecord == null) {
                hostsToConnect.add(host);
            }
        }
    }
    // Safety gate: refuse to disconnect any host that still runs VMs with volumes on this pool.
    if (CollectionUtils.isNotEmpty(hostsToDisconnect)) {
        List<Long> hostIdsUsingTheStoragePool = listOfHostIdsUsingTheStoragePool(poolId);
        List<Long> hostIdsToDisconnect = hostsToDisconnect.stream()
                .map(HostVO::getId)
                .collect(Collectors.toList());
        List<Long> conflictingHostIds = new ArrayList<>(CollectionUtils.intersection(hostIdsToDisconnect, hostIdsUsingTheStoragePool));
        if (CollectionUtils.isNotEmpty(conflictingHostIds)) {
            // Build a host -> volumes map purely for the error log, caching VM lookups.
            Map<HostVO, List<VolumeVO>> hostVolumeMap = new HashMap<>();
            List<VolumeVO> volumesInPool = volumeDao.findByPoolId(poolId);
            Map<Long, VMInstanceVO> vmInstanceCache = new HashMap<>();
            for (Long hostId : conflictingHostIds) {
                HostVO host = _hostDao.findById(hostId);
                List<VolumeVO> matchingVolumes = volumesInPool.stream()
                        .filter(volume -> {
                            Long vmId = volume.getInstanceId();
                            if (vmId == null) return false;
                            VMInstanceVO vmInstance = vmInstanceCache.computeIfAbsent(vmId, _vmDao::findById);
                            return vmInstance != null && hostId.equals(vmInstance.getHostId());
                        })
                        .collect(Collectors.toList());
                if (!matchingVolumes.isEmpty()) {
                    hostVolumeMap.put(host, matchingVolumes);
                }
            }
            logger.error(String.format("Conflict detected: Hosts using the storage pool that need to be disconnected or " +
                    "connected to the pool: Host IDs and volumes: %s", hostVolumeMap));
            throw new CloudRuntimeException("Storage access groups cannot be updated as they are currently in use by some hosts. Please check the logs.");
        }
    }
    if (!hostsToConnect.isEmpty()) {
        for (HostVO host : hostsToConnect) {
            logger.debug(String.format("Connecting [%s] to [%s]", host, storagePool));
            connectHostToStoragePool(host, storagePool);
        }
    }
    if (!hostsToDisconnect.isEmpty()) {
        for (HostVO host : hostsToDisconnect) {
            logger.debug(String.format("Disconnecting [%s] from [%s]", host, storagePool));
            disconnectHostFromStoragePool(host, storagePool);
        }
    }
}
/**
 * Filters the candidate hosts down to those allowed to connect to a pool with the given
 * storage access groups: a host qualifies when it shares at least one group with the pool.
 * A pool without access groups is connectable from every host.
 *
 * Fix: when the pool has no groups the original still performed a per-host
 * getStorageAccessGroups DAO lookup whose result was never used; return early instead.
 *
 * @param allHosts candidate hosts
 * @param storageAccessGroups the pool's groups; null/empty means "open to all hosts"
 * @return hosts eligible for connection (a copy; callers may mutate it freely)
 */
protected List<HostVO> filterHostsBasedOnStorageAccessGroups(List<HostVO> allHosts, List<String> storageAccessGroups) {
    if (CollectionUtils.isEmpty(storageAccessGroups)) {
        // No groups on the pool: every host is eligible, no per-host lookups needed.
        return new ArrayList<>(allHosts);
    }
    List<HostVO> hostsToConnect = new ArrayList<>();
    for (HostVO host : allHosts) {
        String[] storageAccessGroupsOnHost = _storageMgr.getStorageAccessGroups(null, null, null, host.getId());
        List<String> intersection = new ArrayList<>(Arrays.asList(storageAccessGroupsOnHost));
        intersection.retainAll(storageAccessGroups);
        if (CollectionUtils.isNotEmpty(intersection)) {
            hostsToConnect.add(host);
        }
    }
    return hostsToConnect;
}
/**
 * Returns the Up routing hosts in the pool's cluster that are allowed to connect to it,
 * based on shared storage access groups. When the cluster has no Up host at all, the pool
 * record is expunged and a CloudRuntimeException is thrown.
 */
@Override
public List<HostVO> getEligibleUpHostsInClusterForStorageConnection(PrimaryDataStoreInfo primaryStore) {
    List<HostVO> upHosts = listAllUpHosts(Host.Type.Routing, primaryStore.getClusterId(), primaryStore.getPodId(), primaryStore.getDataCenterId());
    if (CollectionUtils.isEmpty(upHosts)) {
        _storagePoolDao.expunge(primaryStore.getId());
        throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primaryStore.getClusterId());
    }
    List<String> poolAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(primaryStore.getId());
    return filterHostsBasedOnStorageAccessGroups(upHosts, poolAccessGroups);
}
/**
 * Returns the Up-and-enabled routing hosts in the pool's cluster that are allowed to connect
 * to it, based on shared storage access groups. When the cluster has no such host at all, the
 * pool record is expunged and a CloudRuntimeException is thrown.
 */
@Override
public List<HostVO> getEligibleUpAndEnabledHostsInClusterForStorageConnection(PrimaryDataStoreInfo primaryStore) {
    List<HostVO> candidates = listAllUpAndEnabledHosts(Host.Type.Routing, primaryStore.getClusterId(), primaryStore.getPodId(), primaryStore.getDataCenterId());
    if (CollectionUtils.isEmpty(candidates)) {
        _storagePoolDao.expunge(primaryStore.getId());
        throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primaryStore.getClusterId());
    }
    List<String> poolAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(primaryStore.getId());
    return filterHostsBasedOnStorageAccessGroups(candidates, poolAccessGroups);
}
/**
 * Returns the Up-and-enabled hosts of the given hypervisor type in the zone that are allowed
 * to connect to the data store, based on shared storage access groups.
 *
 * NOTE(review): unlike the cluster-scoped variants, there is no empty-host check / pool
 * expunge here — confirm that is intentional.
 */
@Override
public List<HostVO> getEligibleUpAndEnabledHostsInZoneForStorageConnection(DataStore dataStore, long zoneId, HypervisorType hypervisorType) {
    List<String> poolAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(dataStore.getId());
    List<HostVO> candidates = listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, zoneId);
    return filterHostsBasedOnStorageAccessGroups(candidates, poolAccessGroups);
}
/**
 * Guards a storage-access-group removal: throws when EVERY host in the given scope (zone,
 * cluster, and/or pod — each checked only when its ID is non-null) still runs VMs with
 * volumes on pools carrying the groups being removed, since removing the groups would then
 * strand all hosts.
 *
 * Fix: the in-use host-ID set was rebuilt from the same list three times (once per scope
 * branch); it is now computed once up front.
 *
 * NOTE(review): the zone branch uses _hostDao.findByDataCenterId (all host types) while the
 * cluster/pod branches filter to Type.Routing — confirm that asymmetry is intentional.
 *
 * @throws CloudRuntimeException when all hosts in a checked scope use the groups
 */
protected void checkIfAllHostsInUse(List<String> sagsToDelete, Long clusterId, Long podId, Long zoneId) {
    if (CollectionUtils.isEmpty(sagsToDelete)) {
        return;
    }
    // Hosts still running VMs whose volumes sit on pools tagged with the removed groups.
    Set<Long> hostIdsInUseSet = new HashSet<>(listOfHostIdsUsingTheStorageAccessGroups(sagsToDelete, clusterId, podId, zoneId));
    // Check for zone level hosts
    if (zoneId != null) {
        List<HostVO> hostsInZone = _hostDao.findByDataCenterId(zoneId);
        boolean allInUseZone = hostsInZone.stream()
                .map(HostVO::getId)
                .allMatch(hostIdsInUseSet::contains);
        if (allInUseZone) {
            throw new CloudRuntimeException("All hosts in the zone are using the storage access groups");
        }
    }
    // Check for cluster level hosts
    if (clusterId != null) {
        List<HostVO> hostsInCluster = _hostDao.findByClusterId(clusterId, Type.Routing);
        boolean allInUseCluster = hostsInCluster.stream()
                .map(HostVO::getId)
                .allMatch(hostIdsInUseSet::contains);
        if (allInUseCluster) {
            throw new CloudRuntimeException("All hosts in the cluster are using the storage access groups");
        }
    }
    // Check for pod level hosts
    if (podId != null) {
        List<HostVO> hostsInPod = _hostDao.findByPodId(podId, Type.Routing);
        boolean allInUsePod = hostsInPod.stream()
                .map(HostVO::getId)
                .allMatch(hostIdsInUseSet::contains);
        if (allInUsePod) {
            throw new CloudRuntimeException("All hosts in the pod are using the storage access groups");
        }
    }
}
/**
 * Replaces the zone's storage access groups with {@code newStorageAccessGroups}. Computes the
 * groups being dropped (old minus new), refuses when every host in the zone still uses them,
 * recomputes each host's effective group list (new zone groups + surviving host-side groups),
 * reconciles host/pool connections once for the whole zone, and finally pushes the change down
 * to pods/clusters/hosts.
 *
 * Fixes: (1) updateConnectionsBetweenHostsAndStoragePools was called INSIDE the pod loop with
 * the cumulative map, re-reconciling all previously processed pods' hosts on every iteration —
 * it is now called once after the map is fully built, matching updatePodStorageAccessGroups /
 * updateClusterStorageAccessGroups. (2) Local `sagsOnPod` renamed `sagsOnZone` — it holds the
 * zone's groups.
 */
@Override
public void updateZoneStorageAccessGroups(long zoneId, List<String> newStorageAccessGroups) {
    DataCenterVO zoneVO = _dcDao.findById(zoneId);
    if (logger.isDebugEnabled()) {
        logger.debug(String.format("Updating storage access groups %s to the zone %s", newStorageAccessGroups, zoneVO));
    }
    List<String> sagsToAdd = new ArrayList<>(newStorageAccessGroups);
    String sagsOnZone = zoneVO.getStorageAccessGroups();
    // Groups present on the zone before but absent from the new list are being deleted.
    List<String> sagsToDelete;
    if (sagsOnZone == null || sagsOnZone.trim().isEmpty()) {
        sagsToDelete = new ArrayList<>();
    } else {
        sagsToDelete = new ArrayList<>(Arrays.asList(sagsOnZone.split(",")));
    }
    sagsToDelete.removeAll(newStorageAccessGroups);
    checkIfAllHostsInUse(sagsToDelete, null, null, zoneId);
    // Effective groups per host = new zone-level groups + the host's existing groups minus deletions.
    Map<HostVO, List<String>> hostsAndStorageAccessGroupsMap = new HashMap<>();
    List<HostPodVO> pods = _podDao.listByDataCenterId(zoneId);
    for (HostPodVO pod : pods) {
        List<HostVO> hostsInPod = _hostDao.findHypervisorHostInPod(pod.getId());
        for (HostVO host : hostsInPod) {
            String[] existingSAGs = _storageMgr.getStorageAccessGroups(null, null, null, host.getId());
            List<String> existingSAGsList = new ArrayList<>(Arrays.asList(existingSAGs));
            existingSAGsList.removeAll(sagsToDelete);
            List<String> combinedSAGs = new ArrayList<>(sagsToAdd);
            combinedSAGs.addAll(existingSAGsList);
            hostsAndStorageAccessGroupsMap.put(host, combinedSAGs);
        }
    }
    // Reconcile once for the whole zone (previously done per pod with the growing map).
    updateConnectionsBetweenHostsAndStoragePools(hostsAndStorageAccessGroupsMap);
    removeStorageAccessGroupsOnPodsInZone(zoneVO.getId(), newStorageAccessGroups, sagsToDelete);
}
/**
 * Replaces the pod's storage access groups with {@code newStorageAccessGroups}: computes the
 * groups being dropped, refuses when every host in the pod still uses them, recomputes each
 * host's effective group list, reconciles host/pool connections once, and pushes the change
 * down to the pod's clusters and hosts.
 */
@Override
public void updatePodStorageAccessGroups(long podId, List<String> newStorageAccessGroups) {
    HostPodVO podVO = _podDao.findById(podId);
    if (logger.isDebugEnabled()) {
        logger.debug(String.format("Updating storage access groups %s to the pod %s", newStorageAccessGroups, podVO));
    }
    // Groups on the pod before but absent from the new list are being deleted.
    String currentPodGroups = podVO.getStorageAccessGroups();
    List<String> sagsToDelete = (currentPodGroups == null || currentPodGroups.trim().isEmpty())
            ? new ArrayList<>()
            : new ArrayList<>(Arrays.asList(currentPodGroups.split(",")));
    sagsToDelete.removeAll(newStorageAccessGroups);
    checkIfAllHostsInUse(sagsToDelete, null, podId, null);
    // Effective groups per host = new pod-level groups + the host's surviving existing groups.
    Map<HostVO, List<String>> hostsAndStorageAccessGroupsMap = new HashMap<>();
    for (HostVO host : _hostDao.findHypervisorHostInPod(podId)) {
        List<String> effectiveGroups = new ArrayList<>(newStorageAccessGroups);
        List<String> existingGroups = new ArrayList<>(Arrays.asList(_storageMgr.getStorageAccessGroups(null, null, null, host.getId())));
        existingGroups.removeAll(sagsToDelete);
        effectiveGroups.addAll(existingGroups);
        hostsAndStorageAccessGroupsMap.put(host, effectiveGroups);
    }
    updateConnectionsBetweenHostsAndStoragePools(hostsAndStorageAccessGroupsMap);
    removeStorageAccessGroupsOnClustersInPod(podId, newStorageAccessGroups, sagsToDelete);
}
/**
 * Replaces the cluster's storage access groups with {@code newStorageAccessGroups}: computes
 * the groups being dropped, refuses when every host in the cluster still uses them, recomputes
 * each host's effective (de-duplicated) group list, reconciles host/pool connections once, and
 * finally adjusts the host-level group lists.
 */
@Override
public void updateClusterStorageAccessGroups(Long clusterId, List<String> newStorageAccessGroups) {
    ClusterVO cluster = (ClusterVO) getCluster(clusterId);
    if (logger.isDebugEnabled()) {
        logger.debug(String.format("Updating storage access groups %s to the cluster %s", newStorageAccessGroups, cluster));
    }
    // Groups on the cluster before but absent from the new list are being deleted.
    String currentClusterGroups = cluster.getStorageAccessGroups();
    List<String> sagsToDelete = (currentClusterGroups == null || currentClusterGroups.trim().isEmpty())
            ? new ArrayList<>()
            : new ArrayList<>(Arrays.asList(currentClusterGroups.split(",")));
    sagsToDelete.removeAll(newStorageAccessGroups);
    checkIfAllHostsInUse(sagsToDelete, clusterId, null, null);
    // Effective groups per host = new cluster-level groups + surviving existing groups, de-duplicated.
    Map<HostVO, List<String>> hostsAndStorageAccessGroupsMap = new HashMap<>();
    for (HostVO host : _hostDao.findHypervisorHostInCluster(cluster.getId())) {
        Set<String> existingGroups = new HashSet<>(Arrays.asList(_storageMgr.getStorageAccessGroups(null, null, null, host.getId())));
        existingGroups.removeAll(sagsToDelete);
        Set<String> effectiveGroups = new HashSet<>(newStorageAccessGroups);
        effectiveGroups.addAll(existingGroups);
        hostsAndStorageAccessGroupsMap.put(host, new ArrayList<>(effectiveGroups));
    }
    updateConnectionsBetweenHostsAndStoragePools(hostsAndStorageAccessGroupsMap);
    updateStorageAccessGroupsToBeDeletedOnHostsInCluster(cluster.getId(), sagsToDelete);
    updateStorageAccessGroupsToBeAddedOnHostsInCluster(cluster.getId(), newStorageAccessGroups);
}
/**
 * Replaces the host's own storage access groups with {@code newStorageAccessGroups}. The
 * host's effective group set also includes whatever its cluster contributes; connections are
 * reconciled against that combined set after verifying no in-use volume would be stranded.
 */
@Override
public void updateHostStorageAccessGroups(Long hostId, List<String> newStorageAccessGroups) {
    HostVO host = _hostDao.findById(hostId);
    if (logger.isDebugEnabled()) {
        logger.debug(String.format("Updating storage access groups %s to the host %s", newStorageAccessGroups, host));
    }
    // Effective groups = requested host-level groups + groups inherited from the cluster.
    List<String> sagsToAdd = new ArrayList<>(newStorageAccessGroups);
    String[] clusterGroups = _storageMgr.getStorageAccessGroups(null, null, host.getClusterId(), null);
    if (ArrayUtils.isNotEmpty(clusterGroups)) {
        sagsToAdd.addAll(Arrays.asList(clusterGroups));
    }
    // Groups currently on the host but absent from the new list are being deleted.
    String currentHostGroups = host.getStorageAccessGroups();
    List<String> sagsToDelete = new ArrayList<>();
    if (currentHostGroups != null && !currentHostGroups.trim().isEmpty()) {
        sagsToDelete.addAll(Arrays.asList(currentHostGroups.split(",")));
    }
    sagsToDelete.removeAll(newStorageAccessGroups);
    checkIfAnyVolumesInUse(sagsToAdd, sagsToDelete, host);
    updateConnectionsBetweenHostsAndStoragePools(Collections.singletonMap(host, sagsToAdd));
    host.setStorageAccessGroups(CollectionUtils.isEmpty(newStorageAccessGroups) ? null : String.join(",", newStorageAccessGroups));
    _hostDao.update(host.getId(), host);
}
/**
 * Guards a host-level storage access group change: throws when the host still has volumes on
 * pools carrying the groups being removed AND those pools would not remain reachable through
 * the groups being added (or, with no groups added, through the group-less pools in scope).
 *
 * @throws CloudRuntimeException when in-use volumes would lose their host/pool connection
 */
protected void checkIfAnyVolumesInUse(List<String> sagsToAdd, List<String> sagsToDelete, HostVO host) {
    if (CollectionUtils.isEmpty(sagsToDelete)) {
        return;
    }
    List<VolumeVO> volumesUsingTheStoragePoolAccessGroups = listOfVolumesUsingTheStorageAccessGroups(sagsToDelete, host.getId(), null, null, null);
    if (CollectionUtils.isEmpty(volumesUsingTheStoragePoolAccessGroups)) {
        return;
    }
    // Pools that will still be reachable after the change.
    List<StoragePoolVO> poolsToAdd = CollectionUtils.isNotEmpty(sagsToAdd)
            ? getStoragePoolsByAccessGroups(host.getDataCenterId(), host.getPodId(), host.getClusterId(), sagsToAdd.toArray(new String[0]), true)
            : getStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), host.getPodId(), host.getClusterId());
    if (CollectionUtils.isNotEmpty(poolsToAdd)) {
        Set<Long> poolIdsToAdd = poolsToAdd.stream()
                .map(StoragePoolVO::getId)
                .collect(Collectors.toSet());
        // Volumes on still-reachable pools are not a problem; drop them from the conflict list.
        volumesUsingTheStoragePoolAccessGroups.removeIf(volume -> poolIdsToAdd.contains(volume.getPoolId()));
    }
    if (CollectionUtils.isNotEmpty(volumesUsingTheStoragePoolAccessGroups)) {
        logger.error(String.format("There are volumes in storage pools with the Storage Access Groups that need to be deleted or " +
                "in the storage pools which are already connected to the host. Those volume IDs are %s", volumesUsingTheStoragePoolAccessGroups));
        throw new CloudRuntimeException("There are volumes in storage pools with the Storage Access Groups that need to be deleted or " +
                "in the storage pools which are already connected to the host");
    }
}
/**
 * Reconciles host/pool connections for the given hosts: snapshots each host's current pool
 * connections, connects the pools implied by the host's (new) storage access groups, then
 * disconnects whatever was connected before but is no longer desired.
 */
protected void updateConnectionsBetweenHostsAndStoragePools(Map<HostVO, List<String>> hostsAndStorageAccessGroupsMap) {
    List<HostVO> affectedHosts = new ArrayList<>(hostsAndStorageAccessGroupsMap.keySet());
    Map<HostVO, List<StoragePoolVO>> connectedBefore = getHostStoragePoolsBefore(affectedHosts);
    Map<HostVO, List<StoragePoolVO>> desiredAfter = getHostPoolsToAddAfter(hostsAndStorageAccessGroupsMap);
    disconnectPoolsNotInAccessGroups(connectedBefore, desiredAfter);
}
/**
 * Snapshots, per host, the storage pools it is currently connected to (resolving each
 * host/pool link to its pool record; links whose pool cannot be found are skipped).
 */
private Map<HostVO, List<StoragePoolVO>> getHostStoragePoolsBefore(List<HostVO> hostsList) {
    Map<HostVO, List<StoragePoolVO>> connectionsByHost = new HashMap<>();
    for (HostVO host : hostsList) {
        List<StoragePoolVO> connectedPools = new ArrayList<>();
        List<StoragePoolHostVO> links = _storageMgr.findStoragePoolsConnectedToHost(host.getId());
        if (CollectionUtils.isNotEmpty(links)) {
            for (StoragePoolHostVO link : links) {
                StoragePoolVO pool = _storagePoolDao.findById(link.getPoolId());
                if (pool != null) {
                    connectedPools.add(pool);
                }
            }
        }
        connectionsByHost.put(host, connectedPools);
    }
    return connectionsByHost;
}
/**
 * Computes the pools each host should be connected to under its new storage
 * access groups and connects the host to them. A host with groups sees pools
 * sharing a group (plus group-less pools); a host without groups sees only
 * group-less pools.
 *
 * NOTE: this method has a side effect — it calls connectHostToStoragePools()
 * for every host while building the result map.
 *
 * @param hostsAndStorageAccessGroupsMap host -> new storage access groups
 * @return host -> pools the host should now be connected to
 */
private Map<HostVO, List<StoragePoolVO>> getHostPoolsToAddAfter(Map<HostVO, List<String>> hostsAndStorageAccessGroupsMap) {
    Map<HostVO, List<StoragePoolVO>> desiredPoolsByHost = new HashMap<>();
    for (Map.Entry<HostVO, List<String>> entry : hostsAndStorageAccessGroupsMap.entrySet()) {
        HostVO host = entry.getKey();
        List<String> newGroups = entry.getValue();
        List<StoragePoolVO> poolsToAdd = CollectionUtils.isNotEmpty(newGroups)
                ? getStoragePoolsByAccessGroups(host.getDataCenterId(), host.getPodId(), host.getClusterId(), newGroups.toArray(new String[0]), true)
                : getStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), host.getPodId(), host.getClusterId());
        desiredPoolsByHost.put(host, poolsToAdd);
        // Establish the new connections immediately.
        connectHostToStoragePools(host, poolsToAdd);
    }
    return desiredPoolsByHost;
}
/**
 * Disconnects each host from the pools it was connected to before the storage
 * access group change but which are absent from its new desired pool set.
 *
 * Pools are compared by primary key id rather than object equality: the
 * "before" and "after" lists hold distinct VO instances loaded by separate
 * DAO queries, so relying on List.contains()/equals() is fragile if the VO
 * does not override equals().
 *
 * @param hostStoragePoolsMapBefore host -> pools connected before the change
 * @param hostPoolsToAddMapAfter    host -> pools desired after the change
 */
private void disconnectPoolsNotInAccessGroups(Map<HostVO, List<StoragePoolVO>> hostStoragePoolsMapBefore, Map<HostVO, List<StoragePoolVO>> hostPoolsToAddMapAfter) {
    for (Map.Entry<HostVO, List<StoragePoolVO>> entry : hostStoragePoolsMapBefore.entrySet()) {
        HostVO host = entry.getKey();
        List<StoragePoolVO> poolsToAdd = hostPoolsToAddMapAfter.get(host);
        // Ids of pools the host should keep; empty when no desired set exists.
        Set<Long> poolIdsToKeep = (poolsToAdd == null)
                ? Collections.emptySet()
                : poolsToAdd.stream().map(StoragePoolVO::getId).collect(Collectors.toSet());
        List<StoragePoolVO> poolsToDelete = new ArrayList<>();
        for (StoragePoolVO pool : entry.getValue()) {
            if (!poolIdsToKeep.contains(pool.getId())) {
                poolsToDelete.add(pool);
            }
        }
        if (CollectionUtils.isNotEmpty(poolsToDelete)) {
            disconnectHostFromStoragePools(host, poolsToDelete);
        }
    }
}
/**
 * Returns the cluster-scoped and zone-wide pools matching any of the given
 * storage access groups; when {@code includeEmptyTags} is true, pools with no
 * access groups at all are appended as well (they are open to every host).
 *
 * @param dcId                zone id
 * @param podId               pod id (cluster-scope lookup)
 * @param clusterId           cluster id (cluster-scope lookup)
 * @param storageAccessGroups groups to match pools against
 * @param includeEmptyTags    whether to also include group-less pools
 * @return matching pools (may contain duplicates across scopes as in the individual queries)
 */
protected List<StoragePoolVO> getStoragePoolsByAccessGroups(Long dcId, Long podId, Long clusterId, String[] storageAccessGroups, boolean includeEmptyTags) {
    List<StoragePoolVO> matchingPools = new ArrayList<>(
            _storagePoolDao.findPoolsByAccessGroupsForHostConnection(dcId, podId, clusterId, ScopeType.CLUSTER, storageAccessGroups));
    matchingPools.addAll(_storagePoolDao.findZoneWideStoragePoolsByAccessGroupsForHostConnection(dcId, storageAccessGroups));
    if (includeEmptyTags) {
        // Group-less pools connect to all hosts, so they always qualify.
        matchingPools.addAll(_storagePoolDao.findStoragePoolsByEmptyStorageAccessGroups(dcId, podId, clusterId, ScopeType.CLUSTER, null));
        matchingPools.addAll(_storagePoolDao.findStoragePoolsByEmptyStorageAccessGroups(dcId, null, null, ScopeType.ZONE, null));
    }
    return matchingPools;
}
/**
 * Returns the cluster-scoped and zone-wide pools that carry no storage access
 * groups; such pools are connectable by any host regardless of its groups.
 *
 * @param dcId      zone id
 * @param podId     pod id (cluster-scope lookup)
 * @param clusterId cluster id (cluster-scope lookup)
 * @return pools without any storage access group
 */
private List<StoragePoolVO> getStoragePoolsByEmptyStorageAccessGroups(Long dcId, Long podId, Long clusterId) {
    List<StoragePoolVO> groupLessPools = new ArrayList<>(
            _storagePoolDao.findStoragePoolsByEmptyStorageAccessGroups(dcId, podId, clusterId, ScopeType.CLUSTER, null));
    groupLessPools.addAll(_storagePoolDao.findStoragePoolsByEmptyStorageAccessGroups(dcId, null, null, ScopeType.ZONE, null));
    return groupLessPools;
}
/**
 * Connects the host to every pool in {@code poolsToAdd} it is not already
 * connected to. A StorageConflictException aborts the operation; any other
 * per-pool failure is logged and skipped so one bad pool does not prevent the
 * remaining connections.
 *
 * Fixes vs. previous version: the StorageConflictException cause is now
 * chained into the rethrown CloudRuntimeException instead of being dropped,
 * and existing connections are collected into a Set once instead of being
 * linearly rescanned for every candidate pool.
 *
 * @param host       host to connect
 * @param poolsToAdd candidate shared pools for this host
 */
private void connectHostToStoragePools(HostVO host, List<StoragePoolVO> poolsToAdd) {
    // Snapshot the ids of pools already connected to this host.
    List<StoragePoolHostVO> existingRefs = _storageMgr.findStoragePoolsConnectedToHost(host.getId());
    Set<Long> connectedPoolIds = new HashSet<>();
    if (CollectionUtils.isNotEmpty(existingRefs)) {
        for (StoragePoolHostVO ref : existingRefs) {
            connectedPoolIds.add(ref.getPoolId());
        }
    }
    for (StoragePoolVO storagePool : poolsToAdd) {
        if (connectedPoolIds.contains(storagePool.getId())) {
            continue;
        }
        try {
            _storageMgr.connectHostToSharedPool(host, storagePool.getId());
        } catch (StorageConflictException se) {
            // Chain the cause so the conflict details are preserved for the caller.
            throw new CloudRuntimeException(String.format("Unable to establish a connection between pool %s and the host %s", storagePool, host), se);
        } catch (Exception e) {
            logger.warn(String.format("Unable to establish a connection between pool %s and the host %s", storagePool, host), e);
        }
    }
}
/**
 * Connects the host to a single shared pool. A StorageConflictException is
 * rethrown as a CloudRuntimeException (with the original exception chained as
 * the cause — previously it was dropped); any other failure is only logged.
 *
 * @param host        host to connect
 * @param storagePool shared pool to connect it to
 */
protected void connectHostToStoragePool(HostVO host, StoragePoolVO storagePool) {
    try {
        _storageMgr.connectHostToSharedPool(host, storagePool.getId());
    } catch (StorageConflictException se) {
        // Preserve the cause for diagnosability.
        throw new CloudRuntimeException(String.format("Unable to establish a connection between pool %s and the host %s", storagePool, host), se);
    } catch (Exception e) {
        logger.warn(String.format("Unable to establish a connection between pool %s and the host %s", storagePool, host), e);
    }
}
/**
 * Disconnects the host from the given pools, except those that currently hold
 * volumes used by the host — those are removed from {@code poolsToDelete}
 * (the list is mutated) and left connected.
 *
 * @param host          host to disconnect
 * @param poolsToDelete candidate pools to disconnect; entries still in use by the host are dropped in place
 */
private void disconnectHostFromStoragePools(HostVO host, List<StoragePoolVO> poolsToDelete) {
    List<Long> usedStoragePoolIDs = listOfStoragePoolIDsUsedByHost(host.getId());
    if (usedStoragePoolIDs != null) {
        // Set lookup instead of a nested linear scan; value semantics are unchanged.
        Set<Long> inUseIds = new HashSet<>(usedStoragePoolIDs);
        poolsToDelete.removeIf(pool -> inUseIds.contains(pool.getId()));
    }
    for (StoragePoolVO storagePool : poolsToDelete) {
        disconnectHostFromStoragePool(host, storagePool);
    }
}
/**
 * Disconnects the host from a single shared pool and removes the associated
 * host/pool detail records. A StorageConflictException is rethrown as a
 * CloudRuntimeException with the original exception chained as the cause
 * (previously dropped); any other failure is only logged.
 *
 * @param host        host to disconnect
 * @param storagePool pool to disconnect it from
 */
protected void disconnectHostFromStoragePool(HostVO host, StoragePoolVO storagePool) {
    try {
        _storageMgr.disconnectHostFromSharedPool(host, storagePool);
        _storagePoolHostDao.deleteStoragePoolHostDetails(host.getId(), storagePool.getId());
    } catch (StorageConflictException se) {
        // Preserve the cause for diagnosability.
        throw new CloudRuntimeException(String.format("Unable to disconnect the pool %s and the host %s", storagePool, host), se);
    } catch (Exception e) {
        logger.warn(String.format("Unable to disconnect the pool %s and the host %s", storagePool, host), e);
    }
}
private void updateHostTags(HostVO host, Long hostId, List<String> hostTags, Boolean isTagARule) {
List<VMInstanceVO> activeVMs = _vmDao.listByHostId(hostId);
logger.warn(String.format("The following active VMs [%s] are using the host [%s]. " +
@ -2261,7 +3015,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
}
protected HostVO createHostVO(final StartupCommand[] cmds, final ServerResource resource, final Map<String, String> details, List<String> hostTags,
final ResourceStateAdapter.Event stateEvent) {
List<String> storageAccessGroups, final ResourceStateAdapter.Event stateEvent) {
boolean newHost = false;
StartupCommand startup = cmds[0];
@ -2353,6 +3107,9 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
host.setStorageUrl(startup.getIqn());
host.setLastPinged(System.currentTimeMillis() >> 10);
host.setHostTags(hostTags, false);
if ((CollectionUtils.isNotEmpty(storageAccessGroups))) {
host.setStorageAccessGroups(String.join(",", storageAccessGroups));
}
host.setDetails(details);
host.setArch(CPU.CPUArch.fromType(startup.getArch()));
if (startup.getStorageIpAddressDeux() != null) {
@ -2495,11 +3252,11 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
}
}
private Host createHostAndAgent(final ServerResource resource, final Map<String, String> details, final boolean old, final List<String> hostTags, final boolean forRebalance) {
return createHostAndAgent(resource, details, old, hostTags, forRebalance, false);
private Host createHostAndAgent(final ServerResource resource, final Map<String, String> details, final boolean old, final List<String> hostTags, List<String> storageAccessGroups, final boolean forRebalance) {
return createHostAndAgent(resource, details, old, hostTags, storageAccessGroups, forRebalance, false);
}
private Host createHostAndAgent(final ServerResource resource, final Map<String, String> details, final boolean old, final List<String> hostTags, final boolean forRebalance, final boolean isTransferredConnection) {
private Host createHostAndAgent(final ServerResource resource, final Map<String, String> details, final boolean old, final List<String> hostTags, List<String> storageAccessGroups, final boolean forRebalance, final boolean isTransferredConnection) {
HostVO host = null;
StartupCommand[] cmds = null;
boolean hostExists = false;
@ -2541,7 +3298,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
// find out if the host we want to connect to is new (so we can send an event)
boolean newHost = getNewHost(cmds) == null;
host = createHostVO(cmds, resource, details, hostTags, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_DIRECT_CONNECT);
host = createHostVO(cmds, resource, details, hostTags, storageAccessGroups, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_DIRECT_CONNECT);
if (host != null) {
created = _agentMgr.handleDirectConnectAgent(host, cmds, resource, forRebalance, newHost);
@ -2568,7 +3325,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
return host;
}
private Host createHostAndAgentDeferred(final ServerResource resource, final Map<String, String> details, final boolean old, final List<String> hostTags, final boolean forRebalance) {
private Host createHostAndAgentDeferred(final ServerResource resource, final Map<String, String> details, final boolean old, final List<String> hostTags, List<String> storageAccessGroups, final boolean forRebalance) {
HostVO host = null;
StartupCommand[] cmds = null;
boolean hostExists = false;
@ -2625,7 +3382,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
// find out if the host we want to connect to is new (so we can send an event)
newHost = getNewHost(cmds) == null;
host = createHostVO(cmds, resource, details, hostTags, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_DIRECT_CONNECT);
host = createHostVO(cmds, resource, details, hostTags, storageAccessGroups, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_DIRECT_CONNECT);
if (host != null) {
// if first host in cluster no need to defer agent creation
@ -2682,7 +3439,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
@Override
public Host createHostAndAgent(final Long hostId, final ServerResource resource, final Map<String, String> details, final boolean old, final List<String> hostTags, final boolean forRebalance, boolean isTransferredConnection) {
final Host host = createHostAndAgent(resource, details, old, hostTags, forRebalance, isTransferredConnection);
final Host host = createHostAndAgent(resource, details, old, hostTags, null, forRebalance, isTransferredConnection);
return host;
}
@ -2701,12 +3458,12 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
}
}
return createHostAndAgent(resource, hostDetails, true, null, false);
return createHostAndAgent(resource, hostDetails, true, null, null, false);
}
@Override
public HostVO createHostVOForConnectedAgent(final StartupCommand[] cmds) {
return createHostVO(cmds, null, null, null, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_CONNECTED);
return createHostVO(cmds, null, null, null, null, ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_CONNECTED);
}
private void checkIPConflicts(final HostPodVO pod, final DataCenterVO dc, final String serverPrivateIP, final String serverPublicIP) {

View File

@ -213,6 +213,7 @@ import org.apache.cloudstack.api.command.admin.storage.AddImageStoreS3CMD;
import org.apache.cloudstack.api.command.admin.storage.AddObjectStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd;
import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd;
import org.apache.cloudstack.api.command.admin.storage.ConfigureStorageAccessCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd;
@ -223,6 +224,7 @@ import org.apache.cloudstack.api.command.admin.storage.FindStoragePoolsForMigrat
import org.apache.cloudstack.api.command.admin.storage.ListImageStoresCmd;
import org.apache.cloudstack.api.command.admin.storage.ListObjectStoragePoolsCmd;
import org.apache.cloudstack.api.command.admin.storage.ListSecondaryStagingStoresCmd;
import org.apache.cloudstack.api.command.admin.storage.ListStorageAccessGroupsCmd;
import org.apache.cloudstack.api.command.admin.storage.ListStoragePoolsCmd;
import org.apache.cloudstack.api.command.admin.storage.ListStorageProvidersCmd;
import org.apache.cloudstack.api.command.admin.storage.ListStorageTagsCmd;
@ -1275,6 +1277,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
final Object allocationState = cmd.getAllocationState();
final String keyword = cmd.getKeyword();
final CPU.CPUArch arch = cmd.getArch();
final String storageAccessGroup = cmd.getStorageAccessGroup();
zoneId = _accountMgr.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), zoneId);
final Filter searchFilter = new Filter(ClusterVO.class, "id", true, cmd.getStartIndex(), cmd.getPageSizeVal());
@ -1288,6 +1291,13 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
sb.and("clusterType", sb.entity().getClusterType(), SearchCriteria.Op.EQ);
sb.and("allocationState", sb.entity().getAllocationState(), SearchCriteria.Op.EQ);
sb.and("arch", sb.entity().getArch(), SearchCriteria.Op.EQ);
if (storageAccessGroup != null) {
sb.and().op("storageAccessGroupExact", sb.entity().getStorageAccessGroups(), SearchCriteria.Op.EQ);
sb.or("storageAccessGroupPrefix", sb.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE);
sb.or("storageAccessGroupSuffix", sb.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE);
sb.or("storageAccessGroupMiddle", sb.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE);
sb.cp();
}
final SearchCriteria<ClusterVO> sc = sb.create();
if (id != null) {
@ -1331,6 +1341,13 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
sc.setParameters("arch", arch);
}
if (storageAccessGroup != null) {
sc.setParameters("storageAccessGroupExact", storageAccessGroup);
sc.setParameters("storageAccessGroupPrefix", storageAccessGroup + ",%");
sc.setParameters("storageAccessGroupSuffix", "%," + storageAccessGroup);
sc.setParameters("storageAccessGroupMiddle", "%," + storageAccessGroup + ",%");
}
final Pair<List<ClusterVO>, Integer> result = _clusterDao.searchAndCount(sc, searchFilter);
return new Pair<>(result.first(), result.second());
}
@ -2014,6 +2031,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
Long zoneId = cmd.getZoneId();
final Object keyword = cmd.getKeyword();
final Object allocationState = cmd.getAllocationState();
final String storageAccessGroup = cmd.getStorageAccessGroup();
zoneId = _accountMgr.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), zoneId);
final Filter searchFilter = new Filter(HostPodVO.class, "dataCenterId", true, cmd.getStartIndex(), cmd.getPageSizeVal());
@ -2022,6 +2041,13 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
sb.and("name", sb.entity().getName(), SearchCriteria.Op.EQ);
sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ);
sb.and("allocationState", sb.entity().getAllocationState(), SearchCriteria.Op.EQ);
if (storageAccessGroup != null) {
sb.and().op("storageAccessGroupExact", sb.entity().getStorageAccessGroups(), SearchCriteria.Op.EQ);
sb.or("storageAccessGroupPrefix", sb.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE);
sb.or("storageAccessGroupSuffix", sb.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE);
sb.or("storageAccessGroupMiddle", sb.entity().getStorageAccessGroups(), SearchCriteria.Op.LIKE);
sb.cp();
}
final SearchCriteria<HostPodVO> sc = sb.create();
if (keyword != null) {
@ -2048,6 +2074,13 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
sc.setParameters("allocationState", allocationState);
}
if (storageAccessGroup != null) {
sc.setParameters("storageAccessGroupExact", storageAccessGroup);
sc.setParameters("storageAccessGroupPrefix", storageAccessGroup + ",%");
sc.setParameters("storageAccessGroupSuffix", "%," + storageAccessGroup);
sc.setParameters("storageAccessGroupMiddle", "%," + storageAccessGroup + ",%");
}
final Pair<List<HostPodVO>, Integer> result = _hostPodDao.searchAndCount(sc, searchFilter);
return new Pair<>(result.first(), result.second());
}
@ -3587,12 +3620,14 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
cmdList.add(ListSwiftsCmd.class);
cmdList.add(ListStoragePoolsCmd.class);
cmdList.add(ListStorageTagsCmd.class);
cmdList.add(ListStorageAccessGroupsCmd.class);
cmdList.add(FindStoragePoolsForMigrationCmd.class);
cmdList.add(PreparePrimaryStorageForMaintenanceCmd.class);
cmdList.add(UpdateStoragePoolCmd.class);
cmdList.add(SyncStoragePoolCmd.class);
cmdList.add(UpdateStorageCapabilitiesCmd.class);
cmdList.add(UpdateImageStoreCmd.class);
cmdList.add(ConfigureStorageAccessCmd.class);
cmdList.add(DestroySystemVmCmd.class);
cmdList.add(ListSystemVMsCmd.class);
cmdList.add(MigrateSystemVMCmd.class);

View File

@ -39,6 +39,7 @@ import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
@ -51,14 +52,20 @@ import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import javax.inject.Inject;
import com.cloud.dc.HostPodVO;
import com.cloud.dc.dao.HostPodDao;
import com.cloud.resource.ResourceManager;
import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao;
import org.apache.cloudstack.annotation.AnnotationService;
import org.apache.cloudstack.annotation.dao.AnnotationDao;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd;
import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd;
import org.apache.cloudstack.api.command.admin.storage.ConfigureStorageAccessCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd;
@ -144,6 +151,7 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.time.DateUtils;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.EnumUtils;
import org.springframework.stereotype.Component;
@ -368,6 +376,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@Inject
StoragePoolTagsDao _storagePoolTagsDao;
@Inject
StoragePoolAndAccessGroupMapDao _storagePoolAccessGroupMapDao;
@Inject
PrimaryDataStoreDao primaryStoreDao;
@Inject
DiskOfferingDetailsDao _diskOfferingDetailsDao;
@ -397,6 +407,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
ConfigurationDao configurationDao;
@Inject
private ImageStoreDetailsUtil imageStoreDetailsUtil;
@Inject
protected HostPodDao _podDao;
@Inject
ResourceManager _resourceMgr;
@Inject
StorageManager storageManager;
protected List<StoragePoolDiscoverer> _discoverers;
@ -673,7 +689,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
_storagePoolAcquisitionWaitSeconds = NumbersUtil.parseInt(configs.get("pool.acquisition.wait.seconds"), 1800);
logger.info("pool.acquisition.wait.seconds is configured as " + _storagePoolAcquisitionWaitSeconds + " seconds");
_agentMgr.registerForHostEvents(new StoragePoolMonitor(this, _storagePoolDao, _dataStoreProviderMgr), true, false, true);
_agentMgr.registerForHostEvents(new StoragePoolMonitor(this, _storagePoolDao, _storagePoolHostDao, _dataStoreProviderMgr), true, false, true);
logger.info("Storage cleanup enabled: " + StorageCleanupEnabled.value() + ", interval: " + StorageCleanupInterval.value() + ", delay: " + StorageCleanupDelay.value()
+ ", template cleanup enabled: " + TemplateCleanupEnabled.value());
@ -1021,6 +1037,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
params.put("hypervisorType", hypervisorType);
params.put("url", cmd.getUrl());
params.put("tags", cmd.getTags());
params.put(ApiConstants.STORAGE_ACCESS_GROUPS, cmd.getStorageAccessGroups());
params.put("isTagARule", cmd.isTagARule());
params.put("name", cmd.getStoragePoolName());
params.put("details", details);
@ -1388,6 +1405,232 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
}
/**
 * Applies a storage access group (SAG) assignment to exactly one infrastructure
 * entity — zone, pod, cluster, host, or primary storage pool — identified by
 * the single non-null id on the command.
 *
 * For zone/pod/cluster/host the method first propagates the change through
 * _resourceMgr (which reconciles host/pool connections), then persists the
 * comma-joined group list on the entity row. Pod/cluster/host assignments are
 * rejected when a requested group already exists at an enclosing scope.
 * For a storage pool the group mapping is persisted via
 * _storagePoolAccessGroupMapDao after updating host connections; a
 * DatastoreCluster pool also updates and persists each child pool.
 *
 * @param cmd carries the target id (exactly one of zone/pod/cluster/host/storage pool) and the new group list
 * @return true on success
 * @throws IllegalArgumentException   when not exactly one target id is given
 * @throws InvalidParameterValueException when storageaccessgroups is absent
 * @throws CloudRuntimeException      on persistence failure, scope conflicts, or local-storage pools
 */
@Override
@ActionEvent(eventType = EventTypes.EVENT_CONFIGURE_STORAGE_ACCESS, eventDescription = "configuring storage groups", async = true)
public boolean configureStorageAccess(ConfigureStorageAccessCmd cmd) {
Long zoneId = cmd.getZoneId();
Long podId = cmd.getPodId();
Long clusterId = cmd.getClusterId();
Long hostId = cmd.getHostId();
Long storagePoolId = cmd.getStorageId();
// Exactly one target entity must be specified.
long nonNullCount = Stream.of(zoneId, podId, clusterId, hostId, storagePoolId)
.filter(Objects::nonNull)
.count();
if (nonNullCount != 1) {
throw new IllegalArgumentException("Exactly one of zoneid, podid, clusterid, hostid or storagepoolid is required");
}
// SAG -> Storage Access Group; an empty list clears the assignment, null is rejected.
List<String> storageAccessGroups = cmd.getStorageAccessGroups();
if (storageAccessGroups == null) {
throw new InvalidParameterValueException("storageaccessgroups parameter is required");
}
// Zone scope: no ancestor to conflict with, so no scope check is performed.
if (zoneId != null) {
DataCenterVO zone = _dcDao.findById(zoneId);
Set<String> existingSAGsSet = (zone.getStorageAccessGroups() == null || zone.getStorageAccessGroups().isEmpty())
? Collections.emptySet()
: new HashSet<>(Arrays.asList(zone.getStorageAccessGroups().split(",")));
Set<String> storagePoolSAGsSet = new HashSet<>(storageAccessGroups);
// Skip all work when the assignment is unchanged (set comparison ignores order).
if (!existingSAGsSet.equals(storagePoolSAGsSet)) {
// Reconcile host/pool connections first, then persist on the zone row.
_resourceMgr.updateZoneStorageAccessGroups(zone.getId(), storageAccessGroups);
String preparedStoragePoolTags = CollectionUtils.isEmpty(storageAccessGroups) ? null : String.join(",", storageAccessGroups);
zone.setStorageAccessGroups(preparedStoragePoolTags);
if (!_dcDao.update(zoneId, zone)) {
throw new CloudRuntimeException("Failed to update zone with the storage access groups.");
}
}
}
// Pod scope: requested groups must not already exist on the enclosing zone.
if (podId != null) {
HostPodVO pod = _podDao.findById(podId);
Set<String> existingTagsSet = (pod.getStorageAccessGroups() == null || pod.getStorageAccessGroups().isEmpty())
? Collections.emptySet()
: new HashSet<>(Arrays.asList(pod.getStorageAccessGroups().split(",")));
if (CollectionUtils.isNotEmpty(storageAccessGroups)) {
checkIfStorageAccessGroupsExistsOnZone(pod.getDataCenterId(), storageAccessGroups);
}
Set<String> storagePoolTagsSet = new HashSet<>(storageAccessGroups);
if (!existingTagsSet.equals(storagePoolTagsSet)) {
_resourceMgr.updatePodStorageAccessGroups(podId, storageAccessGroups);
String preparedStoragePoolTags = CollectionUtils.isEmpty(storageAccessGroups) ? null : String.join(",", storageAccessGroups);
pod.setStorageAccessGroups(preparedStoragePoolTags);
if (!_podDao.update(podId, pod)) {
throw new CloudRuntimeException("Failed to update pod with the storage access groups.");
}
}
}
// Cluster scope: requested groups must not already exist on the pod or zone.
if (clusterId != null) {
ClusterVO cluster = _clusterDao.findById(clusterId);
Set<String> existingTagsSet = (cluster.getStorageAccessGroups() == null || cluster.getStorageAccessGroups().isEmpty())
? Collections.emptySet()
: new HashSet<>(Arrays.asList(cluster.getStorageAccessGroups().split(",")));
if (CollectionUtils.isNotEmpty(storageAccessGroups)) {
checkIfStorageAccessGroupsExistsOnPod(cluster.getPodId(), storageAccessGroups);
}
Set<String> storagePoolTagsSet = new HashSet<>(storageAccessGroups);
if (!existingTagsSet.equals(storagePoolTagsSet)) {
_resourceMgr.updateClusterStorageAccessGroups(cluster.getId(), storageAccessGroups);
String preparedStoragePoolTags = CollectionUtils.isEmpty(storageAccessGroups) ? null : String.join(",", storageAccessGroups);
cluster.setStorageAccessGroups(preparedStoragePoolTags);
if (!_clusterDao.update(clusterId, cluster)) {
throw new CloudRuntimeException("Failed to update cluster with the storage access groups.");
}
}
}
// Host scope: requested groups must not already exist on the cluster, pod, or zone.
if (hostId != null) {
HostVO host = _hostDao.findById(hostId);
Set<String> existingTagsSet = (host.getStorageAccessGroups() == null || host.getStorageAccessGroups().isEmpty())
? Collections.emptySet()
: new HashSet<>(Arrays.asList(host.getStorageAccessGroups().split(",")));
if (CollectionUtils.isNotEmpty(storageAccessGroups)) {
checkIfStorageAccessGroupsExistsOnCluster(host.getClusterId(), storageAccessGroups);
}
Set<String> storageAccessGroupsSet = new HashSet<>(storageAccessGroups);
if (!existingTagsSet.equals(storageAccessGroupsSet)) {
_resourceMgr.updateHostStorageAccessGroups(hostId, storageAccessGroups);
String preparedStoragePoolTags = CollectionUtils.isEmpty(storageAccessGroups) ? null : String.join(",", storageAccessGroups);
host.setStorageAccessGroups(preparedStoragePoolTags);
if (!_hostDao.update(hostId, host)) {
throw new CloudRuntimeException("Failed to update host with the storage access groups.");
}
}
}
// Storage pool scope: update host connections, then persist the group mapping.
if (storagePoolId != null) {
StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);
// Local (host-scoped) storage is visible to a single host only; SAGs do not apply.
if (ScopeType.HOST.equals(storagePool.getScope())) {
throw new CloudRuntimeException("Storage Access Groups are not suitable for local storage");
}
if (logger.isDebugEnabled()) {
logger.debug("Updating Storage Pool Access Group Maps to :" + storageAccessGroups);
}
if (storagePool.getPoolType() == StoragePoolType.DatastoreCluster) {
// A datastore cluster propagates the assignment to each child pool;
// the parent pool's mapping is persisted after this branch as well.
List<StoragePoolVO> childStoragePools = _storagePoolDao.listChildStoragePoolsInDatastoreCluster(storagePool.getId());
for (StoragePoolVO childPool : childStoragePools) {
_resourceMgr.updateStoragePoolConnectionsOnHosts(childPool.getId(), storageAccessGroups);
_storagePoolAccessGroupMapDao.persist(childPool.getId(), storageAccessGroups);
}
} else {
_resourceMgr.updateStoragePoolConnectionsOnHosts(storagePool.getId(), storageAccessGroups);
}
_storagePoolAccessGroupMapDao.persist(storagePool.getId(), storageAccessGroups);
}
return true;
}
/**
 * Rejects the requested storage access groups when any of them is already
 * assigned at the zone level (a group may exist at only one scope).
 *
 * @param zoneId              zone to check against
 * @param storageAccessGroups groups being assigned at a lower scope
 * @throws CloudRuntimeException when at least one group already exists on the zone
 */
protected void checkIfStorageAccessGroupsExistsOnZone(long zoneId, List<String> storageAccessGroups) {
    DataCenterVO zoneVO = _dcDao.findById(zoneId);
    List<String> existingTagsOnZone =
            (List<String>) CollectionUtils.intersection(storageAccessGroups, parseTags(zoneVO.getStorageAccessGroups()));
    if (CollectionUtils.isNotEmpty(existingTagsOnZone)) {
        throw new CloudRuntimeException(String.format("access groups already exist on the zone: %s", existingTagsOnZone));
    }
}
/**
 * Rejects the requested storage access groups when any of them is already
 * assigned on the pod or on its enclosing zone. The error message lists the
 * overlapping groups per scope.
 *
 * @param podId               pod to check against (its zone is checked too)
 * @param storageAccessGroups groups being assigned at a lower scope
 * @throws CloudRuntimeException when at least one group already exists on the pod or zone
 */
protected void checkIfStorageAccessGroupsExistsOnPod(long podId, List<String> storageAccessGroups) {
    HostPodVO podVO = _podDao.findById(podId);
    DataCenterVO zoneVO = _dcDao.findById(podVO.getDataCenterId());
    List<String> existingTagsOnPod =
            (List<String>) CollectionUtils.intersection(storageAccessGroups, parseTags(podVO.getStorageAccessGroups()));
    List<String> existingTagsOnZone =
            (List<String>) CollectionUtils.intersection(storageAccessGroups, parseTags(zoneVO.getStorageAccessGroups()));
    if (CollectionUtils.isEmpty(existingTagsOnPod) && CollectionUtils.isEmpty(existingTagsOnZone)) {
        return;
    }
    // Assemble one clause per scope that has an overlap, joined with ", ".
    List<String> clauses = new ArrayList<>();
    if (CollectionUtils.isNotEmpty(existingTagsOnPod)) {
        clauses.add(String.format("on the pod: %s", existingTagsOnPod));
    }
    if (CollectionUtils.isNotEmpty(existingTagsOnZone)) {
        clauses.add(String.format("on the zone: %s", existingTagsOnZone));
    }
    throw new CloudRuntimeException("access groups already exist " + String.join(", ", clauses));
}
/**
 * Rejects the requested storage access groups when any of them is already
 * assigned on the cluster, its pod, or its zone. The error message lists the
 * overlapping groups per scope.
 *
 * @param clusterId           cluster to check against (pod and zone are checked too)
 * @param storageAccessGroups groups being assigned at a lower scope
 * @throws CloudRuntimeException when at least one group already exists on an enclosing scope
 */
protected void checkIfStorageAccessGroupsExistsOnCluster(long clusterId, List<String> storageAccessGroups) {
    ClusterVO clusterVO = _clusterDao.findById(clusterId);
    HostPodVO podVO = _podDao.findById(clusterVO.getPodId());
    DataCenterVO zoneVO = _dcDao.findById(podVO.getDataCenterId());
    List<String> existingTagsOnCluster =
            (List<String>) CollectionUtils.intersection(storageAccessGroups, parseTags(clusterVO.getStorageAccessGroups()));
    List<String> existingTagsOnPod =
            (List<String>) CollectionUtils.intersection(storageAccessGroups, parseTags(podVO.getStorageAccessGroups()));
    List<String> existingTagsOnZone =
            (List<String>) CollectionUtils.intersection(storageAccessGroups, parseTags(zoneVO.getStorageAccessGroups()));
    if (CollectionUtils.isEmpty(existingTagsOnCluster) && CollectionUtils.isEmpty(existingTagsOnPod) && CollectionUtils.isEmpty(existingTagsOnZone)) {
        return;
    }
    // Assemble one clause per scope that has an overlap, joined with ", ".
    List<String> clauses = new ArrayList<>();
    if (CollectionUtils.isNotEmpty(existingTagsOnCluster)) {
        clauses.add(String.format("on the cluster: %s", existingTagsOnCluster));
    }
    if (CollectionUtils.isNotEmpty(existingTagsOnPod)) {
        clauses.add(String.format("on the pod: %s", existingTagsOnPod));
    }
    if (CollectionUtils.isNotEmpty(existingTagsOnZone)) {
        clauses.add(String.format("on the zone: %s", existingTagsOnZone));
    }
    throw new CloudRuntimeException("access groups already exist " + String.join(", ", clauses));
}
/**
 * Splits a comma-separated tag string into a list.
 * Null or blank input yields an empty list. Note: Arrays.asList returns a
 * fixed-size list and entries are not trimmed, matching existing callers.
 *
 * @param tags comma-separated tags, may be null
 * @return list of tag tokens, never null
 */
private List<String> parseTags(String tags) {
    return (tags == null || tags.trim().isEmpty())
            ? Collections.emptyList()
            : Arrays.asList(tags.split(","));
}
@Override
public void removeStoragePoolFromCluster(long hostId, String iScsiName, StoragePool storagePool) {
final Map<String, String> details = new HashMap<>();
@ -2609,11 +2852,152 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
storagePoolTags = storagePoolTagVOList.parallelStream().map(StoragePoolTagVO::getTag).collect(Collectors.toList());
isTagARule = storagePoolTagVOList.get(0).isTagARule();
}
List<String> storageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(datastoreClusterPool.getId());
_storagePoolDao.persist(dataStoreVO, details, storagePoolTags, isTagARule);
_storagePoolDao.persist(dataStoreVO, details, storagePoolTags, isTagARule, storageAccessGroups);
return dataStoreVO;
}
/**
 * Decides whether a host may connect to a storage pool based on storage
 * access groups: a pool without groups is open to every host; a pool with
 * groups requires the host to share at least one of them.
 *
 * Fixes vs. previous version: the pool-side debug log passed {@code host}
 * instead of {@code pool} as the format argument, the host-side log printed
 * the raw array (default Object toString) instead of its contents, and two
 * always-true emptiness guards around the logs were removed.
 *
 * @param host host whose access groups are checked
 * @param pool pool whose access groups are checked
 * @return true when the pool has no groups or shares a group with the host
 */
@Override
public boolean checkIfHostAndStoragePoolHasCommonStorageAccessGroups(Host host, StoragePool pool) {
    String[] hostStorageAccessGroups = getStorageAccessGroups(null, null, null, host.getId());
    List<String> storagePoolAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(pool.getId());
    // A pool without access groups accepts connections from any host.
    if (CollectionUtils.isEmpty(storagePoolAccessGroups)) {
        return true;
    }
    // The pool is restricted but the host carries no groups: no access.
    if (ArrayUtils.isEmpty(hostStorageAccessGroups)) {
        return false;
    }
    logger.debug(String.format("Storage access groups on the host %s are %s", host, Arrays.toString(hostStorageAccessGroups)));
    logger.debug(String.format("Storage access groups on the storage pool %s are %s", pool, storagePoolAccessGroups));
    return CollectionUtils.containsAny(Arrays.asList(hostStorageAccessGroups), storagePoolAccessGroups);
}
/**
 * Checks whether a volume in the {@code Ready} state may be placed on {@code destPool}
 * given the storage-access-group constraints of the source pool, the destination pool
 * and (for a running VM) the host the VM runs on.
 * <p>
 * Volumes that are not Ready, or whose constraints are satisfied, yield
 * {@code (true, "Success")}; otherwise the pair carries {@code false} and a reason.
 *
 * @param destPool the candidate destination pool
 * @param volume   the volume being placed/migrated
 * @return pair of (fits?, message)
 */
@Override
public Pair<Boolean, String> checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(StoragePool destPool, Volume volume) {
    if (Volume.State.Ready.equals(volume.getState())) {
        Long vmId = volume.getInstanceId();
        VMInstanceVO vm = null;
        if (vmId != null) {
            vm = _vmInstanceDao.findById(vmId);
        }
        if (vm == null || State.Stopped.equals(vm.getState())) {
            // Detached volume or stopped VM: source and destination must share an access
            // group, or some up-and-enabled host must be connected to both pools.
            Long srcPoolId = volume.getPoolId();
            StoragePoolVO srcPool = _storagePoolDao.findById(srcPoolId);
            List<String> srcStorageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(srcPoolId);
            List<String> destStorageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(destPool.getId());
            if (CollectionUtils.isNotEmpty(srcStorageAccessGroups) && CollectionUtils.isNotEmpty(destStorageAccessGroups)) {
                logger.debug(String.format("Storage access groups on source storage %s are %s and destination storage %s are %s",
                        srcPool, srcStorageAccessGroups, destPool, destStorageAccessGroups));
                List<String> intersection = new ArrayList<>(srcStorageAccessGroups);
                intersection.retainAll(destStorageAccessGroups);
                if (CollectionUtils.isNotEmpty(intersection)) {
                    return new Pair<>(true, "Success");
                } else {
                    // No shared group: fall back to looking for a host that can reach both pools.
                    List<Long> poolIds = new ArrayList<>();
                    poolIds.add(srcPool.getId());
                    poolIds.add(destPool.getId());
                    Host hostWithPoolsAccess = findUpAndEnabledHostWithAccessToStoragePools(poolIds);
                    if (hostWithPoolsAccess == null) {
                        logger.debug("Storage access groups on source and destination storages do not match, and there is no common host connected to these storages");
                        return new Pair<>(false, "No common host connected to source and destination storages");
                    }
                }
            }
            return new Pair<>(true, "Success");
        } else {
            if (State.Running.equals(vm.getState())) {
                // Volume attached to a running VM: the VM's current host must be able to
                // reach whichever pools are restricted by access groups.
                Long hostId = vm.getHostId();
                String[] hostStorageAccessGroups = getStorageAccessGroups(null, null, null, hostId);
                Long srcPoolId = volume.getPoolId();
                StoragePoolVO srcPool = _storagePoolDao.findById(srcPoolId);
                List<String> srcStorageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(srcPoolId);
                List<String> destStorageAccessGroups = _storagePoolAccessGroupMapDao.getStorageAccessGroups(destPool.getId());
                logger.debug(String.format("Storage access groups on source storage %s are %s and destination storage %s are %s",
                        srcPool, srcStorageAccessGroups, destPool, destStorageAccessGroups));
                if (CollectionUtils.isEmpty(srcStorageAccessGroups) && CollectionUtils.isEmpty(destStorageAccessGroups)) {
                    return new Pair<>(true, "Success");
                }
                if (CollectionUtils.isNotEmpty(srcStorageAccessGroups) && CollectionUtils.isNotEmpty(destStorageAccessGroups)) {
                    // Both pools restricted: the host must share a group with each of them.
                    // (Removed an unused src/dest intersection computation from the original.)
                    if (ArrayUtils.isNotEmpty(hostStorageAccessGroups)) {
                        List<String> hostGroups = Arrays.asList(hostStorageAccessGroups);
                        boolean hasSrcCommon = srcStorageAccessGroups.stream().anyMatch(hostGroups::contains);
                        boolean hasDestCommon = destStorageAccessGroups.stream().anyMatch(hostGroups::contains);
                        if (hasSrcCommon && hasDestCommon) {
                            return new Pair<>(true, "Success");
                        }
                    }
                    return new Pair<>(false, "No common storage access groups between source, destination pools and host");
                }
                if (CollectionUtils.isEmpty(srcStorageAccessGroups)) {
                    // Source open, destination restricted: host must share a group with the destination.
                    if (ArrayUtils.isNotEmpty(hostStorageAccessGroups)) {
                        // Bug fix: Arrays.asList returns a fixed-size list, so retainAll on it
                        // threw UnsupportedOperationException whenever an element had to be
                        // removed. Copy into a mutable ArrayList before retainAll.
                        List<String> hostAccessGroupList = new ArrayList<>(Arrays.asList(hostStorageAccessGroups));
                        hostAccessGroupList.retainAll(destStorageAccessGroups);
                        if (CollectionUtils.isNotEmpty(hostAccessGroupList)) {
                            return new Pair<>(true, "Success");
                        }
                    }
                    return new Pair<>(false, "Host lacks access to destination storage groups");
                }
                // Source restricted, destination open: destination accepts any host.
                return new Pair<>(true, "Success");
            }
        }
    }
    return new Pair<>(true, "Success");
}
/**
 * Resolves the effective storage access groups for the most specific scope supplied.
 * Exactly one of the ids is honoured, in priority order host > cluster > pod > zone;
 * a host inherits the groups of its cluster, pod and zone (and so on up the chain).
 *
 * @param zoneId    zone scope, used only when the narrower ids are null
 * @param podId     pod scope, used only when clusterId and hostId are null
 * @param clusterId cluster scope, used only when hostId is null
 * @param hostId    host scope, highest priority
 * @return the combined, de-blanked groups; an empty array when none apply
 */
@Override
public String[] getStorageAccessGroups(Long zoneId, Long podId, Long clusterId, Long hostId) {
    List<String> storageAccessGroups = new ArrayList<>();
    if (hostId != null) {
        HostVO host = _hostDao.findById(hostId);
        // NOTE(review): assumes the host belongs to a cluster; host.getClusterId() may be
        // null for some host types, which would NPE here — TODO confirm against callers.
        ClusterVO cluster = _clusterDao.findById(host.getClusterId());
        HostPodVO pod = _podDao.findById(cluster.getPodId());
        DataCenterVO zone = _dcDao.findById(pod.getDataCenterId());
        // Arrays.asList (not List.of) so possible null entries from the split utility do
        // not throw NPE; nulls and blanks are filtered below.
        storageAccessGroups.addAll(Arrays.asList(com.cloud.utils.StringUtils.splitCommaSeparatedStrings(host.getStorageAccessGroups(), cluster.getStorageAccessGroups(), pod.getStorageAccessGroups(), zone.getStorageAccessGroups())));
    } else if (clusterId != null) {
        ClusterVO cluster = _clusterDao.findById(clusterId);
        HostPodVO pod = _podDao.findById(cluster.getPodId());
        DataCenterVO zone = _dcDao.findById(pod.getDataCenterId());
        storageAccessGroups.addAll(Arrays.asList(com.cloud.utils.StringUtils.splitCommaSeparatedStrings(cluster.getStorageAccessGroups(), pod.getStorageAccessGroups(), zone.getStorageAccessGroups())));
    } else if (podId != null) {
        HostPodVO pod = _podDao.findById(podId);
        DataCenterVO zone = _dcDao.findById(pod.getDataCenterId());
        storageAccessGroups.addAll(Arrays.asList(com.cloud.utils.StringUtils.splitCommaSeparatedStrings(pod.getStorageAccessGroups(), zone.getStorageAccessGroups())));
    } else if (zoneId != null) {
        DataCenterVO zone = _dcDao.findById(zoneId);
        storageAccessGroups.addAll(Arrays.asList(com.cloud.utils.StringUtils.splitCommaSeparatedStrings(zone.getStorageAccessGroups())));
    }
    // Drop null/blank entries picked up from unset scopes.
    storageAccessGroups.removeIf(tag -> tag == null || tag.trim().isEmpty());
    // toArray(new String[0]) already yields an empty array for an empty list, so the
    // explicit isEmpty branch (and the commons-lang2 constant) from the original is gone.
    return storageAccessGroups.toArray(new String[0]);
}
private void handleRemoveChildStoragePoolFromDatastoreCluster(Set<String> childDatastoreUUIDs) {
for (String childDatastoreUUID : childDatastoreUUIDs) {

View File

@ -63,6 +63,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
@ -358,6 +359,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
private StatsCollector statsCollector;
@Inject
HostPodDao podDao;
@Inject
EndPointSelector _epSelector;
protected Gson _gson;
@ -3408,6 +3411,18 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
destPool = _volumeMgr.findChildDataStoreInDataStoreCluster(dc, destPoolPod, destPool.getClusterId(), null, null, destPool.getId());
}
Pair<Boolean, String> checkResult = storageMgr.checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(destPool, vol);
if (!checkResult.first()) {
throw new CloudRuntimeException(checkResult.second());
}
if (!liveMigrateVolume && vm != null) {
DataStore primaryStore = dataStoreMgr.getPrimaryDataStore(destPool.getId());
if (_epSelector.select(primaryStore) == null) {
throw new CloudRuntimeException("Unable to find accessible host for volume migration");
}
}
if (!storageMgr.storagePoolCompatibleWithVolumePool(destPool, (Volume) vol)) {
throw new CloudRuntimeException("Storage pool " + destPool.getName() + " is not suitable to migrate volume " + vol.getName());
}

View File

@ -16,17 +16,24 @@
// under the License.
package com.cloud.storage.listener;
import java.util.ArrayList;
import java.util.List;
import javax.inject.Inject;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.HostPodDao;
import com.cloud.exception.StorageConflictException;
import com.cloud.storage.StorageManager;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
@ -52,12 +59,18 @@ public class StoragePoolMonitor implements Listener {
private final StorageManagerImpl _storageManager;
private final PrimaryDataStoreDao _poolDao;
private DataStoreProviderManager _dataStoreProviderMgr;
private final StoragePoolHostDao _storagePoolHostDao;
@Inject
ClusterDao _clusterDao;
@Inject
HostPodDao _podDao;
@Inject
OCFS2Manager _ocfs2Mgr;
public StoragePoolMonitor(StorageManagerImpl mgr, PrimaryDataStoreDao poolDao, DataStoreProviderManager dataStoreProviderMgr) {
/**
 * Wires the monitor to its collaborators: the storage manager that performs
 * connect/disconnect work, the pool and pool-host DAOs, and the data-store
 * provider manager. Pure field assignment; no side effects.
 */
public StoragePoolMonitor(StorageManagerImpl mgr, PrimaryDataStoreDao poolDao, StoragePoolHostDao storagePoolHostDao, DataStoreProviderManager dataStoreProviderMgr) {
    this._dataStoreProviderMgr = dataStoreProviderMgr;
    this._storagePoolHostDao = storagePoolHostDao;
    this._poolDao = poolDao;
    this._storageManager = mgr;
}
@ -104,13 +117,34 @@ public class StoragePoolMonitor implements Listener {
scCmd.getHypervisorType() == HypervisorType.VMware || scCmd.getHypervisorType() == HypervisorType.Simulator ||
scCmd.getHypervisorType() == HypervisorType.Ovm || scCmd.getHypervisorType() == HypervisorType.Hyperv ||
scCmd.getHypervisorType() == HypervisorType.LXC || scCmd.getHypervisorType() == HypervisorType.Ovm3) {
List<StoragePoolVO> pools = _poolDao.listBy(host.getDataCenterId(), host.getPodId(), host.getClusterId(), ScopeType.CLUSTER);
List<StoragePoolVO> zoneStoragePoolsByTags = _poolDao.findZoneWideStoragePoolsByTags(host.getDataCenterId(), null, false);
List<StoragePoolVO> zoneStoragePoolsByHypervisor = _poolDao.findZoneWideStoragePoolsByHypervisor(host.getDataCenterId(), scCmd.getHypervisorType());
zoneStoragePoolsByTags.retainAll(zoneStoragePoolsByHypervisor);
pools.addAll(zoneStoragePoolsByTags);
List<StoragePoolVO> zoneStoragePoolsByAnyHypervisor = _poolDao.findZoneWideStoragePoolsByHypervisor(host.getDataCenterId(), HypervisorType.Any);
String sags[] = _storageManager.getStorageAccessGroups(null, null, null, host.getId());
List<StoragePoolVO> pools = new ArrayList<>();
// SAG -> Storage Access Group
if (ArrayUtils.isEmpty(sags)) {
List<StoragePoolVO> clusterStoragePoolsByEmptySAGs = _poolDao.findStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), host.getPodId(), host.getClusterId(), ScopeType.CLUSTER, null);
List<StoragePoolVO> storagePoolsByEmptySAGs = _poolDao.findStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), null, null, ScopeType.ZONE, null);
List<StoragePoolVO> zoneStoragePoolsByHypervisor = _poolDao.findStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), null, null, ScopeType.ZONE, scCmd.getHypervisorType());
storagePoolsByEmptySAGs.retainAll(zoneStoragePoolsByHypervisor);
pools.addAll(storagePoolsByEmptySAGs);
pools.addAll(clusterStoragePoolsByEmptySAGs);
List<StoragePoolVO> zoneStoragePoolsByAnyHypervisor = _poolDao.findStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), null, null, ScopeType.ZONE, HypervisorType.Any);
pools.addAll(zoneStoragePoolsByAnyHypervisor);
} else {
List<StoragePoolVO> storagePoolsBySAGs = new ArrayList<>();
List<StoragePoolVO> clusterStoragePoolsBySAGs = _poolDao.findPoolsByAccessGroupsForHostConnection(host.getDataCenterId(), host.getPodId(), host.getClusterId(), ScopeType.CLUSTER, sags);
List<StoragePoolVO> clusterStoragePoolsByEmptySAGs = _poolDao.findStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), host.getPodId(), host.getClusterId(), ScopeType.CLUSTER, null);
List<StoragePoolVO> zoneStoragePoolsBySAGs = _poolDao.findZoneWideStoragePoolsByAccessGroupsAndHypervisorTypeForHostConnection(host.getDataCenterId(), sags, scCmd.getHypervisorType());
List<StoragePoolVO> zoneStoragePoolsByHypervisorTypeAny = _poolDao.findZoneWideStoragePoolsByAccessGroupsAndHypervisorTypeForHostConnection(host.getDataCenterId(), sags, HypervisorType.Any);
List<StoragePoolVO> zoneStoragePoolsByEmptySAGs = _poolDao.findStoragePoolsByEmptyStorageAccessGroups(host.getDataCenterId(), null, null, ScopeType.ZONE, null);
storagePoolsBySAGs.addAll(zoneStoragePoolsBySAGs);
storagePoolsBySAGs.addAll(zoneStoragePoolsByEmptySAGs);
storagePoolsBySAGs.addAll(zoneStoragePoolsByHypervisorTypeAny);
storagePoolsBySAGs.addAll(clusterStoragePoolsBySAGs);
storagePoolsBySAGs.addAll(clusterStoragePoolsByEmptySAGs);
pools.addAll(storagePoolsBySAGs);
}
// get the zone wide disabled pools list if global setting is true.
if (StorageManager.MountDisabledStoragePool.value()) {
@ -122,6 +156,9 @@ public class StoragePoolMonitor implements Listener {
pools.addAll(_poolDao.findDisabledPoolsByScope(host.getDataCenterId(), host.getPodId(), host.getClusterId(), ScopeType.CLUSTER));
}
List<StoragePoolHostVO> previouslyConnectedPools = new ArrayList<>();
previouslyConnectedPools.addAll(_storageManager.findStoragePoolsConnectedToHost(host.getId()));
for (StoragePoolVO pool : pools) {
if (!pool.isShared()) {
continue;
@ -141,6 +178,21 @@ public class StoragePoolMonitor implements Listener {
} catch (Exception e) {
throw new ConnectionException(true, String.format("Unable to connect host %s to storage pool %s due to %s", host, pool, e.toString()), e);
}
previouslyConnectedPools.removeIf(sp -> sp.getPoolId() == pool.getId());
}
// Disconnect any pools which are not expected to be connected
for (StoragePoolHostVO poolToDisconnect: previouslyConnectedPools) {
StoragePoolVO pool = _poolDao.findById(poolToDisconnect.getPoolId());
try {
_storageManager.disconnectHostFromSharedPool(host, pool);
_storagePoolHostDao.deleteStoragePoolHostDetails(host.getId(), pool.getId());
} catch (StorageConflictException se) {
throw new CloudRuntimeException(String.format("Unable to disconnect the pool %s and the host %s", pool, host));
} catch (Exception e) {
logger.warn(String.format("Unable to disconnect the pool %s and the host %s", pool, host), e);
}
}
}
}

Some files were not shown because too many files have changed in this diff Show More