Mirror of https://github.com/apache/cloudstack.git (synced 2025-10-26 08:42:29 +01:00)

commit 0ec7c72875
Merge branch '4.19'

EventTypes.java
@@ -451,6 +451,7 @@ public class EventTypes {
     public static final String EVENT_ENABLE_PRIMARY_STORAGE = "ENABLE.PS";
     public static final String EVENT_DISABLE_PRIMARY_STORAGE = "DISABLE.PS";
     public static final String EVENT_SYNC_STORAGE_POOL = "SYNC.STORAGE.POOL";
+    public static final String EVENT_CHANGE_STORAGE_POOL_SCOPE = "CHANGE.STORAGE.POOL.SCOPE";
 
     // VPN
     public static final String EVENT_REMOTE_ACCESS_VPN_CREATE = "VPN.REMOTE.ACCESS.CREATE";
@@ -1002,6 +1003,7 @@ public class EventTypes {
         // Primary storage pool
         entityEventDetails.put(EVENT_ENABLE_PRIMARY_STORAGE, StoragePool.class);
         entityEventDetails.put(EVENT_DISABLE_PRIMARY_STORAGE, StoragePool.class);
+        entityEventDetails.put(EVENT_CHANGE_STORAGE_POOL_SCOPE, StoragePool.class);
 
         // VPN
         entityEventDetails.put(EVENT_REMOTE_ACCESS_VPN_CREATE, RemoteAccessVpn.class);

StorageService.java
@@ -21,6 +21,7 @@ import java.net.UnknownHostException;
 import java.util.Map;
 
 import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd;
+import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd;
 import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd;
 import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd;
 import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd;
@@ -35,6 +36,7 @@ import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd;
 import com.cloud.exception.DiscoveryException;
 import com.cloud.exception.InsufficientCapacityException;
 import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.exception.PermissionDeniedException;
 import com.cloud.exception.ResourceInUseException;
 import com.cloud.exception.ResourceUnavailableException;
 import org.apache.cloudstack.api.command.admin.storage.heuristics.CreateSecondaryStorageSelectorCmd;
@@ -130,4 +132,6 @@ public interface StorageService {
     boolean deleteObjectStore(DeleteObjectStoragePoolCmd cmd);
 
     ObjectStore updateObjectStore(Long id, UpdateObjectStoragePoolCmd cmd);
+
+    void changeStoragePoolScope(ChangeStoragePoolScopeCmd cmd) throws IllegalArgumentException, InvalidParameterValueException, PermissionDeniedException;
 }

ApiConstants.java
@@ -450,6 +450,7 @@ public class ApiConstants {
     public static final String STORAGE_POLICY = "storagepolicy";
     public static final String STORAGE_MOTION_ENABLED = "storagemotionenabled";
     public static final String STORAGE_CAPABILITIES = "storagecapabilities";
+    public static final String STORAGE_CUSTOM_STATS = "storagecustomstats";
     public static final String SUBNET = "subnet";
     public static final String OWNER = "owner";
     public static final String SWAP_OWNER = "swapowner";

ChangeStoragePoolScopeCmd.java (new file)
@@ -0,0 +1,98 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.api.command.admin.storage;
+
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiCommandResourceType;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.BaseAsyncCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.response.ClusterResponse;
+import org.apache.cloudstack.api.response.StoragePoolResponse;
+import org.apache.cloudstack.api.response.SuccessResponse;
+import org.apache.cloudstack.context.CallContext;
+
+import com.cloud.event.EventTypes;
+import com.cloud.storage.StoragePool;
+
+@APICommand(name = "changeStoragePoolScope", description = "Changes the scope of a storage pool when the pool is in Disabled state. " +
+        "This feature is officially tested and supported for Hypervisors: KVM and VMware, Protocols: NFS and Ceph, and Storage Provider: DefaultPrimary. " +
+        "There might be extra steps involved to make this work for other hypervisors and storage options.",
+        responseObject = SuccessResponse.class, since = "4.19.1", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
+public class ChangeStoragePoolScopeCmd extends BaseAsyncCmd {
+
+    @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = StoragePoolResponse.class, required = true, description = "the Id of the storage pool")
+    private Long id;
+
+    @Parameter(name = ApiConstants.SCOPE, type = CommandType.STRING, required = true, description = "the scope of the storage: cluster or zone")
+    private String scope;
+
+    @Parameter(name = ApiConstants.CLUSTER_ID, type = CommandType.UUID, entityType = ClusterResponse.class, description = "the Id of the cluster to use if scope is being set to Cluster")
+    private Long clusterId;
+
+    @Override
+    public ApiCommandResourceType getApiResourceType() {
+        return ApiCommandResourceType.StoragePool;
+    }
+
+    @Override
+    public Long getApiResourceId() {
+        return getId();
+    }
+
+    public String getEventType() {
+        return EventTypes.EVENT_CHANGE_STORAGE_POOL_SCOPE;
+    }
+
+    @Override
+    public String getEventDescription() {
+        String description = "Change storage pool scope. Storage pool Id: ";
+        StoragePool pool = _entityMgr.findById(StoragePool.class, getId());
+        if (pool != null) {
+            description += pool.getUuid();
+        } else {
+            description += getId();
+        }
+        description += " to " + getScope();
+        return description;
+    }
+
+    @Override
+    public void execute() {
+        _storageService.changeStoragePoolScope(this);
+        SuccessResponse response = new SuccessResponse(getCommandName());
+        this.setResponseObject(response);
+    }
+
+    @Override
+    public long getEntityOwnerId() {
+        return CallContext.current().getCallingAccountId();
+    }
+
+    public Long getId() {
+        return id;
+    }
+
+    public String getScope() {
+        return scope;
+    }
+
+    public Long getClusterId() {
+        return clusterId;
+    }
+}
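
For orientation, here is a minimal sketch of what an implementation of the new StorageService.changeStoragePoolScope() contract might validate before delegating, derived only from the command definition above. The StorageManagerImpl changes are not part of this excerpt; the DAO field and the delegation target are assumptions.

    public void changeStoragePoolScope(ChangeStoragePoolScopeCmd cmd) {
        StoragePoolVO pool = primaryDataStoreDao.findById(cmd.getId()); // hypothetical DAO field
        if (pool == null) {
            throw new InvalidParameterValueException("Unable to find storage pool with id " + cmd.getId());
        }
        // the API description requires the pool to be in Disabled state
        if (pool.getStatus() != StoragePoolStatus.Disabled) {
            throw new InvalidParameterValueException("Storage pool must be in Disabled state to change its scope");
        }
        if ("cluster".equalsIgnoreCase(cmd.getScope()) && cmd.getClusterId() == null) {
            throw new InvalidParameterValueException("clusterid is required when scope is cluster");
        }
        // ...then hand off to the pool's PrimaryDataStoreLifeCycle
        // (changeStoragePoolScopeToZone/changeStoragePoolScopeToCluster, added later in this commit)
    }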

ListStoragePoolsCmd.java
@@ -72,7 +72,8 @@ public class ListStoragePoolsCmd extends BaseListCmd {
     @Parameter(name = ApiConstants.HOST_ID, type = CommandType.UUID, entityType = HostResponse.class, description = "host ID of the storage pools")
     private Long hostId;
 
+    @Parameter(name = ApiConstants.STORAGE_CUSTOM_STATS, type = CommandType.BOOLEAN, description = "If true, lists the custom stats of the storage pool", since = "4.18.1")
+    private Boolean customStats;
+
     /////////////////////////////////////////////////////
     /////////////////// Accessors ///////////////////////
     /////////////////////////////////////////////////////
@@ -129,6 +130,10 @@ public class ListStoragePoolsCmd extends BaseListCmd {
         this.scope = scope;
     }
 
+    public Boolean getCustomStats() {
+        return customStats != null && customStats;
+    }
+
     /////////////////////////////////////////////////////
     /////////////// API Implementation///////////////////
     /////////////////////////////////////////////////////

ListAffectedVmsForStorageScopeChangeCmd.java (new file)
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.api.command.admin.vm;
+
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.BaseListCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.response.ClusterResponse;
+import org.apache.cloudstack.api.response.ListResponse;
+import org.apache.cloudstack.api.response.StoragePoolResponse;
+import org.apache.cloudstack.api.response.VirtualMachineResponse;
+
+import com.cloud.vm.VirtualMachine;
+
+@APICommand(name = "listAffectedVmsForStorageScopeChange",
+        description = "List user and system VMs that need to be stopped and destroyed respectively for changing the scope of the storage pool from Zone to Cluster.",
+        responseObject = VirtualMachineResponse.class,
+        requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.19.1",
+        authorized = {RoleType.Admin})
+public class ListAffectedVmsForStorageScopeChangeCmd extends BaseListCmd {
+
+    @Parameter(name = ApiConstants.CLUSTER_ID,
+            type = CommandType.UUID,
+            entityType = ClusterResponse.class,
+            required = true,
+            description = "the Id of the cluster the scope of the storage pool is being changed to")
+    private Long clusterIdForScopeChange;
+
+    @Parameter(name = ApiConstants.STORAGE_ID,
+            type = CommandType.UUID,
+            entityType = StoragePoolResponse.class,
+            required = true,
+            description = "the Id of the storage pool on which change scope operation is being done")
+    private Long storageId;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+
+    public Long getClusterIdForScopeChange() {
+        return clusterIdForScopeChange;
+    }
+
+    public Long getStorageId() {
+        return storageId;
+    }
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+
+    @Override
+    public void execute() {
+        ListResponse<VirtualMachineResponse> response = _queryService.listAffectedVmsForStorageScopeChange(this);
+        response.setResponseName(getCommandName());
+        response.setObjectName(VirtualMachine.class.getSimpleName().toLowerCase());
+        setResponseObject(response);
+    }
+}

StoragePoolResponse.java
@@ -97,6 +97,10 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations {
     @Param(description = "total min IOPS currently in use by volumes")
     private Long allocatedIops;
 
+    @SerializedName(ApiConstants.STORAGE_CUSTOM_STATS)
+    @Param(description = "the storage pool custom stats", since = "4.18.1")
+    private Map<String, String> customStats;
+
     @SerializedName("tags")
     @Param(description = "the tags for the storage pool")
     private String tags;
@@ -304,6 +308,14 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations {
         this.allocatedIops = allocatedIops;
     }
 
+    public Map<String, String> getCustomStats() {
+        return customStats;
+    }
+
+    public void setCustomStats(Map<String, String> customStats) {
+        this.customStats = customStats;
+    }
+
     public String getTags() {
         return tags;
     }

VirtualMachineResponse.java (new file)
@@ -0,0 +1,124 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.response;
+
+import org.apache.cloudstack.api.BaseResponse;
+import org.apache.cloudstack.api.EntityReference;
+
+import com.cloud.serializer.Param;
+import com.cloud.vm.VirtualMachine;
+import com.google.gson.annotations.SerializedName;
+
+@EntityReference(value = VirtualMachine.class)
+public class VirtualMachineResponse extends BaseResponse {
+    @SerializedName("id")
+    @Param(description = "the ID of the VM")
+    private String id;
+
+    @SerializedName("type")
+    @Param(description = "the type of VM")
+    private String type;
+
+    @SerializedName("name")
+    @Param(description = "the name of the VM")
+    private String name;
+
+    @SerializedName("clusterid")
+    @Param(description = "the cluster ID for the VM")
+    private String clusterId;
+
+    @SerializedName("clustername")
+    @Param(description = "the cluster name for the VM")
+    private String clusterName;
+
+    @SerializedName("hostid")
+    @Param(description = "the host ID for the VM")
+    private String hostId;
+
+    @SerializedName("hostname")
+    @Param(description = "the hostname for the VM")
+    private String hostName;
+
+    @Override
+    public String getObjectId() {
+        return this.getId();
+    }
+
+    public String getId() {
+        return id;
+    }
+
+    public void setId(String id) {
+        this.id = id;
+    }
+
+    public String getVmType() {
+        return type;
+    }
+
+    public void setVmType(String type) {
+        this.type = type;
+    }
+
+    public String getVmName() {
+        return name;
+    }
+
+    public void setVmName(String name) {
+        this.name = name;
+    }
+
+    public String getClusterId() {
+        return clusterId;
+    }
+
+    public void setClusterId(String clusterId) {
+        this.clusterId = clusterId;
+    }
+
+    public String getClusterName() {
+        return clusterName;
+    }
+
+    public void setClusterName(String clusterName) {
+        this.clusterName = clusterName;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public String getHostId() {
+        return hostId;
+    }
+
+    public void setHostId(String hostId) {
+        this.hostId = hostId;
+    }
+
+    public String getHostName() {
+        return hostName;
+    }
+
+    public void setHostName(String hostName) {
+        this.hostName = hostName;
+    }
+}

QueryService.java
@@ -52,6 +52,7 @@ import org.apache.cloudstack.api.command.user.snapshot.CopySnapshotCmd;
 import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotsCmd;
 import org.apache.cloudstack.api.command.user.tag.ListTagsCmd;
 import org.apache.cloudstack.api.command.user.template.ListTemplatesCmd;
+import org.apache.cloudstack.api.command.admin.vm.ListAffectedVmsForStorageScopeChangeCmd;
 import org.apache.cloudstack.api.command.user.vm.ListVMsCmd;
 import org.apache.cloudstack.api.command.user.vmgroup.ListVMGroupsCmd;
 import org.apache.cloudstack.api.command.user.volume.ListResourceDetailsCmd;
@@ -89,6 +90,7 @@ import org.apache.cloudstack.api.response.StorageTagResponse;
 import org.apache.cloudstack.api.response.TemplateResponse;
 import org.apache.cloudstack.api.response.UserResponse;
 import org.apache.cloudstack.api.response.UserVmResponse;
+import org.apache.cloudstack.api.response.VirtualMachineResponse;
 import org.apache.cloudstack.api.response.VolumeResponse;
 import org.apache.cloudstack.api.response.ZoneResponse;
 import org.apache.cloudstack.framework.config.ConfigKey;
@@ -140,6 +142,8 @@ public interface QueryService {
 
     ListResponse<UserVmResponse> searchForUserVMs(ListVMsCmd cmd);
 
+    ListResponse<VirtualMachineResponse> listAffectedVmsForStorageScopeChange(ListAffectedVmsForStorageScopeChangeCmd cmd);
+
     ListResponse<SecurityGroupResponse> searchForSecurityGroups(ListSecurityGroupsCmd cmd);
 
     ListResponse<DomainRouterResponse> searchForRouters(ListRoutersCmd cmd);
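
A plausible shape for the QueryService implementation of the new call, assuming it leans on the VMInstanceDao.listByVmsNotInClusterUsingPool() method added later in this commit; the DAO field and the response-mapping helper are invented for the sketch.

    @Override
    public ListResponse<VirtualMachineResponse> listAffectedVmsForStorageScopeChange(ListAffectedVmsForStorageScopeChangeCmd cmd) {
        Pair<List<VMInstanceVO>, Integer> result = vmInstanceDao.listByVmsNotInClusterUsingPool(
                cmd.getClusterIdForScopeChange(), cmd.getStorageId());
        List<VirtualMachineResponse> responses = new ArrayList<>();
        for (VMInstanceVO vm : result.first()) {
            responses.add(toVirtualMachineResponse(vm)); // hypothetical mapper filling id/type/name/host/cluster
        }
        ListResponse<VirtualMachineResponse> listResponse = new ListResponse<>();
        listResponse.setResponses(responses, result.second());
        return listResponse;
    }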

PrepareStorageClientAnswer.java (new file)
@@ -0,0 +1,43 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.agent.api;
+
+import java.util.Map;
+
+public class PrepareStorageClientAnswer extends Answer {
+    Map<String, String> detailsMap;
+
+    public PrepareStorageClientAnswer() {
+        super();
+    }
+
+    public PrepareStorageClientAnswer(Command command, boolean success, Map<String, String> detailsMap) {
+        super(command, success, "");
+        this.detailsMap = detailsMap;
+    }
+
+    public PrepareStorageClientAnswer(Command command, boolean success, String details) {
+        super(command, success, details);
+    }
+
+    public Map<String, String> getDetailsMap() {
+        return detailsMap;
+    }
+}

PrepareStorageClientCommand.java (new file)
@@ -0,0 +1,56 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.agent.api;
+
+import java.util.Map;
+
+import com.cloud.storage.Storage.StoragePoolType;
+
+public class PrepareStorageClientCommand extends Command {
+    private StoragePoolType poolType;
+    private String poolUuid;
+    private Map<String, String> details;
+
+    public PrepareStorageClientCommand() {
+    }
+
+    public PrepareStorageClientCommand(StoragePoolType poolType, String poolUuid, Map<String, String> details) {
+        this.poolType = poolType;
+        this.poolUuid = poolUuid;
+        this.details = details;
+    }
+
+    @Override
+    public boolean executeInSequence() {
+        return false;
+    }
+
+    public StoragePoolType getPoolType() {
+        return poolType;
+    }
+
+    public String getPoolUuid() {
+        return poolUuid;
+    }
+
+    public Map<String, String> getDetails() {
+        return details;
+    }
+}

UnprepareStorageClientAnswer.java (new file)
@@ -0,0 +1,34 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.agent.api;
+
+public class UnprepareStorageClientAnswer extends Answer {
+    public UnprepareStorageClientAnswer() {
+        super();
+    }
+
+    public UnprepareStorageClientAnswer(Command command, boolean success) {
+        super(command, success, "");
+    }
+
+    public UnprepareStorageClientAnswer(Command command, boolean success, String details) {
+        super(command, success, details);
+    }
+}

UnprepareStorageClientCommand.java (new file)
@@ -0,0 +1,48 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.agent.api;
+
+import com.cloud.storage.Storage.StoragePoolType;
+
+public class UnprepareStorageClientCommand extends Command {
+    private StoragePoolType poolType;
+    private String poolUuid;
+
+    public UnprepareStorageClientCommand() {
+    }
+
+    public UnprepareStorageClientCommand(StoragePoolType poolType, String poolUuid) {
+        this.poolType = poolType;
+        this.poolUuid = poolUuid;
+    }
+
+    @Override
+    public boolean executeInSequence() {
+        return false;
+    }
+
+    public StoragePoolType getPoolType() {
+        return poolType;
+    }
+
+    public String getPoolUuid() {
+        return poolUuid;
+    }
+}
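
These four classes only define the agent wire format; a hypervisor resource still has to handle them. A minimal sketch of an agent-side handler, assuming a prepareStorageClient() helper on the storage layer (the helper is illustrative, not part of this diff):

    public Answer execute(PrepareStorageClientCommand cmd) {
        try {
            // e.g. load kernel modules or start the client service the managed pool needs
            Map<String, String> details = prepareStorageClient(cmd.getPoolType(), cmd.getPoolUuid(), cmd.getDetails());
            return new PrepareStorageClientAnswer(cmd, true, details);
        } catch (Exception e) {
            return new PrepareStorageClientAnswer(cmd, false, e.getMessage());
        }
    }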

PrimaryDataStoreDriver.java
@@ -18,6 +18,8 @@
  */
 package org.apache.cloudstack.engine.subsystem.api.storage;
 
+import java.util.Map;
+
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 import org.apache.cloudstack.storage.command.CommandResult;
 
@@ -86,6 +88,22 @@ public interface PrimaryDataStoreDriver extends DataStoreDriver {
      */
     boolean canProvideStorageStats();
 
+    /**
+     * intended for managed storage
+     * returns true if the storage can provide its custom stats
+     */
+    default boolean poolProvidesCustomStorageStats() {
+        return false;
+    }
+
+    /**
+     * intended for managed storage
+     * returns the custom stats if the storage can provide them
+     */
+    default Map<String, String> getCustomStorageStats(StoragePool pool) {
+        return null;
+    }
+
     /**
      * intended for managed storage
      * returns the total capacity and used size in bytes
@@ -110,6 +128,14 @@ public interface PrimaryDataStoreDriver extends DataStoreDriver {
      */
     boolean canHostAccessStoragePool(Host host, StoragePool pool);
 
+    /**
+     * intended for managed storage
+     * returns true if the host can prepare storage client to provide access the storage pool
+     */
+    default boolean canHostPrepareStoragePoolAccess(Host host, StoragePool pool) {
+        return false;
+    }
+
     /**
      * Used by storage pools which want to keep VMs' information
      * @return true if additional VM info is needed (intended for storage pools).
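
Because the new hooks are default methods, every existing PrimaryDataStoreDriver compiles unchanged and keeps the non-managed behavior; only managed-storage plugins opt in. An illustrative override (the stat key and helper are invented):

    @Override
    public boolean poolProvidesCustomStorageStats() {
        return true;
    }

    @Override
    public Map<String, String> getCustomStorageStats(StoragePool pool) {
        Map<String, String> stats = new HashMap<>();
        stats.put("connectedClients", String.valueOf(countConnectedClients(pool))); // hypothetical helper
        return stats;
    }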

PrimaryDataStoreLifeCycle.java
@@ -20,6 +20,7 @@ package org.apache.cloudstack.engine.subsystem.api.storage;
 
 import java.util.Map;
 
+import com.cloud.hypervisor.Hypervisor;
 import com.cloud.storage.StoragePool;
 
 public interface PrimaryDataStoreLifeCycle extends DataStoreLifeCycle {
@@ -29,4 +30,6 @@ public interface PrimaryDataStoreLifeCycle extends DataStoreLifeCycle {
     void updateStoragePool(StoragePool storagePool, Map<String, String> details);
     void enableStoragePool(DataStore store);
     void disableStoragePool(DataStore store);
+    void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterScope, Hypervisor.HypervisorType hypervisorType);
+    void changeStoragePoolScopeToCluster(DataStore store, ClusterScope clusterScope, Hypervisor.HypervisorType hypervisorType);
 }
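
Unlike the driver hooks above, these two lifecycle methods are abstract, so every PrimaryDataStoreLifeCycle implementation in the tree must now provide them. A provider without scope-change support could simply reject the operation, e.g.:

    @Override
    public void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterScope, Hypervisor.HypervisorType hypervisorType) {
        throw new UnsupportedOperationException("Scope change is not supported by this storage provider");
    }

    @Override
    public void changeStoragePoolScopeToCluster(DataStore store, ClusterScope clusterScope, Hypervisor.HypervisorType hypervisorType) {
        throw new UnsupportedOperationException("Scope change is not supported by this storage provider");
    }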

ResourceManager.java
@@ -134,6 +134,10 @@ public interface ResourceManager extends ResourceService, Configurable {
 
     public List<HostVO> listAllHostsInAllZonesByType(Type type);
 
+    public List<HostVO> listAllHostsInOneZoneNotInClusterByHypervisor(final HypervisorType type, long dcId, long clusterId);
+
+    public List<HostVO> listAllHostsInOneZoneNotInClusterByHypervisors(List<HypervisorType> types, long dcId, long clusterId);
+
     public List<HypervisorType> listAvailHypervisorInZone(Long hostId, Long zoneId);
 
     public HostVO findHostByGuid(String guid);

StorageManager.java
@@ -118,7 +118,7 @@ public interface StorageManager extends StorageService {
             "storage.pool.disk.wait",
             "Storage",
             "60",
-            "Timeout (in secs) for the storage pool disk (of managed pool) to become available in the host. Currently only supported for PowerFlex.",
+            "Timeout (in secs) for the storage pool disk (of managed pool) to become available in the host. Currently supported for PowerFlex only.",
             true,
             ConfigKey.Scope.StoragePool,
             null);
@@ -127,7 +127,7 @@ public interface StorageManager extends StorageService {
             "storage.pool.client.timeout",
             "Storage",
             "60",
-            "Timeout (in secs) for the storage pool client connection timeout (for managed pools). Currently only supported for PowerFlex.",
+            "Timeout (in secs) for the API client connection timeout of storage pool (for managed pools). Currently supported for PowerFlex only.",
             false,
             ConfigKey.Scope.StoragePool,
             null);
@@ -136,11 +136,20 @@ public interface StorageManager extends StorageService {
             "storage.pool.client.max.connections",
             "Storage",
             "100",
-            "Maximum connections for the storage pool client (for managed pools). Currently only supported for PowerFlex.",
+            "Maximum connections for the API client of storage pool (for managed pools). Currently supported for PowerFlex only.",
             false,
             ConfigKey.Scope.StoragePool,
             null);
 
+    ConfigKey<Integer> STORAGE_POOL_CONNECTED_CLIENTS_LIMIT = new ConfigKey<>(Integer.class,
+            "storage.pool.connected.clients.limit",
+            "Storage",
+            "-1",
+            "Maximum connected storage pool clients supported for the storage (for managed pools), <= 0 for unlimited (default: -1). Currently supported for PowerFlex only.",
+            true,
+            ConfigKey.Scope.StoragePool,
+            null);
+
     ConfigKey<String> STORAGE_POOL_IO_POLICY = new ConfigKey<>(String.class,
             "kvm.storage.pool.io.policy",
             "Storage",
@@ -252,6 +261,10 @@ public interface StorageManager extends StorageService {
 
     boolean canPoolProvideStorageStats(StoragePool pool);
 
+    boolean poolProvidesCustomStorageStats(StoragePool pool);
+
+    Map<String, String> getCustomStorageStats(StoragePool pool);
+
     /**
      * Checks if a host has running VMs that are using its local storage pool.
      * @return true if local storage is active on the host
@@ -288,6 +301,8 @@ public interface StorageManager extends StorageService {
 
     boolean canHostAccessStoragePool(Host host, StoragePool pool);
 
+    boolean canHostPrepareStoragePoolAccess(Host host, StoragePool pool);
+
     Host getHost(long hostId);
 
     Host updateSecondaryStorage(long secStorageId, String newUrl);
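
The new key is declared at StoragePool scope, so it can be tuned per pool. A sketch of how enforcement might read it; valueIn() is the standard per-scope ConfigKey lookup, while connectedClients stands in for whatever the driver reports:

    Integer limit = StorageManager.STORAGE_POOL_CONNECTED_CLIENTS_LIMIT.valueIn(pool.getId());
    if (limit != null && limit > 0 && connectedClients >= limit) {
        throw new CloudRuntimeException(String.format("Storage pool %s reached its connected clients limit (%d)",
                pool.getUuid(), limit));
    }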

CapacityVO.java
@@ -135,8 +135,8 @@ public class CapacityVO implements Capacity {
         return podId;
     }
 
-    public void setPodId(long podId) {
-        this.podId = new Long(podId);
+    public void setPodId(Long podId) {
+        this.podId = podId;
     }
 
     @Override
@@ -144,8 +144,8 @@ public class CapacityVO implements Capacity {
         return clusterId;
     }
 
-    public void setClusterId(long clusterId) {
-        this.clusterId = new Long(clusterId);
+    public void setClusterId(Long clusterId) {
+        this.clusterId = clusterId;
     }
 
     @Override
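
Widening the setters from long to Long does two things: it drops the deprecated new Long(...) boxing, and it makes null a legal argument, which the scope change relies on when a pool goes zone-wide and its capacity row must shed its pod and cluster (see switchToZone() further down):

    // legal only with the boxed overloads; setPodId(long) could not accept null
    capacity.setPodId(null);
    capacity.setClusterId(null);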

StoragePoolHostDao.java
@@ -41,4 +41,6 @@ public interface StoragePoolHostDao extends GenericDao<StoragePoolHostVO, Long>
     public void deleteStoragePoolHostDetails(long hostId, long poolId);
 
     List<StoragePoolHostVO> listByHostId(long hostId);
+
+    Pair<List<StoragePoolHostVO>, Integer> listByPoolIdNotInCluster(long clusterId, long poolId);
 }

StoragePoolHostDaoImpl.java
@@ -23,12 +23,18 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.stream.Collectors;
 
+import javax.annotation.PostConstruct;
+import javax.inject.Inject;
+
 import org.springframework.stereotype.Component;
 
+import com.cloud.host.HostVO;
 import com.cloud.host.Status;
+import com.cloud.host.dao.HostDao;
 import com.cloud.storage.StoragePoolHostVO;
 import com.cloud.utils.Pair;
 import com.cloud.utils.db.GenericDaoBase;
+import com.cloud.utils.db.JoinBuilder;
 import com.cloud.utils.db.SearchBuilder;
 import com.cloud.utils.db.SearchCriteria;
 import com.cloud.utils.db.TransactionLegacy;
@@ -40,6 +46,11 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase<StoragePoolHostVO, Lo
     protected final SearchBuilder<StoragePoolHostVO> HostSearch;
     protected final SearchBuilder<StoragePoolHostVO> PoolHostSearch;
 
+    protected SearchBuilder<StoragePoolHostVO> poolNotInClusterSearch;
+
+    @Inject
+    HostDao hostDao;
+
     protected static final String HOST_FOR_POOL_SEARCH = "SELECT * FROM storage_pool_host_ref ph, host h where ph.host_id = h.id and ph.pool_id=? and h.status=? ";
 
     protected static final String HOSTS_FOR_POOLS_SEARCH = "SELECT DISTINCT(ph.host_id) FROM storage_pool_host_ref ph, host h WHERE ph.host_id = h.id AND h.status = 'Up' AND resource_state = 'Enabled' AND ph.pool_id IN (?)";
@@ -68,6 +79,15 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase<StoragePoolHostVO, Lo
 
     }
 
+    @PostConstruct
+    public void init() {
+        poolNotInClusterSearch = createSearchBuilder();
+        poolNotInClusterSearch.and("poolId", poolNotInClusterSearch.entity().getPoolId(), SearchCriteria.Op.EQ);
+        SearchBuilder<HostVO> hostSearch = hostDao.createSearchBuilder();
+        poolNotInClusterSearch.join("hostSearch", hostSearch, hostSearch.entity().getId(), poolNotInClusterSearch.entity().getHostId(), JoinBuilder.JoinType.INNER);
+        hostSearch.and("clusterId", hostSearch.entity().getClusterId(), SearchCriteria.Op.NEQ);
+    }
+
     @Override
     public List<StoragePoolHostVO> listByPoolId(long id) {
         SearchCriteria<StoragePoolHostVO> sc = PoolSearch.create();
@@ -194,4 +214,12 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase<StoragePoolHostVO, Lo
         remove(sc);
         txn.commit();
     }
+
+    @Override
+    public Pair<List<StoragePoolHostVO>, Integer> listByPoolIdNotInCluster(long clusterId, long poolId) {
+        SearchCriteria<StoragePoolHostVO> sc = poolNotInClusterSearch.create();
+        sc.setParameters("poolId", poolId);
+        sc.setJoinParameters("hostSearch", "clusterId", clusterId);
+        return searchAndCount(sc, null);
+    }
 }
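
The @PostConstruct join amounts to: select storage_pool_host_ref rows for the given pool whose host sits in a different cluster. An illustrative call, mirroring how switchToCluster() consumes it later in this commit (ids are examples):

    // mappings of pool 42 held by hosts outside cluster 7: exactly the entries
    // that become stale when the pool is narrowed to that cluster
    Pair<List<StoragePoolHostVO>, Integer> stale = storagePoolHostDao.listByPoolIdNotInCluster(7L, 42L);
    for (StoragePoolHostVO ref : stale.first()) {
        logger.debug("host " + ref.getHostId() + " still references pool " + ref.getPoolId());
    }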

VolumeDaoImpl.java
@@ -77,10 +77,11 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
     protected GenericSearchBuilder<VolumeVO, SumCount> primaryStorageSearch2;
     protected GenericSearchBuilder<VolumeVO, SumCount> secondaryStorageSearch;
     private final SearchBuilder<VolumeVO> poolAndPathSearch;
+
     @Inject
     ReservationDao reservationDao;
     @Inject
-    ResourceTagDao _tagsDao;
+    ResourceTagDao tagsDao;
 
     // need to account for zone-wide primary storage where storage_pool has
     // null-value pod and cluster, where hypervisor information is stored in
@@ -503,7 +504,6 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
         poolAndPathSearch.and("poolId", poolAndPathSearch.entity().getPoolId(), Op.EQ);
         poolAndPathSearch.and("path", poolAndPathSearch.entity().getPath(), Op.EQ);
         poolAndPathSearch.done();
-
     }
 
     @Override
@@ -741,7 +741,7 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
         logger.debug(String.format("Removing volume %s from DB", id));
         VolumeVO entry = findById(id);
         if (entry != null) {
-            _tagsDao.removeByIdAndType(id, ResourceObjectType.Volume);
+            tagsDao.removeByIdAndType(id, ResourceObjectType.Volume);
         }
         boolean result = super.remove(id);
 
@@ -764,7 +764,7 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
             destVol.setInstanceId(instanceId);
             update(srcVolId, srcVol);
             update(destVolId, destVol);
-            _tagsDao.updateResourceId(srcVolId, destVolId, ResourceObjectType.Volume);
+            tagsDao.updateResourceId(srcVolId, destVolId, ResourceObjectType.Volume);
         } catch (Exception e) {
             throw new CloudRuntimeException("Unable to persist the sequence number for this host");
         }

VMInstanceDao.java
@@ -168,4 +168,6 @@ public interface VMInstanceDao extends GenericDao<VMInstanceVO, Long>, StateDao<
 
     List<VMInstanceVO> searchRemovedByRemoveDate(final Date startDate, final Date endDate, final Long batchSize,
             List<Long> skippedVmIds);
+
+    Pair<List<VMInstanceVO>, Integer> listByVmsNotInClusterUsingPool(long clusterId, long poolId);
 }

VMInstanceDaoImpl.java
@@ -24,6 +24,7 @@ import java.util.Date;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.stream.Collectors;
 
 import javax.annotation.PostConstruct;
 import javax.inject.Inject;
@@ -35,6 +36,8 @@ import com.cloud.host.HostVO;
 import com.cloud.host.dao.HostDao;
 import com.cloud.hypervisor.Hypervisor;
 import com.cloud.server.ResourceTag.ResourceObjectType;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.VolumeDao;
 import com.cloud.tags.dao.ResourceTagDao;
 import com.cloud.utils.DateUtil;
 import com.cloud.utils.Pair;
@@ -97,11 +100,16 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
     protected SearchBuilder<VMInstanceVO> NotMigratingSearch;
     protected SearchBuilder<VMInstanceVO> BackupSearch;
     protected SearchBuilder<VMInstanceVO> LastHostAndStatesSearch;
+    protected SearchBuilder<VMInstanceVO> VmsNotInClusterUsingPool;
 
     @Inject
-    ResourceTagDao _tagsDao;
+    ResourceTagDao tagsDao;
     @Inject
-    NicDao _nicDao;
+    NicDao nicDao;
+    @Inject
+    VolumeDao volumeDao;
+    @Inject
+    HostDao hostDao;
 
     protected Attribute _updateTimeAttr;
 
@@ -278,7 +286,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
         _updateTimeAttr = _allAttributes.get("updateTime");
         assert _updateTimeAttr != null : "Couldn't get this updateTime attribute";
 
-        SearchBuilder<NicVO> nicSearch = _nicDao.createSearchBuilder();
+        SearchBuilder<NicVO> nicSearch = nicDao.createSearchBuilder();
         nicSearch.and("networkId", nicSearch.entity().getNetworkId(), SearchCriteria.Op.EQ);
         nicSearch.and("removedNic", nicSearch.entity().getRemoved(), SearchCriteria.Op.NULL);
 
@@ -307,6 +315,16 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
         LastHostAndStatesSearch.and("states", LastHostAndStatesSearch.entity().getState(), Op.IN);
         LastHostAndStatesSearch.done();
 
+        VmsNotInClusterUsingPool = createSearchBuilder();
+        SearchBuilder<VolumeVO> volumeSearch = volumeDao.createSearchBuilder();
+        volumeSearch.and("poolId", volumeSearch.entity().getPoolId(), Op.EQ);
+        volumeSearch.and("removed", volumeSearch.entity().getRemoved(), Op.NULL);
+        VmsNotInClusterUsingPool.join("volumeSearch", volumeSearch, volumeSearch.entity().getInstanceId(), VmsNotInClusterUsingPool.entity().getId(), JoinType.INNER);
+        SearchBuilder<HostVO> hostSearch2 = hostDao.createSearchBuilder();
+        hostSearch2.and("clusterId", hostSearch2.entity().getClusterId(), SearchCriteria.Op.NEQ);
+        VmsNotInClusterUsingPool.join("hostSearch2", hostSearch2, hostSearch2.entity().getId(), VmsNotInClusterUsingPool.entity().getHostId(), JoinType.INNER);
+        VmsNotInClusterUsingPool.and("vmStates", VmsNotInClusterUsingPool.entity().getState(), Op.IN);
+        VmsNotInClusterUsingPool.done();
     }
 
     @Override
@@ -836,7 +854,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
     public List<VMInstanceVO> listNonRemovedVmsByTypeAndNetwork(long networkId, VirtualMachine.Type... types) {
         if (NetworkTypeSearch == null) {
 
-            SearchBuilder<NicVO> nicSearch = _nicDao.createSearchBuilder();
+            SearchBuilder<NicVO> nicSearch = nicDao.createSearchBuilder();
             nicSearch.and("networkId", nicSearch.entity().getNetworkId(), SearchCriteria.Op.EQ);
 
             NetworkTypeSearch = createSearchBuilder();
@@ -873,7 +891,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
         txn.start();
         VMInstanceVO vm = findById(id);
         if (vm != null && vm.getType() == Type.User) {
-            _tagsDao.removeByIdAndType(id, ResourceObjectType.UserVm);
+            tagsDao.removeByIdAndType(id, ResourceObjectType.UserVm);
         }
         boolean result = super.remove(id);
         txn.commit();
@@ -1040,4 +1058,14 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
         Filter filter = new Filter(VMInstanceVO.class, "id", true, 0L, batchSize);
         return searchIncludingRemoved(sc, filter, null, false);
     }
+
+    public Pair<List<VMInstanceVO>, Integer> listByVmsNotInClusterUsingPool(long clusterId, long poolId) {
+        SearchCriteria<VMInstanceVO> sc = VmsNotInClusterUsingPool.create();
+        sc.setParameters("vmStates", State.Starting, State.Running, State.Stopping, State.Migrating, State.Restoring);
+        sc.setJoinParameters("volumeSearch", "poolId", poolId);
+        sc.setJoinParameters("hostSearch2", "clusterId", clusterId);
+        List<VMInstanceVO> vms = search(sc, null);
+        List<VMInstanceVO> uniqueVms = vms.stream().distinct().collect(Collectors.toList());
+        return new Pair<>(uniqueVms, uniqueVms.size());
+    }
 }
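
Because the volume join can emit one row per volume, a VM with several volumes on the pool would otherwise be counted repeatedly; the stream().distinct() pass dedupes before the count is taken. Illustrative usage (ids are examples):

    // live VMs (Starting/Running/Stopping/Migrating/Restoring) with volumes on pool 42
    // but running on hosts outside cluster 7: the set the new
    // listAffectedVmsForStorageScopeChange API reports to the admin
    Pair<List<VMInstanceVO>, Integer> affected = vmInstanceDao.listByVmsNotInClusterUsingPool(7L, 42L);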

PrimaryDataStoreHelper.java
@@ -32,6 +32,7 @@ import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.LogManager;
 import org.springframework.stereotype.Component;
 
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
 import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
@@ -43,17 +44,20 @@ import com.cloud.agent.api.StoragePoolInfo;
 import com.cloud.capacity.Capacity;
 import com.cloud.capacity.CapacityVO;
 import com.cloud.capacity.dao.CapacityDao;
-import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.storage.DataStoreRole;
 import com.cloud.storage.ScopeType;
+import com.cloud.storage.Storage.StoragePoolType;
 import com.cloud.storage.StorageManager;
 import com.cloud.storage.StoragePoolHostVO;
 import com.cloud.storage.StoragePoolStatus;
-import com.cloud.storage.Storage.StoragePoolType;
 import com.cloud.storage.dao.StoragePoolHostDao;
 import com.cloud.utils.crypt.DBEncryptionUtil;
+import com.cloud.utils.db.Transaction;
+import com.cloud.utils.db.TransactionCallbackNoReturn;
 import com.cloud.utils.db.TransactionLegacy;
+import com.cloud.utils.db.TransactionStatus;
 import com.cloud.utils.exception.CloudRuntimeException;
 
 @Component
@@ -266,4 +270,48 @@ public class PrimaryDataStoreHelper {
         return true;
     }
+
+    public void switchToZone(DataStore store, HypervisorType hypervisorType) {
+        StoragePoolVO pool = dataStoreDao.findById(store.getId());
+        CapacityVO capacity = _capacityDao.findByHostIdType(store.getId(), Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED);
+        Transaction.execute(new TransactionCallbackNoReturn() {
+            public void doInTransactionWithoutResult(TransactionStatus status) {
+                pool.setScope(ScopeType.ZONE);
+                pool.setPodId(null);
+                pool.setClusterId(null);
+                pool.setHypervisor(hypervisorType);
+                dataStoreDao.update(pool.getId(), pool);
+
+                capacity.setPodId(null);
+                capacity.setClusterId(null);
+                _capacityDao.update(capacity.getId(), capacity);
+            }
+        });
+        logger.debug("Scope of storage pool id=" + pool.getId() + " is changed to zone");
+    }
+
+    public void switchToCluster(DataStore store, ClusterScope clusterScope) {
+        List<StoragePoolHostVO> hostPoolRecords = storagePoolHostDao.listByPoolIdNotInCluster(clusterScope.getScopeId(), store.getId()).first();
+        StoragePoolVO pool = dataStoreDao.findById(store.getId());
+        CapacityVO capacity = _capacityDao.findByHostIdType(store.getId(), Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED);
+
+        Transaction.execute(new TransactionCallbackNoReturn() {
+            @Override
+            public void doInTransactionWithoutResult(TransactionStatus status) {
+                if (hostPoolRecords != null) {
+                    for (StoragePoolHostVO host : hostPoolRecords) {
+                        storagePoolHostDao.deleteStoragePoolHostDetails(host.getHostId(), host.getPoolId());
+                    }
+                }
+                pool.setScope(ScopeType.CLUSTER);
+                pool.setPodId(clusterScope.getPodId());
+                pool.setClusterId(clusterScope.getScopeId());
+                dataStoreDao.update(pool.getId(), pool);
+
+                capacity.setPodId(clusterScope.getPodId());
+                capacity.setClusterId(clusterScope.getScopeId());
+                _capacityDao.update(capacity.getId(), capacity);
+            }
+        });
+        logger.debug("Scope of storage pool id=" + pool.getId() + " is changed to cluster id=" + clusterScope.getScopeId());
+    }
 }
|
|||||||
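
The two helpers above are the persistence half of a scope change: each flips the pool row's scope/pod/cluster columns together with its allocated-capacity row inside a single transaction, so readers never observe a half-moved pool. A rough calling sketch, assuming an injected PrimaryDataStoreHelper named helper and already-resolved store/clusterScope objects (these names are illustrative, not from the patch):

    // Sketch only: helper, store, clusterScope and targetScope are assumptions.
    if (targetScope == ScopeType.ZONE) {
        // Widening: pod/cluster are cleared and the hypervisor type is pinned,
        // because a zone-wide pool must serve a single hypervisor kind.
        helper.switchToZone(store, HypervisorType.KVM);
    } else {
        // Narrowing: host/pool mappings outside the target cluster are removed
        // before the pool and capacity rows are re-scoped.
        helper.switchToCluster(store, clusterScope);
    }
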
@@ -0,0 +1,114 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.storage.volume.datastore;
+
+import java.util.List;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.InjectMocks;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.Spy;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import com.cloud.capacity.Capacity;
+import com.cloud.capacity.CapacityVO;
+import com.cloud.capacity.dao.CapacityDao;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.storage.ScopeType;
+import com.cloud.storage.Storage;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import com.cloud.utils.Pair;
+
+@RunWith(MockitoJUnitRunner.class)
+public class PrimaryDataStoreHelperTest {
+
+    @Mock
+    private PrimaryDataStoreDao dataStoreDao;
+
+    @Mock
+    private CapacityDao capacityDao;
+
+    @Mock
+    private StoragePoolHostDao storagePoolHostDao;
+
+    @Spy
+    @InjectMocks
+    PrimaryDataStoreHelper dataStoreHelper;
+
+    private static final Long ZONE_ID = 1L;
+    private static final Long CLUSTER_ID = 2L;
+    private static final Long POD_ID = 3L;
+    private static final Long POOL_ID = 4L;
+    private static final Short capacityType = 0;
+    private static final Float usedPercentage = 0.0f;
+
+    @Test
+    public void testSwitchToZone() {
+        StoragePoolVO pool = new StoragePoolVO(POOL_ID, null, null, Storage.StoragePoolType.NetworkFilesystem, ZONE_ID, POD_ID, 0L, 0L, null, 0, null);
+        pool.setClusterId(CLUSTER_ID);
+        pool.setScope(ScopeType.CLUSTER);
+        CapacityVO capacity = new CapacityVO(ZONE_ID, POD_ID, CLUSTER_ID, capacityType, usedPercentage);
+
+        Mockito.when(dataStoreDao.findById(pool.getId())).thenReturn(pool);
+        Mockito.when(capacityDao.findByHostIdType(pool.getId(), Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED)).thenReturn(capacity);
+        DataStore storeMock = Mockito.mock(DataStore.class);
+        Mockito.when(storeMock.getId()).thenReturn(POOL_ID);
+
+        dataStoreHelper.switchToZone(storeMock, HypervisorType.KVM);
+
+        Assert.assertEquals(pool.getScope(), ScopeType.ZONE);
+        Assert.assertEquals(pool.getPodId(), null);
+        Assert.assertEquals(pool.getClusterId(), null);
+        Assert.assertEquals(pool.getHypervisor(), HypervisorType.KVM);
+        Assert.assertEquals(capacity.getPodId(), null);
+        Assert.assertEquals(capacity.getClusterId(), null);
+    }
+
+    @Test
+    public void testSwitchToCluster() {
+        StoragePoolVO pool = new StoragePoolVO(POOL_ID, null, null, Storage.StoragePoolType.NetworkFilesystem, ZONE_ID, null, 0L, 0L, null, 0, null);
+        pool.setScope(ScopeType.ZONE);
+        CapacityVO capacity = new CapacityVO(ZONE_ID, null, null, capacityType, usedPercentage);
+        ClusterScope clusterScope = new ClusterScope(CLUSTER_ID, POD_ID, ZONE_ID);
+
+        Pair<List<StoragePoolHostVO>, Integer> hostPoolRecords = new Pair<>(null, 0);
+        Mockito.when(storagePoolHostDao.listByPoolIdNotInCluster(CLUSTER_ID, POOL_ID)).thenReturn(hostPoolRecords);
+        Mockito.when(dataStoreDao.findById(pool.getId())).thenReturn(pool);
+        Mockito.when(capacityDao.findByHostIdType(pool.getId(), Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED)).thenReturn(capacity);
+        DataStore storeMock = Mockito.mock(DataStore.class);
+        Mockito.when(storeMock.getId()).thenReturn(POOL_ID);
+
+        dataStoreHelper.switchToCluster(storeMock, clusterScope);
+
+        Mockito.verify(storagePoolHostDao, Mockito.never()).deleteStoragePoolHostDetails(Mockito.anyLong(), Mockito.anyLong());
+
+        Assert.assertEquals(pool.getScope(), ScopeType.CLUSTER);
+        Assert.assertEquals(pool.getPodId(), POD_ID);
+        Assert.assertEquals(pool.getClusterId(), CLUSTER_ID);
+        Assert.assertEquals(capacity.getPodId(), POD_ID);
+        Assert.assertEquals(capacity.getClusterId(), CLUSTER_ID);
+    }
+}
@@ -0,0 +1,106 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.storage.datastore.lifecycle;
+
+import java.util.Arrays;
+import java.util.List;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.DeleteStoragePoolCommand;
+import com.cloud.host.HostVO;
+import com.cloud.host.dao.HostDao;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.resource.ResourceManager;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import com.cloud.utils.Pair;
+
+public class BasePrimaryDataStoreLifeCycleImpl {
+    private static final Logger s_logger = Logger.getLogger(BasePrimaryDataStoreLifeCycleImpl.class);
+    @Inject
+    AgentManager agentMgr;
+    @Inject
+    protected ResourceManager resourceMgr;
+    @Inject
+    StorageManager storageMgr;
+    @Inject
+    PrimaryDataStoreHelper dataStoreHelper;
+    @Inject
+    protected HostDao hostDao;
+    @Inject
+    protected StoragePoolHostDao storagePoolHostDao;
+
+    private List<HostVO> getPoolHostsList(ClusterScope clusterScope, HypervisorType hypervisorType) {
+        List<HostVO> hosts;
+        if (hypervisorType != null) {
+            hosts = resourceMgr
+                    .listAllHostsInOneZoneNotInClusterByHypervisor(hypervisorType, clusterScope.getZoneId(), clusterScope.getScopeId());
+        } else {
+            List<HypervisorType> hypervisorTypes = Arrays.asList(HypervisorType.KVM, HypervisorType.VMware);
+            hosts = resourceMgr
+                    .listAllHostsInOneZoneNotInClusterByHypervisors(hypervisorTypes, clusterScope.getZoneId(), clusterScope.getScopeId());
+        }
+        return hosts;
+    }
+
+    public void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterScope, HypervisorType hypervisorType) {
+        List<HostVO> hosts = getPoolHostsList(clusterScope, hypervisorType);
+        s_logger.debug("Changing scope of the storage pool to Zone");
+        if (hosts != null) {
+            for (HostVO host : hosts) {
+                try {
+                    storageMgr.connectHostToSharedPool(host.getId(), store.getId());
+                } catch (Exception e) {
+                    s_logger.warn("Unable to establish a connection between " + host + " and " + store, e);
+                }
+            }
+        }
+        dataStoreHelper.switchToZone(store, hypervisorType);
+    }
+
+    public void changeStoragePoolScopeToCluster(DataStore store, ClusterScope clusterScope, HypervisorType hypervisorType) {
+        Pair<List<StoragePoolHostVO>, Integer> hostPoolRecords = storagePoolHostDao.listByPoolIdNotInCluster(clusterScope.getScopeId(), store.getId());
+        s_logger.debug("Changing scope of the storage pool to Cluster");
+        if (hostPoolRecords.second() > 0) {
+            StoragePool pool = (StoragePool) store;
+            for (StoragePoolHostVO host : hostPoolRecords.first()) {
+                DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand(pool);
+                final Answer answer = agentMgr.easySend(host.getHostId(), deleteCmd);
+
+                if (answer != null) {
+                    if (!answer.getResult()) {
+                        s_logger.debug("Failed to delete storage pool: " + answer.getResult());
+                    } else if (HypervisorType.KVM != hypervisorType) {
+                        break;
+                    }
+                }
+            }
+        }
+        dataStoreHelper.switchToCluster(store, clusterScope);
+    }
+}
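
The base class above factors out the agent-facing half of a scope change so that the concrete lifecycle implementations re-parented later in this diff (Adaptive, Elastistor, Datera) inherit it: cluster-to-zone connects every eligible host in the zone to the pool before the database flip, while zone-to-cluster first unmounts the pool from hosts outside the target cluster. Note the break in the delete loop: for non-KVM hypervisors one successful DeleteStoragePoolCommand is treated as sufficient, whereas each KVM host holds its own pool definition. A dispatch sketch under assumed names (lifeCycle is a hypothetical concrete subclass instance):

    // Sketch only: store, clusterScope and hypervisorType are resolved by the caller.
    lifeCycle.changeStoragePoolScopeToZone(store, clusterScope, hypervisorType);    // cluster -> zone
    lifeCycle.changeStoragePoolScopeToCluster(store, clusterScope, hypervisorType); // zone -> cluster
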
@@ -0,0 +1,127 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.storage.datastore.lifecycle;
+
+import static org.mockito.ArgumentMatchers.eq;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.storage.datastore.PrimaryDataStoreImpl;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.InjectMocks;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.Spy;
+import org.mockito.junit.MockitoJUnitRunner;
+import org.springframework.test.util.ReflectionTestUtils;
+
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.DeleteStoragePoolCommand;
+import com.cloud.host.HostVO;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.resource.ResourceManager;
+import com.cloud.storage.Storage;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import com.cloud.utils.Pair;
+
+@RunWith(MockitoJUnitRunner.class)
+public class BasePrimaryDataStoreLifeCycleImplTest {
+
+    @Mock
+    private StoragePoolHostDao storagePoolHostDao;
+
+    @Mock
+    private PrimaryDataStoreHelper dataStoreHelper;
+
+    @Mock
+    private AgentManager agentManager;
+
+    @Mock
+    private ResourceManager resourceManager;
+
+    @Mock
+    private StorageManager storageManager;
+
+    @Spy
+    @InjectMocks
+    private BasePrimaryDataStoreLifeCycleImpl dataStoreLifeCycle;
+
+    private static final Long POOL_ID = 1L;
+    private static final Long CLUSTER_ID = 2L;
+    private static final Long POD_ID = 3L;
+    private static final Long ZONE_ID = 4L;
+    private static final Long HOST_ID = 5L;
+
+    private static ClusterScope clusterScope;
+    private static PrimaryDataStoreImpl store;
+
+    @BeforeClass
+    public static void init() {
+        clusterScope = new ClusterScope(CLUSTER_ID, POD_ID, ZONE_ID);
+        StoragePoolVO pool = new StoragePoolVO(POOL_ID, null, null, Storage.StoragePoolType.NetworkFilesystem, 0L, 0L, 0L, 0L, null, 0, null);
+        store = new PrimaryDataStoreImpl();
+        store.configure(pool, null, null);
+    }
+
+    @Test
+    public void testChangeStoragePoolScopeToZone() throws Exception {
+        Mockito.when(resourceManager.listAllHostsInOneZoneNotInClusterByHypervisor(HypervisorType.KVM, ZONE_ID, CLUSTER_ID)).thenReturn(null);
+
+        dataStoreLifeCycle.changeStoragePoolScopeToZone(store, clusterScope, HypervisorType.KVM);
+
+        Mockito.verify(dataStoreHelper, Mockito.times(1)).switchToZone(store, HypervisorType.KVM);
+
+        HostVO host = new HostVO(null);
+        ReflectionTestUtils.setField(host, "id", HOST_ID);
+        List<HypervisorType> hypervisorTypes = Arrays.asList(HypervisorType.KVM, HypervisorType.VMware);
+        Mockito.when(resourceManager.listAllHostsInOneZoneNotInClusterByHypervisors(hypervisorTypes, ZONE_ID, CLUSTER_ID)).thenReturn(Arrays.asList(host));
+        Mockito.when(storageManager.connectHostToSharedPool(HOST_ID, POOL_ID)).thenReturn(true);
+
+        dataStoreLifeCycle.changeStoragePoolScopeToZone(store, clusterScope, null);
+
+        Mockito.verify(dataStoreHelper, Mockito.times(1)).switchToZone(store, null);
+    }
+
+    @Test
+    public void testChangeStoragePoolScopeToCluster() {
+        Pair<List<StoragePoolHostVO>, Integer> hostPoolRecords = new Pair<>(null, 0);
+        Mockito.when(storagePoolHostDao.listByPoolIdNotInCluster(CLUSTER_ID, POOL_ID)).thenReturn(hostPoolRecords);
+        Mockito.doNothing().when(dataStoreHelper).switchToCluster(store, clusterScope);
+
+        dataStoreLifeCycle.changeStoragePoolScopeToCluster(store, clusterScope, HypervisorType.KVM);
+
+        hostPoolRecords.set(Arrays.asList(new StoragePoolHostVO(POOL_ID, HOST_ID, null)), 1);
+        Answer answer = new Answer(null, false, null);
+        Mockito.when(storagePoolHostDao.listByPoolIdNotInCluster(CLUSTER_ID, POOL_ID)).thenReturn(hostPoolRecords);
+        Mockito.when(agentManager.easySend(eq(HOST_ID), Mockito.any(DeleteStoragePoolCommand.class))).thenReturn(answer);
+
+        dataStoreLifeCycle.changeStoragePoolScopeToCluster(store, clusterScope, HypervisorType.KVM);
+
+        Mockito.verify(dataStoreHelper, Mockito.times(2)).switchToCluster(store, clusterScope);
+    }
+}
@@ -0,0 +1,52 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import java.util.Map;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.PrepareStorageClientAnswer;
+import com.cloud.agent.api.PrepareStorageClientCommand;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+import com.cloud.utils.Ternary;
+
+@ResourceWrapper(handles = PrepareStorageClientCommand.class)
+public class LibvirtPrepareStorageClientCommandWrapper extends CommandWrapper<PrepareStorageClientCommand, Answer, LibvirtComputingResource> {
+
+    private static final Logger s_logger = Logger.getLogger(LibvirtPrepareStorageClientCommandWrapper.class);
+
+    @Override
+    public Answer execute(PrepareStorageClientCommand cmd, LibvirtComputingResource libvirtComputingResource) {
+        final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
+        Ternary<Boolean, Map<String, String>, String> prepareStorageClientResult = storagePoolMgr.prepareStorageClient(cmd.getPoolType(), cmd.getPoolUuid(), cmd.getDetails());
+        if (!prepareStorageClientResult.first()) {
+            String msg = prepareStorageClientResult.third();
+            s_logger.debug("Unable to prepare storage client, due to: " + msg);
+            return new PrepareStorageClientAnswer(cmd, false, msg);
+        }
+        Map<String, String> details = prepareStorageClientResult.second();
+        return new PrepareStorageClientAnswer(cmd, true, details);
+    }
+}
@@ -0,0 +1,49 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import org.apache.log4j.Logger;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.UnprepareStorageClientAnswer;
+import com.cloud.agent.api.UnprepareStorageClientCommand;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+import com.cloud.utils.Pair;
+
+@ResourceWrapper(handles = UnprepareStorageClientCommand.class)
+public class LibvirtUnprepareStorageClientCommandWrapper extends CommandWrapper<UnprepareStorageClientCommand, Answer, LibvirtComputingResource> {
+
+    private static final Logger s_logger = Logger.getLogger(LibvirtUnprepareStorageClientCommandWrapper.class);
+
+    @Override
+    public Answer execute(UnprepareStorageClientCommand cmd, LibvirtComputingResource libvirtComputingResource) {
+        final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
+        Pair<Boolean, String> unprepareStorageClientResult = storagePoolMgr.unprepareStorageClient(cmd.getPoolType(), cmd.getPoolUuid());
+        if (!unprepareStorageClientResult.first()) {
+            String msg = unprepareStorageClientResult.second();
+            s_logger.debug("Couldn't unprepare storage client, due to: " + msg);
+            return new UnprepareStorageClientAnswer(cmd, false, msg);
+        }
+        return new UnprepareStorageClientAnswer(cmd, true);
+    }
+}
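
Both wrappers above follow the KVM agent's command-dispatch convention: @ResourceWrapper registers the handler for exactly one command class, and execute() stays thin, delegating to KVMStoragePoolManager and converting the Pair/Ternary result into an Answer. A hedged illustration of the same shape with hypothetical names (MyCommand and LibvirtMyCommandWrapper are not part of this patch):

    // Sketch only, mirroring the wrapper pattern shown above.
    @ResourceWrapper(handles = MyCommand.class)
    public final class LibvirtMyCommandWrapper extends CommandWrapper<MyCommand, Answer, LibvirtComputingResource> {
        @Override
        public Answer execute(MyCommand cmd, LibvirtComputingResource resource) {
            // Pull the relevant manager off the resource, delegate, wrap the outcome.
            KVMStoragePoolManager mgr = resource.getStoragePoolMgr();
            return new Answer(cmd, true, null);
        }
    }
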
@@ -45,6 +45,8 @@ import com.cloud.storage.Storage;
 import com.cloud.storage.Storage.StoragePoolType;
 import com.cloud.storage.StorageLayer;
 import com.cloud.storage.Volume;
+import com.cloud.utils.Pair;
+import com.cloud.utils.Ternary;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.vm.VirtualMachine;

@@ -475,4 +477,13 @@ public class KVMStoragePoolManager {
         return adaptor.createTemplateFromDirectDownloadFile(templateFilePath, destTemplatePath, destPool, format, timeout);
     }
+
+    public Ternary<Boolean, Map<String, String>, String> prepareStorageClient(StoragePoolType type, String uuid, Map<String, String> details) {
+        StorageAdaptor adaptor = getStorageAdaptor(type);
+        return adaptor.prepareStorageClient(type, uuid, details);
+    }
+
+    public Pair<Boolean, String> unprepareStorageClient(StoragePoolType type, String uuid) {
+        StorageAdaptor adaptor = getStorageAdaptor(type);
+        return adaptor.unprepareStorageClient(type, uuid);
+    }
 }
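
The two manager methods above are pure dispatch: getStorageAdaptor(type) resolves the adaptor registered for the pool type, so only adaptors that override the new StorageAdaptor defaults (the ScaleIO/PowerFlex one in this diff) do real work. A usage sketch with assumed names (mgr, poolUuid and details are illustrative, not from the patch):

    // Sketch only: mgr is the KVMStoragePoolManager owned by the computing resource.
    Ternary<Boolean, Map<String, String>, String> prepared =
            mgr.prepareStorageClient(StoragePoolType.PowerFlex, poolUuid, details);
    if (!prepared.first()) {
        // third() carries the failure reason, e.g. "SDC service not installed on host".
        throw new CloudRuntimeException(prepared.third());
    }
    Map<String, String> clientDetails = prepared.second(); // e.g. the ScaleIO SDC id/guid
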
@@ -27,6 +27,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.UUID;

+import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient;
 import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil;
 import org.apache.cloudstack.utils.cryptsetup.CryptSetup;
 import org.apache.cloudstack.utils.cryptsetup.CryptSetupException;
@@ -43,6 +44,8 @@ import org.libvirt.LibvirtException;

 import com.cloud.storage.Storage;
 import com.cloud.storage.StorageManager;
+import com.cloud.utils.Pair;
+import com.cloud.utils.Ternary;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.script.OutputInterpreter;
 import com.cloud.utils.script.Script;
@@ -564,6 +567,67 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor {
         qemu.resize(options, objects, usableSizeBytes);
     }
+
+    public Ternary<Boolean, Map<String, String>, String> prepareStorageClient(Storage.StoragePoolType type, String uuid, Map<String, String> details) {
+        if (!ScaleIOUtil.isSDCServiceInstalled()) {
+            logger.debug("SDC service not installed on host, preparing the SDC client not possible");
+            return new Ternary<>(false, null, "SDC service not installed on host");
+        }
+
+        if (!ScaleIOUtil.isSDCServiceEnabled()) {
+            logger.debug("SDC service not enabled on host, enabling it");
+            if (!ScaleIOUtil.enableSDCService()) {
+                return new Ternary<>(false, null, "SDC service not enabled on host");
+            }
+        }
+
+        if (!ScaleIOUtil.isSDCServiceActive()) {
+            if (!ScaleIOUtil.startSDCService()) {
+                return new Ternary<>(false, null, "Couldn't start SDC service on host");
+            }
+        } else if (!ScaleIOUtil.restartSDCService()) {
+            return new Ternary<>(false, null, "Couldn't restart SDC service on host");
+        }
+
+        return new Ternary<>(true, getSDCDetails(details), "Prepared client successfully");
+    }
+
+    public Pair<Boolean, String> unprepareStorageClient(Storage.StoragePoolType type, String uuid) {
+        if (!ScaleIOUtil.isSDCServiceInstalled()) {
+            logger.debug("SDC service not installed on host, no need to unprepare the SDC client");
+            return new Pair<>(true, "SDC service not installed on host, no need to unprepare the SDC client");
+        }
+
+        if (!ScaleIOUtil.isSDCServiceEnabled()) {
+            logger.debug("SDC service not enabled on host, no need to unprepare the SDC client");
+            return new Pair<>(true, "SDC service not enabled on host, no need to unprepare the SDC client");
+        }
+
+        if (!ScaleIOUtil.stopSDCService()) {
+            return new Pair<>(false, "Couldn't stop SDC service on host");
+        }
+
+        return new Pair<>(true, "Unprepared SDC client successfully");
+    }
+
+    private Map<String, String> getSDCDetails(Map<String, String> details) {
+        Map<String, String> sdcDetails = new HashMap<String, String>();
+        if (details == null || !details.containsKey(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID)) {
+            return sdcDetails;
+        }
+
+        String storageSystemId = details.get(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID);
+        String sdcId = ScaleIOUtil.getSdcId(storageSystemId);
+        if (sdcId != null) {
+            sdcDetails.put(ScaleIOGatewayClient.SDC_ID, sdcId);
+        } else {
+            String sdcGuId = ScaleIOUtil.getSdcGuid();
+            if (sdcGuId != null) {
+                sdcDetails.put(ScaleIOGatewayClient.SDC_GUID, sdcGuId);
+            }
+        }
+        return sdcDetails;
+    }
+
     /**
      * Calculates usable size from raw size, assuming qcow2 requires 192k/1GB for metadata
      * We also remove 128MiB for encryption/fragmentation/safety factor.
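
In the ScaleIO adaptor, "preparing the storage client" means driving the SDC (ScaleIO Data Client) systemd unit through a ladder of checks, then reporting the SDC id (or, failing that, its GUID) back in the details map; unprepare is the mirror image and simply stops the service. A condensed restatement of that ladder, as comments mirroring the code above:

    // prepareStorageClient(type, uuid, details), as implemented above:
    //   not installed        -> Ternary(false, null, "SDC service not installed on host")
    //   installed, disabled  -> enableSDCService(); fail if that fails
    //   enabled, inactive    -> startSDCService(); fail if that fails
    //   enabled, active      -> restartSDCService(); fail if that fails
    //   otherwise            -> Ternary(true, getSDCDetails(details), "Prepared client successfully")
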
@@ -16,6 +16,7 @@
 // under the License.
 package com.cloud.hypervisor.kvm.storage;

+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;

@@ -23,6 +24,8 @@ import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;

 import com.cloud.storage.Storage;
 import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.utils.Pair;
+import com.cloud.utils.Ternary;

 public interface StorageAdaptor {

@@ -114,4 +117,25 @@ public interface StorageAdaptor {
     default boolean supportsPhysicalDiskCopy(StoragePoolType type) {
         return StoragePoolType.PowerFlex == type;
     }
+
+    /**
+     * Prepares the storage client.
+     * @param type type of the storage pool
+     * @param uuid uuid of the storage pool
+     * @param details any details of the storage pool that are required for client preparation
+     * @return status, client details, & message in case failed
+     */
+    default Ternary<Boolean, Map<String, String>, String> prepareStorageClient(StoragePoolType type, String uuid, Map<String, String> details) {
+        return new Ternary<>(true, new HashMap<>(), "");
+    }
+
+    /**
+     * Unprepares the storage client.
+     * @param type type of the storage pool
+     * @param uuid uuid of the storage pool
+     * @return status, & message in case failed
+     */
+    default Pair<Boolean, String> unprepareStorageClient(StoragePoolType type, String uuid) {
+        return new Pair<>(true, "");
+    }
 }
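
Because both interface additions above are default methods that return successful no-ops, every existing StorageAdaptor implementation keeps compiling and behaving exactly as before; only backends needing host-side client setup override them. A sketch of an opt-in override under assumed names (MyAdaptor and the "client.id" detail key are hypothetical):

    // Sketch only: returning Ternary(false, ...) would make the agent-side
    // PrepareStorageClientCommand fail with the given message.
    public class MyAdaptor implements StorageAdaptor {
        @Override
        public Ternary<Boolean, Map<String, String>, String> prepareStorageClient(StoragePoolType type, String uuid, Map<String, String> details) {
            Map<String, String> clientDetails = new HashMap<>();
            clientDetails.put("client.id", "example"); // surfaced back to the management server
            return new Ternary<>(true, clientDetails, "");
        }

        @Override
        public Pair<Boolean, String> unprepareStorageClient(StoragePoolType type, String uuid) {
            return new Pair<>(true, ""); // nothing to tear down
        }
        // ... remaining StorageAdaptor methods omitted in this sketch ...
    }
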
@@ -0,0 +1,87 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.Spy;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import com.cloud.agent.api.PrepareStorageClientAnswer;
+import com.cloud.agent.api.PrepareStorageClientCommand;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
+import com.cloud.storage.Storage;
+import com.cloud.utils.Ternary;
+
+@RunWith(MockitoJUnitRunner.class)
+public class LibvirtPrepareStorageClientCommandWrapperTest {
+
+    @Spy
+    LibvirtPrepareStorageClientCommandWrapper libvirtPrepareStorageClientCommandWrapperSpy = Mockito.spy(LibvirtPrepareStorageClientCommandWrapper.class);
+
+    @Mock
+    LibvirtComputingResource libvirtComputingResourceMock;
+
+    private final static String poolUuid = "345fc603-2d7e-47d2-b719-a0110b3732e6";
+    private final static String systemId = "218ce1797566a00f";
+    private final static String sdcId = "301b852c00000003";
+
+    @Test
+    public void testPrepareStorageClientSuccess() {
+        Map<String, String> details = new HashMap<>();
+        details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, systemId);
+        PrepareStorageClientCommand cmd = Mockito.mock(PrepareStorageClientCommand.class);
+        Mockito.when(cmd.getPoolType()).thenReturn(Storage.StoragePoolType.PowerFlex);
+        Mockito.when(cmd.getPoolUuid()).thenReturn(poolUuid);
+        Mockito.when(cmd.getDetails()).thenReturn(details);
+
+        KVMStoragePoolManager storagePoolMgr = Mockito.mock(KVMStoragePoolManager.class);
+        Mockito.when(libvirtComputingResourceMock.getStoragePoolMgr()).thenReturn(storagePoolMgr);
+        details.put(ScaleIOGatewayClient.SDC_ID, sdcId);
+        Mockito.when(storagePoolMgr.prepareStorageClient(cmd.getPoolType(), cmd.getPoolUuid(), cmd.getDetails())).thenReturn(new Ternary<>(true, details, ""));
+
+        PrepareStorageClientAnswer result = (PrepareStorageClientAnswer) libvirtPrepareStorageClientCommandWrapperSpy.execute(cmd, libvirtComputingResourceMock);
+
+        Assert.assertTrue(result.getResult());
+        Assert.assertEquals(sdcId, result.getDetailsMap().get(ScaleIOGatewayClient.SDC_ID));
+    }
+
+    @Test
+    public void testPrepareStorageClientFailure() {
+        Map<String, String> details = new HashMap<>();
+        details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, systemId);
+        PrepareStorageClientCommand cmd = Mockito.mock(PrepareStorageClientCommand.class);
+        Mockito.when(cmd.getPoolType()).thenReturn(Storage.StoragePoolType.PowerFlex);
+        Mockito.when(cmd.getPoolUuid()).thenReturn(poolUuid);
+        Mockito.when(cmd.getDetails()).thenReturn(details);
+
+        KVMStoragePoolManager storagePoolMgr = Mockito.mock(KVMStoragePoolManager.class);
+        Mockito.when(libvirtComputingResourceMock.getStoragePoolMgr()).thenReturn(storagePoolMgr);
+        Mockito.when(storagePoolMgr.prepareStorageClient(cmd.getPoolType(), cmd.getPoolUuid(), cmd.getDetails())).thenReturn(new Ternary<>(false, new HashMap<>(), "Prepare storage client failed"));
+
+        PrepareStorageClientAnswer result = (PrepareStorageClientAnswer) libvirtPrepareStorageClientCommandWrapperSpy.execute(cmd, libvirtComputingResourceMock);
+
+        Assert.assertFalse(result.getResult());
+        Assert.assertEquals("Prepare storage client failed", result.getDetails());
+    }
+}
@@ -0,0 +1,73 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.Spy;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import com.cloud.agent.api.UnprepareStorageClientAnswer;
+import com.cloud.agent.api.UnprepareStorageClientCommand;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
+import com.cloud.storage.Storage;
+import com.cloud.utils.Pair;
+
+@RunWith(MockitoJUnitRunner.class)
+public class LibvirtUnprepareStorageClientCommandWrapperTest {
+
+    @Spy
+    LibvirtUnprepareStorageClientCommandWrapper libvirtUnprepareStorageClientCommandWrapperSpy = Mockito.spy(LibvirtUnprepareStorageClientCommandWrapper.class);
+
+    @Mock
+    LibvirtComputingResource libvirtComputingResourceMock;
+
+    private final static String poolUuid = "345fc603-2d7e-47d2-b719-a0110b3732e6";
+
+    @Test
+    public void testUnprepareStorageClientSuccess() {
+        UnprepareStorageClientCommand cmd = Mockito.mock(UnprepareStorageClientCommand.class);
+        Mockito.when(cmd.getPoolType()).thenReturn(Storage.StoragePoolType.PowerFlex);
+        Mockito.when(cmd.getPoolUuid()).thenReturn(poolUuid);
+
+        KVMStoragePoolManager storagePoolMgr = Mockito.mock(KVMStoragePoolManager.class);
+        Mockito.when(libvirtComputingResourceMock.getStoragePoolMgr()).thenReturn(storagePoolMgr);
+        Mockito.when(storagePoolMgr.unprepareStorageClient(cmd.getPoolType(), cmd.getPoolUuid())).thenReturn(new Pair<>(true, ""));
+
+        UnprepareStorageClientAnswer result = (UnprepareStorageClientAnswer) libvirtUnprepareStorageClientCommandWrapperSpy.execute(cmd, libvirtComputingResourceMock);
+
+        Assert.assertTrue(result.getResult());
+    }
+
+    @Test
+    public void testUnprepareStorageClientFailure() {
+        UnprepareStorageClientCommand cmd = Mockito.mock(UnprepareStorageClientCommand.class);
+        Mockito.when(cmd.getPoolType()).thenReturn(Storage.StoragePoolType.PowerFlex);
+        Mockito.when(cmd.getPoolUuid()).thenReturn(poolUuid);
+
+        KVMStoragePoolManager storagePoolMgr = Mockito.mock(KVMStoragePoolManager.class);
+        Mockito.when(libvirtComputingResourceMock.getStoragePoolMgr()).thenReturn(storagePoolMgr);
+        Mockito.when(storagePoolMgr.unprepareStorageClient(cmd.getPoolType(), cmd.getPoolUuid())).thenReturn(new Pair<>(false, "Unprepare storage client failed"));
+
+        UnprepareStorageClientAnswer result = (UnprepareStorageClientAnswer) libvirtUnprepareStorageClientCommandWrapperSpy.execute(cmd, libvirtComputingResourceMock);
+
+        Assert.assertFalse(result.getResult());
+        Assert.assertEquals("Unprepare storage client failed", result.getDetails());
+    }
+}
@@ -17,13 +17,50 @@

 package com.cloud.hypervisor.kvm.storage;

+import static org.mockito.Mockito.when;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient;
+import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil;
+import org.junit.After;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.MockedStatic;
+import org.mockito.Mockito;
 import org.mockito.junit.MockitoJUnitRunner;
+
+import com.cloud.storage.Storage;
+import com.cloud.storage.StorageLayer;
+import com.cloud.utils.Pair;
+import com.cloud.utils.Ternary;
+import com.cloud.utils.script.Script;

 @RunWith(MockitoJUnitRunner.class)
 public class ScaleIOStorageAdaptorTest {
+
+    @Mock
+    StorageLayer storageLayer;
+    ScaleIOStorageAdaptor scaleIOStorageAdaptor;
+
+    private final static String poolUuid = "345fc603-2d7e-47d2-b719-a0110b3732e6";
+    private static MockedStatic<Script> mockedScript;
+
+    @Before
+    public void setUp() {
+        mockedScript = Mockito.mockStatic(Script.class);
+        scaleIOStorageAdaptor = Mockito.spy(ScaleIOStorageAdaptor.class);
+    }
+
+    @After
+    public void tearDown() {
+        mockedScript.close();
+    }
+
     @Test
     public void getUsableBytesFromRawBytesTest() {
         Assert.assertEquals("Overhead calculated for 8Gi size", 8454111232L, ScaleIOStorageAdaptor.getUsableBytesFromRawBytes(8L << 30));
@@ -31,4 +68,158 @@ public class ScaleIOStorageAdaptorTest {
         Assert.assertEquals("Overhead calculated for 500Gi size", 536636342272L, ScaleIOStorageAdaptor.getUsableBytesFromRawBytes(500L << 30));
         Assert.assertEquals("Unsupported small size", 0, ScaleIOStorageAdaptor.getUsableBytesFromRawBytes(1L));
     }
+
+    @Test
+    public void testPrepareStorageClient_SDCServiceNotInstalled() {
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl status scini"))).thenReturn(4);
+
+        Ternary<Boolean, Map<String, String>, String> result = scaleIOStorageAdaptor.prepareStorageClient(Storage.StoragePoolType.PowerFlex, poolUuid, new HashMap<>());
+
+        Assert.assertFalse(result.first());
+        Assert.assertNull(result.second());
+        Assert.assertEquals("SDC service not installed on host", result.third());
+    }
+
+    @Test
+    public void testPrepareStorageClient_SDCServiceNotEnabled() {
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl status scini"))).thenReturn(3);
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl is-enabled scini"))).thenReturn(1);
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl enable scini"))).thenReturn(1);
+
+        Ternary<Boolean, Map<String, String>, String> result = scaleIOStorageAdaptor.prepareStorageClient(Storage.StoragePoolType.PowerFlex, poolUuid, new HashMap<>());
+
+        Assert.assertFalse(result.first());
+        Assert.assertNull(result.second());
+        Assert.assertEquals("SDC service not enabled on host", result.third());
+    }
+
+    @Test
+    public void testPrepareStorageClient_SDCServiceNotRestarted() {
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl status scini"))).thenReturn(3);
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl is-enabled scini"))).thenReturn(0);
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl is-active scini"))).thenReturn(0);
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl restart scini"))).thenReturn(1);
+
+        Ternary<Boolean, Map<String, String>, String> result = scaleIOStorageAdaptor.prepareStorageClient(Storage.StoragePoolType.PowerFlex, poolUuid, new HashMap<>());
+
+        Assert.assertFalse(result.first());
+        Assert.assertNull(result.second());
+        Assert.assertEquals("Couldn't restart SDC service on host", result.third());
+    }
+
+    @Test
+    public void testPrepareStorageClient_SDCServiceRestarted() {
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl status scini"))).thenReturn(3);
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl is-enabled scini"))).thenReturn(0);
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl is-active scini"))).thenReturn(0);
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl restart scini"))).thenReturn(0);
+
+        Ternary<Boolean, Map<String, String>, String> result = scaleIOStorageAdaptor.prepareStorageClient(Storage.StoragePoolType.PowerFlex, poolUuid, new HashMap<>());
+
+        Assert.assertTrue(result.first());
+        Assert.assertNotNull(result.second());
+        Assert.assertTrue(result.second().isEmpty());
+    }
+
+    @Test
+    public void testPrepareStorageClient_SDCServiceNotStarted() {
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl status scini"))).thenReturn(3);
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl is-enabled scini"))).thenReturn(0);
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl is-active scini"))).thenReturn(1);
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl start scini"))).thenReturn(1);
+
+        Ternary<Boolean, Map<String, String>, String> result = scaleIOStorageAdaptor.prepareStorageClient(Storage.StoragePoolType.PowerFlex, poolUuid, new HashMap<>());
+
+        Assert.assertFalse(result.first());
+        Assert.assertNull(result.second());
+        Assert.assertEquals("Couldn't start SDC service on host", result.third());
+    }
+
+    @Test
+    public void testPrepareStorageClient_SDCServiceStartedReturnSDCId() {
+        Map<String, String> details = new HashMap<>();
+        String systemId = "218ce1797566a00f";
+        details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, systemId);
+
+        try (MockedStatic<ScaleIOUtil> ignored = Mockito.mockStatic(ScaleIOUtil.class)) {
+            when(ScaleIOUtil.isSDCServiceInstalled()).thenReturn(true);
+            when(ScaleIOUtil.isSDCServiceEnabled()).thenReturn(true);
+            when(ScaleIOUtil.isSDCServiceActive()).thenReturn(false);
+            when(ScaleIOUtil.startSDCService()).thenReturn(true);
+            String sdcId = "301b852c00000003";
+            when(ScaleIOUtil.getSdcId(systemId)).thenReturn(sdcId);
+
+            Ternary<Boolean, Map<String, String>, String> result = scaleIOStorageAdaptor.prepareStorageClient(Storage.StoragePoolType.PowerFlex, poolUuid, details);
+
+            Assert.assertTrue(result.first());
+            Assert.assertNotNull(result.second());
+            Assert.assertEquals(sdcId, result.second().get(ScaleIOGatewayClient.SDC_ID));
+        }
+    }
+
+    @Test
+    public void testPrepareStorageClient_SDCServiceStartedReturnSDCGuid() {
+        Map<String, String> details = new HashMap<>();
+        String systemId = "218ce1797566a00f";
+        details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, systemId);
+
+        String sdcGuid = "B0E3BFB8-C20B-43BF-93C8-13339E85AA50";
+        try (MockedStatic<ScaleIOUtil> ignored = Mockito.mockStatic(ScaleIOUtil.class)) {
+            when(ScaleIOUtil.isSDCServiceInstalled()).thenReturn(true);
+            when(ScaleIOUtil.isSDCServiceEnabled()).thenReturn(true);
+            when(ScaleIOUtil.isSDCServiceActive()).thenReturn(false);
+            when(ScaleIOUtil.startSDCService()).thenReturn(true);
+            when(ScaleIOUtil.getSdcId(systemId)).thenReturn(null);
+            when(ScaleIOUtil.getSdcGuid()).thenReturn(sdcGuid);
+
+            Ternary<Boolean, Map<String, String>, String> result = scaleIOStorageAdaptor.prepareStorageClient(Storage.StoragePoolType.PowerFlex, poolUuid, details);
+            Assert.assertTrue(result.first());
+            Assert.assertNotNull(result.second());
+            Assert.assertEquals(sdcGuid, result.second().get(ScaleIOGatewayClient.SDC_GUID));
+        }
+    }
+
+    @Test
+    public void testUnprepareStorageClient_SDCServiceNotInstalled() {
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl status scini"))).thenReturn(4);
+
+        Pair<Boolean, String> result = scaleIOStorageAdaptor.unprepareStorageClient(Storage.StoragePoolType.PowerFlex, poolUuid);
+
+        Assert.assertTrue(result.first());
+        Assert.assertEquals("SDC service not installed on host, no need to unprepare the SDC client", result.second());
+    }
+
+    @Test
+    public void testUnprepareStorageClient_SDCServiceNotEnabled() {
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl status scini"))).thenReturn(3);
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl is-enabled scini"))).thenReturn(1);
+
+        Pair<Boolean, String> result = scaleIOStorageAdaptor.unprepareStorageClient(Storage.StoragePoolType.PowerFlex, poolUuid);
+
+        Assert.assertTrue(result.first());
+        Assert.assertEquals("SDC service not enabled on host, no need to unprepare the SDC client", result.second());
+    }
+
+    @Test
+    public void testUnprepareStorageClient_SDCServiceNotStopped() {
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl status scini"))).thenReturn(3);
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl is-enabled scini"))).thenReturn(0);
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl stop scini"))).thenReturn(1);
+
+        Pair<Boolean, String> result = scaleIOStorageAdaptor.unprepareStorageClient(Storage.StoragePoolType.PowerFlex, poolUuid);
+
+        Assert.assertFalse(result.first());
+        Assert.assertEquals("Couldn't stop SDC service on host", result.second());
+    }
+
+    @Test
+    public void testUnprepareStorageClient_SDCServiceStopped() {
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl status scini"))).thenReturn(3);
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl is-enabled scini"))).thenReturn(0);
+        when(Script.runSimpleBashScriptForExitValue(Mockito.eq("systemctl stop scini"))).thenReturn(0);
+
+        Pair<Boolean, String> result = scaleIOStorageAdaptor.unprepareStorageClient(Storage.StoragePoolType.PowerFlex, poolUuid);
+
+        Assert.assertTrue(result.first());
+    }
 }
@@ -63,7 +63,7 @@ import org.apache.logging.log4j.Logger;
 /**
  * Manages the lifecycle of a Managed Data Store in CloudStack
  */
-public class AdaptiveDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
+public class AdaptiveDataStoreLifeCycleImpl extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
     @Inject
     private PrimaryDataStoreDao _storagePoolDao;
     protected Logger logger = LogManager.getLogger(getClass());
@@ -66,7 +66,7 @@ import com.cloud.storage.StoragePoolHostVO;
 import com.cloud.storage.dao.StoragePoolHostDao;
 import com.cloud.utils.exception.CloudRuntimeException;
 
-public class ElastistorPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
+public class ElastistorPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
     protected Logger logger = LogManager.getLogger(getClass());
 
     @Inject
@@ -20,11 +20,11 @@ package org.apache.cloudstack.storage.datastore.lifecycle;
 
 import com.cloud.agent.api.StoragePoolInfo;
 import com.cloud.capacity.CapacityManager;
+import com.cloud.dc.ClusterDetailsDao;
 import com.cloud.dc.ClusterVO;
 import com.cloud.dc.DataCenterVO;
-import com.cloud.dc.dao.DataCenterDao;
-import com.cloud.dc.ClusterDetailsDao;
 import com.cloud.dc.dao.ClusterDao;
+import com.cloud.dc.dao.DataCenterDao;
 import com.cloud.host.Host;
 import com.cloud.host.HostVO;
 import com.cloud.host.dao.HostDao;
@@ -43,10 +43,10 @@ import com.cloud.utils.exception.CloudRuntimeException;
 import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
 import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
 import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
-import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.apache.cloudstack.storage.datastore.util.DateraUtil;
@@ -59,7 +59,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
-public class DateraPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
+public class DateraPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
     protected Logger logger = LogManager.getLogger(getClass());
 
     @Inject
@@ -396,6 +396,15 @@ public class DateraPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycl
         dataStoreHelper.disable(dataStore);
     }
 
+    @Override
+    public void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterScope, HypervisorType hypervisorType) {
+        /*
+         * We need to attach all VMware, Xenserver and KVM hosts in the zone.
+         * So pass hypervisorType as null.
+         */
+        super.changeStoragePoolScopeToZone(store, clusterScope, null);
+    }
+
     private HypervisorType getHypervisorTypeForCluster(long clusterId) {
         ClusterVO cluster = _clusterDao.findById(clusterId);
 
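Datera here, and Nexenta further down, override changeStoragePoolScopeToZone only to drop the hypervisor filter, because their pools are reachable from hosts of every hypervisor type in the zone. Any plugin with the same property can reuse the two-line override; the class below is a hypothetical illustration of that pattern, under the assumption (stated by the comment in this hunk) that BasePrimaryDataStoreLifeCycleImpl treats a null hypervisor type as "attach all zone hosts".

    import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
    import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
    import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl;

    import com.cloud.hypervisor.Hypervisor.HypervisorType;

    // Hypothetical plugin lifecycle, shown only to illustrate the shared override pattern.
    public abstract class HypotheticalPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCycleImpl {
        @Override
        public void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterScope, HypervisorType hypervisorType) {
            // null hypervisor type = attach hosts of all hypervisor types in the zone
            super.changeStoragePoolScopeToZone(store, clusterScope, null);
        }
    }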
@@ -73,7 +73,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.UUID;
 
-public class CloudStackPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
+public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
     protected Logger logger = LogManager.getLogger(getClass());
     @Inject
     protected ResourceManager _resourceMgr;
@@ -54,7 +54,7 @@ import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.LogManager;
 
-public class LinstorPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
+public class LinstorPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
     protected Logger logger = LogManager.getLogger(getClass());
 
     @Inject
@@ -30,6 +30,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
 import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
 import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
 import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
+import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl;
 import org.apache.cloudstack.storage.datastore.util.NexentaUtil;
 import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
 import org.apache.logging.log4j.Logger;
@@ -46,6 +47,7 @@ import com.cloud.storage.StoragePool;
 import com.cloud.storage.StoragePoolAutomation;
 
 public class NexentaPrimaryDataStoreLifeCycle
+        extends BasePrimaryDataStoreLifeCycleImpl
         implements PrimaryDataStoreLifeCycle {
     protected Logger logger = LogManager.getLogger(getClass());
 
@@ -177,6 +179,15 @@ public class NexentaPrimaryDataStoreLifeCycle
         dataStoreHelper.disable(dataStore);
     }
 
+    @Override
+    public void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterScope, Hypervisor.HypervisorType hypervisorType) {
+        /*
+         * We need to attach all VMware, Xenserver and KVM hosts in the zone.
+         * So pass hypervisorType as null.
+         */
+        super.changeStoragePoolScopeToZone(store, clusterScope, null);
+    }
+
     @Override
     public boolean deleteDataStore(DataStore store) {
         return dataStoreHelper.deletePrimaryDataStore(store);
@@ -146,4 +146,12 @@ public class SamplePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLife
     @Override
     public void disableStoragePool(DataStore store) {
     }
+
+    @Override
+    public void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterScope, HypervisorType hypervisorType) {
+    }
+
+    @Override
+    public void changeStoragePoolScopeToCluster(DataStore store, ClusterScope clusterScope, HypervisorType hypervisorType) {
+    }
 }
@@ -79,6 +79,7 @@ public interface ScaleIOGatewayClient {
     VolumeStatistics getVolumeStatistics(String volumeId);
     String getSystemId(String protectionDomainId);
     List<Volume> listVolumesInStoragePool(String poolId);
+    List<Volume> listVolumesMappedToSdc(String sdcId);
 
     // SDC APIs
     List<Sdc> listSdcs();
@@ -86,6 +87,7 @@ public interface ScaleIOGatewayClient {
     String getSdcIdByGuid(String sdcGuid);
     Sdc getSdcByIp(String ipAddress);
     Sdc getConnectedSdcByIp(String ipAddress);
+    int getConnectedSdcsCount();
     boolean haveConnectedSdcs();
     boolean isSdcConnected(String sdcId);
     boolean isSdcConnectedByIP(String ipAddress);
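These two interface additions are what the SDC lifecycle management below is built on: getConnectedSdcsCount feeds the connection-limit check, and listVolumesMappedToSdc tells a caller whether a host's SDC still has mappings and may be stopped. A hedged usage sketch follows; the helper class is hypothetical, and the Volume import path is assumed to be the plugin's api package.

    import java.util.List;

    import org.apache.cloudstack.storage.datastore.api.Volume;
    import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient;

    // Illustrative helpers around the two new client APIs.
    public final class SdcClientUsageSketch {
        // True when nothing is mapped to the SDC any more, so it can be shut down.
        public static boolean canStopSdc(ScaleIOGatewayClient client, String sdcId) {
            List<Volume> stillMapped = client.listVolumesMappedToSdc(sdcId);
            return stillMapped.isEmpty();
        }

        // Mirrors the limit comparison done in ScaleIOSDCManagerImpl further down.
        public static boolean withinLimit(ScaleIOGatewayClient client, int connectedClientsLimit) {
            return client.getConnectedSdcsCount() < connectedClientsLimit;
        }
    }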
@@ -1004,6 +1004,17 @@ public class ScaleIOGatewayClientImpl implements ScaleIOGatewayClient {
         return new ArrayList<>();
     }
 
+    @Override
+    public List<Volume> listVolumesMappedToSdc(String sdcId) {
+        Preconditions.checkArgument(StringUtils.isNotEmpty(sdcId), "SDC id cannot be null");
+
+        Volume[] volumes = get("/instances/Sdc::" + sdcId + "/relationships/Volume", Volume[].class);
+        if (volumes != null) {
+            return Arrays.asList(volumes);
+        }
+        return new ArrayList<>();
+    }
+
 ///////////////////////////////////////////////
 //////////////// SDC APIs /////////////////////
 ///////////////////////////////////////////////
@@ -1062,6 +1073,21 @@ public class ScaleIOGatewayClientImpl implements ScaleIOGatewayClient {
         return null;
     }
 
+    @Override
+    public int getConnectedSdcsCount() {
+        List<Sdc> sdcs = listSdcs();
+        int connectedSdcsCount = 0;
+        if (sdcs != null) {
+            for (Sdc sdc : sdcs) {
+                if (MDM_CONNECTED_STATE.equalsIgnoreCase(sdc.getMdmConnectionState())) {
+                    connectedSdcsCount++;
+                }
+            }
+        }
+
+        return connectedSdcsCount;
+    }
+
     @Override
     public boolean haveConnectedSdcs() {
         List<Sdc> sdcs = listSdcs();
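getConnectedSdcsCount deliberately mirrors haveConnectedSdcs but returns the full count, which the new limit check (StorageManager.STORAGE_POOL_CONNECTED_CLIENTS_LIMIT, compared in ScaleIOSDCManagerImpl below) needs. As a design aside, the loop is equivalent to this stream form, assuming listSdcs() and MDM_CONNECTED_STATE as defined in ScaleIOGatewayClientImpl:

    // Equivalent stream-based count; behavior matches the loop above, including the null guard.
    List<Sdc> sdcs = listSdcs();
    int connectedSdcsCount = sdcs == null ? 0 : (int) sdcs.stream()
            .filter(sdc -> MDM_CONNECTED_STATE.equalsIgnoreCase(sdc.getMdmConnectionState()))
            .count();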
@@ -56,6 +56,8 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.manager.ScaleIOSDCManager;
+import org.apache.cloudstack.storage.datastore.manager.ScaleIOSDCManagerImpl;
 import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil;
 import org.apache.cloudstack.storage.to.SnapshotObjectTO;
 import org.apache.cloudstack.storage.to.VolumeObjectTO;
@@ -100,6 +102,7 @@ import com.cloud.storage.dao.VolumeDao;
 import com.cloud.storage.dao.VolumeDetailsDao;
 import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.Pair;
+import com.cloud.utils.component.ComponentContext;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.vm.VMInstanceVO;
 import com.cloud.vm.VirtualMachine;
@@ -142,9 +145,10 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
     private VolumeService volumeService;
     @Inject
     private VolumeOrchestrationService volumeMgr;
+    private ScaleIOSDCManager sdcManager;
 
     public ScaleIOPrimaryDataStoreDriver() {
+        sdcManager = new ScaleIOSDCManagerImpl();
     }
 
     public ScaleIOGatewayClient getScaleIOClient(final Long storagePoolId) throws Exception {
@@ -152,7 +156,8 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
     }
 
     private boolean setVolumeLimitsOnSDC(VolumeVO volume, Host host, DataStore dataStore, Long iopsLimit, Long bandwidthLimitInKbps) throws Exception {
-        final String sdcId = getConnectedSdc(dataStore.getId(), host.getId());
+        sdcManager = ComponentContext.inject(sdcManager);
+        final String sdcId = sdcManager.prepareSDC(host, dataStore);
         if (StringUtils.isBlank(sdcId)) {
             alertHostSdcDisconnection(host);
             throw new CloudRuntimeException("Unable to grant access to volume: " + volume.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
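The driver creates ScaleIOSDCManagerImpl with new rather than receiving it from Spring, so its @Inject fields (AgentManager and the DAOs) are unset until ComponentContext.inject(sdcManager) wires them from the application context; that is why every call site re-injects before use. A minimal sketch of the pattern, with a hypothetical bean standing in for the SDC manager:

    import javax.inject.Inject;

    import com.cloud.agent.AgentManager;
    import com.cloud.utils.component.ComponentContext;

    public class InjectSketch {
        // Hypothetical bean: constructed manually, wired on demand.
        static class HypotheticalWorker {
            @Inject
            AgentManager agentManager; // populated by ComponentContext.inject below
        }

        HypotheticalWorker prepare() {
            HypotheticalWorker worker = new HypotheticalWorker();
            worker = ComponentContext.inject(worker); // wires @Inject fields from the Spring context
            return worker;
        }
    }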
@@ -188,6 +193,13 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
     @Override
     public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) {
         try {
+            sdcManager = ComponentContext.inject(sdcManager);
+            final String sdcId = sdcManager.prepareSDC(host, dataStore);
+            if (StringUtils.isBlank(sdcId)) {
+                alertHostSdcDisconnection(host);
+                throw new CloudRuntimeException(String.format("Unable to grant access to %s: %s, no Sdc connected with host ip: %s", dataObject.getType(), dataObject.getId(), host.getPrivateIpAddress()));
+            }
+
             if (DataObjectType.VOLUME.equals(dataObject.getType())) {
                 final VolumeVO volume = volumeDao.findById(dataObject.getId());
                 logger.debug("Granting access for PowerFlex volume: " + volume.getPath());
@@ -195,25 +207,11 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
             } else if (DataObjectType.TEMPLATE.equals(dataObject.getType())) {
                 final VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(dataStore.getId(), dataObject.getId(), null);
                 logger.debug("Granting access for PowerFlex template volume: " + templatePoolRef.getInstallPath());
-
-                final String sdcId = getConnectedSdc(dataStore.getId(), host.getId());
-                if (StringUtils.isBlank(sdcId)) {
-                    alertHostSdcDisconnection(host);
-                    throw new CloudRuntimeException("Unable to grant access to template: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
-                }
-
                 final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
                 return client.mapVolumeToSdc(ScaleIOUtil.getVolumePath(templatePoolRef.getInstallPath()), sdcId);
             } else if (DataObjectType.SNAPSHOT.equals(dataObject.getType())) {
                 SnapshotInfo snapshot = (SnapshotInfo) dataObject;
                 logger.debug("Granting access for PowerFlex volume snapshot: " + snapshot.getPath());
-
-                final String sdcId = getConnectedSdc(dataStore.getId(), host.getId());
-                if (StringUtils.isBlank(sdcId)) {
-                    alertHostSdcDisconnection(host);
-                    throw new CloudRuntimeException("Unable to grant access to snapshot: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
-                }
-
                 final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
                 return client.mapVolumeToSdc(ScaleIOUtil.getVolumePath(snapshot.getPath()), sdcId);
             }
@@ -237,40 +235,29 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
         }
 
         try {
+            final String sdcId = getConnectedSdc(dataStore.getId(), host.getId());
+            if (StringUtils.isBlank(sdcId)) {
+                logger.warn(String.format("Unable to revoke access for %s: %s, no Sdc connected with host ip: %s", dataObject.getType(), dataObject.getId(), host.getPrivateIpAddress()));
+                return;
+            }
+            final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
             if (DataObjectType.VOLUME.equals(dataObject.getType())) {
                 final VolumeVO volume = volumeDao.findById(dataObject.getId());
                 logger.debug("Revoking access for PowerFlex volume: " + volume.getPath());
-
-                final String sdcId = getConnectedSdc(dataStore.getId(), host.getId());
-                if (StringUtils.isBlank(sdcId)) {
-                    throw new CloudRuntimeException("Unable to revoke access for volume: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
-                }
-
-                final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
                 client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(volume.getPath()), sdcId);
             } else if (DataObjectType.TEMPLATE.equals(dataObject.getType())) {
                 final VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(dataStore.getId(), dataObject.getId(), null);
                 logger.debug("Revoking access for PowerFlex template volume: " + templatePoolRef.getInstallPath());
-
-                final String sdcId = getConnectedSdc(dataStore.getId(), host.getId());
-                if (StringUtils.isBlank(sdcId)) {
-                    throw new CloudRuntimeException("Unable to revoke access for template: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
-                }
-
-                final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
                 client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(templatePoolRef.getInstallPath()), sdcId);
             } else if (DataObjectType.SNAPSHOT.equals(dataObject.getType())) {
                 SnapshotInfo snapshot = (SnapshotInfo) dataObject;
                 logger.debug("Revoking access for PowerFlex volume snapshot: " + snapshot.getPath());
-
-                final String sdcId = getConnectedSdc(dataStore.getId(), host.getId());
-                if (StringUtils.isBlank(sdcId)) {
-                    throw new CloudRuntimeException("Unable to revoke access for snapshot: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
-                }
-
-                final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
                 client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(snapshot.getPath()), sdcId);
             }
+            if (client.listVolumesMappedToSdc(sdcId).isEmpty()) {
+                sdcManager = ComponentContext.inject(sdcManager);
+                sdcManager.stopSDC(host, dataStore);
+            }
         } catch (Exception e) {
             logger.warn("Failed to revoke access due to: " + e.getMessage(), e);
         }
@@ -287,11 +274,16 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
 
             final String sdcId = getConnectedSdc(dataStore.getId(), host.getId());
             if (StringUtils.isBlank(sdcId)) {
-                throw new CloudRuntimeException("Unable to revoke access for volume: " + volumePath + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
+                logger.warn(String.format("Unable to revoke access for volume: %s, no Sdc connected with host ip: %s", volumePath, host.getPrivateIpAddress()));
+                return;
             }
 
             final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
             client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(volumePath), sdcId);
+            if (client.listVolumesMappedToSdc(sdcId).isEmpty()) {
+                sdcManager = ComponentContext.inject(sdcManager);
+                sdcManager.stopSDC(host, dataStore);
+            }
         } catch (Exception e) {
             logger.warn("Failed to revoke access due to: " + e.getMessage(), e);
         }
@@ -1374,6 +1366,28 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
         return true;
     }
 
+    @Override
+    public boolean poolProvidesCustomStorageStats() {
+        return true;
+    }
+
+    @Override
+    public Map<String, String> getCustomStorageStats(StoragePool pool) {
+        Preconditions.checkArgument(pool != null, "pool cannot be null");
+        Map<String, String> customStats = new HashMap<>();
+
+        try {
+            final ScaleIOGatewayClient client = getScaleIOClient(pool.getId());
+            int connectedSdcsCount = client.getConnectedSdcsCount();
+            customStats.put(ScaleIOUtil.CONNECTED_SDC_COUNT_STAT, String.valueOf(connectedSdcsCount));
+        } catch (Exception e) {
+            String errMsg = "Unable to get custom storage stats for the pool: " + pool.getId() + " due to " + e.getMessage();
+            logger.error(errMsg);
+        }
+
+        return customStats;
+    }
+
     @Override
     public Pair<Long, Long> getStorageStats(StoragePool storagePool) {
         Preconditions.checkArgument(storagePool != null, "storagePool cannot be null");
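poolProvidesCustomStorageStats and getCustomStorageStats let callers surface the live SDC count for a PowerFlex pool; ScaleIOUtil.CONNECTED_SDC_COUNT_STAT is the key the driver publishes. A hedged sketch of consuming the stat; the helper class is illustrative:

    import java.util.Map;

    import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil;

    public final class CustomStatsSketch {
        // Returns the published SDC count, or null when the driver could not fetch it.
        public static Integer connectedSdcCount(Map<String, String> customStats) {
            String raw = customStats.get(ScaleIOUtil.CONNECTED_SDC_COUNT_STAT);
            return raw == null ? null : Integer.valueOf(raw);
        }
    }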
@@ -1441,6 +1455,16 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
         }
     }
 
+    @Override
+    public boolean canHostPrepareStoragePoolAccess(Host host, StoragePool pool) {
+        if (host == null || pool == null) {
+            return false;
+        }
+
+        sdcManager = ComponentContext.inject(sdcManager);
+        return sdcManager.areSDCConnectionsWithinLimit(pool.getId());
+    }
+
     private void alertHostSdcDisconnection(Host host) {
         if (host == null) {
             return;
@@ -75,7 +75,7 @@ import com.cloud.utils.UriUtils;
 import com.cloud.utils.crypt.DBEncryptionUtil;
 import com.cloud.utils.exception.CloudRuntimeException;
 
-public class ScaleIOPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
+public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
     protected Logger logger = LogManager.getLogger(getClass());
 
     @Inject
@@ -261,8 +261,6 @@ public class ScaleIOPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCyc
             throw new CloudRuntimeException("Unsupported hypervisor type: " + cluster.getHypervisorType().toString());
         }
 
-        checkConnectedSdcs(dataStore.getId());
-
         PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) dataStore;
         List<HostVO> hostsInCluster = resourceManager.listAllUpAndEnabledHosts(Host.Type.Routing, primaryDataStoreInfo.getClusterId(),
                 primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getDataCenterId());
@@ -279,14 +277,12 @@ public class ScaleIOPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCyc
                     poolHosts.add(host);
                 }
             } catch (Exception e) {
-                logger.warn("Unable to establish a connection between " + host + " and " + primaryDataStoreInfo, e);
+                logger.warn("Unable to establish a connection between host: " + host + " and pool: " + dataStore + " on the cluster: " + primaryDataStoreInfo.getClusterId(), e);
             }
         }
 
         if (poolHosts.isEmpty()) {
             logger.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '" + primaryDataStoreInfo.getClusterId() + "'.");
-            primaryDataStoreDao.expunge(primaryDataStoreInfo.getId());
-            throw new CloudRuntimeException("Failed to create storage pool in the cluster: " + primaryDataStoreInfo.getClusterId() + " as it is not accessible to hosts");
         }
 
         dataStoreHelper.attachCluster(dataStore);
@@ -304,8 +300,6 @@ public class ScaleIOPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCyc
             throw new CloudRuntimeException("Unsupported hypervisor type: " + hypervisorType.toString());
         }
 
-        checkConnectedSdcs(dataStore.getId());
-
         logger.debug("Attaching the pool to each of the hosts in the zone: " + scope.getScopeId());
         List<HostVO> hosts = resourceManager.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId());
         List<HostVO> poolHosts = new ArrayList<HostVO>();
@@ -315,35 +309,17 @@ public class ScaleIOPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCyc
                     poolHosts.add(host);
                 }
             } catch (Exception e) {
-                logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
+                logger.warn("Unable to establish a connection between host: " + host + " and pool: " + dataStore + " in the zone: " + scope.getScopeId(), e);
             }
         }
         if (poolHosts.isEmpty()) {
-            logger.warn("No host can access storage pool " + dataStore + " in this zone.");
-            primaryDataStoreDao.expunge(dataStore.getId());
-            throw new CloudRuntimeException("Failed to create storage pool as it is not accessible to hosts.");
+            logger.warn("No host can access storage pool " + dataStore + " in the zone: " + scope.getScopeId());
         }
 
         dataStoreHelper.attachZone(dataStore);
         return true;
     }
 
-    private void checkConnectedSdcs(Long dataStoreId) {
-        boolean haveConnectedSdcs = false;
-        try {
-            ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(dataStoreId, storagePoolDetailsDao);
-            haveConnectedSdcs = client.haveConnectedSdcs();
-        } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) {
-            logger.error(String.format("Failed to create storage pool for datastore: %s", dataStoreId), e);
-            throw new CloudRuntimeException(String.format("Failed to establish connection with PowerFlex Gateway to create storage pool for datastore: %s", dataStoreId));
-        }
-
-        if (!haveConnectedSdcs) {
-            logger.debug(String.format("No connected SDCs found for the PowerFlex storage pool of datastore: %s", dataStoreId));
-            throw new CloudRuntimeException(String.format("Failed to create storage pool as connected SDCs not found for datastore: %s", dataStoreId));
-        }
-    }
-
     @Override
     public boolean maintain(DataStore store) {
         storagePoolAutomation.maintain(store);
@@ -0,0 +1,47 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.storage.datastore.manager;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+
+import com.cloud.host.Host;
+
+public interface ScaleIOSDCManager {
+    /**
+     * Checks SDC connections limit.
+     * @param storagePoolId the storage pool id
+     * @return true if SDC connections are within limit
+     */
+    boolean areSDCConnectionsWithinLimit(Long storagePoolId);
+
+    /**
+     * Prepares/starts the SDC on the host.
+     * @param host the host
+     * @param dataStore the datastore
+     * @return SDC Id of the host
+     */
+    String prepareSDC(Host host, DataStore dataStore);
+
+    /**
+     * Stops the SDC on the host.
+     * @param host the host
+     * @param dataStore the datastore
+     * @return true if SDC stopped on the host
+     */
+    boolean stopSDC(Host host, DataStore dataStore);
+}
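The contract, as the driver changes above use it: call prepareSDC before mapping a volume (it starts the SDC on demand and returns its id), and stopSDC once the host has no mappings left. An illustrative walk-through under those assumptions; the class, method, and parameter wiring are hypothetical, while the manager and client calls are the APIs from this change.

    import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
    import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient;
    import org.apache.cloudstack.storage.datastore.manager.ScaleIOSDCManager;
    import org.apache.commons.lang3.StringUtils;

    import com.cloud.host.Host;
    import com.cloud.utils.exception.CloudRuntimeException;

    public final class SdcLifecycleSketch {
        public static void useVolume(ScaleIOSDCManager sdcManager, ScaleIOGatewayClient client,
                                     Host host, DataStore dataStore, String volumePath) throws Exception {
            String sdcId = sdcManager.prepareSDC(host, dataStore); // starts the SDC if it is not running
            if (StringUtils.isBlank(sdcId)) {
                throw new CloudRuntimeException("SDC not connected on host " + host.getId());
            }
            client.mapVolumeToSdc(volumePath, sdcId);
            // ... attach and use the volume ...
            client.unmapVolumeFromSdc(volumePath, sdcId);
            if (client.listVolumesMappedToSdc(sdcId).isEmpty()) {
                sdcManager.stopSDC(host, dataStore); // nothing mapped any more
            }
        }
    }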
@@ -0,0 +1,346 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.storage.datastore.manager;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient;
+import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.commons.collections.MapUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.PrepareStorageClientAnswer;
+import com.cloud.agent.api.PrepareStorageClientCommand;
+import com.cloud.agent.api.UnprepareStorageClientCommand;
+import com.cloud.configuration.Config;
+import com.cloud.exception.AgentUnavailableException;
+import com.cloud.exception.OperationTimedoutException;
+import com.cloud.host.Host;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import com.cloud.utils.NumbersUtil;
+import com.cloud.utils.db.GlobalLock;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+@Component
+public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager {
+    private static final Logger LOGGER = Logger.getLogger(ScaleIOSDCManagerImpl.class);
+
+    @Inject
+    AgentManager agentManager;
+    @Inject
+    StoragePoolHostDao storagePoolHostDao;
+    @Inject
+    StoragePoolDetailsDao storagePoolDetailsDao;
+    @Inject
+    ConfigurationDao configDao;
+
+    private static final String POWERFLEX_SDC_HOSTID_SYSTEMID_LOCK_FORMAT = "PowerFlexSDC-HostId:%s-SystemId:%s";
+    private static final String POWERFLEX_SDC_SYSTEMID_LOCK_FORMAT = "PowerFlexSDC-SystemId:%s";
+
+    public ScaleIOSDCManagerImpl() {
+
+    }
+
+    @Override
+    public boolean areSDCConnectionsWithinLimit(Long storagePoolId) {
+        try {
+            int connectedClientsLimit = StorageManager.STORAGE_POOL_CONNECTED_CLIENTS_LIMIT.valueIn(storagePoolId);
+            if (connectedClientsLimit <= 0) {
+                return true;
+            }
+
+            int connectedSdcsCount = getScaleIOClient(storagePoolId).getConnectedSdcsCount();
+            if (connectedSdcsCount < connectedClientsLimit) {
+                LOGGER.debug(String.format("Current connected SDCs count: %d - SDC connections are within the limit (%d) on PowerFlex Storage with pool id: %d", connectedSdcsCount, connectedClientsLimit, storagePoolId));
+                return true;
+            }
+            LOGGER.debug(String.format("Current connected SDCs count: %d - SDC connections limit (%d) reached on PowerFlex Storage with pool id: %d", connectedSdcsCount, connectedClientsLimit, storagePoolId));
+            return false;
+        } catch (Exception e) {
+            String errMsg = "Unable to check SDC connections for the PowerFlex storage pool with id: " + storagePoolId + " due to " + e.getMessage();
+            LOGGER.warn(errMsg, e);
+            return false;
+        }
+    }
+
+    @Override
+    public String prepareSDC(Host host, DataStore dataStore) {
+        String systemId = storagePoolDetailsDao.findDetail(dataStore.getId(), ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue();
+        if (systemId == null) {
+            throw new CloudRuntimeException("Unable to prepare SDC, failed to get the system id for PowerFlex storage pool: " + dataStore.getName());
+        }
+
+        GlobalLock hostIdStorageSystemIdLock = null;
+        GlobalLock storageSystemIdLock = null;
+        try {
+            String hostIdStorageSystemIdLockString = String.format(POWERFLEX_SDC_HOSTID_SYSTEMID_LOCK_FORMAT, host.getId(), systemId);
+            hostIdStorageSystemIdLock = GlobalLock.getInternLock(hostIdStorageSystemIdLockString);
+            if (hostIdStorageSystemIdLock == null) {
+                throw new CloudRuntimeException("Unable to prepare SDC, couldn't get global lock on " + hostIdStorageSystemIdLockString);
+            }
+
+            int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600);
+            if (!hostIdStorageSystemIdLock.lock(storagePoolMaxWaitSeconds)) {
+                LOGGER.debug("Unable to prepare SDC, couldn't lock on " + hostIdStorageSystemIdLockString);
+                throw new CloudRuntimeException("Unable to prepare SDC, couldn't lock on " + hostIdStorageSystemIdLockString);
+            }
+
+            long poolId = dataStore.getId();
+            long hostId = host.getId();
+            String sdcId = getConnectedSdc(poolId, hostId);
+            if (StringUtils.isNotBlank(sdcId)) {
+                LOGGER.debug(String.format("SDC %s already connected for the pool: %d on host: %d, no need to prepare/start it", sdcId, poolId, hostId));
+                return sdcId;
+            }
+
+            String storageSystemIdLockString = String.format(POWERFLEX_SDC_SYSTEMID_LOCK_FORMAT, systemId);
+            storageSystemIdLock = GlobalLock.getInternLock(storageSystemIdLockString);
+            if (storageSystemIdLock == null) {
+                LOGGER.error("Unable to prepare SDC, couldn't get global lock on: " + storageSystemIdLockString);
+                throw new CloudRuntimeException("Unable to prepare SDC, couldn't get global lock on " + storageSystemIdLockString);
+            }
+
+            if (!storageSystemIdLock.lock(storagePoolMaxWaitSeconds)) {
+                LOGGER.error("Unable to prepare SDC, couldn't lock on " + storageSystemIdLockString);
+                throw new CloudRuntimeException("Unable to prepare SDC, couldn't lock on " + storageSystemIdLockString);
+            }
+
+            if (!areSDCConnectionsWithinLimit(poolId)) {
+                String errorMsg = String.format("Unable to check SDC connections or the connections limit reached for PowerFlex storage (System ID: %s)", systemId);
+                LOGGER.error(errorMsg);
+                throw new CloudRuntimeException(errorMsg);
+            }
+
+            sdcId = prepareSDCOnHost(host, dataStore, systemId);
+            StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId);
+
+            if (StringUtils.isBlank(sdcId)) {
+                if (storagePoolHost != null) {
+                    storagePoolHostDao.deleteStoragePoolHostDetails(hostId, poolId);
+                }
+            } else {
+                if (storagePoolHost == null) {
+                    storagePoolHost = new StoragePoolHostVO(poolId, hostId, sdcId);
+                    storagePoolHostDao.persist(storagePoolHost);
+                } else {
+                    storagePoolHost.setLocalPath(sdcId);
+                    storagePoolHostDao.update(storagePoolHost.getId(), storagePoolHost);
+                }
+            }
+
+            int waitTimeInSecs = 15; // Wait for 15 secs (usual tests with SDC service start took 10-15 secs)
+            if (hostSdcConnected(sdcId, poolId, waitTimeInSecs)) {
+                return sdcId;
+            }
+            return null;
+        } finally {
+            if (storageSystemIdLock != null) {
+                storageSystemIdLock.unlock();
+                storageSystemIdLock.releaseRef();
+            }
+            if (hostIdStorageSystemIdLock != null) {
+                hostIdStorageSystemIdLock.unlock();
+                hostIdStorageSystemIdLock.releaseRef();
+            }
+        }
+    }
+
+    private String prepareSDCOnHost(Host host, DataStore dataStore, String systemId) {
+        LOGGER.debug(String.format("Preparing SDC on the host %s (%s)", host.getId(), host.getName()));
+        Map<String,String> details = new HashMap<>();
+        details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, systemId);
+        PrepareStorageClientCommand cmd = new PrepareStorageClientCommand(((PrimaryDataStore) dataStore).getPoolType(), dataStore.getUuid(), details);
+        int timeoutSeconds = 60;
+        cmd.setWait(timeoutSeconds);
+
+        PrepareStorageClientAnswer prepareStorageClientAnswer;
+        try {
+            prepareStorageClientAnswer = (PrepareStorageClientAnswer) agentManager.send(host.getId(), cmd);
+        } catch (AgentUnavailableException | OperationTimedoutException e) {
+            String err = String.format("Failed to prepare SDC on the host %s, due to: %s", host.getName(), e.getMessage());
+            LOGGER.error(err);
+            throw new CloudRuntimeException(err);
+        }
+
+        if (prepareStorageClientAnswer == null) {
+            String err = String.format("Unable to prepare SDC on the host %s", host.getName());
+            LOGGER.error(err);
+            throw new CloudRuntimeException(err);
+        }
+
+        if (!prepareStorageClientAnswer.getResult()) {
+            String err = String.format("Unable to prepare SDC on the host %s, due to: %s", host.getName(), prepareStorageClientAnswer.getDetails());
+            LOGGER.error(err);
+            throw new CloudRuntimeException(err);
+        }
+
+        Map<String,String> poolDetails = prepareStorageClientAnswer.getDetailsMap();
+        if (MapUtils.isEmpty(poolDetails)) {
+            LOGGER.warn(String.format("PowerFlex storage SDC details not found on the host: %s, try (re)install SDC and restart agent", host.getId()));
+            return null;
+        }
+
+        String sdcId = null;
+        if (poolDetails.containsKey(ScaleIOGatewayClient.SDC_ID)) {
+            sdcId = poolDetails.get(ScaleIOGatewayClient.SDC_ID);
+        } else if (poolDetails.containsKey(ScaleIOGatewayClient.SDC_GUID)) {
+            String sdcGuid = poolDetails.get(ScaleIOGatewayClient.SDC_GUID);
+            sdcId = getHostSdcId(sdcGuid, dataStore.getId());
+        }
+
+        if (StringUtils.isBlank(sdcId)) {
+            LOGGER.warn(String.format("Couldn't retrieve PowerFlex storage SDC details from the host: %s, try (re)install SDC and restart agent", host.getId()));
+            return null;
+        }
+
+        return sdcId;
+    }
+
+    @Override
+    public boolean stopSDC(Host host, DataStore dataStore) {
+        String systemId = storagePoolDetailsDao.findDetail(dataStore.getId(), ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue();
+        if (systemId == null) {
+            throw new CloudRuntimeException("Unable to unprepare SDC, failed to get the system id for PowerFlex storage pool: " + dataStore.getName());
+        }
+
+        GlobalLock lock = null;
+        try {
+            String hostIdStorageSystemIdLockString = String.format(POWERFLEX_SDC_HOSTID_SYSTEMID_LOCK_FORMAT, host.getId(), systemId);
+            lock = GlobalLock.getInternLock(hostIdStorageSystemIdLockString);
+            if (lock == null) {
+                throw new CloudRuntimeException("Unable to unprepare SDC, couldn't get global lock on " + hostIdStorageSystemIdLockString);
+            }
+
+            int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600);
+            if (!lock.lock(storagePoolMaxWaitSeconds)) {
+                LOGGER.debug("Unable to unprepare SDC, couldn't lock on " + hostIdStorageSystemIdLockString);
+                throw new CloudRuntimeException("Unable to unprepare SDC, couldn't lock on " + hostIdStorageSystemIdLockString);
+            }
+
+            long poolId = dataStore.getId();
+            long hostId = host.getId();
+            String sdcId = getConnectedSdc(poolId, hostId);
+            if (StringUtils.isBlank(sdcId)) {
+                LOGGER.debug("SDC not connected, no need to unprepare it");
+                return true;
+            }
+
+            return unprepareSDCOnHost(host, dataStore);
+        } finally {
+            if (lock != null) {
+                lock.unlock();
+                lock.releaseRef();
+            }
+        }
+    }
+
+    private boolean unprepareSDCOnHost(Host host, DataStore dataStore) {
+        LOGGER.debug(String.format("Unpreparing SDC on the host %s (%s)", host.getId(), host.getName()));
+        UnprepareStorageClientCommand cmd = new UnprepareStorageClientCommand(((PrimaryDataStore) dataStore).getPoolType(), dataStore.getUuid());
+        int timeoutSeconds = 60;
+        cmd.setWait(timeoutSeconds);
+
+        Answer unprepareStorageClientAnswer;
+        try {
+            unprepareStorageClientAnswer = agentManager.send(host.getId(), cmd);
+        } catch (AgentUnavailableException | OperationTimedoutException e) {
+            String err = String.format("Failed to unprepare SDC on the host %s due to: %s", host.getName(), e.getMessage());
+            LOGGER.error(err);
+            return false;
+        }
+
+        if (!unprepareStorageClientAnswer.getResult()) {
+            String err = String.format("Unable to unprepare SDC on the host %s due to: %s", host.getName(), unprepareStorageClientAnswer.getDetails());
+            LOGGER.error(err);
+            return false;
+        }
+        return true;
+    }
+
+    private String getHostSdcId(String sdcGuid, long poolId) {
+        try {
+            LOGGER.debug(String.format("Try to get host SDC Id for pool: %s, with SDC guid %s", poolId, sdcGuid));
+            ScaleIOGatewayClient client = getScaleIOClient(poolId);
+            return client.getSdcIdByGuid(sdcGuid);
+        } catch (Exception e) {
+            LOGGER.error(String.format("Failed to get host SDC Id for pool: %s", poolId), e);
+            throw new CloudRuntimeException(String.format("Failed to establish connection with PowerFlex Gateway to get host SDC Id for pool: %s", poolId));
+        }
+    }
+
+    private String getConnectedSdc(long poolId, long hostId) {
+        try {
+            StoragePoolHostVO poolHostVO = storagePoolHostDao.findByPoolHost(poolId, hostId);
+            if (poolHostVO == null) {
+                return null;
+            }
+
+            final ScaleIOGatewayClient client = getScaleIOClient(poolId);
+            if (client.isSdcConnected(poolHostVO.getLocalPath())) {
+                return poolHostVO.getLocalPath();
+            }
+        } catch (Exception e) {
+            LOGGER.warn("Unable to get connected SDC for the host: " + hostId + " and storage pool: " + poolId + " due to " + e.getMessage(), e);
+        }
+
+        return null;
+    }
+
+    private boolean hostSdcConnected(String sdcId, long poolId, int waitTimeInSecs) {
+        LOGGER.debug(String.format("Waiting (for %d secs) for the SDC %s of the pool id: %d to connect", waitTimeInSecs, sdcId, poolId));
+        int timeBetweenTries = 1000; // Try more frequently (every sec) and return early if connected
+        while (waitTimeInSecs > 0) {
+            if (isHostSdcConnected(sdcId, poolId)) {
+                return true;
+            }
+            waitTimeInSecs--;
+            try {
+                Thread.sleep(timeBetweenTries);
+            } catch (Exception ignore) {
+            }
+        }
+        return isHostSdcConnected(sdcId, poolId);
+    }
+
+    private boolean isHostSdcConnected(String sdcId, long poolId) {
+        try {
+            final ScaleIOGatewayClient client = getScaleIOClient(poolId);
+            return client.isSdcConnected(sdcId);
+        } catch (Exception e) {
+            LOGGER.error("Failed to check host SDC connection", e);
+            throw new CloudRuntimeException("Failed to establish connection with PowerFlex Gateway to check host SDC connection");
+        }
+    }
+
+    private ScaleIOGatewayClient getScaleIOClient(final Long storagePoolId) throws Exception {
+        return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePoolId, storagePoolDetailsDao);
+    }
+}
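prepareSDC layers two GlobalLocks: the per-host, per-system lock serializes preparation of one host's SDC, and the system-wide lock is taken only when a new SDC actually has to be started, so the connection-limit check and the start are atomic across hosts of the same PowerFlex system. A stripped-down skeleton of that pattern; doPrepareWork is a hypothetical placeholder for the limit check plus SDC start.

    import com.cloud.utils.db.GlobalLock;
    import com.cloud.utils.exception.CloudRuntimeException;

    public final class DoubleLockSketch {
        public static void runLocked(long hostId, String systemId, int timeoutSecs, Runnable doPrepareWork) {
            GlobalLock hostLock = GlobalLock.getInternLock("PowerFlexSDC-HostId:" + hostId + "-SystemId:" + systemId);
            if (hostLock == null || !hostLock.lock(timeoutSecs)) {
                throw new CloudRuntimeException("Couldn't lock SDC preparation for host " + hostId);
            }
            try {
                GlobalLock systemLock = GlobalLock.getInternLock("PowerFlexSDC-SystemId:" + systemId);
                if (systemLock == null || !systemLock.lock(timeoutSecs)) {
                    throw new CloudRuntimeException("Couldn't lock SDC preparation for system " + systemId);
                }
                try {
                    doPrepareWork.run(); // limit check + SDC start, serialized per storage system
                } finally {
                    systemLock.unlock();
                    systemLock.releaseRef();
                }
            } finally {
                hostLock.unlock();
                hostLock.releaseRef();
            }
        }
    }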
@@ -70,12 +70,33 @@ public class ScaleIOHostListener implements HypervisorHostListener {
     public boolean hostConnect(long hostId, long poolId) {
         HostVO host = _hostDao.findById(hostId);
         if (host == null) {
-            logger.error("Failed to add host by HostListener as host was not found with id : " + hostId);
+            logger.error("Failed to connect host by HostListener as host was not found with id : " + hostId);
             return false;
         }
 
         StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary);
+        StoragePoolHostVO storagePoolHost = _storagePoolHostDao.findByPoolHost(poolId, hostId);
+        String sdcId = getSdcIdOfHost(host, storagePool);
+        if (StringUtils.isBlank(sdcId)) {
+            if (storagePoolHost != null) {
+                _storagePoolHostDao.deleteStoragePoolHostDetails(hostId, poolId);
+            }
+        } else {
+            if (storagePoolHost == null) {
+                storagePoolHost = new StoragePoolHostVO(poolId, hostId, sdcId);
+                _storagePoolHostDao.persist(storagePoolHost);
+            } else {
+                storagePoolHost.setLocalPath(sdcId);
+                _storagePoolHostDao.update(storagePoolHost.getId(), storagePoolHost);
+            }
+            logger.info("Connection established between storage pool: " + storagePool + " and host: " + hostId);
+        }
+        return true;
+    }
+
+    private String getSdcIdOfHost(HostVO host, StoragePool storagePool) {
+        long hostId = host.getId();
+        long poolId = storagePool.getId();
         String systemId = _storagePoolDetailsDao.findDetail(poolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue();
         if (systemId == null) {
             throw new CloudRuntimeException("Failed to get the system id for PowerFlex storage pool " + storagePool.getName());
@@ -87,10 +108,10 @@ public class ScaleIOHostListener implements HypervisorHostListener {
         ModifyStoragePoolAnswer answer = sendModifyStoragePoolCommand(cmd, storagePool, hostId);
         Map<String,String> poolDetails = answer.getPoolInfo().getDetails();
         if (MapUtils.isEmpty(poolDetails)) {
-            String msg = "SDC details not found on the host: " + hostId + ", (re)install SDC and restart agent";
+            String msg = "PowerFlex storage SDC details not found on the host: " + hostId + ", (re)install SDC and restart agent";
             logger.warn(msg);
             _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC not found on host: " + host.getUuid(), msg);
-            return false;
+            return null;
         }
 
         String sdcId = null;
@ -102,30 +123,13 @@ public class ScaleIOHostListener implements HypervisorHostListener {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (StringUtils.isBlank(sdcId)) {
|
if (StringUtils.isBlank(sdcId)) {
|
||||||
String msg = "Couldn't retrieve SDC details from the host: " + hostId + ", (re)install SDC and restart agent";
|
String msg = "Couldn't retrieve PowerFlex storage SDC details from the host: " + hostId + ", (re)install SDC and restart agent";
|
||||||
logger.warn(msg);
|
logger.warn(msg);
|
||||||
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC details not found on host: " + host.getUuid(), msg);
|
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC details not found on host: " + host.getUuid(), msg);
|
||||||
return false;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!isHostSdcConnected(sdcId, poolId)) {
|
return sdcId;
|
||||||
logger.warn("SDC not connected on the host: " + hostId);
|
|
||||||
String msg = "SDC not connected on the host: " + hostId + ", reconnect the SDC to MDM and restart agent";
|
|
||||||
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC disconnected on host: " + host.getUuid(), msg);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
StoragePoolHostVO storagePoolHost = _storagePoolHostDao.findByPoolHost(poolId, hostId);
|
|
||||||
if (storagePoolHost == null) {
|
|
||||||
storagePoolHost = new StoragePoolHostVO(poolId, hostId, sdcId);
|
|
||||||
_storagePoolHostDao.persist(storagePoolHost);
|
|
||||||
} else {
|
|
||||||
storagePoolHost.setLocalPath(sdcId);
|
|
||||||
_storagePoolHostDao.update(storagePoolHost.getId(), storagePoolHost);
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.info("Connection established between storage pool: " + storagePool + " and host: " + hostId);
|
|
||||||
return true;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private String getHostSdcId(String sdcGuid, long poolId) {
|
private String getHostSdcId(String sdcGuid, long poolId) {
|
||||||
@ -139,16 +143,6 @@ public class ScaleIOHostListener implements HypervisorHostListener {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private boolean isHostSdcConnected(String sdcId, long poolId) {
|
|
||||||
try {
|
|
||||||
ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(poolId, _storagePoolDetailsDao);
|
|
||||||
return client.isSdcConnected(sdcId);
|
|
||||||
} catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) {
|
|
||||||
logger.error("Failed to check host sdc connection", e);
|
|
||||||
throw new CloudRuntimeException("Failed to establish connection with PowerFlex Gateway to check host sdc connection");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private ModifyStoragePoolAnswer sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, long hostId) {
|
private ModifyStoragePoolAnswer sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, long hostId) {
|
||||||
Answer answer = _agentMgr.easySend(hostId, cmd);
|
Answer answer = _agentMgr.easySend(hostId, cmd);
|
||||||
|
|
||||||
@ -157,15 +151,15 @@ public class ScaleIOHostListener implements HypervisorHostListener {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (!answer.getResult()) {
|
if (!answer.getResult()) {
|
||||||
String msg = "Unable to attach storage pool " + storagePool.getId() + " to host " + hostId;
|
String msg = "Unable to attach PowerFlex storage pool " + storagePool.getId() + " to host " + hostId;
|
||||||
|
|
||||||
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg);
|
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg);
|
||||||
|
|
||||||
throw new CloudRuntimeException("Unable to establish a connection from agent to storage pool " + storagePool.getId() + " due to " + answer.getDetails() +
|
throw new CloudRuntimeException("Unable to establish a connection from agent to PowerFlex storage pool " + storagePool.getId() + " due to " + answer.getDetails() +
|
||||||
" (" + storagePool.getId() + ")");
|
" (" + storagePool.getId() + ")");
|
||||||
}
|
}
|
||||||
|
|
||||||
assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected ; Pool = " + storagePool.getId() + " Host = " + hostId;
|
assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected ; PowerFlex Storage Pool = " + storagePool.getId() + " Host = " + hostId;
|
||||||
|
|
||||||
return (ModifyStoragePoolAnswer) answer;
|
return (ModifyStoragePoolAnswer) answer;
|
||||||
}
|
}
|
||||||
|
|||||||
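Note: the net effect of the ScaleIOHostListener rework above is that hostConnect no longer fails a host join when the SDC is missing or disconnected; it drops or refreshes the pool-host mapping and leaves connectivity checks to later consumers, with getSdcIdOfHost signalling "no SDC" by returning null. A minimal sketch of the new contract (a summary, not the committed code):

    String sdcId = getSdcIdOfHost(host, storagePool);   // null when SDC details are missing
    if (StringUtils.isBlank(sdcId)) {
        // forget any stale mapping instead of failing the host connect
        _storagePoolHostDao.deleteStoragePoolHostDetails(hostId, poolId);
    } else {
        // persist or refresh the mapping; the SDC id is stored as the entry's local path
        storagePoolHost.setLocalPath(sdcId);
    }
    return true;   // the host join no longer aborts on a missing SDC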
@@ -50,6 +50,16 @@ public class ScaleIOUtil {

     private static final String RESCAN_CMD = "drv_cfg --rescan";

+    private static final String SDC_SERVICE_STATUS_CMD = "systemctl status scini";
+    private static final String SDC_SERVICE_START_CMD = "systemctl start scini";
+    private static final String SDC_SERVICE_STOP_CMD = "systemctl stop scini";
+    private static final String SDC_SERVICE_RESTART_CMD = "systemctl restart scini";
+
+    private static final String SDC_SERVICE_IS_ACTIVE_CMD = "systemctl is-active scini";
+    private static final String SDC_SERVICE_IS_ENABLED_CMD = "systemctl is-enabled scini";
+    private static final String SDC_SERVICE_ENABLE_CMD = "systemctl enable scini";
+
+    public static final String CONNECTED_SDC_COUNT_STAT = "ConnectedSDCCount";
     /**
      * Cmd for querying volumes in SDC
      * Sample output for cmd: drv_cfg --query_vols:

@@ -183,4 +193,39 @@ public class ScaleIOUtil {

         return String.format("%s:%s", volumePath, volumeName);
     }
+
+    public static boolean isSDCServiceInstalled() {
+        int exitValue = Script.runSimpleBashScriptForExitValue(SDC_SERVICE_STATUS_CMD);
+        return exitValue != 4;
+    }
+
+    public static boolean isSDCServiceActive() {
+        int exitValue = Script.runSimpleBashScriptForExitValue(SDC_SERVICE_IS_ACTIVE_CMD);
+        return exitValue == 0;
+    }
+
+    public static boolean isSDCServiceEnabled() {
+        int exitValue = Script.runSimpleBashScriptForExitValue(SDC_SERVICE_IS_ENABLED_CMD);
+        return exitValue == 0;
+    }
+
+    public static boolean enableSDCService() {
+        int exitValue = Script.runSimpleBashScriptForExitValue(SDC_SERVICE_ENABLE_CMD);
+        return exitValue == 0;
+    }
+
+    public static boolean startSDCService() {
+        int exitValue = Script.runSimpleBashScriptForExitValue(SDC_SERVICE_START_CMD);
+        return exitValue == 0;
+    }
+
+    public static boolean stopSDCService() {
+        int exitValue = Script.runSimpleBashScriptForExitValue(SDC_SERVICE_STOP_CMD);
+        return exitValue == 0;
+    }
+
+    public static boolean restartSDCService() {
+        int exitValue = Script.runSimpleBashScriptForExitValue(SDC_SERVICE_RESTART_CMD);
+        return exitValue == 0;
+    }
 }
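Note: these helpers lean on systemctl exit codes: systemctl status exits with 4 when the unit is unknown (hence exitValue != 4 meaning "installed"), while is-active and is-enabled exit 0 only on success. A sketch of how a caller might combine them to bring the SDC service up (illustrative usage, not code from this change):

    // Make sure the scini (SDC) service is enabled and running.
    if (ScaleIOUtil.isSDCServiceInstalled()) {
        if (!ScaleIOUtil.isSDCServiceEnabled()) {
            ScaleIOUtil.enableSDCService();   // systemctl enable scini
        }
        if (!ScaleIOUtil.isSDCServiceActive()) {
            ScaleIOUtil.startSDCService();    // systemctl start scini
        }
    }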
@@ -32,4 +32,6 @@
     <bean id="scaleioDataStoreProvider"
           class="org.apache.cloudstack.storage.datastore.provider.ScaleIOPrimaryDatastoreProvider" />

+    <bean id="scaleioSDCManager" class="org.apache.cloudstack.storage.datastore.manager.ScaleIOSDCManagerImpl" />
+
 </beans>
@@ -23,6 +23,7 @@ import static com.google.common.truth.Truth.assertThat;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.lenient;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.mockStatic;
 import static org.mockito.Mockito.when;

@@ -130,9 +131,9 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest {
         ScaleIOGatewayClientImpl client = mock(ScaleIOGatewayClientImpl.class);
         ScaleIOGatewayClientConnectionPool pool = mock(ScaleIOGatewayClientConnectionPool.class);
         scaleIOGatewayClientConnectionPoolMocked.when(() -> ScaleIOGatewayClientConnectionPool.getInstance()).thenReturn(pool);
-        when(pool.getClient(1L, storagePoolDetailsDao)).thenReturn(client);
+        lenient().when(pool.getClient(1L, storagePoolDetailsDao)).thenReturn(client);

-        when(client.haveConnectedSdcs()).thenReturn(true);
+        lenient().when(client.haveConnectedSdcs()).thenReturn(true);

         final ZoneScope scope = new ZoneScope(1L);

@@ -64,7 +64,7 @@ import com.cloud.utils.exception.CloudRuntimeException;

 import com.google.common.base.Preconditions;

-public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
+public class SolidFirePrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
     protected Logger logger = LogManager.getLogger(getClass());

     @Inject private CapacityManager _capacityMgr;

@@ -388,4 +388,13 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC
     public void disableStoragePool(DataStore dataStore) {
         _dataStoreHelper.disable(dataStore);
     }
+
+    @Override
+    public void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterScope, HypervisorType hypervisorType) {
+        /*
+         * We need to attach all VMware, Xenserver and KVM hosts in the zone.
+         * So pass hypervisorType as null.
+         */
+        super.changeStoragePoolScopeToZone(store, clusterScope, null);
+    }
 }
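Note: the override above depends on the base implementation treating a null hypervisor type as "no filter". That base-class body is not part of this diff, so the following is an assumption about its behavior, sketched with the host lookups added to ResourceManagerImpl later in this change:

    // Assumed BasePrimaryDataStoreLifeCycleImpl behavior (hypothetical sketch):
    // a null hypervisorType means "every supported hypervisor in the zone".
    List<HostVO> hosts = (hypervisorType != null)
            ? _resourceMgr.listAllHostsInOneZoneNotInClusterByHypervisor(hypervisorType, zoneId, clusterId)
            : _resourceMgr.listAllHostsInOneZoneNotInClusterByHypervisors(
                    Arrays.asList(HypervisorType.KVM, HypervisorType.VMware, HypervisorType.XenServer),
                    zoneId, clusterId);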
@@ -73,7 +73,7 @@ import com.cloud.user.dao.AccountDao;
 import com.cloud.utils.db.GlobalLock;
 import com.cloud.utils.exception.CloudRuntimeException;

-public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
+public class SolidFireSharedPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
     protected Logger logger = LogManager.getLogger(getClass());

     @Inject private AccountDao accountDao;
@@ -61,7 +61,7 @@ import com.cloud.storage.dao.VMTemplateDetailsDao;
 import com.cloud.storage.dao.VMTemplatePoolDao;
 import com.cloud.utils.exception.CloudRuntimeException;

-public class StorPoolPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
+public class StorPoolPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
     protected Logger logger = LogManager.getLogger(getClass());

     @Inject
@@ -2021,8 +2021,8 @@ public class ApiDBUtils {
         return s_volJoinDao.newVolumeView(vr);
     }

-    public static StoragePoolResponse newStoragePoolResponse(StoragePoolJoinVO vr) {
-        return s_poolJoinDao.newStoragePoolResponse(vr);
+    public static StoragePoolResponse newStoragePoolResponse(StoragePoolJoinVO vr, boolean customStats) {
+        return s_poolJoinDao.newStoragePoolResponse(vr, customStats);
     }

     public static StorageTagResponse newStorageTagResponse(StoragePoolTagVO vr) {
@@ -1452,7 +1452,7 @@ public class ApiResponseHelper implements ResponseGenerator {
     @Override
     public StoragePoolResponse createStoragePoolResponse(StoragePool pool) {
         List<StoragePoolJoinVO> viewPools = ApiDBUtils.newStoragePoolView(pool);
-        List<StoragePoolResponse> listPools = ViewResponseHelper.createStoragePoolResponse(viewPools.toArray(new StoragePoolJoinVO[viewPools.size()]));
+        List<StoragePoolResponse> listPools = ViewResponseHelper.createStoragePoolResponse(false, viewPools.toArray(new StoragePoolJoinVO[viewPools.size()]));
         assert listPools != null && listPools.size() == 1 : "There should be one storage pool returned";
         return listPools.get(0);
     }
@@ -71,6 +71,7 @@ import org.apache.cloudstack.api.command.admin.storage.ListStorageTagsCmd;
 import org.apache.cloudstack.api.command.admin.storage.heuristics.ListSecondaryStorageSelectorsCmd;
 import org.apache.cloudstack.api.command.admin.template.ListTemplatesCmdByAdmin;
 import org.apache.cloudstack.api.command.admin.user.ListUsersCmd;
+import org.apache.cloudstack.api.command.admin.vm.ListAffectedVmsForStorageScopeChangeCmd;
 import org.apache.cloudstack.api.command.admin.zone.ListZonesCmdByAdmin;
 import org.apache.cloudstack.api.command.user.account.ListAccountsCmd;
 import org.apache.cloudstack.api.command.user.account.ListProjectAccountsCmd;

@@ -128,6 +129,7 @@ import org.apache.cloudstack.api.response.StorageTagResponse;
 import org.apache.cloudstack.api.response.TemplateResponse;
 import org.apache.cloudstack.api.response.UserResponse;
 import org.apache.cloudstack.api.response.UserVmResponse;
+import org.apache.cloudstack.api.response.VirtualMachineResponse;
 import org.apache.cloudstack.api.response.VolumeResponse;
 import org.apache.cloudstack.api.response.ZoneResponse;
 import org.apache.cloudstack.backup.BackupOfferingVO;

@@ -213,8 +215,10 @@ import com.cloud.api.query.vo.UserVmJoinVO;
 import com.cloud.api.query.vo.VolumeJoinVO;
 import com.cloud.cluster.ManagementServerHostVO;
 import com.cloud.cluster.dao.ManagementServerHostDao;
+import com.cloud.dc.ClusterVO;
 import com.cloud.dc.DataCenter;
 import com.cloud.dc.DedicatedResourceVO;
+import com.cloud.dc.dao.ClusterDao;
 import com.cloud.dc.dao.DedicatedResourceDao;
 import com.cloud.domain.Domain;
 import com.cloud.domain.DomainVO;

@@ -594,6 +598,10 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
     @Inject
     private StoragePoolHostDao storagePoolHostDao;

+    @Inject
+    private ClusterDao clusterDao;
+

     private SearchCriteria<ServiceOfferingJoinVO> getMinimumCpuServiceOfferingJoinSearchCriteria(int cpu) {
         SearchCriteria<ServiceOfferingJoinVO> sc = _srvOfferingJoinDao.createSearchCriteria();
         SearchCriteria<ServiceOfferingJoinVO> sc1 = _srvOfferingJoinDao.createSearchCriteria();

@@ -1148,6 +1156,58 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
         return response;
     }

+    @Override
+    public ListResponse<VirtualMachineResponse> listAffectedVmsForStorageScopeChange(ListAffectedVmsForStorageScopeChangeCmd cmd) {
+        Long poolId = cmd.getStorageId();
+        StoragePoolVO pool = storagePoolDao.findById(poolId);
+        if (pool == null) {
+            throw new IllegalArgumentException("Unable to find storage pool with ID: " + poolId);
+        }
+
+        ListResponse<VirtualMachineResponse> response = new ListResponse<>();
+        List<VirtualMachineResponse> responsesList = new ArrayList<>();
+        if (pool.getScope() != ScopeType.ZONE) {
+            response.setResponses(responsesList, 0);
+            return response;
+        }
+
+        Pair<List<VMInstanceVO>, Integer> vms = _vmInstanceDao.listByVmsNotInClusterUsingPool(cmd.getClusterIdForScopeChange(), poolId);
+        for (VMInstanceVO vm : vms.first()) {
+            VirtualMachineResponse resp = new VirtualMachineResponse();
+            resp.setObjectName(VirtualMachine.class.getSimpleName().toLowerCase());
+            resp.setId(vm.getUuid());
+            resp.setVmType(vm.getType().toString());
+
+            UserVmJoinVO userVM = null;
+            if (!vm.getType().isUsedBySystem()) {
+                userVM = _userVmJoinDao.findById(vm.getId());
+            }
+            if (userVM != null) {
+                if (userVM.getDisplayName() != null) {
+                    resp.setVmName(userVM.getDisplayName());
+                } else {
+                    resp.setVmName(userVM.getName());
+                }
+            } else {
+                resp.setVmName(vm.getInstanceName());
+            }
+
+            HostVO host = hostDao.findById(vm.getHostId());
+            if (host != null) {
+                resp.setHostId(host.getUuid());
+                resp.setHostName(host.getName());
+                ClusterVO cluster = clusterDao.findById(host.getClusterId());
+                if (cluster != null) {
+                    resp.setClusterId(cluster.getUuid());
+                    resp.setClusterName(cluster.getName());
+                }
+            }
+            responsesList.add(resp);
+        }
+        response.setResponses(responsesList, vms.second());
+        return response;
+    }
+
     private Object getObjectPossibleMethodValue(Object obj, String methodName) {
         Object result = null;

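Note: the new listing only reports anything when a zone-wide pool is being narrowed to a single cluster; every VM with volumes on the pool that runs on a host outside that cluster would lose access, and those are the "affected" VMs. The core query, restated (names from the method above; the surrounding call site is illustrative):

    // For a ZONE-scoped pool being restricted to clusterId, the affected VMs are
    // the ones using the pool from some other cluster.
    Pair<List<VMInstanceVO>, Integer> affected =
            _vmInstanceDao.listByVmsNotInClusterUsingPool(clusterId, poolId);
    List<VMInstanceVO> rows = affected.first();    // VM rows to report
    Integer total = affected.second();             // total count for the paged response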
@@ -2971,7 +3031,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
     public ListResponse<StoragePoolResponse> searchForStoragePools(ListStoragePoolsCmd cmd) {
         Pair<List<StoragePoolJoinVO>, Integer> result = (ScopeType.HOST.name().equalsIgnoreCase(cmd.getScope()) && cmd.getHostId() != null) ?
                 searchForLocalStorages(cmd) : searchForStoragePoolsInternal(cmd);
-        return createStoragesPoolResponse(result);
+        return createStoragesPoolResponse(result, cmd.getCustomStats());
     }

     private Pair<List<StoragePoolJoinVO>, Integer> searchForLocalStorages(ListStoragePoolsCmd cmd) {

@@ -2999,10 +3059,10 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
         }
     }

-    private ListResponse<StoragePoolResponse> createStoragesPoolResponse(Pair<List<StoragePoolJoinVO>, Integer> storagePools) {
+    private ListResponse<StoragePoolResponse> createStoragesPoolResponse(Pair<List<StoragePoolJoinVO>, Integer> storagePools, boolean getCustomStats) {
         ListResponse<StoragePoolResponse> response = new ListResponse<>();

-        List<StoragePoolResponse> poolResponses = ViewResponseHelper.createStoragePoolResponse(storagePools.first().toArray(new StoragePoolJoinVO[storagePools.first().size()]));
+        List<StoragePoolResponse> poolResponses = ViewResponseHelper.createStoragePoolResponse(getCustomStats, storagePools.first().toArray(new StoragePoolJoinVO[storagePools.first().size()]));
         Map<String, Long> poolUuidToIdMap = storagePools.first().stream().collect(Collectors.toMap(StoragePoolJoinVO::getUuid, StoragePoolJoinVO::getId, (a, b) -> a));
         for (StoragePoolResponse poolResponse : poolResponses) {
             DataStore store = dataStoreManager.getPrimaryDataStore(poolResponse.getId());
@@ -313,14 +313,14 @@ public class ViewResponseHelper {
         return new ArrayList<VolumeResponse>(vrDataList.values());
     }

-    public static List<StoragePoolResponse> createStoragePoolResponse(StoragePoolJoinVO... pools) {
+    public static List<StoragePoolResponse> createStoragePoolResponse(boolean customStats, StoragePoolJoinVO... pools) {
         LinkedHashMap<Long, StoragePoolResponse> vrDataList = new LinkedHashMap<>();
         // Initialise the vrdatalist with the input data
         for (StoragePoolJoinVO vr : pools) {
             StoragePoolResponse vrData = vrDataList.get(vr.getId());
             if (vrData == null) {
                 // first time encountering this vm
-                vrData = ApiDBUtils.newStoragePoolResponse(vr);
+                vrData = ApiDBUtils.newStoragePoolResponse(vr, customStats);
             } else {
                 // update tags
                 vrData = ApiDBUtils.fillStoragePoolDetails(vrData, vr);
@@ -28,7 +28,7 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;

 public interface StoragePoolJoinDao extends GenericDao<StoragePoolJoinVO, Long> {

-    StoragePoolResponse newStoragePoolResponse(StoragePoolJoinVO host);
+    StoragePoolResponse newStoragePoolResponse(StoragePoolJoinVO host, boolean customStats);

     StoragePoolResponse setStoragePoolResponse(StoragePoolResponse response, StoragePoolJoinVO host);

@@ -50,6 +50,9 @@ import com.cloud.utils.StringUtils;
 import com.cloud.utils.db.GenericDaoBase;
 import com.cloud.utils.db.SearchBuilder;
 import com.cloud.utils.db.SearchCriteria;
+import org.apache.commons.collections.MapUtils;
+
+import java.util.Map;

 @Component
 public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Long> implements StoragePoolJoinDao {

@@ -100,7 +103,7 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Lo
     }

     @Override
-    public StoragePoolResponse newStoragePoolResponse(StoragePoolJoinVO pool) {
+    public StoragePoolResponse newStoragePoolResponse(StoragePoolJoinVO pool, boolean customStats) {
         StoragePool storagePool = storagePoolDao.findById(pool.getId());
         StoragePoolResponse poolResponse = new StoragePoolResponse();
         poolResponse.setId(pool.getUuid());

@@ -147,6 +150,13 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Lo
             PrimaryDataStoreDriver driver = (PrimaryDataStoreDriver) store.getDriver();
             long usedIops = driver.getUsedIops(storagePool);
             poolResponse.setAllocatedIops(usedIops);
+
+            if (customStats && driver.poolProvidesCustomStorageStats()) {
+                Map<String, String> storageCustomStats = driver.getCustomStorageStats(storagePool);
+                if (MapUtils.isNotEmpty(storageCustomStats)) {
+                    poolResponse.setCustomStats(storageCustomStats);
+                }
+            }
         }

         // TODO: StatsCollector does not persist data
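Note: the custom-stats flag threads from the listStoragePools API down to the driver, which is only queried when it opts in via poolProvidesCustomStorageStats(). A sketch of a driver opting in (method names match the call sites above; the stat key is the ScaleIOUtil constant added earlier, the value shown is made up):

    @Override
    public boolean poolProvidesCustomStorageStats() {
        return true;
    }

    @Override
    public Map<String, String> getCustomStorageStats(StoragePool pool) {
        Map<String, String> stats = new HashMap<>();
        // e.g. a PowerFlex driver could report the number of connected SDCs
        stats.put(ScaleIOUtil.CONNECTED_SDC_COUNT_STAT, "3");   // illustrative value
        return stats;
    }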
@@ -1652,6 +1652,15 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
         }

         logger.debug("Host: " + host.getId() + (hostCanAccessSPool ? " can" : " cannot") + " access pool: " + pool.getId());
+        if (!hostCanAccessSPool) {
+            if (_storageMgr.canHostPrepareStoragePoolAccess(host, pool)) {
+                logger.debug("Host: " + host.getId() + " can prepare access to pool: " + pool.getId());
+                hostCanAccessSPool = true;
+            } else {
+                logger.debug("Host: " + host.getId() + " cannot prepare access to pool: " + pool.getId());
+            }
+        }
+
         return hostCanAccessSPool;
     }

@@ -717,9 +717,15 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
     @Override
     @DB
     public boolean disassociatePublicIpAddress(long addrId, long userId, Account caller) {

         boolean success = true;
-        IPAddressVO ipToBeDisassociated = _ipAddressDao.findById(addrId);
+        try {
+            IPAddressVO ipToBeDisassociated = _ipAddressDao.acquireInLockTable(addrId);
+
+            if (ipToBeDisassociated == null) {
+                logger.error(String.format("Unable to acquire lock on public IP %s.", addrId));
+                throw new CloudRuntimeException("Unable to acquire lock on public IP.");
+            }
+
             PublicIpQuarantine publicIpQuarantine = null;
             // Cleanup all ip address resources - PF/LB/Static nat rules

@@ -762,6 +768,9 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
             } else if (publicIpQuarantine != null) {
                 removePublicIpAddressFromQuarantine(publicIpQuarantine.getId(), "Public IP address removed from quarantine as there was an error while disassociating it.");
             }
+        } finally {
+            _ipAddressDao.releaseFromLockTable(addrId);
+        }

         return success;
     }
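Note: this change, and the matching ones in the firewall, load balancer, port forwarding, static NAT and remote access VPN managers below, replace a plain findById with the same row-lock discipline so that concurrent rule operations on one public IP serialize. The shape of the pattern (a sketch; that releaseFromLockTable is harmless when the lock was never obtained is an assumption about the DAO, not something this diff states):

    try {
        IPAddressVO ip = _ipAddressDao.acquireInLockTable(ipId);   // row lock, or null
        if (ip == null) {
            throw new CloudRuntimeException("Unable to acquire lock on public IP.");
        }
        // ... validate and mutate rules while holding the lock ...
    } finally {
        _ipAddressDao.releaseFromLockTable(ipId);   // always release, on every path
    }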
@@ -1598,6 +1598,10 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi
         }

         NetworkVO network = _networksDao.findById(networkId);
+        if (network == null) {
+            throw new CloudRuntimeException("Could not find network associated with public IP.");
+        }
+
         NetworkOfferingVO offering = _networkOfferingDao.findById(network.getNetworkOfferingId());
         if (offering.getGuestType() != GuestType.Isolated) {
             return true;
@@ -202,19 +202,19 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService,
         return createFirewallRule(sourceIpAddressId, caller, rule.getXid(), rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol(),
             rule.getSourceCidrList(), null, rule.getIcmpCode(), rule.getIcmpType(), null, rule.getType(), rule.getNetworkId(), rule.getTrafficType(), rule.isDisplay());
     }

     //Destination CIDR capability is currently implemented for egress rules only. For others, the field is passed as null.
     @DB
-    protected FirewallRule createFirewallRule(final Long ipAddrId, Account caller, final String xId, final Integer portStart, final Integer portEnd,
-        final String protocol, final List<String> sourceCidrList, final List<String> destCidrList, final Integer icmpCode, final Integer icmpType, final Long relatedRuleId,
-        final FirewallRule.FirewallRuleType type,
-        final Long networkId, final FirewallRule.TrafficType trafficType, final Boolean forDisplay) throws NetworkRuleConflictException {
+    protected FirewallRule createFirewallRule(final Long ipAddrId, Account caller, final String xId, final Integer portStart, final Integer portEnd, final String protocol,
+        final List<String> sourceCidrList, final List<String> destCidrList, final Integer icmpCode, final Integer icmpType, final Long relatedRuleId,
+        final FirewallRule.FirewallRuleType type, final Long networkId, final FirewallRule.TrafficType trafficType, final Boolean forDisplay) throws NetworkRuleConflictException {

         IPAddressVO ipAddress = null;
+        try {
+            // Validate ip address
         if (ipAddrId != null) {
             // this for ingress firewall rule, for egress id is null
-            ipAddress = _ipAddressDao.findById(ipAddrId);
-            // Validate ip address
-            if (ipAddress == null && type == FirewallRule.FirewallRuleType.User) {
+            ipAddress = _ipAddressDao.acquireInLockTable(ipAddrId);
+            if (ipAddress == null) {
                 throw new InvalidParameterValueException("Unable to create firewall rule; " + "couldn't locate IP address by id in the system");
             }
             _networkModel.checkIpForService(ipAddress, Service.Firewall, null);

@@ -247,11 +247,8 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService,

         final Long accountIdFinal = accountId;
         final Long domainIdFinal = domainId;
-        return Transaction.execute(new TransactionCallbackWithException<FirewallRuleVO, NetworkRuleConflictException>() {
-            @Override
-            public FirewallRuleVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException {
-                FirewallRuleVO newRule =
-                    new FirewallRuleVO(xId, ipAddrId, portStart, portEnd, protocol.toLowerCase(), networkId, accountIdFinal, domainIdFinal, Purpose.Firewall,
+        return Transaction.execute((TransactionCallbackWithException<FirewallRuleVO, NetworkRuleConflictException>) status -> {
+            FirewallRuleVO newRule = new FirewallRuleVO(xId, ipAddrId, portStart, portEnd, protocol.toLowerCase(), networkId, accountIdFinal, domainIdFinal, Purpose.Firewall,
                     sourceCidrList, destCidrList, icmpCode, icmpType, relatedRuleId, trafficType);
             newRule.setType(type);
             if (forDisplay != null) {

@@ -269,8 +266,12 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService,
             CallContext.current().putContextParameter(FirewallRule.class, newRule.getId());

             return newRule;
-            }
         });
+        } finally {
+            if (ipAddrId != null) {
+                _ipAddressDao.releaseFromLockTable(ipAddrId);
+            }
+        }
     }

     @Override

@@ -676,9 +677,19 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService,
     }

     @Override
+    @DB
     public boolean applyIngressFirewallRules(long ipId, Account caller) throws ResourceUnavailableException {
+        try {
+            IPAddressVO ipAddress = _ipAddressDao.acquireInLockTable(ipId);
+            if (ipAddress == null) {
+                logger.error(String.format("Unable to acquire lock for public IP [%s].", ipId));
+                throw new CloudRuntimeException("Unable to acquire lock for public IP.");
+            }
             List<FirewallRuleVO> rules = _firewallDao.listByIpAndPurpose(ipId, Purpose.Firewall);
             return applyFirewallRules(rules, false, caller);
+        } finally {
+            _ipAddressDao.releaseFromLockTable(ipId);
+        }
     }

     @Override
@@ -1812,13 +1812,12 @@ public class LoadBalancingRulesManagerImpl<Type> extends ManagerBase implements
         }
         return cidr;
     }

     @DB
     @Override
-    public LoadBalancer createPublicLoadBalancer(final String xId, final String name, final String description, final int srcPort, final int destPort,
-        final long sourceIpId,
-        final String protocol, final String algorithm, final boolean openFirewall, final CallContext caller, final String lbProtocol, final Boolean forDisplay, String cidrList)
-        throws NetworkRuleConflictException {
+    public LoadBalancer createPublicLoadBalancer(final String xId, final String name, final String description, final int srcPort, final int destPort, final long sourceIpId,
+        final String protocol, final String algorithm, final boolean openFirewall, final CallContext caller, final String lbProtocol,
+        final Boolean forDisplay, String cidrList) throws NetworkRuleConflictException {

         if (!NetUtils.isValidPort(destPort)) {
             throw new InvalidParameterValueException("privatePort is an invalid value: " + destPort);
         }

@@ -1827,7 +1826,9 @@ public class LoadBalancingRulesManagerImpl<Type> extends ManagerBase implements
             throw new InvalidParameterValueException("Invalid algorithm: " + algorithm);
         }

-        final IPAddressVO ipAddr = _ipAddressDao.findById(sourceIpId);
+        try {
+            final IPAddressVO ipAddr = _ipAddressDao.acquireInLockTable(sourceIpId);
+
         // make sure ip address exists
         if (ipAddr == null || !ipAddr.readyToUse()) {
             InvalidParameterValueException ex = new InvalidParameterValueException("Unable to create load balancer rule, invalid IP address id specified");

@@ -1858,24 +1859,8 @@ public class LoadBalancingRulesManagerImpl<Type> extends ManagerBase implements

         _firewallMgr.validateFirewallRule(caller.getCallingAccount(), ipAddr, srcPort, srcPort, protocol, Purpose.LoadBalancing, FirewallRuleType.User, networkId, null);

-        LoadBalancerVO newRule =
-            new LoadBalancerVO(xId, name, description, sourceIpId, srcPort, destPort, algorithm, networkId, ipAddr.getAllocatedToAccountId(),
-                ipAddr.getAllocatedInDomainId(), lbProtocol, cidrList);
-
-        // verify rule is supported by Lb provider of the network
-        Ip sourceIp = getSourceIp(newRule);
-        LoadBalancingRule loadBalancing =
-            new LoadBalancingRule(newRule, new ArrayList<LbDestination>(), new ArrayList<LbStickinessPolicy>(), new ArrayList<LbHealthCheckPolicy>(), sourceIp, null,
-                lbProtocol);
-        if (!validateLbRule(loadBalancing)) {
-            throw new InvalidParameterValueException("LB service provider cannot support this rule");
-        }
-
-        return Transaction.execute(new TransactionCallbackWithException<LoadBalancerVO, NetworkRuleConflictException>() {
-            @Override
-            public LoadBalancerVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException {
-                LoadBalancerVO newRule =
-                    new LoadBalancerVO(xId, name, description, sourceIpId, srcPort, destPort, algorithm, networkId, ipAddr.getAllocatedToAccountId(),
+        return Transaction.execute((TransactionCallbackWithException<LoadBalancerVO, NetworkRuleConflictException>) status -> {
+            LoadBalancerVO newRule = new LoadBalancerVO(xId, name, description, sourceIpId, srcPort, destPort, algorithm, networkId, ipAddr.getAllocatedToAccountId(),
                     ipAddr.getAllocatedInDomainId(), lbProtocol, cidrList);

             if (forDisplay != null) {

@@ -1884,9 +1869,7 @@ public class LoadBalancingRulesManagerImpl<Type> extends ManagerBase implements

             // verify rule is supported by Lb provider of the network
             Ip sourceIp = getSourceIp(newRule);
-            LoadBalancingRule loadBalancing =
-                new LoadBalancingRule(newRule, new ArrayList<LbDestination>(), new ArrayList<LbStickinessPolicy>(), new ArrayList<LbHealthCheckPolicy>(), sourceIp,
-                    null, lbProtocol);
+            LoadBalancingRule loadBalancing = new LoadBalancingRule(newRule, new ArrayList<>(), new ArrayList<>(), new ArrayList<>(), sourceIp, null, lbProtocol);
             if (!validateLbRule(loadBalancing)) {
                 throw new InvalidParameterValueException("LB service provider cannot support this rule");
             }

@@ -1924,9 +1907,10 @@ public class LoadBalancingRulesManagerImpl<Type> extends ManagerBase implements
                     removeLBRule(newRule);
                 }
             }
-            }
         });
+        } finally {
+            _ipAddressDao.releaseFromLockTable(sourceIpId);
+        }
     }

     @Override
@@ -205,7 +205,8 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules

         final Long ipAddrId = rule.getSourceIpAddressId();

-        IPAddressVO ipAddress = _ipAddressDao.findById(ipAddrId);
+        try {
+            IPAddressVO ipAddress = _ipAddressDao.acquireInLockTable(ipAddrId);

         // Validate ip address
         if (ipAddress == null) {

@@ -307,9 +308,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules

         final Ip dstIpFinal = dstIp;
         final IPAddressVO ipAddressFinal = ipAddress;
-        return Transaction.execute(new TransactionCallbackWithException<PortForwardingRuleVO, NetworkRuleConflictException>() {
-            @Override
-            public PortForwardingRuleVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException {
+        return Transaction.execute((TransactionCallbackWithException<PortForwardingRuleVO, NetworkRuleConflictException>) status -> {
             PortForwardingRuleVO newRule =
                 new PortForwardingRuleVO(rule.getXid(), rule.getSourceIpAddressId(), rule.getSourcePortStart(), rule.getSourcePortEnd(), dstIpFinal,
                     rule.getDestinationPortStart(), rule.getDestinationPortEnd(), rule.getProtocol().toLowerCase(), networkId, accountId, domainId, vmId);

@@ -318,7 +317,6 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules
                 newRule.setDisplay(forDisplay);
             }
             newRule = _portForwardingDao.persist(newRule);

             // create firewallRule for 0.0.0.0/0 cidr
             if (openFirewall) {
                 _firewallMgr.createRuleForAllCidrs(ipAddrId, caller, rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol(), null, null,

@@ -347,9 +345,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules

                 throw new CloudRuntimeException("Unable to add rule for the ip id=" + ipAddrId, e);
             }
-            }
         });

         } finally {
             // release ip address if ipassoc was perfored
             if (performedIpAssoc) {

@@ -358,6 +354,9 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules
                 _vpcMgr.unassignIPFromVpcNetwork(ip.getId(), networkId);
             }
         }
+        } finally {
+            _ipAddressDao.releaseFromLockTable(ipAddrId);
+        }
     }

     @Override

@@ -368,7 +367,8 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules

         final Long ipAddrId = rule.getSourceIpAddressId();

-        IPAddressVO ipAddress = _ipAddressDao.findById(ipAddrId);
+        try {
+            IPAddressVO ipAddress = _ipAddressDao.acquireInLockTable(ipAddrId);

         // Validate ip address
         if (ipAddress == null) {

@@ -394,10 +394,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules

         //String dstIp = _networkModel.getIpInNetwork(ipAddress.getAssociatedWithVmId(), networkId);
         final String dstIp = ipAddress.getVmIp();
-        return Transaction.execute(new TransactionCallbackWithException<StaticNatRule, NetworkRuleConflictException>() {
-            @Override
-            public StaticNatRule doInTransaction(TransactionStatus status) throws NetworkRuleConflictException {
+        return Transaction.execute((TransactionCallbackWithException<StaticNatRule, NetworkRuleConflictException>) status -> {

             FirewallRuleVO newRule =
                 new FirewallRuleVO(rule.getXid(), rule.getSourceIpAddressId(), rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol().toLowerCase(),
                     networkId, accountId, domainId, rule.getPurpose(), null, null, null, null, null);

@@ -419,9 +416,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules
                 UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NET_RULE_ADD, newRule.getAccountId(), 0, newRule.getId(), null, FirewallRule.class.getName(),
                     newRule.getUuid());

-                StaticNatRule staticNatRule = new StaticNatRuleImpl(newRule, dstIp);
-
-                return staticNatRule;
+                return new StaticNatRuleImpl(newRule, dstIp);
             } catch (Exception e) {
                 if (newRule != null) {
                     // no need to apply the rule as it wasn't programmed on the backend yet

@@ -434,9 +429,10 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules
                 }
                 throw new CloudRuntimeException("Unable to add static nat rule for the ip id=" + newRule.getSourceIpAddressId(), e);
             }
-            }
         });
+        } finally {
+            _ipAddressDao.releaseFromLockTable(ipAddrId);
+        }
     }

     @Override
@@ -153,6 +153,7 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc
         return vpns;
     }

+
     @Override
     @DB
     public RemoteAccessVpn createRemoteAccessVpn(final long publicIpId, String ipRange, boolean openFirewall, final Boolean forDisplay) throws NetworkRuleConflictException {

@@ -170,7 +171,13 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc
             throw new InvalidParameterValueException("The Ip address is not ready to be used yet: " + ipAddr.getAddress());
         }

-        IPAddressVO ipAddress = _ipAddressDao.findById(publicIpId);
+        try {
+            IPAddressVO ipAddress = _ipAddressDao.acquireInLockTable(publicIpId);
+
+            if (ipAddress == null) {
+                logger.error(String.format("Unable to acquire lock on public IP %s.", publicIpId));
+                throw new CloudRuntimeException("Unable to acquire lock on public IP.");
+            }

         Long networkId = ipAddress.getAssociatedWithNetworkId();
         if (networkId != null) {

@@ -239,23 +246,22 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc
         final String newIpRange = NetUtils.long2Ip(++startIp) + "-" + range[1];
         final String sharedSecret = PasswordGenerator.generatePresharedKey(_pskLength);

-        return Transaction.execute(new TransactionCallbackWithException<RemoteAccessVpn, NetworkRuleConflictException>() {
-            @Override
-            public RemoteAccessVpn doInTransaction(TransactionStatus status) throws NetworkRuleConflictException {
+        return Transaction.execute((TransactionCallbackWithException<RemoteAccessVpn, NetworkRuleConflictException>) status -> {
             if (vpcId == null) {
                 _rulesMgr.reservePorts(ipAddr, NetUtils.UDP_PROTO, Purpose.Vpn, openFirewallFinal, caller, NetUtils.VPN_PORT, NetUtils.VPN_L2TP_PORT,
                     NetUtils.VPN_NATT_PORT);
             }
-            RemoteAccessVpnVO vpnVO =
-                new RemoteAccessVpnVO(ipAddr.getAccountId(), ipAddr.getDomainId(), ipAddr.getAssociatedWithNetworkId(), publicIpId, vpcId, range[0], newIpRange,
-                    sharedSecret);
+            RemoteAccessVpnVO remoteAccessVpnVO = new RemoteAccessVpnVO(ipAddr.getAccountId(), ipAddr.getDomainId(), ipAddr.getAssociatedWithNetworkId(),
+                    publicIpId, vpcId, range[0], newIpRange, sharedSecret);

             if (forDisplay != null) {
-                vpnVO.setDisplay(forDisplay);
-            }
-            return _remoteAccessVpnDao.persist(vpnVO);
+                remoteAccessVpnVO.setDisplay(forDisplay);
             }
+            return _remoteAccessVpnDao.persist(remoteAccessVpnVO);
         });
+        } finally {
+            _ipAddressDao.releaseFromLockTable(publicIpId);
+        }
     }

     private void validateRemoteAccessVpnConfiguration() throws ConfigurationException {
@ -3409,6 +3409,26 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
         return _hostGpuGroupsDao.customSearch(sc, searchFilter);
     }

+    @Override
+    public List<HostVO> listAllHostsInOneZoneNotInClusterByHypervisor(final HypervisorType type, final long dcId, final long clusterId) {
+        final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
+        sc.and(sc.entity().getHypervisorType(), Op.EQ, type);
+        sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId);
+        sc.and(sc.entity().getClusterId(), Op.NEQ, clusterId);
+        sc.and(sc.entity().getStatus(), Op.EQ, Status.Up);
+        return sc.list();
+    }
+
+    @Override
+    public List<HostVO> listAllHostsInOneZoneNotInClusterByHypervisors(List<HypervisorType> types, final long dcId, final long clusterId) {
+        final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
+        sc.and(sc.entity().getHypervisorType(), Op.IN, types);
+        sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId);
+        sc.and(sc.entity().getClusterId(), Op.NEQ, clusterId);
+        sc.and(sc.entity().getStatus(), Op.EQ, Status.Up);
+        return sc.list();
+    }
+
     @Override
     public boolean isGPUDeviceAvailable(final long hostId, final String groupName, final String vgpuType) {
         if(!listAvailableGPUDevice(hostId, groupName, vgpuType).isEmpty()) {
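Both new lookups answer the same question — which Up hosts of a given hypervisor type in a zone sit outside a particular cluster — differing only in whether one type or several are matched. A rough stream-based equivalent of the criteria, under the assumption stated in the comments (plain records standing in for HostVO; this is not CloudStack's QueryBuilder):

    import java.util.List;
    import java.util.stream.Collectors;

    public class HostFilterSketch {
        enum Hypervisor { KVM, VMWARE, XENSERVER }
        enum Status { UP, DOWN }

        record Host(long id, Hypervisor type, long dcId, long clusterId, Status status) {}

        // Mirrors: hypervisorType IN types AND dataCenterId = dcId
        //          AND clusterId != clusterId AND status = Up
        static List<Host> hostsInZoneNotInCluster(List<Host> all, List<Hypervisor> types,
                                                  long dcId, long clusterId) {
            return all.stream()
                    .filter(h -> types.contains(h.type()))
                    .filter(h -> h.dcId() == dcId)
                    .filter(h -> h.clusterId() != clusterId)
                    .filter(h -> h.status() == Status.UP)
                    .collect(Collectors.toList());
        }

        public static void main(String[] args) {
            List<Host> hosts = List.of(
                    new Host(1, Hypervisor.KVM, 1, 10, Status.UP),   // other cluster -> kept
                    new Host(2, Hypervisor.KVM, 1, 20, Status.UP),   // target cluster -> dropped
                    new Host(3, Hypervisor.KVM, 1, 10, Status.DOWN), // not Up -> dropped
                    new Host(4, Hypervisor.VMWARE, 1, 10, Status.UP) // wrong type -> dropped
            );
            System.out.println(hostsInZoneNotInCluster(hosts, List.of(Hypervisor.KVM), 1, 20));
        }
    }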
@ -211,6 +211,7 @@ import org.apache.cloudstack.api.command.admin.storage.AddImageStoreCmd;
 import org.apache.cloudstack.api.command.admin.storage.AddImageStoreS3CMD;
 import org.apache.cloudstack.api.command.admin.storage.AddObjectStoragePoolCmd;
 import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd;
+import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd;
 import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd;
 import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd;
 import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd;

@ -523,6 +524,7 @@ import org.apache.cloudstack.api.command.user.vm.AddIpToVmNicCmd;
 import org.apache.cloudstack.api.command.user.vm.AddNicToVMCmd;
 import org.apache.cloudstack.api.command.user.vm.DeployVMCmd;
 import org.apache.cloudstack.api.command.user.vm.DestroyVMCmd;
+import org.apache.cloudstack.api.command.admin.vm.ListAffectedVmsForStorageScopeChangeCmd;
 import org.apache.cloudstack.api.command.user.vm.GetVMPasswordCmd;
 import org.apache.cloudstack.api.command.user.vm.ListNicsCmd;
 import org.apache.cloudstack.api.command.user.vm.ListVMsCmd;

@ -3566,6 +3568,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         cmdList.add(UpgradeRouterCmd.class);
         cmdList.add(AddSwiftCmd.class);
         cmdList.add(CancelPrimaryStorageMaintenanceCmd.class);
+        cmdList.add(ChangeStoragePoolScopeCmd.class);
         cmdList.add(CreateStoragePoolCmd.class);
         cmdList.add(DeletePoolCmd.class);
         cmdList.add(ListSwiftsCmd.class);

@ -4003,6 +4006,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         cmdList.add(CreateSecondaryStorageSelectorCmd.class);
         cmdList.add(UpdateSecondaryStorageSelectorCmd.class);
         cmdList.add(RemoveSecondaryStorageSelectorCmd.class);
+        cmdList.add(ListAffectedVmsForStorageScopeChangeCmd.class);


         // Out-of-band management APIs for admins
@ -54,6 +54,7 @@ import org.apache.cloudstack.annotation.AnnotationService;
 import org.apache.cloudstack.annotation.dao.AnnotationDao;
 import org.apache.cloudstack.api.ApiConstants;
 import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd;
+import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd;
 import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd;
 import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd;
 import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd;

@ -256,6 +257,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.vm.DiskProfile;
 import com.cloud.vm.UserVmManager;
 import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.VirtualMachine;
 import com.cloud.vm.VirtualMachine.State;
 import com.cloud.vm.dao.VMInstanceDao;
 import com.google.common.collect.Sets;
@ -410,6 +412,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C

     private final Map<String, HypervisorHostListener> hostListeners = new HashMap<>();

+    private final Set<HypervisorType> zoneWidePoolSupportedHypervisorTypes = Sets.newHashSet(HypervisorType.KVM, HypervisorType.VMware,
+            HypervisorType.Hyperv, HypervisorType.LXC, HypervisorType.Any, HypervisorType.Simulator);
+
     private static final String NFS_MOUNT_OPTIONS_INCORRECT = "An incorrect mount option was specified";

     public boolean share(VMInstanceVO vm, List<VolumeVO> vols, HostVO host, boolean cancelPreviousShare) throws StorageUnavailableException {
@ -569,6 +574,31 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         return storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).canProvideStorageStats();
     }

+    @Override
+    public boolean poolProvidesCustomStorageStats(StoragePool pool) {
+        DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName());
+        DataStoreDriver storeDriver = storeProvider.getDataStoreDriver();
+        return storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).poolProvidesCustomStorageStats();
+    }
+
+    @Override
+    public Map<String, String> getCustomStorageStats(StoragePool pool) {
+        if (pool == null) {
+            return null;
+        }
+
+        if (!pool.isManaged()) {
+            return null;
+        }
+
+        DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName());
+        DataStoreDriver storeDriver = storeProvider.getDataStoreDriver();
+        if (storeDriver instanceof PrimaryDataStoreDriver) {
+            return ((PrimaryDataStoreDriver)storeDriver).getCustomStorageStats(pool);
+        }
+        return null;
+    }
+
     @Override
     public Answer getVolumeStats(StoragePool pool, Command cmd) {
         DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName());
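Both additions follow the codebase's usual capability probe: resolve the pool's provider, fetch its driver, and only delegate when the driver is a PrimaryDataStoreDriver, falling back to a safe default otherwise. A stripped-down sketch of that dispatch (the interfaces here are illustrative stand-ins, not the real CloudStack types):

    import java.util.Collections;
    import java.util.Map;

    public class DriverCapabilitySketch {
        interface DataStoreDriver {}

        interface PrimaryDataStoreDriver extends DataStoreDriver {
            boolean poolProvidesCustomStorageStats();
            Map<String, String> getCustomStorageStats();
        }

        // Delegate only when the resolved driver actually implements the
        // primary-storage interface; otherwise fall back to a safe default.
        static boolean providesCustomStats(DataStoreDriver driver) {
            return driver instanceof PrimaryDataStoreDriver
                    && ((PrimaryDataStoreDriver) driver).poolProvidesCustomStorageStats();
        }

        static Map<String, String> customStats(DataStoreDriver driver) {
            if (driver instanceof PrimaryDataStoreDriver) {
                return ((PrimaryDataStoreDriver) driver).getCustomStorageStats();
            }
            return null;
        }

        public static void main(String[] args) {
            DataStoreDriver plain = new DataStoreDriver() {};
            DataStoreDriver primary = new PrimaryDataStoreDriver() {
                public boolean poolProvidesCustomStorageStats() { return true; }
                public Map<String, String> getCustomStorageStats() {
                    return Collections.singletonMap("iops", "1200");
                }
            };
            System.out.println(providesCustomStats(plain) + " / " + customStats(plain));
            System.out.println(providesCustomStats(primary) + " / " + customStats(primary));
        }
    }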
@ -938,9 +968,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
                 throw new InvalidParameterValueException("Missing parameter hypervisor. Hypervisor type is required to create zone wide primary storage.");
             }

-            Set<HypervisorType> supportedHypervisorTypes = Sets.newHashSet(HypervisorType.KVM, HypervisorType.VMware,
-                    HypervisorType.Hyperv, HypervisorType.LXC, HypervisorType.Any, HypervisorType.Simulator);
-            if (!supportedHypervisorTypes.contains(hypervisorType)) {
+            if (!zoneWidePoolSupportedHypervisorTypes.contains(hypervisorType)) {
                 throw new InvalidParameterValueException("Zone wide storage pool is not supported for hypervisor type " + hypervisor);
             }
         } else {
@ -1220,6 +1248,115 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
     }

+    private void changeStoragePoolScopeToZone(StoragePoolVO primaryStorage) {
+        /*
+         * For cluster wide primary storage the hypervisor type might not be set.
+         * So, get it from the clusterVO.
+         */
+        Long clusterId = primaryStorage.getClusterId();
+        ClusterVO clusterVO = _clusterDao.findById(clusterId);
+        HypervisorType hypervisorType = clusterVO.getHypervisorType();
+        if (!zoneWidePoolSupportedHypervisorTypes.contains(hypervisorType)) {
+            throw new InvalidParameterValueException("Primary storage scope change to Zone is not supported for hypervisor type " + hypervisorType);
+        }
+
+        DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName());
+        PrimaryDataStoreLifeCycle lifeCycle = (PrimaryDataStoreLifeCycle) storeProvider.getDataStoreLifeCycle();
+
+        DataStore primaryStore = _dataStoreMgr.getPrimaryDataStore(primaryStorage.getId());
+        ClusterScope clusterScope = new ClusterScope(primaryStorage.getClusterId(), null, primaryStorage.getDataCenterId());
+
+        lifeCycle.changeStoragePoolScopeToZone(primaryStore, clusterScope, hypervisorType);
+    }
+
+    private void changeStoragePoolScopeToCluster(StoragePoolVO primaryStorage, Long clusterId) {
+        if (clusterId == null) {
+            throw new InvalidParameterValueException("Cluster ID not provided");
+        }
+        ClusterVO clusterVO = _clusterDao.findById(clusterId);
+        if (clusterVO == null) {
+            throw new InvalidParameterValueException("Unable to find cluster by id " + clusterId);
+        }
+        if (clusterVO.getAllocationState().equals(Grouping.AllocationState.Disabled)) {
+            throw new PermissionDeniedException("Cannot perform this operation, Cluster is currently disabled: " + clusterId);
+        }
+
+        List<VirtualMachine.State> states = Arrays.asList(State.Starting, State.Running, State.Stopping, State.Migrating, State.Restoring);
+
+        Long id = primaryStorage.getId();
+        Pair<List<VMInstanceVO>, Integer> vmsNotInClusterUsingPool = _vmInstanceDao.listByVmsNotInClusterUsingPool(clusterId, id);
+        if (vmsNotInClusterUsingPool.second() != 0) {
+            throw new CloudRuntimeException(String.format("Cannot change scope of the storage pool [%s] to cluster [%s] " +
+                    "as there are %s VMs with volumes in this pool that are running on other clusters. " +
+                    "All such User VMs must be stopped and System VMs must be destroyed before proceeding. " +
+                    "Please use the API listAffectedVmsForStorageScopeChange to get the list.",
+                    primaryStorage.getName(), clusterVO.getName(), vmsNotInClusterUsingPool.second()));
+        }
+
+        DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName());
+        PrimaryDataStoreLifeCycle lifeCycle = (PrimaryDataStoreLifeCycle) storeProvider.getDataStoreLifeCycle();
+
+        DataStore primaryStore = _dataStoreMgr.getPrimaryDataStore(id);
+        ClusterScope clusterScope = new ClusterScope(clusterId, clusterVO.getPodId(), primaryStorage.getDataCenterId());
+
+        lifeCycle.changeStoragePoolScopeToCluster(primaryStore, clusterScope, primaryStorage.getHypervisor());
+    }
+
+    @Override
+    @ActionEvent(eventType = EventTypes.EVENT_CHANGE_STORAGE_POOL_SCOPE, eventDescription = "changing storage pool scope")
+    public void changeStoragePoolScope(ChangeStoragePoolScopeCmd cmd) throws IllegalArgumentException, InvalidParameterValueException, PermissionDeniedException {
+        Long id = cmd.getId();
+
+        Long accountId = cmd.getEntityOwnerId();
+        if (!_accountMgr.isRootAdmin(accountId)) {
+            throw new PermissionDeniedException("Only root admin can perform this operation");
+        }
+
+        ScopeType newScope = EnumUtils.getEnumIgnoreCase(ScopeType.class, cmd.getScope());
+        if (newScope != ScopeType.ZONE && newScope != ScopeType.CLUSTER) {
+            throw new InvalidParameterValueException("Invalid scope " + cmd.getScope() + " for Primary storage");
+        }
+
+        StoragePoolVO primaryStorage = _storagePoolDao.findById(id);
+        if (primaryStorage == null) {
+            throw new IllegalArgumentException("Unable to find storage pool with ID: " + id);
+        }
+
+        String eventDetails = String.format("Storage pool Id: %s to %s", primaryStorage.getUuid(), newScope);
+        CallContext.current().setEventDetails(eventDetails);
+
+        ScopeType currentScope = primaryStorage.getScope();
+        if (currentScope.equals(newScope)) {
+            throw new InvalidParameterValueException("New scope must be different than the current scope");
+        }
+
+        if (currentScope != ScopeType.ZONE && currentScope != ScopeType.CLUSTER) {
+            throw new InvalidParameterValueException("This operation is supported only for Primary storages having scope "
+                    + ScopeType.CLUSTER + " or " + ScopeType.ZONE);
+        }
+
+        if (!primaryStorage.getStatus().equals(StoragePoolStatus.Disabled)) {
+            throw new InvalidParameterValueException("Scope of the Primary storage with id "
+                    + primaryStorage.getUuid() +
+                    " cannot be changed, as it is not in the Disabled state");
+        }
+
+        Long zoneId = primaryStorage.getDataCenterId();
+        DataCenterVO zone = _dcDao.findById(zoneId);
+        if (zone == null) {
+            throw new InvalidParameterValueException("Unable to find zone by id " + zoneId);
+        }
+        if (zone.getAllocationState().equals(Grouping.AllocationState.Disabled)) {
+            throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId);
+        }
+
+        if (newScope.equals(ScopeType.ZONE)) {
+            changeStoragePoolScopeToZone(primaryStorage);
+        } else {
+            changeStoragePoolScopeToCluster(primaryStorage, cmd.getClusterId());
+        }
+    }
+
 @Override
 public void removeStoragePoolFromCluster(long hostId, String iScsiName, StoragePool storagePool) {
     final Map<String, String> details = new HashMap<>();
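The new changeStoragePoolScope entry point is essentially a guard chain: the caller must be root admin, the target scope must be CLUSTER or ZONE and differ from the current one, the pool must exist and be Disabled, and the owning zone must exist and be enabled, before dispatching to the scope-specific handler. A compact sketch of that ordering, using plain enums and a record instead of the CloudStack types (an illustration of the flow above, not production code):

    public class ScopeChangeGuardSketch {
        enum Scope { HOST, CLUSTER, ZONE }
        enum PoolStatus { UP, DISABLED }

        record Pool(String uuid, Scope scope, PoolStatus status, boolean zoneEnabled) {}

        static void changeScope(Pool pool, Scope newScope, boolean rootAdmin) {
            if (!rootAdmin) {
                throw new IllegalStateException("Only root admin can perform this operation");
            }
            if (newScope != Scope.ZONE && newScope != Scope.CLUSTER) {
                throw new IllegalArgumentException("Invalid scope " + newScope + " for Primary storage");
            }
            if (pool == null) {
                throw new IllegalArgumentException("Unable to find storage pool");
            }
            if (pool.scope() == newScope) {
                throw new IllegalArgumentException("New scope must be different than the current scope");
            }
            if (pool.scope() != Scope.ZONE && pool.scope() != Scope.CLUSTER) {
                throw new IllegalArgumentException("Only CLUSTER or ZONE scoped pools are supported");
            }
            if (pool.status() != PoolStatus.DISABLED) {
                throw new IllegalArgumentException("Pool must be in the Disabled state");
            }
            if (!pool.zoneEnabled()) {
                throw new IllegalStateException("Zone is currently disabled");
            }
            // All guards passed: hand off to the scope-specific handler.
            System.out.println("Dispatching scope change of pool " + pool.uuid() + " to " + newScope);
        }

        public static void main(String[] args) {
            Pool pool = new Pool("pool-uuid-1", Scope.CLUSTER, PoolStatus.DISABLED, true);
            changeScope(pool, Scope.ZONE, true); // passes every guard
        }
    }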
@ -2652,6 +2789,21 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         return (storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).canHostAccessStoragePool(host, pool));
     }

+    @Override
+    public boolean canHostPrepareStoragePoolAccess(Host host, StoragePool pool) {
+        if (host == null || pool == null) {
+            return false;
+        }
+
+        if (!pool.isManaged()) {
+            return true;
+        }
+
+        DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName());
+        DataStoreDriver storeDriver = storeProvider.getDataStoreDriver();
+        return storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).canHostPrepareStoragePoolAccess(host, pool);
+    }
+
     @Override
     @DB
     public Host getHost(long hostId) {
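Note the asymmetric defaults in the check above: null arguments fail closed (false), unmanaged pools fail open (true, since they need no driver-side preparation), and only managed pools consult the driver. A condensed sketch of just that decision ladder (all types here are stand-ins):

    public class PrepareAccessSketch {
        interface Driver {}
        interface PrimaryDriver extends Driver {
            boolean canHostPrepareStoragePoolAccess();
        }

        record Pool(boolean managed, Driver driver) {}

        static boolean canHostPrepare(Object host, Pool pool) {
            if (host == null || pool == null) {
                return false; // fail closed on missing input
            }
            if (!pool.managed()) {
                return true; // unmanaged pools need no driver-side preparation
            }
            return pool.driver() instanceof PrimaryDriver
                    && ((PrimaryDriver) pool.driver()).canHostPrepareStoragePoolAccess();
        }

        public static void main(String[] args) {
            Pool unmanaged = new Pool(false, new Driver() {});
            Pool managed = new Pool(true, (PrimaryDriver) () -> true);
            System.out.println(canHostPrepare("host-1", unmanaged)); // true
            System.out.println(canHostPrepare("host-1", managed));   // true
            System.out.println(canHostPrepare(null, managed));       // false
        }
    }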
@ -3863,6 +4015,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
                 STORAGE_POOL_DISK_WAIT,
                 STORAGE_POOL_CLIENT_TIMEOUT,
                 STORAGE_POOL_CLIENT_MAX_CONNECTIONS,
+                STORAGE_POOL_CONNECTED_CLIENTS_LIMIT,
                 STORAGE_POOL_IO_POLICY,
                 PRIMARY_STORAGE_DOWNLOAD_WAIT,
                 SecStorageMaxMigrateSessions,
@ -18,19 +18,26 @@
 package com.cloud.api.query;

 import com.cloud.api.query.dao.TemplateJoinDao;
+import com.cloud.api.query.dao.UserVmJoinDao;
 import com.cloud.api.query.vo.EventJoinVO;
 import com.cloud.api.query.vo.TemplateJoinVO;
+import com.cloud.api.query.vo.UserVmJoinVO;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.dao.ClusterDao;
 import com.cloud.event.EventVO;
 import com.cloud.event.dao.EventDao;
 import com.cloud.event.dao.EventJoinDao;
 import com.cloud.exception.InvalidParameterValueException;
 import com.cloud.exception.PermissionDeniedException;
+import com.cloud.host.HostVO;
+import com.cloud.host.dao.HostDao;
 import com.cloud.network.Network;
 import com.cloud.network.VNF;
 import com.cloud.network.dao.NetworkVO;
 import com.cloud.server.ResourceTag;
 import com.cloud.storage.BucketVO;
 import com.cloud.storage.VMTemplateVO;
+import com.cloud.storage.ScopeType;
 import com.cloud.storage.dao.BucketDao;
 import com.cloud.storage.dao.VMTemplateDao;
 import com.cloud.user.Account;

@ -43,10 +50,14 @@ import com.cloud.utils.db.EntityManager;
 import com.cloud.utils.db.Filter;
 import com.cloud.utils.db.SearchBuilder;
 import com.cloud.utils.db.SearchCriteria;
+import com.cloud.vm.VMInstanceVO;
 import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.dao.VMInstanceDao;
+
 import org.apache.cloudstack.acl.SecurityChecker;
 import org.apache.cloudstack.api.ApiCommandResourceType;
 import org.apache.cloudstack.api.command.admin.storage.ListObjectStoragePoolsCmd;
+import org.apache.cloudstack.api.command.admin.vm.ListAffectedVmsForStorageScopeChangeCmd;
 import org.apache.cloudstack.api.command.user.bucket.ListBucketsCmd;
 import org.apache.cloudstack.api.command.user.event.ListEventsCmd;
 import org.apache.cloudstack.api.command.user.resource.ListDetailOptionsCmd;

@ -54,10 +65,13 @@ import org.apache.cloudstack.api.response.DetailOptionsResponse;
 import org.apache.cloudstack.api.response.EventResponse;
 import org.apache.cloudstack.api.response.ListResponse;
 import org.apache.cloudstack.api.response.ObjectStoreResponse;
+import org.apache.cloudstack.api.response.VirtualMachineResponse;
 import org.apache.cloudstack.context.CallContext;
 import org.apache.cloudstack.storage.datastore.db.ObjectStoreDao;
 import org.apache.cloudstack.storage.datastore.db.ObjectStoreVO;
 import org.apache.commons.collections.CollectionUtils;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;

@ -68,6 +82,7 @@ import org.mockito.MockedStatic;
 import org.mockito.Mockito;
 import org.mockito.Spy;
 import org.mockito.junit.MockitoJUnitRunner;
+import org.springframework.test.util.ReflectionTestUtils;

 import java.util.ArrayList;
 import java.util.Arrays;
@ -115,11 +130,26 @@ public class QueryManagerImplTest {
     @Mock
     ObjectStoreDao objectStoreDao;

+    @Mock
+    VMInstanceDao vmInstanceDao;
+
+    @Mock
+    PrimaryDataStoreDao storagePoolDao;
+
+    @Mock
+    HostDao hostDao;
+
+    @Mock
+    ClusterDao clusterDao;
+
     @Mock
     BucketDao bucketDao;
     @Mock
     VMTemplateDao templateDao;

+    @Mock
+    UserVmJoinDao userVmJoinDao;
+
     private AccountVO account;
     private UserVO user;
@ -406,4 +436,45 @@ public class QueryManagerImplTest {
         result = queryManager.getHostTagsFromTemplateForServiceOfferingsListing(account, templateId);
         Assert.assertTrue(CollectionUtils.isNotEmpty(result));
     }
+
+    @Test
+    public void testListAffectedVmsForScopeChange() {
+        Long clusterId = 1L;
+        Long poolId = 2L;
+        Long hostId = 3L;
+        Long vmId = 4L;
+        String vmName = "VM1";
+
+        ListAffectedVmsForStorageScopeChangeCmd cmd = new ListAffectedVmsForStorageScopeChangeCmd();
+        ReflectionTestUtils.setField(cmd, "clusterIdForScopeChange", clusterId);
+        ReflectionTestUtils.setField(cmd, "storageId", poolId);
+
+        StoragePoolVO pool = Mockito.mock(StoragePoolVO.class);
+        Mockito.when(pool.getScope()).thenReturn(ScopeType.CLUSTER);
+        Mockito.when(storagePoolDao.findById(poolId)).thenReturn(pool);
+        ListResponse<VirtualMachineResponse> response = queryManager.listAffectedVmsForStorageScopeChange(cmd);
+        Assert.assertEquals(response.getResponses().size(), 0);
+
+        VMInstanceVO instance = Mockito.mock(VMInstanceVO.class);
+        UserVmJoinVO userVM = Mockito.mock(UserVmJoinVO.class);
+        String instanceUuid = String.valueOf(UUID.randomUUID());
+        Pair<List<VMInstanceVO>, Integer> vms = new Pair<>(List.of(instance), 1);
+        HostVO host = Mockito.mock(HostVO.class);
+        ClusterVO cluster = Mockito.mock(ClusterVO.class);
+
+        Mockito.when(pool.getScope()).thenReturn(ScopeType.ZONE);
+        Mockito.when(instance.getUuid()).thenReturn(instanceUuid);
+        Mockito.when(instance.getType()).thenReturn(VirtualMachine.Type.Instance);
+        Mockito.when(instance.getHostId()).thenReturn(hostId);
+        Mockito.when(instance.getId()).thenReturn(vmId);
+        Mockito.when(userVM.getDisplayName()).thenReturn(vmName);
+        Mockito.when(vmInstanceDao.listByVmsNotInClusterUsingPool(clusterId, poolId)).thenReturn(vms);
+        Mockito.when(userVmJoinDao.findById(vmId)).thenReturn(userVM);
+        Mockito.when(hostDao.findById(hostId)).thenReturn(host);
+        Mockito.when(host.getClusterId()).thenReturn(clusterId);
+        Mockito.when(clusterDao.findById(clusterId)).thenReturn(cluster);
+
+        response = queryManager.listAffectedVmsForStorageScopeChange(cmd);
+        Assert.assertEquals(response.getResponses().get(0).getId(), instanceUuid);
+        Assert.assertEquals(response.getResponses().get(0).getName(), vmName);
+    }
 }
@ -431,6 +431,17 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana
         return null;
     }

+    @Override
+    public List<HostVO> listAllHostsInOneZoneNotInClusterByHypervisor(HypervisorType type, long dcId, long clusterId) {
+        return null;
+    }
+
+    @Override
+    public List<HostVO> listAllHostsInOneZoneNotInClusterByHypervisors(List<HypervisorType> types, long dcId, long clusterId) {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
     /* (non-Javadoc)
      * @see com.cloud.resource.ResourceManager#listAvailHypervisorInZone(java.lang.Long, java.lang.Long)
      */
@ -21,17 +21,22 @@ import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;

+import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd;
 import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.DataCenter;
 import com.cloud.dc.DataCenterVO;
+import com.cloud.dc.dao.ClusterDao;
 import com.cloud.dc.dao.DataCenterDao;
 import com.cloud.exception.ConnectionException;
 import com.cloud.exception.InvalidParameterValueException;
 import com.cloud.exception.PermissionDeniedException;
 import com.cloud.host.Host;
-import com.cloud.hypervisor.Hypervisor;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.storage.dao.VolumeDao;
-import com.cloud.user.AccountManager;
+import com.cloud.user.AccountManagerImpl;
 import com.cloud.utils.Pair;
+import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.vm.VMInstanceVO;
 import com.cloud.vm.dao.VMInstanceDao;

@ -54,6 +59,7 @@ import org.mockito.Mockito;
 import org.mockito.Spy;
 import org.mockito.junit.MockitoJUnitRunner;
 import org.mockito.stubbing.Answer;
+import org.springframework.test.util.ReflectionTestUtils;

 import com.cloud.agent.AgentManager;
 import com.cloud.agent.api.Command;
@ -93,10 +99,13 @@ public class StorageManagerImplTest {
     @Mock
     DataCenterDao dataCenterDao;
     @Mock
-    AccountManager accountManager;
+    AccountManagerImpl accountMgr;
     @Mock
     StoragePoolDetailsDao storagePoolDetailsDao;

+    @Mock
+    ClusterDao clusterDao;
+
     @Spy
     @InjectMocks
     private StorageManagerImpl storageManagerImpl;
@ -506,11 +515,74 @@ public class StorageManagerImplTest {
                 .update(StorageManager.DataStoreDownloadFollowRedirects.key(),StorageManager.DataStoreDownloadFollowRedirects.defaultValue());
     }

+    private ChangeStoragePoolScopeCmd mockChangeStoragePoolScopeCmd(String newScope) {
+        ChangeStoragePoolScopeCmd cmd = new ChangeStoragePoolScopeCmd();
+        ReflectionTestUtils.setField(cmd, "id", 1L);
+        ReflectionTestUtils.setField(cmd, "clusterId", 1L);
+        ReflectionTestUtils.setField(cmd, "scope", newScope);
+        return cmd;
+    }
+
+    private StoragePoolVO mockStoragePoolVOForChangeStoragePoolScope(ScopeType currentScope, StoragePoolStatus status) {
+        StoragePoolVO primaryStorage = new StoragePoolVO();
+        primaryStorage.setId(1L);
+        primaryStorage.setDataCenterId(1L);
+        primaryStorage.setClusterId(1L);
+        primaryStorage.setScope(currentScope);
+        primaryStorage.setStatus(status);
+        return primaryStorage;
+    }
+
+    private void prepareTestChangeStoragePoolScope(ScopeType currentScope, StoragePoolStatus status) {
+        final DataCenterVO zone = new DataCenterVO(1L, null, null, null, null, null, null, null, null, null, DataCenter.NetworkType.Advanced, null, null);
+        StoragePoolVO primaryStorage = mockStoragePoolVOForChangeStoragePoolScope(currentScope, status);
+
+        Mockito.when(accountMgr.isRootAdmin(Mockito.any())).thenReturn(true);
+        Mockito.when(dataCenterDao.findById(1L)).thenReturn(zone);
+        Mockito.when(storagePoolDao.findById(1L)).thenReturn(primaryStorage);
+    }
+
+    @Test(expected = InvalidParameterValueException.class)
+    public void testChangeStoragePoolScopeNotDisabledException() {
+        prepareTestChangeStoragePoolScope(ScopeType.CLUSTER, StoragePoolStatus.Initialized);
+
+        ChangeStoragePoolScopeCmd cmd = mockChangeStoragePoolScopeCmd("ZONE");
+        storageManagerImpl.changeStoragePoolScope(cmd);
+    }
+
+    @Test(expected = InvalidParameterValueException.class)
+    public void testChangeStoragePoolScopeToZoneHypervisorNotSupported() {
+        prepareTestChangeStoragePoolScope(ScopeType.CLUSTER, StoragePoolStatus.Disabled);
+
+        final ClusterVO cluster = new ClusterVO();
+        cluster.setHypervisorType(String.valueOf(HypervisorType.XenServer));
+        Mockito.when(clusterDao.findById(1L)).thenReturn(cluster);
+
+        ChangeStoragePoolScopeCmd cmd = mockChangeStoragePoolScopeCmd("ZONE");
+        storageManagerImpl.changeStoragePoolScope(cmd);
+    }
+
+    @Test(expected = CloudRuntimeException.class)
+    public void testChangeStoragePoolScopeToClusterVolumesPresentException() {
+        prepareTestChangeStoragePoolScope(ScopeType.ZONE, StoragePoolStatus.Disabled);
+
+        final ClusterVO cluster = new ClusterVO();
+        Mockito.when(clusterDao.findById(1L)).thenReturn(cluster);
+
+        VMInstanceVO instance = Mockito.mock(VMInstanceVO.class);
+        Pair<List<VMInstanceVO>, Integer> vms = new Pair<>(List.of(instance), 1);
+        Mockito.when(vmInstanceDao.listByVmsNotInClusterUsingPool(1L, 1L)).thenReturn(vms);
+
+        ChangeStoragePoolScopeCmd cmd = mockChangeStoragePoolScopeCmd("CLUSTER");
+        storageManagerImpl.changeStoragePoolScope(cmd);
+    }
+
     @Test
     public void testCheckNFSMountOptionsForCreateNoNFSMountOptions() {
         Map<String, String> details = new HashMap<>();
         try {
-            storageManagerImpl.checkNFSMountOptionsForCreate(details, Hypervisor.HypervisorType.XenServer, "");
+            storageManagerImpl.checkNFSMountOptionsForCreate(details, HypervisorType.XenServer, "");
         } catch (Exception e) {
             Assert.fail();
         }
@ -521,8 +593,8 @@ public class StorageManagerImplTest {
         Map<String, String> details = new HashMap<>();
         details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1");
         InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class,
-                () -> storageManagerImpl.checkNFSMountOptionsForCreate(details, Hypervisor.HypervisorType.XenServer, ""));
-        Assert.assertEquals(exception.getMessage(), "NFS options can not be set for the hypervisor type " + Hypervisor.HypervisorType.XenServer);
+                () -> storageManagerImpl.checkNFSMountOptionsForCreate(details, HypervisorType.XenServer, ""));
+        Assert.assertEquals(exception.getMessage(), "NFS options can not be set for the hypervisor type " + HypervisorType.XenServer);
     }

     @Test
@ -530,7 +602,7 @@ public class StorageManagerImplTest {
         Map<String, String> details = new HashMap<>();
         details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1");
         InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class,
-                () -> storageManagerImpl.checkNFSMountOptionsForCreate(details, Hypervisor.HypervisorType.KVM, ""));
+                () -> storageManagerImpl.checkNFSMountOptionsForCreate(details, HypervisorType.KVM, ""));
         Assert.assertEquals(exception.getMessage(), "NFS options can only be set on pool type " + Storage.StoragePoolType.NetworkFilesystem);
     }

@ -552,7 +624,7 @@ public class StorageManagerImplTest {
         StoragePoolVO pool = new StoragePoolVO();
         Long accountId = 1L;
         details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1");
-        Mockito.when(accountManager.isRootAdmin(accountId)).thenReturn(false);
+        Mockito.when(accountMgr.isRootAdmin(accountId)).thenReturn(false);
         PermissionDeniedException exception = Assert.assertThrows(PermissionDeniedException.class,
                 () -> storageManagerImpl.checkNFSMountOptionsForUpdate(details, pool, accountId));
         Assert.assertEquals(exception.getMessage(), "Only root admin can modify nfs options");

@ -564,11 +636,11 @@ public class StorageManagerImplTest {
         StoragePoolVO pool = new StoragePoolVO();
         Long accountId = 1L;
         details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1");
-        Mockito.when(accountManager.isRootAdmin(accountId)).thenReturn(true);
-        pool.setHypervisor(Hypervisor.HypervisorType.XenServer);
+        Mockito.when(accountMgr.isRootAdmin(accountId)).thenReturn(true);
+        pool.setHypervisor(HypervisorType.XenServer);
         InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class,
                 () -> storageManagerImpl.checkNFSMountOptionsForUpdate(details, pool, accountId));
-        Assert.assertEquals(exception.getMessage(), "NFS options can only be set for the hypervisor type " + Hypervisor.HypervisorType.KVM);
+        Assert.assertEquals(exception.getMessage(), "NFS options can only be set for the hypervisor type " + HypervisorType.KVM);
     }

     @Test
@ -577,8 +649,8 @@ public class StorageManagerImplTest {
         StoragePoolVO pool = new StoragePoolVO();
         Long accountId = 1L;
         details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1");
-        Mockito.when(accountManager.isRootAdmin(accountId)).thenReturn(true);
-        pool.setHypervisor(Hypervisor.HypervisorType.KVM);
+        Mockito.when(accountMgr.isRootAdmin(accountId)).thenReturn(true);
+        pool.setHypervisor(HypervisorType.KVM);
         pool.setPoolType(Storage.StoragePoolType.FiberChannel);
         InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class,
                 () -> storageManagerImpl.checkNFSMountOptionsForUpdate(details, pool, accountId));

@ -591,8 +663,8 @@ public class StorageManagerImplTest {
         StoragePoolVO pool = new StoragePoolVO();
         Long accountId = 1L;
         details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1");
-        Mockito.when(accountManager.isRootAdmin(accountId)).thenReturn(true);
-        pool.setHypervisor(Hypervisor.HypervisorType.KVM);
+        Mockito.when(accountMgr.isRootAdmin(accountId)).thenReturn(true);
+        pool.setHypervisor(HypervisorType.KVM);
         pool.setPoolType(Storage.StoragePoolType.NetworkFilesystem);
         pool.setStatus(StoragePoolStatus.Up);
         InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class,

@ -605,7 +677,7 @@ public class StorageManagerImplTest {
         String nfsMountOpts = "vers=4.1, nconnect=4,vers=4.2";
         Map<String, String> details = new HashMap<>();
         details.put(ApiConstants.NFS_MOUNT_OPTIONS, nfsMountOpts);
-        storageManagerImpl.checkNFSMountOptionsForCreate(details, Hypervisor.HypervisorType.KVM, "nfs");
+        storageManagerImpl.checkNFSMountOptionsForCreate(details, HypervisorType.KVM, "nfs");
     }

     @Test(expected = InvalidParameterValueException.class)
@ -614,11 +686,11 @@ public class StorageManagerImplTest {
         Map<String, String> details = new HashMap<>();
         details.put(ApiConstants.NFS_MOUNT_OPTIONS, nfsMountOpts);
         StoragePoolVO pool = new StoragePoolVO();
-        pool.setHypervisor(Hypervisor.HypervisorType.KVM);
+        pool.setHypervisor(HypervisorType.KVM);
         pool.setPoolType(Storage.StoragePoolType.NetworkFilesystem);
         pool.setStatus(StoragePoolStatus.Maintenance);
         Long accountId = 1L;
-        Mockito.when(accountManager.isRootAdmin(accountId)).thenReturn(true);
+        Mockito.when(accountMgr.isRootAdmin(accountId)).thenReturn(true);
         storageManagerImpl.checkNFSMountOptionsForUpdate(details, pool, accountId);
     }
test/integration/smoke/test_primary_storage_scope.py (new file, 176 lines)
@ -0,0 +1,176 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+""" BVT tests for Primary Storage
+"""
+
+# Import System modules
+# Import Local Modules
+from marvin.cloudstackTestCase import *
+from marvin.lib.base import (Host, StoragePool, Cluster, updateStoragePool, changeStoragePoolScope)
+from marvin.lib.common import (get_zone, get_pod, list_clusters)
+from marvin.lib.utils import cleanup_resources
+from nose.plugins.attrib import attr
+
+class TestPrimaryStorageScope(cloudstackTestCase):
+
+    def setUp(self):
+        self.apiclient = self.testClient.getApiClient()
+        self.dbclient = self.testClient.getDbConnection()
+        self.services = self.testClient.getParsedTestDataConfig()
+        self._cleanup = []
+        self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())
+        self.pod = get_pod(self.apiclient, self.zone.id)
+        self.debug(self.services)
+        self.cluster1 = list_clusters(self.apiclient)[0]
+        self.debug(self.cluster1)
+        self.cluster = {
+            'clustername': 'C0_testScope',
+            'clustertype': 'CloudManaged'
+        }
+        self.cluster2 = Cluster.create(self.apiclient,
+                                       self.cluster,
+                                       zoneid=self.zone.id,
+                                       podid=self.pod.id,
+                                       hypervisor=self.cluster1.hypervisortype
+                                       )
+        self._cleanup.append(self.cluster2)
+        self.storage = StoragePool.create(self.apiclient,
+                                          self.services["nfs"],
+                                          scope='ZONE',
+                                          zoneid=self.zone.id,
+                                          hypervisor=self.cluster1.hypervisortype
+                                          )
+        self._cleanup.append(self.storage)
+        self.debug("Created storage pool %s in zone scope" % self.storage.id)
+        return
+
+    def tearDown(self):
+        try:
+            cleanup_resources(self.apiclient, self._cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+        return
+
+    @attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true")
+    def test_01_primary_storage_scope_change(self):
+        """Test primary storage pool scope change
+        """
+
+        # Disable storage pool
+        cmd = updateStoragePool.updateStoragePoolCmd()
+        cmd.id = self.storage.id
+        cmd.enabled = False
+        self.apiclient.updateStoragePool(cmd)
+
+        self.debug("Disabled storage pool : %s" % self.storage.id)
+
+        # Change storage pool scope to Cluster2
+        cmd = changeStoragePoolScope.changeStoragePoolScopeCmd()
+        cmd.id = self.storage.id
+        cmd.scope = "CLUSTER"
+        cmd.clusterid = self.cluster2.id
+        self.apiclient.changeStoragePoolScope(cmd)
+
+        self.debug("Changed scope of storage pool %s to cluster" % self.storage.id)
+
+        pool_id = self.dbclient.execute("select id from storage_pool where uuid=\"" + self.storage.id + "\"")[0][0]
+        host1 = Host.list(self.apiclient, clusterid=self.cluster1.id, listall=True)[0]
+        host1_id = self.dbclient.execute("select id from host where uuid=\"" + host1.id + "\"")[0][0]
+
+        pool_row = self.dbclient.execute("select cluster_id, pod_id, scope from storage_pool where id=" + str(pool_id))[0]
+        capacity_row = self.dbclient.execute("select cluster_id, pod_id from op_host_capacity where capacity_type=3 and host_id=" + str(pool_id))[0]
+        pool_host_rows = self.dbclient.execute("select id from storage_pool_host_ref where host_id=" + str(host1_id) + " and pool_id=" + str(pool_id))
+
+        self.assertIsNotNone(
+            pool_row[0],
+            "Cluster id should not be NULL for cluster scope"
+        )
+        self.assertIsNotNone(
+            pool_row[1],
+            "Pod id should not be NULL for cluster scope"
+        )
+        self.assertEqual(
+            pool_row[2],
+            "CLUSTER",
+            "Storage pool scope not changed to Cluster"
+        )
+        self.assertIsNotNone(
+            capacity_row[0],
+            "Cluster id should not be NULL in the op_host_capacity table"
+        )
+        self.assertIsNotNone(
+            capacity_row[1],
+            "Pod id should not be NULL in the op_host_capacity table"
+        )
+        self.assertEqual(
+            len(pool_host_rows),
+            0,
+            "Storage pool not removed from the storage_pool_host_ref table for host on another cluster"
+        )
+
+        # Change storage pool scope to Zone
+        cmd = changeStoragePoolScope.changeStoragePoolScopeCmd()
+        cmd.id = self.storage.id
+        cmd.scope = "ZONE"
+        self.apiclient.changeStoragePoolScope(cmd)
+
+        self.debug("Changed scope of storage pool %s to zone" % self.storage.id)
+
+        pool_row = self.dbclient.execute("select cluster_id, pod_id, scope from storage_pool where id=" + str(pool_id))[0]
+        capacity_row = self.dbclient.execute("select cluster_id, pod_id from op_host_capacity where capacity_type=3 and host_id=" + str(pool_id))[0]
+        pool_host_rows = self.dbclient.execute("select id from storage_pool_host_ref where host_id=" + str(host1_id) + " and pool_id=" + str(pool_id))
+
+        self.assertIsNone(
+            pool_row[0],
+            "Cluster id not set to NULL for zone scope"
+        )
+        self.assertIsNone(
+            pool_row[1],
+            "Pod id not set to NULL for zone scope"
+        )
+        self.assertEqual(
+            pool_row[2],
+            "ZONE",
+            "Storage pool scope not changed to ZONE"
+        )
+        self.assertIsNone(
+            capacity_row[0],
+            "Cluster id not set to NULL in the op_host_capacity table"
+        )
+        self.assertIsNone(
+            capacity_row[1],
+            "Pod id not set to NULL in the op_host_capacity table"
+        )
+        self.assertEqual(
+            len(pool_host_rows),
+            1,
+            "Storage pool not added to the storage_pool_host_ref table for host on another cluster"
+        )
+
+        # Enable storage pool
+        cmd = updateStoragePool.updateStoragePoolCmd()
+        cmd.id = self.storage.id
+        cmd.enabled = True
+        response = self.apiclient.updateStoragePool(cmd)
+        self.assertEqual(
+            response.state,
+            "Up",
+            "Storage pool couldn't be enabled"
+        )
@ -145,6 +145,7 @@ known_categories = {
     'StorageMaintenance': 'Storage Pool',
     'StoragePool': 'Storage Pool',
     'StorageProvider': 'Storage Pool',
+    'StorageScope' : 'Storage Pool',
     'updateStorageCapabilities' : 'Storage Pool',
     'SecurityGroup': 'Security Group',
     'SSH': 'SSH',
@ -62,6 +62,7 @@
|
|||||||
"label.action.change.password": "Change password",
|
"label.action.change.password": "Change password",
|
||||||
"label.action.clear.webhook.deliveries": "Clear deliveries",
|
"label.action.clear.webhook.deliveries": "Clear deliveries",
|
||||||
"label.action.delete.webhook.deliveries": "Delete deliveries",
|
"label.action.delete.webhook.deliveries": "Delete deliveries",
|
||||||
|
"label.action.change.primary.storage.scope": "Change primary storage scope",
|
||||||
"label.action.configure.stickiness": "Stickiness",
|
"label.action.configure.stickiness": "Stickiness",
|
||||||
"label.action.copy.iso": "Copy ISO",
|
"label.action.copy.iso": "Copy ISO",
|
||||||
"label.action.copy.snapshot": "Copy Snapshot",
|
"label.action.copy.snapshot": "Copy Snapshot",
|
||||||
@ -2546,6 +2547,8 @@
|
|||||||
"message.action.manage.cluster": "Please confirm that you want to manage the cluster.",
|
"message.action.manage.cluster": "Please confirm that you want to manage the cluster.",
|
||||||
"message.action.patch.router": "Please confirm that you want to live patch the router. <br> This operation is equivalent updating the router packages and restarting the Network without cleanup.",
|
"message.action.patch.router": "Please confirm that you want to live patch the router. <br> This operation is equivalent updating the router packages and restarting the Network without cleanup.",
|
||||||
"message.action.patch.systemvm": "Please confirm that you want to patch the System VM.",
|
"message.action.patch.systemvm": "Please confirm that you want to patch the System VM.",
|
||||||
|
"message.action.primary.storage.scope.cluster": "Please confirm that you want to change the scope from zone to the specified cluster.<br>This operation will update the database and disconnect the storage pool from all hosts that were previously connected to the primary storage and are not part of the specified cluster.",
|
||||||
|
"message.action.primary.storage.scope.zone": "Please confirm that you want to change the scope from cluster to zone.<br>This operation will update the database and connect the storage pool to all hosts of the zone running the same hypervisor as set on the storage pool.",
|
||||||
"message.action.primarystorage.enable.maintenance.mode": "Warning: placing the primary storage into maintenance mode will cause all Instances using volumes from it to be stopped. Do you want to continue?",
|
"message.action.primarystorage.enable.maintenance.mode": "Warning: placing the primary storage into maintenance mode will cause all Instances using volumes from it to be stopped. Do you want to continue?",
|
||||||
"message.action.reboot.instance": "Please confirm that you want to reboot this Instance.",
|
"message.action.reboot.instance": "Please confirm that you want to reboot this Instance.",
|
||||||
"message.action.reboot.router": "All services provided by this virtual router will be interrupted. Please confirm that you want to reboot this router.",
|
"message.action.reboot.router": "All services provided by this virtual router will be interrupted. Please confirm that you want to reboot this router.",
|
||||||
@ -2665,6 +2668,8 @@
|
|||||||
"message.change.offering.for.volume.failed": "Change offering for the volume failed",
|
"message.change.offering.for.volume.failed": "Change offering for the volume failed",
|
||||||
"message.change.offering.for.volume.processing": "Changing offering for the volume...",
|
"message.change.offering.for.volume.processing": "Changing offering for the volume...",
|
||||||
"message.change.password": "Please change your password.",
|
"message.change.password": "Please change your password.",
|
||||||
|
"message.change.scope.failed": "Scope change failed",
|
||||||
|
"message.change.scope.processing": "Scope change in progress",
|
||||||
"message.cluster.dedicated": "Cluster Dedicated",
|
"message.cluster.dedicated": "Cluster Dedicated",
|
||||||
"message.cluster.dedication.released": "Cluster dedication released.",
|
"message.cluster.dedication.released": "Cluster dedication released.",
|
||||||
"message.config.health.monitor.failed": "Configure Health Monitor failed",
|
"message.config.health.monitor.failed": "Configure Health Monitor failed",
|
||||||
@@ -3275,6 +3280,7 @@
 "message.success.change.offering": "Successfully changed offering",
 "message.success.change.password": "Successfully changed password for User",
 "message.success.clear.webhook.deliveries": "Successfully cleared webhook deliveries",
+"message.success.change.scope": "Successfully changed scope for storage pool",
 "message.success.config.backup.schedule": "Successfully configured Instance backup schedule",
 "message.success.config.health.monitor": "Successfully configured Health Monitor",
 "message.success.config.sticky.policy": "Successfully configured sticky policy",
@@ -3464,6 +3470,7 @@
 "message.volumes.unmanaged": "Volumes not controlled by CloudStack.",
 "message.vr.alert.upon.network.offering.creation.l2": "As virtual routers are not created for L2 Networks, the compute offering will not be used.",
 "message.vr.alert.upon.network.offering.creation.others": "As none of the obligatory services for creating a virtual router (VPN, DHCP, DNS, Firewall, LB, UserData, SourceNat, StaticNat, PortForwarding) are enabled, the virtual router will not be created and the compute offering will not be used.",
+"message.warn.change.primary.storage.scope": "This feature is tested and supported for the following configurations:<br>KVM - NFS/Ceph - DefaultPrimary<br>VMware - NFS - DefaultPrimary<br>*There might be extra steps involved to make it work for other configurations.",
 "message.warn.filetype": "jpg, jpeg, png, bmp and svg are the only supported image formats.",
 "message.warn.importing.instance.without.nic": "WARNING: This Instance is being imported without NICs and many Network resources will not be available. Consider creating a NIC via vCenter before importing or as soon as the Instance is imported.",
 "message.warn.zone.mtu.update": "Please note that this limit won't affect pre-existing Network’s MTU settings",
@@ -527,7 +527,7 @@
 </span>
 </div>
 <div class="resource-detail-item" v-if="resource.templateid">
-<div class="resource-detail-item__label">{{ $t('label.templatename') }}</div>
+<div class="resource-detail-item__label">{{ resource.templateformat === 'ISO' ? $t('label.iso') : $t('label.templatename') }}</div>
 <div class="resource-detail-item__details">
 <resource-icon v-if="resource.icon" :image="getImage(resource.icon.base64image)" size="1x" style="margin-right: 5px"/>
 <SaveOutlined v-else />
@@ -535,7 +535,7 @@
 </div>
 </div>
 <div class="resource-detail-item" v-if="resource.isoid">
-<div class="resource-detail-item__label">{{ $t('label.iso') }}</div>
+<div class="resource-detail-item__label">{{ $t('label.isoname') }}</div>
 <div class="resource-detail-item__details">
 <resource-icon v-if="resource.icon" :image="getImage(resource.icon.base64image)" size="1x" style="margin-right: 5px"/>
 <UsbOutlined v-else />
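Note: the label now distinguishes ISO-format templates inline. The same choice could be hoisted into a computed property; a sketch, assuming the hypothetical name templateLabel:

// Hypothetical computed property equivalent to the inline ternary above.
computed: {
  templateLabel () {
    return this.resource.templateformat === 'ISO'
      ? this.$t('label.iso')
      : this.$t('label.templatename')
  }
}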
@@ -135,6 +135,26 @@ export default {
 dataView: true,
 show: (record) => { return ['Maintenance', 'PrepareForMaintenance', 'ErrorInMaintenance'].includes(record.state) }
 },
+{
+  api: 'changeStoragePoolScope',
+  icon: 'swap-outlined',
+  label: 'label.action.change.primary.storage.scope',
+  dataView: true,
+  popup: true,
+  show: (record) => {
+    return (record.state === 'Disabled' &&
+      (record.scope === 'CLUSTER' || record.scope === 'ZONE') &&
+      (record.hypervisor === 'KVM' ||
+       record.hypervisor === 'VMware' ||
+       record.hypervisor === 'HyperV' ||
+       record.hypervisor === 'LXC' ||
+       record.hypervisor === 'Any' ||
+       record.hypervisor === 'Simulator')
+    )
+  },
+  component: shallowRef(defineAsyncComponent(() => import('@/views/infra/ChangeStoragePoolScope.vue')))
+},
 {
 api: 'deleteStoragePool',
 icon: 'delete-outlined',
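Note: the show() guard above is a long chain of equality checks. A behavior-equivalent sketch using Array.prototype.includes (the SCOPE_CHANGE_HYPERVISORS constant is illustrative, not something this diff defines):

const SCOPE_CHANGE_HYPERVISORS = ['KVM', 'VMware', 'HyperV', 'LXC', 'Any', 'Simulator']

// Same predicate as the action's show() above, flattened into membership lookups.
const show = (record) =>
  record.state === 'Disabled' &&
  ['CLUSTER', 'ZONE'].includes(record.scope) &&
  SCOPE_CHANGE_HYPERVISORS.includes(record.hypervisor)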
@@ -21,6 +21,7 @@ import {
 ApiOutlined,
 AppstoreOutlined,
 ArrowDownOutlined,
+ArrowRightOutlined,
 ArrowUpOutlined,
 ArrowsAltOutlined,
 AuditOutlined,
@@ -185,6 +186,7 @@ export default {
 app.component('ApiOutlined', ApiOutlined)
 app.component('AppstoreOutlined', AppstoreOutlined)
 app.component('ArrowDownOutlined', ArrowDownOutlined)
+app.component('ArrowRightOutlined', ArrowRightOutlined)
 app.component('ArrowUpOutlined', ArrowUpOutlined)
 app.component('ArrowsAltOutlined', ArrowsAltOutlined)
 app.component('AuditOutlined', AuditOutlined)
ui/src/views/infra/ChangeStoragePoolScope.vue (new file, 223 lines)
@@ -0,0 +1,223 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

<template>
  <a-spin :spinning="loading">
    <div class="form-layout" v-ctrl-enter="handleSubmitForm">
      <div class="form">
        <a-form
          :ref="formRef"
          :model="form"
          :rules="rules"
          layout="vertical"
          @submit="handleSubmitForm">
          <a-alert type="warning">
            <template #message>
              <span
                v-html="(resource.scope === 'ZONE' ? $t('message.action.primary.storage.scope.cluster') : $t('message.action.primary.storage.scope.zone')) +
                  '<br><br>' + $t('message.warn.change.primary.storage.scope')"></span>
            </template>
          </a-alert>
          <p></p>
          <!-- The target cluster is only asked for when narrowing a zone-wide pool. -->
          <a-form-item name="clusterid" ref="clusterid" v-if="resource.scope === 'ZONE'">
            <template #label>
              <tooltip-label :title="$t('label.clustername')" :tooltip="placeholder.clusterid"/>
            </template>
            <a-select
              v-model:value="form.clusterid"
              :placeholder="placeholder.clusterid"
              showSearch
              optionFilterProp="label"
              :filterOption="(input, option) => {
                return option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0
              }"
              @change="handleChangeCluster">
              <a-select-option
                v-for="cluster in clustersList"
                :value="cluster.id"
                :key="cluster.id"
                :label="cluster.name">
                {{ cluster.name }}
              </a-select-option>
            </a-select>
          </a-form-item>

          <div :span="24" class="action-button">
            <a-button @click="closeAction">{{ $t('label.cancel') }}</a-button>
            <a-button @click="handleSubmitForm" ref="submit" type="primary">{{ $t('label.ok') }}</a-button>
          </div>
        </a-form>
      </div>
    </div>
  </a-spin>
</template>

<script>
import { ref, reactive, toRaw } from 'vue'
import { api } from '@/api'
import { mixinForm } from '@/utils/mixin'
import DedicateDomain from '../../components/view/DedicateDomain'
import ResourceIcon from '@/components/view/ResourceIcon'
import TooltipLabel from '@/components/widgets/TooltipLabel'

export default {
  name: 'ChangeStoragePoolScope',
  mixins: [mixinForm],
  components: {
    DedicateDomain,
    ResourceIcon,
    TooltipLabel
  },
  props: {
    resource: {
      type: Object,
      required: true
    }
  },
  data () {
    return {
      loading: false,
      clustersList: [],
      selectedCluster: null,
      placeholder: {
        clusterid: null
      }
    }
  },
  created () {
    this.initForm()
    this.fetchData()
  },
  methods: {
    initForm () {
      this.formRef = ref()
      this.form = reactive({ })
      this.rules = reactive({
        clusterid: [{ required: true, message: this.$t('message.error.select') }]
      })
    },
    fetchData () {
      this.fetchClusters(this.resource.zoneid)
    },
    fetchClusters (zoneId) {
      this.form.clusterid = null
      this.clustersList = []
      if (!zoneId) return
      this.zoneId = zoneId
      this.loading = true
      api('listClusters', { zoneid: zoneId }).then(response => {
        this.clustersList = response.listclustersresponse.cluster || []
        // Optional chaining guards the case where the zone has no clusters.
        this.form.clusterid = this.clustersList[0]?.id || null
        if (this.form.clusterid) {
          this.handleChangeCluster(this.form.clusterid)
        }
      }).catch(error => {
        this.$notifyError(error)
        this.clustersList = []
        this.form.clusterid = null
      }).finally(() => {
        this.loading = false
      })
    },
    handleChangeCluster (value) {
      this.form.clusterid = value
      this.selectedCluster = this.clustersList.find(i => i.id === this.form.clusterid)
    },
    handleSubmitForm () {
      if (this.loading) return
      this.formRef.value.validate().then(() => {
        const formRaw = toRaw(this.form)
        const values = this.handleRemoveFields(formRaw)

        // ZONE -> CLUSTER needs the target cluster; CLUSTER -> ZONE does not.
        if (this.resource.scope === 'ZONE') {
          this.args = {
            id: this.resource.id,
            scope: 'CLUSTER',
            clusterid: values.clusterid
          }
        } else {
          this.args = {
            id: this.resource.id,
            scope: 'ZONE'
          }
        }

        this.changeStoragePoolScope(this.args)
      }).catch(error => {
        this.formRef.value.scrollToField(error.errorFields[0].name)
      })
    },
    closeAction () {
      this.$emit('close-action')
    },
    changeStoragePoolScope (args) {
      api('changeStoragePoolScope', args).then(json => {
        this.$pollJob({
          jobId: json.changestoragepoolscoperesponse.jobid,
          title: this.$t('message.success.change.scope'),
          description: this.resource.name, // the pool's name; args carries no name field
          successMessage: this.$t('message.success.change.scope'),
          successMethod: (result) => {
            this.closeAction()
          },
          errorMessage: this.$t('message.change.scope.failed'),
          loadingMessage: this.$t('message.change.scope.processing'),
          catchMessage: this.$t('error.fetching.async.job.result')
        })
        this.closeAction()
      }).catch(error => {
        this.$notifyError(error)
      }).finally(() => {
        this.loading = false
      })
    }
  }
}
</script>

<style lang="scss">
.form {
  &__label {
    margin-bottom: 5px;

    .required {
      margin-left: 10px;
    }
  }
  &__item {
    margin-bottom: 20px;
  }
  .ant-select {
    width: 85vw;
    @media (min-width: 760px) {
      width: 400px;
    }
  }
}

.required {
  color: #ff0000;
  &-label {
    display: none;
    &--error {
      display: block;
    }
  }
}
</style>
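Note: $pollJob hides the async-job loop. A minimal standalone sketch of the same flow using queryAsyncJobResult, which the poller wraps (the helper name changeScope and the 3-second interval are assumptions; jobstatus 0/1/2 is CloudStack's usual pending/succeeded/failed convention):

import { api } from '@/api'

// Hypothetical helper: start the scope change, then poll the async job by hand.
async function changeScope (args) {
  const json = await api('changeStoragePoolScope', args)
  const jobId = json.changestoragepoolscoperesponse.jobid
  for (;;) {
    const res = await api('queryAsyncJobResult', { jobid: jobId })
    const job = res.queryasyncjobresultresponse
    if (job.jobstatus === 1) return job.jobresult // succeeded
    if (job.jobstatus === 2) throw new Error(job.jobresult.errortext) // failed
    await new Promise(resolve => setTimeout(resolve, 3000)) // still pending; wait and retry
  }
}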