Merge branch '4.19'

This commit is contained in:
Vishesh 2024-06-25 18:53:57 +05:30
commit 3923f80c22
No known key found for this signature in database
GPG Key ID: 4E395186CBFA790B
91 changed files with 1869 additions and 1145 deletions

View File

@@ -43,6 +43,12 @@ public class UpdateHypervisorCapabilitiesCmd extends BaseCmd {
@Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = HypervisorCapabilitiesResponse.class, description = "ID of the hypervisor capability")
private Long id;
@Parameter(name = ApiConstants.HYPERVISOR, type = CommandType.STRING, description = "the hypervisor for which the hypervisor capabilities are to be updated", since = "4.19.1")
private String hypervisor;
@Parameter(name = ApiConstants.HYPERVISOR_VERSION, type = CommandType.STRING, description = "the hypervisor version for which the hypervisor capabilities are to be updated", since = "4.19.1")
private String hypervisorVersion;
@Parameter(name = ApiConstants.SECURITY_GROUP_EANBLED, type = CommandType.BOOLEAN, description = "set true to enable security group for this hypervisor.")
private Boolean securityGroupEnabled;
@@ -73,6 +79,14 @@ public class UpdateHypervisorCapabilitiesCmd extends BaseCmd {
return id;
}
public String getHypervisor() {
return hypervisor;
}
public String getHypervisorVersion() {
return hypervisorVersion;
}
public Long getMaxGuestsLimit() {
return maxGuestsLimit;
}

View File

@@ -201,8 +201,8 @@ public class ImportUnmanagedInstanceCmd extends BaseAsyncCmd {
for (Map<String, String> entry : (Collection<Map<String, String>>)nicNetworkList.values()) {
String nic = entry.get(VmDetailConstants.NIC);
String networkUuid = entry.get(VmDetailConstants.NETWORK);
if (logger.isTraceEnabled()) {
logger.trace(String.format("nic, '%s', goes on net, '%s'", nic, networkUuid));
if (logger.isDebugEnabled()) {
logger.debug(String.format("nic, '%s', goes on net, '%s'", nic, networkUuid));
}
if (StringUtils.isAnyEmpty(nic, networkUuid) || _entityMgr.findByUuid(Network.class, networkUuid) == null) {
throw new InvalidParameterValueException(String.format("Network ID: %s for NIC ID: %s is invalid", networkUuid, nic));
@@ -219,8 +219,8 @@ public class ImportUnmanagedInstanceCmd extends BaseAsyncCmd {
for (Map<String, String> entry : (Collection<Map<String, String>>)nicIpAddressList.values()) {
String nic = entry.get(VmDetailConstants.NIC);
String ipAddress = StringUtils.defaultIfEmpty(entry.get(VmDetailConstants.IP4_ADDRESS), null);
if (logger.isTraceEnabled()) {
logger.trace(String.format("nic, '%s', gets ip, '%s'", nic, ipAddress));
if (logger.isDebugEnabled()) {
logger.debug(String.format("nic, '%s', gets ip, '%s'", nic, ipAddress));
}
if (StringUtils.isEmpty(nic)) {
throw new InvalidParameterValueException(String.format("NIC ID: '%s' is invalid for IP address mapping", nic));

View File

@@ -30,6 +30,15 @@ public interface UnmanagedVMsManager extends VmImportService, UnmanageVMService,
"If set to true, do not remove VM nics (and its MAC addresses) when unmanaging a VM, leaving them allocated but not reserved. " +
"If set to false, nics are removed and MAC addresses can be reassigned", true, ConfigKey.Scope.Zone);
ConfigKey<Integer> RemoteKvmInstanceDisksCopyTimeout = new ConfigKey<>(Integer.class,
"remote.kvm.instance.disks.copy.timeout",
"Advanced",
"30",
"Timeout (in mins) to prepare and copy the disks of remote KVM instance while importing the instance from an external host",
true,
ConfigKey.Scope.Global,
null);
static boolean isSupported(Hypervisor.HypervisorType hypervisorType) {
return hypervisorType == VMware || hypervisorType == KVM;
}

View File

@@ -17,7 +17,6 @@
package com.cloud.agent.api;
@LogLevel(LogLevel.Log4jLevel.Trace)
public class CheckVolumeAnswer extends Answer {
private long size;

View File

@@ -21,7 +21,6 @@ package com.cloud.agent.api;
import com.cloud.agent.api.to.StorageFilerTO;
@LogLevel(LogLevel.Log4jLevel.Trace)
public class CheckVolumeCommand extends Command {
String srcFile;

View File

@@ -17,7 +17,6 @@
package com.cloud.agent.api;
@LogLevel(LogLevel.Log4jLevel.Trace)
public class CopyRemoteVolumeAnswer extends Answer {
private String remoteIp;

View File

@@ -21,16 +21,13 @@ package com.cloud.agent.api;
import com.cloud.agent.api.to.StorageFilerTO;
@LogLevel(LogLevel.Log4jLevel.Trace)
public class CopyRemoteVolumeCommand extends Command {
String remoteIp;
String username;
@LogLevel(LogLevel.Log4jLevel.Off)
String password;
String srcFile;
String tmpPath;
StorageFilerTO storageFilerTO;
public CopyRemoteVolumeCommand(String remoteIp, String username, String password) {

View File

@@ -22,10 +22,10 @@ import org.apache.cloudstack.vm.UnmanagedInstanceTO;
import java.util.HashMap;
import java.util.List;
@LogLevel(LogLevel.Log4jLevel.Trace)
public class GetRemoteVmsAnswer extends Answer {
private String remoteIp;
@LogLevel(LogLevel.Log4jLevel.Trace)
private HashMap<String, UnmanagedInstanceTO> unmanagedInstances;
List<String> vmNames;

View File

@@ -19,11 +19,11 @@
package com.cloud.agent.api;
@LogLevel(LogLevel.Log4jLevel.Trace)
public class GetRemoteVmsCommand extends Command {
String remoteIp;
String username;
@LogLevel(LogLevel.Log4jLevel.Off)
String password;
public GetRemoteVmsCommand(String remoteIp, String username, String password) {

View File

@@ -21,10 +21,10 @@ import java.util.HashMap;
import org.apache.cloudstack.vm.UnmanagedInstanceTO;
@LogLevel(LogLevel.Log4jLevel.Trace)
public class GetUnmanagedInstancesAnswer extends Answer {
private String instanceName;
@LogLevel(LogLevel.Log4jLevel.Trace)
private HashMap<String, UnmanagedInstanceTO> unmanagedInstances;
GetUnmanagedInstancesAnswer() {

View File

@@ -28,10 +28,10 @@ import org.apache.commons.collections.CollectionUtils;
* All managed instances will be filtered while trying to find unmanaged instances.
*/
@LogLevel(LogLevel.Log4jLevel.Trace)
public class GetUnmanagedInstancesCommand extends Command {
String instanceName;
@LogLevel(LogLevel.Log4jLevel.Trace)
List<String> managedInstancesNames;
public GetUnmanagedInstancesCommand() {

2
debian/control vendored
View File

@@ -24,7 +24,7 @@ Description: CloudStack server library
Package: cloudstack-agent
Architecture: all
Depends: ${python:Depends}, ${python3:Depends}, openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), lsb-base (>= 9), openssh-client, qemu-kvm (>= 2.5) | qemu-system-x86 (>= 5.2), libvirt-bin (>= 1.3) | libvirt-daemon-system (>= 3.0), iproute2, ebtables, vlan, ipset, python3-libvirt, ethtool, iptables, cryptsetup, rng-tools, lsb-release, ufw, apparmor
Depends: ${python:Depends}, ${python3:Depends}, openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), lsb-base (>= 9), openssh-client, qemu-kvm (>= 2.5) | qemu-system-x86 (>= 5.2), libvirt-bin (>= 1.3) | libvirt-daemon-system (>= 3.0), iproute2, ebtables, vlan, ipset, python3-libvirt, ethtool, iptables, cryptsetup, rng-tools, lsb-release, ufw, apparmor, cpu-checker
Recommends: init-system-helpers
Conflicts: cloud-agent, cloud-agent-libs, cloud-agent-deps, cloud-agent-scripts
Description: CloudStack agent

View File

@@ -3064,17 +3064,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
@Override
@DB
public boolean shutdownNetwork(final long networkId, final ReservationContext context, final boolean cleanupElements) {
NetworkVO network = _networksDao.findById(networkId);
if (network.getState() == Network.State.Allocated) {
logger.debug("Network is already shutdown: {}", network);
return true;
}
if (network.getState() != Network.State.Implemented && network.getState() != Network.State.Shutdown) {
logger.debug("Network is not implemented: {}", network);
return false;
}
NetworkVO network = null;
try {
//do global lock for the network
network = _networksDao.acquireInLockTable(networkId, NetworkLockTimeout.value());

View File

@@ -1493,18 +1493,17 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
for (VolumeVO vol : vols) {
VolumeInfo volumeInfo = volFactory.getVolume(vol.getId());
DataTO volTO = volumeInfo.getTO();
DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId());
DataStore dataStore = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
disk.setDetails(getDetails(volumeInfo, dataStore));
PrimaryDataStore primaryDataStore = (PrimaryDataStore)dataStore;
// This might impact other managed storages, enable requires access for migration in relevant datastore driver (currently enabled for PowerFlex storage pool only)
if (primaryDataStore.isManaged() && volService.requiresAccessForMigration(volumeInfo, dataStore)) {
volService.grantAccess(volFactory.getVolume(vol.getId()), dest.getHost(), dataStore);
}
// make sure this is done AFTER grantAccess, as grantAccess may change the volume's state
DataTO volTO = volumeInfo.getTO();
DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId());
disk.setDetails(getDetails(volumeInfo, dataStore));
vm.addDisk(disk);
}

View File

@@ -16,6 +16,7 @@
// under the License.
package org.apache.cloudstack.engine.orchestration;
import static org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService.NetworkLockTimeout;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
@@ -69,6 +70,7 @@ import com.cloud.network.guru.NetworkGuru;
import com.cloud.network.vpc.VpcManager;
import com.cloud.network.vpc.VpcVO;
import com.cloud.offerings.NetworkOfferingVO;
import com.cloud.utils.db.EntityManager;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.Ip;
import com.cloud.vm.DomainRouterVO;
@@ -93,7 +95,7 @@ import junit.framework.TestCase;
@RunWith(JUnit4.class)
public class NetworkOrchestratorTest extends TestCase {
NetworkOrchestrator testOrchastrator = Mockito.spy(new NetworkOrchestrator());
NetworkOrchestrator testOrchestrator = Mockito.spy(new NetworkOrchestrator());
private String guruName = "GuestNetworkGuru";
private String dhcpProvider = "VirtualRouter";
@@ -112,21 +114,22 @@ public class NetworkOrchestratorTest extends TestCase {
@Before
public void setUp() {
// make class-scope mocks
testOrchastrator._nicDao = mock(NicDao.class);
testOrchastrator._networksDao = mock(NetworkDao.class);
testOrchastrator._networkModel = mock(NetworkModel.class);
testOrchastrator._nicSecondaryIpDao = mock(NicSecondaryIpDao.class);
testOrchastrator._ntwkSrvcDao = mock(NetworkServiceMapDao.class);
testOrchastrator._nicIpAliasDao = mock(NicIpAliasDao.class);
testOrchastrator._ipAddressDao = mock(IPAddressDao.class);
testOrchastrator._vlanDao = mock(VlanDao.class);
testOrchastrator._networkModel = mock(NetworkModel.class);
testOrchastrator._nicExtraDhcpOptionDao = mock(NicExtraDhcpOptionDao.class);
testOrchastrator.routerDao = mock(DomainRouterDao.class);
testOrchastrator.routerNetworkDao = mock(RouterNetworkDao.class);
testOrchastrator._vpcMgr = mock(VpcManager.class);
testOrchastrator.routerJoinDao = mock(DomainRouterJoinDao.class);
testOrchastrator._ipAddrMgr = mock(IpAddressManager.class);
testOrchestrator._nicDao = mock(NicDao.class);
testOrchestrator._networksDao = mock(NetworkDao.class);
testOrchestrator._networkModel = mock(NetworkModel.class);
testOrchestrator._nicSecondaryIpDao = mock(NicSecondaryIpDao.class);
testOrchestrator._ntwkSrvcDao = mock(NetworkServiceMapDao.class);
testOrchestrator._nicIpAliasDao = mock(NicIpAliasDao.class);
testOrchestrator._ipAddressDao = mock(IPAddressDao.class);
testOrchestrator._vlanDao = mock(VlanDao.class);
testOrchestrator._networkModel = mock(NetworkModel.class);
testOrchestrator._nicExtraDhcpOptionDao = mock(NicExtraDhcpOptionDao.class);
testOrchestrator.routerDao = mock(DomainRouterDao.class);
testOrchestrator.routerNetworkDao = mock(RouterNetworkDao.class);
testOrchestrator._vpcMgr = mock(VpcManager.class);
testOrchestrator.routerJoinDao = mock(DomainRouterJoinDao.class);
testOrchestrator._ipAddrMgr = mock(IpAddressManager.class);
testOrchestrator._entityMgr = mock(EntityManager.class);
DhcpServiceProvider provider = mock(DhcpServiceProvider.class);
Map<Network.Capability, String> capabilities = new HashMap<Network.Capability, String>();
@@ -135,13 +138,13 @@ public class NetworkOrchestratorTest extends TestCase {
when(provider.getCapabilities()).thenReturn(services);
capabilities.put(Network.Capability.DhcpAccrossMultipleSubnets, "true");
when(testOrchastrator._ntwkSrvcDao.getProviderForServiceInNetwork(ArgumentMatchers.anyLong(), ArgumentMatchers.eq(Service.Dhcp))).thenReturn(dhcpProvider);
when(testOrchastrator._networkModel.getElementImplementingProvider(dhcpProvider)).thenReturn(provider);
when(testOrchestrator._ntwkSrvcDao.getProviderForServiceInNetwork(ArgumentMatchers.anyLong(), ArgumentMatchers.eq(Service.Dhcp))).thenReturn(dhcpProvider);
when(testOrchestrator._networkModel.getElementImplementingProvider(dhcpProvider)).thenReturn(provider);
when(guru.getName()).thenReturn(guruName);
List<NetworkGuru> networkGurus = new ArrayList<NetworkGuru>();
networkGurus.add(guru);
testOrchastrator.networkGurus = networkGurus;
testOrchestrator.networkGurus = networkGurus;
when(networkOffering.getGuestType()).thenReturn(GuestType.L2);
when(networkOffering.getId()).thenReturn(networkOfferingId);
@@ -156,21 +159,21 @@ public class NetworkOrchestratorTest extends TestCase {
// make sure that release dhcp will be called
when(vm.getType()).thenReturn(Type.User);
when(testOrchastrator._networkModel.areServicesSupportedInNetwork(network.getId(), Service.Dhcp)).thenReturn(true);
when(testOrchestrator._networkModel.areServicesSupportedInNetwork(network.getId(), Service.Dhcp)).thenReturn(true);
when(network.getTrafficType()).thenReturn(TrafficType.Guest);
when(network.getGuestType()).thenReturn(GuestType.Shared);
when(testOrchastrator._nicDao.listByNetworkIdTypeAndGatewayAndBroadcastUri(nic.getNetworkId(), VirtualMachine.Type.User, nic.getIPv4Gateway(), nic.getBroadcastUri()))
when(testOrchestrator._nicDao.listByNetworkIdTypeAndGatewayAndBroadcastUri(nic.getNetworkId(), VirtualMachine.Type.User, nic.getIPv4Gateway(), nic.getBroadcastUri()))
.thenReturn(new ArrayList<NicVO>());
when(network.getGuruName()).thenReturn(guruName);
when(testOrchastrator._networksDao.findById(nic.getNetworkId())).thenReturn(network);
when(testOrchestrator._networksDao.findById(nic.getNetworkId())).thenReturn(network);
testOrchastrator.removeNic(vm, nic);
testOrchestrator.removeNic(vm, nic);
verify(nic, times(1)).setState(Nic.State.Deallocating);
verify(testOrchastrator._networkModel, times(2)).getElementImplementingProvider(dhcpProvider);
verify(testOrchastrator._ntwkSrvcDao, times(2)).getProviderForServiceInNetwork(network.getId(), Service.Dhcp);
verify(testOrchastrator._networksDao, times(2)).findById(nic.getNetworkId());
verify(testOrchestrator._networkModel, times(2)).getElementImplementingProvider(dhcpProvider);
verify(testOrchestrator._ntwkSrvcDao, times(2)).getProviderForServiceInNetwork(network.getId(), Service.Dhcp);
verify(testOrchestrator._networksDao, times(2)).findById(nic.getNetworkId());
}
@Test
public void testDontRemoveDhcpServiceFromDomainRouter() {
@@ -183,14 +186,14 @@ public class NetworkOrchestratorTest extends TestCase {
when(vm.getType()).thenReturn(Type.DomainRouter);
when(network.getGuruName()).thenReturn(guruName);
when(testOrchastrator._networksDao.findById(nic.getNetworkId())).thenReturn(network);
when(testOrchestrator._networksDao.findById(nic.getNetworkId())).thenReturn(network);
testOrchastrator.removeNic(vm, nic);
testOrchestrator.removeNic(vm, nic);
verify(nic, times(1)).setState(Nic.State.Deallocating);
verify(testOrchastrator._networkModel, never()).getElementImplementingProvider(dhcpProvider);
verify(testOrchastrator._ntwkSrvcDao, never()).getProviderForServiceInNetwork(network.getId(), Service.Dhcp);
verify(testOrchastrator._networksDao, times(1)).findById(nic.getNetworkId());
verify(testOrchestrator._networkModel, never()).getElementImplementingProvider(dhcpProvider);
verify(testOrchestrator._ntwkSrvcDao, never()).getProviderForServiceInNetwork(network.getId(), Service.Dhcp);
verify(testOrchestrator._networksDao, times(1)).findById(nic.getNetworkId());
}
@Test
public void testDontRemoveDhcpServiceWhenNotProvided() {
@@ -201,45 +204,45 @@ public class NetworkOrchestratorTest extends TestCase {
// make sure that release dhcp will *not* be called
when(vm.getType()).thenReturn(Type.User);
when(testOrchastrator._networkModel.areServicesSupportedInNetwork(network.getId(), Service.Dhcp)).thenReturn(false);
when(testOrchestrator._networkModel.areServicesSupportedInNetwork(network.getId(), Service.Dhcp)).thenReturn(false);
when(network.getGuruName()).thenReturn(guruName);
when(testOrchastrator._networksDao.findById(nic.getNetworkId())).thenReturn(network);
when(testOrchestrator._networksDao.findById(nic.getNetworkId())).thenReturn(network);
testOrchastrator.removeNic(vm, nic);
testOrchestrator.removeNic(vm, nic);
verify(nic, times(1)).setState(Nic.State.Deallocating);
verify(testOrchastrator._networkModel, never()).getElementImplementingProvider(dhcpProvider);
verify(testOrchastrator._ntwkSrvcDao, never()).getProviderForServiceInNetwork(network.getId(), Service.Dhcp);
verify(testOrchastrator._networksDao, times(1)).findById(nic.getNetworkId());
verify(testOrchestrator._networkModel, never()).getElementImplementingProvider(dhcpProvider);
verify(testOrchestrator._ntwkSrvcDao, never()).getProviderForServiceInNetwork(network.getId(), Service.Dhcp);
verify(testOrchestrator._networksDao, times(1)).findById(nic.getNetworkId());
}
@Test
public void testCheckL2OfferingServicesEmptyServices() {
when(testOrchastrator._networkModel.listNetworkOfferingServices(networkOfferingId)).thenReturn(new ArrayList<>());
when(testOrchastrator._networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.UserData)).thenReturn(false);
testOrchastrator.checkL2OfferingServices(networkOffering);
when(testOrchestrator._networkModel.listNetworkOfferingServices(networkOfferingId)).thenReturn(new ArrayList<>());
when(testOrchestrator._networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.UserData)).thenReturn(false);
testOrchestrator.checkL2OfferingServices(networkOffering);
}
@Test
public void testCheckL2OfferingServicesUserDataOnly() {
when(testOrchastrator._networkModel.listNetworkOfferingServices(networkOfferingId)).thenReturn(Arrays.asList(Service.UserData));
when(testOrchastrator._networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.UserData)).thenReturn(true);
testOrchastrator.checkL2OfferingServices(networkOffering);
when(testOrchestrator._networkModel.listNetworkOfferingServices(networkOfferingId)).thenReturn(Arrays.asList(Service.UserData));
when(testOrchestrator._networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.UserData)).thenReturn(true);
testOrchestrator.checkL2OfferingServices(networkOffering);
}
@Test(expected = InvalidParameterValueException.class)
public void testCheckL2OfferingServicesMultipleServicesIncludingUserData() {
when(testOrchastrator._networkModel.listNetworkOfferingServices(networkOfferingId)).thenReturn(Arrays.asList(Service.UserData, Service.Dhcp));
when(testOrchastrator._networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.UserData)).thenReturn(true);
testOrchastrator.checkL2OfferingServices(networkOffering);
when(testOrchestrator._networkModel.listNetworkOfferingServices(networkOfferingId)).thenReturn(Arrays.asList(Service.UserData, Service.Dhcp));
when(testOrchestrator._networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.UserData)).thenReturn(true);
testOrchestrator.checkL2OfferingServices(networkOffering);
}
@Test(expected = InvalidParameterValueException.class)
public void testCheckL2OfferingServicesMultipleServicesNotIncludingUserData() {
when(testOrchastrator._networkModel.listNetworkOfferingServices(networkOfferingId)).thenReturn(Arrays.asList(Service.Dns, Service.Dhcp));
when(testOrchastrator._networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.UserData)).thenReturn(false);
testOrchastrator.checkL2OfferingServices(networkOffering);
when(testOrchestrator._networkModel.listNetworkOfferingServices(networkOfferingId)).thenReturn(Arrays.asList(Service.Dns, Service.Dhcp));
when(testOrchestrator._networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.UserData)).thenReturn(false);
testOrchestrator.checkL2OfferingServices(networkOffering);
}
@Test
@@ -251,7 +254,7 @@ public class NetworkOrchestratorTest extends TestCase {
configureTestConfigureNicProfileBasedOnRequestedIpTests(nicProfile, 0l, false, IPAddressVO.State.Free, "192.168.100.1", "255.255.255.0", "00-88-14-4D-4C-FB",
requestedNicProfile, null, "192.168.100.150");
testOrchastrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
testOrchestrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
verifyAndAssert("192.168.100.150", "192.168.100.1", "255.255.255.0", nicProfile, 1, 1);
}
@@ -265,7 +268,7 @@ public class NetworkOrchestratorTest extends TestCase {
configureTestConfigureNicProfileBasedOnRequestedIpTests(nicProfile, 0l, false, IPAddressVO.State.Free, "192.168.100.1", "255.255.255.0", "00-88-14-4D-4C-FB",
requestedNicProfile, "00-88-14-4D-4C-FB", "192.168.100.150");
testOrchastrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
testOrchestrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
verifyAndAssert("192.168.100.150", "192.168.100.1", "255.255.255.0", nicProfile, 1, 0);
}
@@ -292,7 +295,7 @@ public class NetworkOrchestratorTest extends TestCase {
configureTestConfigureNicProfileBasedOnRequestedIpTests(nicProfile, 0l, false, IPAddressVO.State.Free, "192.168.100.1", "255.255.255.0", "00-88-14-4D-4C-FB",
requestedNicProfile, null, requestedIpv4Address);
testOrchastrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
testOrchestrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
verifyAndAssert(null, null, null, nicProfile, 0, 0);
}
@@ -319,7 +322,7 @@ public class NetworkOrchestratorTest extends TestCase {
configureTestConfigureNicProfileBasedOnRequestedIpTests(nicProfile, 0l, false, IPAddressVO.State.Free, ipv4Gateway, "255.255.255.0", "00-88-14-4D-4C-FB",
requestedNicProfile, "00-88-14-4D-4C-FB", "192.168.100.150");
testOrchastrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
testOrchestrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
verifyAndAssert(null, null, null, nicProfile, 1, 0);
}
@@ -345,7 +348,7 @@ public class NetworkOrchestratorTest extends TestCase {
configureTestConfigureNicProfileBasedOnRequestedIpTests(nicProfile, 0l, false, IPAddressVO.State.Free, "192.168.100.1", ipv4Netmask, "00-88-14-4D-4C-FB",
requestedNicProfile, "00-88-14-4D-4C-FB", "192.168.100.150");
testOrchastrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
testOrchestrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
verifyAndAssert(null, null, null, nicProfile, 1, 0);
}
@@ -357,9 +360,9 @@ public class NetworkOrchestratorTest extends TestCase {
configureTestConfigureNicProfileBasedOnRequestedIpTests(nicProfile, 0l, false, IPAddressVO.State.Free, "192.168.100.1", "255.255.255.0", "00-88-14-4D-4C-FB",
requestedNicProfile, "00-88-14-4D-4C-FB", "192.168.100.150");
when(testOrchastrator._vlanDao.findByNetworkIdAndIpv4(Mockito.anyLong(), Mockito.anyString())).thenReturn(null);
when(testOrchestrator._vlanDao.findByNetworkIdAndIpv4(Mockito.anyLong(), Mockito.anyString())).thenReturn(null);
testOrchastrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
testOrchestrator.configureNicProfileBasedOnRequestedIp(requestedNicProfile, nicProfile, network);
verifyAndAssert(null, null, null, nicProfile, 0, 0);
}
@@ -375,21 +378,21 @@ public class NetworkOrchestratorTest extends TestCase {
when(ipVoSpy.getState()).thenReturn(state);
if (ipVoIsNull) {
when(testOrchastrator._ipAddressDao.findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString())).thenReturn(ipVoSpy);
when(testOrchestrator._ipAddressDao.findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString())).thenReturn(ipVoSpy);
} else {
when(testOrchastrator._ipAddressDao.findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString())).thenReturn(ipVoSpy);
when(testOrchestrator._ipAddressDao.findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString())).thenReturn(ipVoSpy);
}
VlanVO vlanSpy = Mockito.spy(new VlanVO(Vlan.VlanType.DirectAttached, "vlanTag", vlanGateway, vlanNetmask, 0l, "192.168.100.100 - 192.168.100.200", 0l, new Long(0l),
"ip6Gateway", "ip6Cidr", "ip6Range"));
Mockito.doReturn(0l).when(vlanSpy).getId();
when(testOrchastrator._vlanDao.findByNetworkIdAndIpv4(Mockito.anyLong(), Mockito.anyString())).thenReturn(vlanSpy);
when(testOrchastrator._ipAddressDao.acquireInLockTable(Mockito.anyLong())).thenReturn(ipVoSpy);
when(testOrchastrator._ipAddressDao.update(Mockito.anyLong(), Mockito.any(IPAddressVO.class))).thenReturn(true);
when(testOrchastrator._ipAddressDao.releaseFromLockTable(Mockito.anyLong())).thenReturn(true);
when(testOrchestrator._vlanDao.findByNetworkIdAndIpv4(Mockito.anyLong(), Mockito.anyString())).thenReturn(vlanSpy);
when(testOrchestrator._ipAddressDao.acquireInLockTable(Mockito.anyLong())).thenReturn(ipVoSpy);
when(testOrchestrator._ipAddressDao.update(Mockito.anyLong(), Mockito.any(IPAddressVO.class))).thenReturn(true);
when(testOrchestrator._ipAddressDao.releaseFromLockTable(Mockito.anyLong())).thenReturn(true);
try {
when(testOrchastrator._networkModel.getNextAvailableMacAddressInNetwork(Mockito.anyLong())).thenReturn(macAddress);
when(testOrchestrator._networkModel.getNextAvailableMacAddressInNetwork(Mockito.anyLong())).thenReturn(macAddress);
} catch (InsufficientAddressCapacityException e) {
e.printStackTrace();
}
@@ -397,9 +400,9 @@ public class NetworkOrchestratorTest extends TestCase {
private void verifyAndAssert(String requestedIpv4Address, String ipv4Gateway, String ipv4Netmask, NicProfile nicProfile, int acquireLockAndCheckIfIpv4IsFreeTimes,
int nextMacAddressTimes) {
verify(testOrchastrator, times(acquireLockAndCheckIfIpv4IsFreeTimes)).acquireLockAndCheckIfIpv4IsFree(Mockito.any(Network.class), Mockito.anyString());
verify(testOrchestrator, times(acquireLockAndCheckIfIpv4IsFreeTimes)).acquireLockAndCheckIfIpv4IsFree(Mockito.any(Network.class), Mockito.anyString());
try {
verify(testOrchastrator._networkModel, times(nextMacAddressTimes)).getNextAvailableMacAddressInNetwork(Mockito.anyLong());
verify(testOrchestrator._networkModel, times(nextMacAddressTimes)).getNextAvailableMacAddressInNetwork(Mockito.anyLong());
} catch (InsufficientAddressCapacityException e) {
e.printStackTrace();
}
@@ -441,27 +444,27 @@ public class NetworkOrchestratorTest extends TestCase {
ipVoSpy.setState(state);
ipVoSpy.setState(state);
if (isIPAddressVONull) {
when(testOrchastrator._ipAddressDao.findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString())).thenReturn(null);
when(testOrchestrator._ipAddressDao.findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString())).thenReturn(null);
} else {
when(testOrchastrator._ipAddressDao.findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString())).thenReturn(ipVoSpy);
when(testOrchestrator._ipAddressDao.findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString())).thenReturn(ipVoSpy);
}
when(testOrchastrator._ipAddressDao.acquireInLockTable(Mockito.anyLong())).thenReturn(ipVoSpy);
when(testOrchastrator._ipAddressDao.releaseFromLockTable(Mockito.anyLong())).thenReturn(true);
when(testOrchastrator._ipAddressDao.update(Mockito.anyLong(), Mockito.any(IPAddressVO.class))).thenReturn(true);
when(testOrchestrator._ipAddressDao.acquireInLockTable(Mockito.anyLong())).thenReturn(ipVoSpy);
when(testOrchestrator._ipAddressDao.releaseFromLockTable(Mockito.anyLong())).thenReturn(true);
when(testOrchestrator._ipAddressDao.update(Mockito.anyLong(), Mockito.any(IPAddressVO.class))).thenReturn(true);
testOrchastrator.acquireLockAndCheckIfIpv4IsFree(network, "192.168.100.150");
testOrchestrator.acquireLockAndCheckIfIpv4IsFree(network, "192.168.100.150");
verify(testOrchastrator._ipAddressDao, Mockito.times(findByIpTimes)).findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString());
verify(testOrchastrator._ipAddressDao, Mockito.times(acquireLockTimes)).acquireInLockTable(Mockito.anyLong());
verify(testOrchastrator._ipAddressDao, Mockito.times(releaseFromLockTimes)).releaseFromLockTable(Mockito.anyLong());
verify(testOrchastrator._ipAddressDao, Mockito.times(updateTimes)).update(Mockito.anyLong(), Mockito.any(IPAddressVO.class));
verify(testOrchastrator, Mockito.times(validateTimes)).validateLockedRequestedIp(Mockito.any(IPAddressVO.class), Mockito.any(IPAddressVO.class));
verify(testOrchestrator._ipAddressDao, Mockito.times(findByIpTimes)).findByIpAndSourceNetworkId(Mockito.anyLong(), Mockito.anyString());
verify(testOrchestrator._ipAddressDao, Mockito.times(acquireLockTimes)).acquireInLockTable(Mockito.anyLong());
verify(testOrchestrator._ipAddressDao, Mockito.times(releaseFromLockTimes)).releaseFromLockTable(Mockito.anyLong());
verify(testOrchestrator._ipAddressDao, Mockito.times(updateTimes)).update(Mockito.anyLong(), Mockito.any(IPAddressVO.class));
verify(testOrchestrator, Mockito.times(validateTimes)).validateLockedRequestedIp(Mockito.any(IPAddressVO.class), Mockito.any(IPAddressVO.class));
}
@Test(expected = InvalidParameterValueException.class)
public void validateLockedRequestedIpTestNullLockedIp() {
IPAddressVO ipVoSpy = Mockito.spy(new IPAddressVO(new Ip("192.168.100.100"), 0l, 0l, 0l, true));
testOrchastrator.validateLockedRequestedIp(ipVoSpy, null);
testOrchestrator.validateLockedRequestedIp(ipVoSpy, null);
}
@Test
@@ -476,7 +479,7 @@ public class NetworkOrchestratorTest extends TestCase {
IPAddressVO lockedIp = ipVoSpy;
lockedIp.setState(states[i]);
try {
testOrchastrator.validateLockedRequestedIp(ipVoSpy, lockedIp);
testOrchestrator.validateLockedRequestedIp(ipVoSpy, lockedIp);
} catch (InvalidParameterValueException e) {
expectedException = true;
}
@@ -489,7 +492,7 @@ public class NetworkOrchestratorTest extends TestCase {
IPAddressVO ipVoSpy = Mockito.spy(new IPAddressVO(new Ip("192.168.100.100"), 0l, 0l, 0l, true));
IPAddressVO lockedIp = ipVoSpy;
lockedIp.setState(State.Free);
testOrchastrator.validateLockedRequestedIp(ipVoSpy, lockedIp);
testOrchestrator.validateLockedRequestedIp(ipVoSpy, lockedIp);
}
@Test
@@ -500,16 +503,16 @@ public class NetworkOrchestratorTest extends TestCase {
when(vm.getType()).thenReturn(Type.User);
when(network.getGuruName()).thenReturn(guruName);
when(testOrchastrator._networksDao.findById(nic.getNetworkId())).thenReturn(network);
when(testOrchestrator._networksDao.findById(nic.getNetworkId())).thenReturn(network);
Long nicId = 1L;
when(nic.getId()).thenReturn(nicId);
when(vm.getParameter(VirtualMachineProfile.Param.PreserveNics)).thenReturn(true);
testOrchastrator.removeNic(vm, nic);
testOrchestrator.removeNic(vm, nic);
verify(nic, never()).setState(Nic.State.Deallocating);
verify(testOrchastrator._nicDao, never()).remove(nicId);
verify(testOrchestrator._nicDao, never()).remove(nicId);
}
public void encodeVlanIdIntoBroadcastUriTestVxlan() {
@@ -568,7 +571,7 @@ public class NetworkOrchestratorTest extends TestCase {
@Test(expected = InvalidParameterValueException.class)
public void encodeVlanIdIntoBroadcastUriTestNullNetwork() {
URI resultUri = testOrchastrator.encodeVlanIdIntoBroadcastUri("vxlan://123", null);
URI resultUri = testOrchestrator.encodeVlanIdIntoBroadcastUri("vxlan://123", null);
}
private void encodeVlanIdIntoBroadcastUriPrepareAndTest(String vlanId, String isolationMethod, String expectedIsolation, String expectedUri) {
@ -577,7 +580,7 @@ public class NetworkOrchestratorTest extends TestCase {
isolationMethods.add(isolationMethod);
physicalNetwork.setIsolationMethods(isolationMethods);
URI resultUri = testOrchastrator.encodeVlanIdIntoBroadcastUri(vlanId, physicalNetwork);
URI resultUri = testOrchestrator.encodeVlanIdIntoBroadcastUri(vlanId, physicalNetwork);
Assert.assertEquals(expectedIsolation, resultUri.getScheme());
Assert.assertEquals(expectedUri, resultUri.toString());
@ -595,17 +598,17 @@ public class NetworkOrchestratorTest extends TestCase {
Mockito.when(network.getDns2()).thenReturn(ip4Dns[1]);
Mockito.when(network.getIp6Dns1()).thenReturn(ip6Dns[0]);
Mockito.when(network.getIp6Dns2()).thenReturn(ip6Dns[1]);
Mockito.when(testOrchastrator._networkModel.getNetworkRate(networkId, vmId)).thenReturn(networkRate);
Mockito.when(testOrchestrator._networkModel.getNetworkRate(networkId, vmId)).thenReturn(networkRate);
NicVO nicVO = Mockito.mock(NicVO.class);
Mockito.when(nicVO.isDefaultNic()).thenReturn(isDefaultNic);
Mockito.when(testOrchastrator._nicDao.findById(nicId)).thenReturn(nicVO);
Mockito.when(testOrchastrator._nicDao.update(nicId, nicVO)).thenReturn(true);
Mockito.when(testOrchastrator._networkModel.isSecurityGroupSupportedInNetwork(network)).thenReturn(false);
Mockito.when(testOrchastrator._networkModel.getNetworkTag(hypervisorType, network)).thenReturn(null);
Mockito.when(testOrchastrator._ntwkSrvcDao.getDistinctProviders(networkId)).thenReturn(new ArrayList<>());
testOrchastrator.networkElements = new ArrayList<>();
Mockito.when(testOrchastrator._nicExtraDhcpOptionDao.listByNicId(nicId)).thenReturn(new ArrayList<>());
Mockito.when(testOrchastrator._ntwkSrvcDao.areServicesSupportedInNetwork(networkId, Service.Dhcp)).thenReturn(false);
Mockito.when(testOrchestrator._nicDao.findById(nicId)).thenReturn(nicVO);
Mockito.when(testOrchestrator._nicDao.update(nicId, nicVO)).thenReturn(true);
Mockito.when(testOrchestrator._networkModel.isSecurityGroupSupportedInNetwork(network)).thenReturn(false);
Mockito.when(testOrchestrator._networkModel.getNetworkTag(hypervisorType, network)).thenReturn(null);
Mockito.when(testOrchestrator._ntwkSrvcDao.getDistinctProviders(networkId)).thenReturn(new ArrayList<>());
testOrchestrator.networkElements = new ArrayList<>();
Mockito.when(testOrchestrator._nicExtraDhcpOptionDao.listByNicId(nicId)).thenReturn(new ArrayList<>());
Mockito.when(testOrchestrator._ntwkSrvcDao.areServicesSupportedInNetwork(networkId, Service.Dhcp)).thenReturn(false);
VirtualMachineProfile virtualMachineProfile = Mockito.mock(VirtualMachineProfile.class);
Mockito.when(virtualMachineProfile.getType()).thenReturn(vmType);
Mockito.when(virtualMachineProfile.getId()).thenReturn(vmId);
@ -634,7 +637,7 @@ public class NetworkOrchestratorTest extends TestCase {
Mockito.when(vpcVO.getIp4Dns1()).thenReturn(null);
Mockito.when(vpcVO.getIp6Dns1()).thenReturn(null);
}
Mockito.when(testOrchastrator._vpcMgr.getActiveVpc(vpcId)).thenReturn(vpcVO);
Mockito.when(testOrchestrator._vpcMgr.getActiveVpc(vpcId)).thenReturn(vpcVO);
} else {
Mockito.when(routerVO.getVpcId()).thenReturn(null);
Long routerNetworkId = 2L;
@ -648,13 +651,13 @@ public class NetworkOrchestratorTest extends TestCase {
Mockito.when(routerNetworkVO.getDns1()).thenReturn(null);
Mockito.when(routerNetworkVO.getIp6Dns1()).thenReturn(null);
}
Mockito.when(testOrchastrator.routerNetworkDao.getRouterNetworks(vmId)).thenReturn(List.of(routerNetworkId));
Mockito.when(testOrchastrator._networksDao.findById(routerNetworkId)).thenReturn(routerNetworkVO);
Mockito.when(testOrchestrator.routerNetworkDao.getRouterNetworks(vmId)).thenReturn(List.of(routerNetworkId));
Mockito.when(testOrchestrator._networksDao.findById(routerNetworkId)).thenReturn(routerNetworkVO);
}
Mockito.when(testOrchastrator.routerDao.findById(vmId)).thenReturn(routerVO);
Mockito.when(testOrchestrator.routerDao.findById(vmId)).thenReturn(routerVO);
NicProfile profile = null;
try {
profile = testOrchastrator.prepareNic(virtualMachineProfile, deployDestination, reservationContext, nicId, network);
profile = testOrchestrator.prepareNic(virtualMachineProfile, deployDestination, reservationContext, nicId, network);
} catch (InsufficientCapacityException | ResourceUnavailableException e) {
Assert.fail(String.format("Failure with exception %s", e.getMessage()));
}
@ -723,7 +726,7 @@ public class NetworkOrchestratorTest extends TestCase {
Mockito.when(dataCenter.getNetworkType()).thenReturn(DataCenter.NetworkType.Advanced);
Mockito.when(network.getGateway()).thenReturn(networkGateway);
Mockito.when(network.getCidr()).thenReturn(networkCidr);
Pair<String, String> pair = testOrchastrator.getNetworkGatewayAndNetmaskForNicImport(network, dataCenter, ipAddress);
Pair<String, String> pair = testOrchestrator.getNetworkGatewayAndNetmaskForNicImport(network, dataCenter, ipAddress);
Assert.assertNotNull(pair);
Assert.assertEquals(networkGateway, pair.first());
Assert.assertEquals(networkNetmask, pair.second());
@ -743,9 +746,9 @@ public class NetworkOrchestratorTest extends TestCase {
Mockito.when(vlan.getVlanNetmask()).thenReturn(defaultNetworkNetmask);
Mockito.when(dataCenter.getNetworkType()).thenReturn(DataCenter.NetworkType.Basic);
Mockito.when(ipAddressVO.getVlanId()).thenReturn(1L);
Mockito.when(testOrchastrator._vlanDao.findById(1L)).thenReturn(vlan);
Mockito.when(testOrchastrator._ipAddressDao.findByIp(ipAddress)).thenReturn(ipAddressVO);
Pair<String, String> pair = testOrchastrator.getNetworkGatewayAndNetmaskForNicImport(network, dataCenter, ipAddress);
Mockito.when(testOrchestrator._vlanDao.findById(1L)).thenReturn(vlan);
Mockito.when(testOrchestrator._ipAddressDao.findByIp(ipAddress)).thenReturn(ipAddressVO);
Pair<String, String> pair = testOrchestrator.getNetworkGatewayAndNetmaskForNicImport(network, dataCenter, ipAddress);
Assert.assertNotNull(pair);
Assert.assertEquals(defaultNetworkGateway, pair.first());
Assert.assertEquals(defaultNetworkNetmask, pair.second());
@ -757,7 +760,7 @@ public class NetworkOrchestratorTest extends TestCase {
DataCenter dataCenter = Mockito.mock(DataCenter.class);
Network.IpAddresses ipAddresses = Mockito.mock(Network.IpAddresses.class);
Mockito.when(network.getGuestType()).thenReturn(GuestType.L2);
Assert.assertNull(testOrchastrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses));
Assert.assertNull(testOrchestrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses));
}
@Test
@ -769,8 +772,8 @@ public class NetworkOrchestratorTest extends TestCase {
Mockito.when(dataCenter.getNetworkType()).thenReturn(DataCenter.NetworkType.Advanced);
String ipAddress = "10.1.10.10";
Mockito.when(ipAddresses.getIp4Address()).thenReturn(ipAddress);
Mockito.when(testOrchastrator._ipAddrMgr.acquireGuestIpAddress(network, ipAddress)).thenReturn(ipAddress);
String guestIp = testOrchastrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
Mockito.when(testOrchestrator._ipAddrMgr.acquireGuestIpAddress(network, ipAddress)).thenReturn(ipAddress);
String guestIp = testOrchestrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
Assert.assertEquals(ipAddress, guestIp);
}
@ -791,8 +794,8 @@ public class NetworkOrchestratorTest extends TestCase {
Mockito.when(ipAddressVO.getState()).thenReturn(State.Free);
Mockito.when(network.getId()).thenReturn(networkId);
Mockito.when(dataCenter.getId()).thenReturn(dataCenterId);
Mockito.when(testOrchastrator._ipAddressDao.findBySourceNetworkIdAndDatacenterIdAndState(networkId, dataCenterId, State.Free)).thenReturn(ipAddressVO);
String ipAddress = testOrchastrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
Mockito.when(testOrchestrator._ipAddressDao.findBySourceNetworkIdAndDatacenterIdAndState(networkId, dataCenterId, State.Free)).thenReturn(ipAddressVO);
String ipAddress = testOrchestrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
Assert.assertEquals(freeIp, ipAddress);
}
@ -814,8 +817,8 @@ public class NetworkOrchestratorTest extends TestCase {
Mockito.when(network.getId()).thenReturn(networkId);
Mockito.when(dataCenter.getId()).thenReturn(dataCenterId);
Mockito.when(ipAddresses.getIp4Address()).thenReturn(requestedIp);
Mockito.when(testOrchastrator._ipAddressDao.findByIp(requestedIp)).thenReturn(ipAddressVO);
String ipAddress = testOrchastrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
Mockito.when(testOrchestrator._ipAddressDao.findByIp(requestedIp)).thenReturn(ipAddressVO);
String ipAddress = testOrchestrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
Assert.assertEquals(requestedIp, ipAddress);
}
@ -837,7 +840,54 @@ public class NetworkOrchestratorTest extends TestCase {
Mockito.when(network.getId()).thenReturn(networkId);
Mockito.when(dataCenter.getId()).thenReturn(dataCenterId);
Mockito.when(ipAddresses.getIp4Address()).thenReturn(requestedIp);
Mockito.when(testOrchastrator._ipAddressDao.findByIp(requestedIp)).thenReturn(ipAddressVO);
testOrchastrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
Mockito.when(testOrchestrator._ipAddressDao.findByIp(requestedIp)).thenReturn(ipAddressVO);
testOrchestrator.getSelectedIpForNicImport(network, dataCenter, ipAddresses);
}
@Test
public void testShutdownNetworkAcquireLockFailed() {
ReservationContext reservationContext = Mockito.mock(ReservationContext.class);
NetworkVO network = mock(NetworkVO.class);
long networkId = 1;
when(testOrchestrator._networksDao.acquireInLockTable(Mockito.anyLong(), Mockito.anyInt())).thenReturn(null);
boolean shutdownNetworkStatus = testOrchestrator.shutdownNetwork(networkId, reservationContext, false);
Assert.assertFalse(shutdownNetworkStatus);
verify(testOrchestrator._networksDao, times(1)).acquireInLockTable(networkId, NetworkLockTimeout.value());
}
@Test
public void testShutdownNetworkInAllocatedState() {
ReservationContext reservationContext = Mockito.mock(ReservationContext.class);
NetworkVO network = mock(NetworkVO.class);
long networkId = 1;
when(testOrchestrator._networksDao.acquireInLockTable(Mockito.anyLong(), Mockito.anyInt())).thenReturn(network);
when(network.getId()).thenReturn(networkId);
when(network.getState()).thenReturn(Network.State.Allocated);
boolean shutdownNetworkStatus = testOrchestrator.shutdownNetwork(networkId, reservationContext, false);
Assert.assertTrue(shutdownNetworkStatus);
verify(network, times(1)).getState();
verify(testOrchestrator._networksDao, times(1)).acquireInLockTable(networkId, NetworkLockTimeout.value());
verify(testOrchestrator._networksDao, times(1)).releaseFromLockTable(networkId);
}
@Test
public void testShutdownNetworkInImplementingState() {
ReservationContext reservationContext = Mockito.mock(ReservationContext.class);
NetworkVO network = mock(NetworkVO.class);
long networkId = 1;
when(testOrchestrator._networksDao.acquireInLockTable(Mockito.anyLong(), Mockito.anyInt())).thenReturn(network);
when(network.getId()).thenReturn(networkId);
when(network.getState()).thenReturn(Network.State.Implementing);
boolean shutdownNetworkStatus = testOrchestrator.shutdownNetwork(networkId, reservationContext, false);
Assert.assertFalse(shutdownNetworkStatus);
verify(network, times(3)).getState();
verify(testOrchestrator._networksDao, times(1)).acquireInLockTable(networkId, NetworkLockTimeout.value());
verify(testOrchestrator._networksDao, times(1)).releaseFromLockTable(networkId);
}
}

View File

@ -80,6 +80,18 @@ public class HypervisorCapabilitiesVO implements HypervisorCapabilities {
this.uuid = UUID.randomUUID().toString();
}
public HypervisorCapabilitiesVO(HypervisorCapabilitiesVO source) {
this.hypervisorType = source.getHypervisorType();
this.hypervisorVersion = source.getHypervisorVersion();
this.maxGuestsLimit = source.getMaxGuestsLimit();
this.maxDataVolumesLimit = source.getMaxDataVolumesLimit();
this.maxHostsPerCluster = source.getMaxHostsPerCluster();
this.securityGroupEnabled = source.isSecurityGroupEnabled();
this.storageMotionSupported = source.isStorageMotionSupported();
this.vmSnapshotEnabled = source.isVmSnapshotEnabled();
this.uuid = UUID.randomUUID().toString();
}
/**
* @param hypervisorType the hypervisorType to set
*/

View File

@ -75,8 +75,10 @@ public interface VmStatsDao extends GenericDao<VmStatsVO, Long> {
/**
* Removes (expunges) all VM stats with {@code timestamp} less than
* a given Date.
* @param limit the maximum date to keep stored. Records that exceed this limit will be removed.
* @param limitDate the maximum date to keep stored. Records that exceed this limit will be removed.
* @param limitPerQuery the maximum amount of rows to be removed in a single query. We loop if there are still rows to be removed after a given query.
* If 0 or negative, no limit is used.
*/
void removeAllByTimestampLessThan(Date limit);
void removeAllByTimestampLessThan(Date limitDate, long limitPerQuery);
}

View File

@ -21,6 +21,8 @@ import java.util.List;
import javax.annotation.PostConstruct;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.utils.db.Filter;
@ -33,6 +35,8 @@ import com.cloud.vm.VmStatsVO;
@Component
public class VmStatsDaoImpl extends GenericDaoBase<VmStatsVO, Long> implements VmStatsDao {
protected Logger logger = LogManager.getLogger(getClass());
protected SearchBuilder<VmStatsVO> vmIdSearch;
protected SearchBuilder<VmStatsVO> vmIdTimestampGreaterThanEqualSearch;
protected SearchBuilder<VmStatsVO> vmIdTimestampLessThanEqualSearch;
@ -113,10 +117,22 @@ public class VmStatsDaoImpl extends GenericDaoBase<VmStatsVO, Long> implements V
}
@Override
public void removeAllByTimestampLessThan(Date limit) {
public void removeAllByTimestampLessThan(Date limitDate, long limitPerQuery) {
SearchCriteria<VmStatsVO> sc = timestampSearch.create();
sc.setParameters("timestamp", limit);
expunge(sc);
sc.setParameters("timestamp", limitDate);
logger.debug(String.format("Starting to remove all vm_stats rows older than [%s].", limitDate));
long totalRemoved = 0;
long removed;
do {
removed = expunge(sc, limitPerQuery);
totalRemoved += removed;
logger.trace(String.format("Removed [%s] vm_stats rows on the last update and a sum of [%s] vm_stats rows older than [%s] until now.", removed, totalRemoved, limitDate));
} while (limitPerQuery > 0 && removed >= limitPerQuery);
logger.info(String.format("Removed a total of [%s] vm_stats rows older than [%s].", totalRemoved, limitDate));
}
}

View File

@ -130,7 +130,7 @@ public class ImageStoreDaoImpl extends GenericDaoBase<ImageStoreVO, Long> implem
}
if (scope.getScopeId() != null) {
SearchCriteria<ImageStoreVO> scc = createSearchCriteria();
scc.addOr("scope", SearchCriteria.Op.EQ, ScopeType.ZONE);
scc.addOr("scope", SearchCriteria.Op.EQ, ScopeType.REGION);
scc.addOr("dcId", SearchCriteria.Op.EQ, scope.getScopeId());
sc.addAnd("scope", SearchCriteria.Op.SC, scc);
}

View File

@ -65,3 +65,8 @@ CREATE TABLE IF NOT EXISTS `cloud_usage`.`usage_vpc` (
CALL `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.cloud_usage', 'state', 'VARCHAR(100) DEFAULT NULL');
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.user_data', 'removed', 'datetime COMMENT "date removed or null, if still present"');
-- Update options for config - host.allocators.order
UPDATE `cloud`.`configuration` SET
`options` = 'FirstFitRouting,RandomAllocator,TestingAllocator,FirstFitAllocator,RecreateHostAllocator'
WHERE `name` = 'host.allocators.order';

View File

@ -24,6 +24,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
@ -44,6 +45,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
@ -69,7 +71,6 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
@ -82,7 +83,6 @@ import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo;
import com.cloud.agent.api.ModifyTargetsAnswer;
import com.cloud.agent.api.ModifyTargetsCommand;
import com.cloud.agent.api.PrepareForMigrationCommand;
import com.cloud.agent.api.storage.CheckStorageAvailabilityCommand;
import com.cloud.agent.api.storage.CopyVolumeAnswer;
import com.cloud.agent.api.storage.CopyVolumeCommand;
import com.cloud.agent.api.storage.MigrateVolumeAnswer;
@ -141,12 +141,16 @@ import java.util.HashSet;
import java.util.stream.Collectors;
import org.apache.commons.collections.CollectionUtils;
import static org.apache.cloudstack.vm.UnmanagedVMsManagerImpl.KVM_VM_IMPORT_DEFAULT_TEMPLATE_NAME;
import static org.apache.cloudstack.vm.UnmanagedVMsManagerImpl.VM_IMPORT_DEFAULT_TEMPLATE_NAME;
public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
protected Logger logger = LogManager.getLogger(getClass());
private static final Random RANDOM = new Random(System.nanoTime());
private static final int LOCK_TIME_IN_SECONDS = 300;
private static final String OPERATION_NOT_SUPPORTED = "This operation is not supported.";
@Inject
protected AgentManager agentManager;
@Inject
@ -684,8 +688,6 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
private void handleVolumeMigrationFromNonManagedStorageToManagedStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo,
AsyncCompletionCallback<CopyCommandResult> callback) {
String errMsg = null;
try {
HypervisorType hypervisorType = srcVolumeInfo.getHypervisorType();
@ -696,37 +698,21 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
if (HypervisorType.XenServer.equals(hypervisorType)) {
handleVolumeMigrationForXenServer(srcVolumeInfo, destVolumeInfo);
}
else {
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
DataTO dataTO = destVolumeInfo.getTO();
CopyCmdAnswer copyCmdAnswer = new CopyCmdAnswer(dataTO);
CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
callback.complete(result);
} else {
handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback);
}
}
catch (Exception ex) {
errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromNonManagedStorageToManagedStorage': " +
String errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromNonManagedStorageToManagedStorage': " +
ex.getMessage();
throw new CloudRuntimeException(errMsg, ex);
}
finally {
CopyCmdAnswer copyCmdAnswer;
if (errMsg != null) {
copyCmdAnswer = new CopyCmdAnswer(errMsg);
}
else {
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
DataTO dataTO = destVolumeInfo.getTO();
copyCmdAnswer = new CopyCmdAnswer(dataTO);
}
CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
result.setResult(errMsg);
callback.complete(result);
}
}
private void handleVolumeMigrationForXenServer(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo) {
@ -845,12 +831,25 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
checkAvailableForMigration(vm);
String errMsg = null;
HostVO hostVO = null;
try {
destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null);
VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId());
updatePathFromScsiName(volumeVO);
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
HostVO hostVO = getHostOnWhichToExecuteMigrationCommand(srcVolumeInfo, destVolumeInfo);
hostVO = getHostOnWhichToExecuteMigrationCommand(srcVolumeInfo, destVolumeInfo);
// if managed we need to grant access
PrimaryDataStore pds = (PrimaryDataStore)this.dataStoreMgr.getPrimaryDataStore(destVolumeInfo.getDataStore().getUuid());
if (pds == null) {
throw new CloudRuntimeException("Unable to find primary data store driver for this volume");
}
// grant access (for managed volumes)
_volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore());
// re-retrieve volume to get any updated information from grant
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
// migrate the volume via the hypervisor
String path = migrateVolumeForKVM(srcVolumeInfo, destVolumeInfo, hostVO, "Unable to migrate the volume from non-managed storage to managed storage");
@ -871,6 +870,18 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
throw new CloudRuntimeException(errMsg, ex);
}
} finally {
// revoke access (for managed volumes)
if (hostVO != null) {
try {
_volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore());
} catch (Exception e) {
logger.warn(String.format("Failed to revoke access for volume 'name=%s,uuid=%s' after a migration attempt", destVolumeInfo.getVolume(), destVolumeInfo.getUuid()), e);
}
}
// re-retrieve volume to get any updated information from grant
destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
CopyCmdAnswer copyCmdAnswer;
if (errMsg != null) {
copyCmdAnswer = new CopyCmdAnswer(errMsg);
@ -921,6 +932,125 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
return hostVO;
}
private VolumeInfo createTemporaryVolumeCopyOfSnapshotAdaptive(SnapshotInfo snapshotInfo) {
VolumeInfo tempVolumeInfo = null;
VolumeVO tempVolumeVO = null;
try {
tempVolumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP",
snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, "");
tempVolumeVO.setPoolId(snapshotInfo.getDataStore().getId());
_volumeDao.persist(tempVolumeVO);
tempVolumeInfo = this._volFactory.getVolume(tempVolumeVO.getId());
if (snapshotInfo.getDataStore().getDriver().canCopy(snapshotInfo, tempVolumeInfo)) {
snapshotInfo.getDataStore().getDriver().copyAsync(snapshotInfo, tempVolumeInfo, null, null);
// refresh volume info as data could have changed
tempVolumeInfo = this._volFactory.getVolume(tempVolumeVO.getId());
} else {
throw new CloudRuntimeException("Storage driver indicated it could create a volume from the snapshot but rejected the subsequent request to do so");
}
return tempVolumeInfo;
} catch (Throwable e) {
try {
if (tempVolumeInfo != null) {
tempVolumeInfo.getDataStore().getDriver().deleteAsync(tempVolumeInfo.getDataStore(), tempVolumeInfo, null);
}
// cleanup temporary volume
if (tempVolumeVO != null) {
_volumeDao.remove(tempVolumeVO.getId());
}
} catch (Throwable e2) {
logger.warn("Failed to delete temporary volume created for copy", e2);
}
throw e;
}
}
/**
* Simplier logic for copy from snapshot for adaptive driver only.
* @param snapshotInfo
* @param destData
* @param callback
*/
private void handleCopyAsyncToSecondaryStorageAdaptive(SnapshotInfo snapshotInfo, DataObject destData, AsyncCompletionCallback<CopyCommandResult> callback) {
CopyCmdAnswer copyCmdAnswer = null;
DataObject srcFinal = null;
HostVO hostVO = null;
DataStore srcDataStore = null;
boolean tempRequired = false;
try {
snapshotInfo.processEvent(Event.CopyingRequested);
hostVO = getHost(snapshotInfo);
DataObject destOnStore = destData;
srcDataStore = snapshotInfo.getDataStore();
int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value();
CopyCommand copyCommand = null;
if (!Boolean.parseBoolean(srcDataStore.getDriver().getCapabilities().get("CAN_DIRECT_ATTACH_SNAPSHOT"))) {
srcFinal = createTemporaryVolumeCopyOfSnapshotAdaptive(snapshotInfo);
tempRequired = true;
} else {
srcFinal = snapshotInfo;
}
_volumeService.grantAccess(srcFinal, hostVO, srcDataStore);
DataTO srcTo = srcFinal.getTO();
// have to set PATH as extraOptions due to logic in KVM hypervisor processor
HashMap<String,String> extraDetails = new HashMap<>();
extraDetails.put(DiskTO.PATH, srcTo.getPath());
copyCommand = new CopyCommand(srcFinal.getTO(), destOnStore.getTO(), primaryStorageDownloadWait,
VirtualMachineManager.ExecuteInSequence.value());
copyCommand.setOptions(extraDetails);
copyCmdAnswer = (CopyCmdAnswer)agentManager.send(hostVO.getId(), copyCommand);
} catch (Exception ex) {
String msg = "Failed to create template from snapshot (Snapshot ID = " + snapshotInfo.getId() + ") : ";
logger.warn(msg, ex);
throw new CloudRuntimeException(msg + ex.getMessage(), ex);
}
finally {
// remove access tot he volume that was used
if (srcFinal != null && hostVO != null && srcDataStore != null) {
_volumeService.revokeAccess(srcFinal, hostVO, srcDataStore);
}
// delete the temporary volume if it was needed
if (srcFinal != null && tempRequired) {
try {
srcFinal.getDataStore().getDriver().deleteAsync(srcFinal.getDataStore(), srcFinal, null);
} catch (Throwable e) {
logger.warn("Failed to delete temporary volume created for copy", e);
}
}
// check we have a reasonable result
String errMsg = null;
if (copyCmdAnswer == null || (!copyCmdAnswer.getResult() && copyCmdAnswer.getDetails() == null)) {
errMsg = "Unable to create template from snapshot";
copyCmdAnswer = new CopyCmdAnswer(errMsg);
} else if (!copyCmdAnswer.getResult() && StringUtils.isEmpty(copyCmdAnswer.getDetails())) {
errMsg = "Unable to create template from snapshot";
} else if (!copyCmdAnswer.getResult()) {
errMsg = copyCmdAnswer.getDetails();
}
//submit processEvent
if (StringUtils.isEmpty(errMsg)) {
snapshotInfo.processEvent(Event.OperationSuccessed);
} else {
snapshotInfo.processEvent(Event.OperationFailed);
}
CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
result.setResult(copyCmdAnswer.getDetails());
callback.complete(result);
}
}
/**
* This function is responsible for copying a snapshot from managed storage to secondary storage. This is used in the following two cases:
* 1) When creating a template from a snapshot
@ -931,6 +1061,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
* @param callback callback for async
*/
private void handleCopyAsyncToSecondaryStorage(SnapshotInfo snapshotInfo, DataObject destData, AsyncCompletionCallback<CopyCommandResult> callback) {
// if this flag is set (true or false), we will fall out to use simplier logic for the Adaptive handler
if (snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_DIRECT_ATTACH_SNAPSHOT") != null) {
handleCopyAsyncToSecondaryStorageAdaptive(snapshotInfo, destData, callback);
return;
}
String errMsg = null;
CopyCmdAnswer copyCmdAnswer = null;
boolean usingBackendSnapshot = false;
@ -1697,14 +1834,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
private CopyCmdAnswer copyImageToVolume(DataObject srcDataObject, VolumeInfo destVolumeInfo, HostVO hostVO) {
int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value();
CopyCommand copyCommand = new CopyCommand(srcDataObject.getTO(), destVolumeInfo.getTO(), primaryStorageDownloadWait,
VirtualMachineManager.ExecuteInSequence.value());
CopyCmdAnswer copyCmdAnswer;
try {
_volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore());
CopyCommand copyCommand = new CopyCommand(srcDataObject.getTO(), destVolumeInfo.getTO(), primaryStorageDownloadWait,
VirtualMachineManager.ExecuteInSequence.value());
Map<String, String> destDetails = getVolumeDetails(destVolumeInfo);
copyCommand.setOptions2(destDetails);
@ -1729,42 +1865,6 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
return copyCmdAnswer;
}
/**
* Use normal volume semantics (create a volume known to cloudstack, ask the storage driver to create it as a copy of the snapshot)
* @param volumeVO
* @param snapshotInfo
*/
public void prepTempVolumeForCopyFromSnapshot(SnapshotInfo snapshotInfo) {
VolumeVO volumeVO = null;
try {
volumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP",
snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, "");
volumeVO.setPoolId(snapshotInfo.getDataStore().getId());
_volumeDao.persist(volumeVO);
VolumeInfo tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId());
if (snapshotInfo.getDataStore().getDriver().canCopy(snapshotInfo, tempVolumeInfo)) {
snapshotInfo.getDataStore().getDriver().copyAsync(snapshotInfo, tempVolumeInfo, null, null);
// refresh volume info as data could have changed
tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId());
// save the "temp" volume info into the snapshot details (we need this to clean up at the end)
_snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID", tempVolumeInfo.getUuid(), true);
_snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyPath", tempVolumeInfo.getPath(), true);
// NOTE: for this to work, the Driver must return a custom SnapshotObjectTO object from getTO()
// whenever the TemporaryVolumeCopyPath is set.
} else {
throw new CloudRuntimeException("Storage driver indicated it could create a volume from the snapshot but rejected the subsequent request to do so");
}
} catch (Throwable e) {
// cleanup temporary volume
if (volumeVO != null) {
_volumeDao.remove(volumeVO.getId());
}
throw e;
}
}
/**
* If the underlying storage system is making use of read-only snapshots, this gives the storage system the opportunity to
* create a volume from the snapshot so that we can copy the VHD file that should be inside of the snapshot to secondary storage.
@ -1776,13 +1876,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
* resign the SR and the VDI that should be inside of the snapshot before copying the VHD file to secondary storage.
*/
private void createVolumeFromSnapshot(SnapshotInfo snapshotInfo) {
if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) {
prepTempVolumeForCopyFromSnapshot(snapshotInfo);
return;
}
SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "create");
try {
snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null);
}
@ -1797,23 +1892,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
* invocation of createVolumeFromSnapshot(SnapshotInfo).
*/
private void deleteVolumeFromSnapshot(SnapshotInfo snapshotInfo) {
VolumeVO volumeVO = null;
// cleanup any temporary volume previously created for copy from a snapshot
if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) {
SnapshotDetailsVO tempUuid = null;
tempUuid = _snapshotDetailsDao.findDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID");
if (tempUuid == null || tempUuid.getValue() == null) {
return;
}
volumeVO = _volumeDao.findByUuid(tempUuid.getValue());
if (volumeVO != null) {
_volumeDao.remove(volumeVO.getId());
}
_snapshotDetailsDao.remove(tempUuid.getId());
_snapshotDetailsDao.removeDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID");
return;
}
try {
logger.debug("Cleaning up temporary volume created for copy from a snapshot");
SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "delete");
@ -1823,6 +1903,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
finally {
_snapshotDetailsDao.remove(snapshotDetails.getId());
}
} catch (Throwable e) {
logger.warn("Failed to clean up temporary volume created for copy from a snapshot, transction will not be failed but an adminstrator should clean this up: " + snapshotInfo.getUuid() + " - " + snapshotInfo.getPath(), e);
}
}
private void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState qualityOfServiceState) {
@ -1906,7 +1990,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
throw new CloudRuntimeException("Invalid hypervisor type (only KVM supported for this operation at the time being)");
}
verifyLiveMigrationForKVM(volumeDataStoreMap, destHost);
verifyLiveMigrationForKVM(volumeDataStoreMap);
VMInstanceVO vmInstance = _vmDao.findById(vmTO.getId());
vmTO.setState(vmInstance.getState());
@ -1933,7 +2017,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
continue;
}
if (srcVolumeInfo.getTemplateId() != null) {
VMTemplateVO vmTemplate = _vmTemplateDao.findById(vmInstance.getTemplateId());
if (srcVolumeInfo.getTemplateId() != null &&
Objects.nonNull(vmTemplate) &&
!Arrays.asList(KVM_VM_IMPORT_DEFAULT_TEMPLATE_NAME, VM_IMPORT_DEFAULT_TEMPLATE_NAME).contains(vmTemplate.getName())) {
logger.debug(String.format("Copying template [%s] of volume [%s] from source storage pool [%s] to target storage pool [%s].", srcVolumeInfo.getTemplateId(), srcVolumeInfo.getId(), sourceStoragePool.getId(), destStoragePool.getId()));
copyTemplateToTargetFilesystemStorageIfNeeded(srcVolumeInfo, sourceStoragePool, destDataStore, destStoragePool, destHost);
} else {
@ -1977,8 +2064,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
MigrateCommand.MigrateDiskInfo migrateDiskInfo;
boolean isNonManagedNfsToNfsOrSharedMountPointToNfs = supportStoragePoolType(sourceStoragePool.getPoolType()) && destStoragePool.getPoolType() == StoragePoolType.NetworkFilesystem && !managedStorageDestination;
if (isNonManagedNfsToNfsOrSharedMountPointToNfs) {
boolean isNonManagedToNfs = supportStoragePoolType(sourceStoragePool.getPoolType(), StoragePoolType.Filesystem) && destStoragePool.getPoolType() == StoragePoolType.NetworkFilesystem && !managedStorageDestination;
if (isNonManagedToNfs) {
migrateDiskInfo = new MigrateCommand.MigrateDiskInfo(srcVolumeInfo.getPath(),
MigrateCommand.MigrateDiskInfo.DiskType.FILE,
MigrateCommand.MigrateDiskInfo.DriverType.QCOW2,
@ -2152,7 +2239,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
if (srcVolumeInfo.getHypervisorType() == HypervisorType.KVM &&
srcVolumeInfo.getTemplateId() != null && srcVolumeInfo.getPoolId() != null) {
VMTemplateVO template = _vmTemplateDao.findById(srcVolumeInfo.getTemplateId());
if (template.getFormat() != null && template.getFormat() != Storage.ImageFormat.ISO) {
if (Objects.nonNull(template) && template.getFormat() != null && template.getFormat() != Storage.ImageFormat.ISO) {
VMTemplateStoragePoolVO ref = templatePoolDao.findByPoolTemplate(srcVolumeInfo.getPoolId(), srcVolumeInfo.getTemplateId(), null);
return ref != null ? ref.getInstallPath() : null;
}
@ -2357,9 +2444,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
* At a high level: The source storage cannot be managed and
* the destination storages can be all managed or all not managed, not mixed.
*/
protected void verifyLiveMigrationForKVM(Map<VolumeInfo, DataStore> volumeDataStoreMap, Host destHost) {
protected void verifyLiveMigrationForKVM(Map<VolumeInfo, DataStore> volumeDataStoreMap) {
Boolean storageTypeConsistency = null;
Map<String, Storage.StoragePoolType> sourcePools = new HashMap<>();
for (Map.Entry<VolumeInfo, DataStore> entry : volumeDataStoreMap.entrySet()) {
VolumeInfo volumeInfo = entry.getKey();
@ -2386,47 +2472,6 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
} else if (storageTypeConsistency != destStoragePoolVO.isManaged()) {
throw new CloudRuntimeException("Destination storage pools must be either all managed or all not managed");
}
addSourcePoolToPoolsMap(sourcePools, srcStoragePoolVO, destStoragePoolVO);
}
verifyDestinationStorage(sourcePools, destHost);
}
/**
* Adds source storage pool to the migration map if the destination pool is not managed and it is NFS.
*/
protected void addSourcePoolToPoolsMap(Map<String, Storage.StoragePoolType> sourcePools, StoragePoolVO srcStoragePoolVO, StoragePoolVO destStoragePoolVO) {
if (destStoragePoolVO.isManaged() || !StoragePoolType.NetworkFilesystem.equals(destStoragePoolVO.getPoolType())) {
logger.trace(String.format("Skipping adding source pool [%s] to map due to destination pool [%s] is managed or not NFS.", srcStoragePoolVO, destStoragePoolVO));
return;
}
String sourceStoragePoolUuid = srcStoragePoolVO.getUuid();
if (!sourcePools.containsKey(sourceStoragePoolUuid)) {
sourcePools.put(sourceStoragePoolUuid, srcStoragePoolVO.getPoolType());
}
}
/**
* Perform storage validation on destination host for KVM live storage migrations.
* Validate that volume source storage pools are mounted on the destination host prior the migration
* @throws CloudRuntimeException if any source storage pool is not mounted on the destination host
*/
private void verifyDestinationStorage(Map<String, Storage.StoragePoolType> sourcePools, Host destHost) {
if (MapUtils.isNotEmpty(sourcePools)) {
logger.debug("Verifying source pools are already available on destination host " + destHost.getUuid());
CheckStorageAvailabilityCommand cmd = new CheckStorageAvailabilityCommand(sourcePools);
try {
Answer answer = agentManager.send(destHost.getId(), cmd);
if (answer == null || !answer.getResult()) {
throw new CloudRuntimeException("Storage verification failed on host "
+ destHost.getUuid() +": " + answer.getDetails());
}
} catch (AgentUnavailableException | OperationTimedoutException e) {
e.printStackTrace();
throw new CloudRuntimeException("Cannot perform storage verification on host " + destHost.getUuid() +
"due to: " + e.getMessage());
}
}
}
@ -2497,15 +2542,15 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value();
CopyCommand copyCommand = new CopyCommand(volumeInfo.getTO(), templateInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
try {
handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION);
if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType()) {
if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType() || StoragePoolType.FiberChannel == storagePoolVO.getPoolType()) {
_volumeService.grantAccess(volumeInfo, hostVO, srcDataStore);
}
CopyCommand copyCommand = new CopyCommand(volumeInfo.getTO(), templateInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
Map<String, String> srcDetails = getVolumeDetails(volumeInfo);
copyCommand.setOptions(srcDetails);
@ -2534,7 +2579,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
throw new CloudRuntimeException(msg + ex.getMessage(), ex);
}
finally {
if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType()) {
if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType() || StoragePoolType.FiberChannel == storagePoolVO.getPoolType()) {
try {
_volumeService.revokeAccess(volumeInfo, hostVO, srcDataStore);
}
@ -2629,13 +2674,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
long snapshotId = snapshotInfo.getId();
// if the snapshot required a temporary volume be created check if the UUID is set so we can
// retrieve the temporary volume's path to use during remote copy
List<SnapshotDetailsVO> storedDetails = _snapshotDetailsDao.findDetails(snapshotInfo.getId(), "TemporaryVolumeCopyPath");
if (storedDetails != null && storedDetails.size() > 0) {
String value = storedDetails.get(0).getValue();
snapshotDetails.put(DiskTO.PATH, value);
} else if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex || storagePoolVO.getPoolType() == StoragePoolType.FiberChannel) {
if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex || storagePoolVO.getPoolType() == StoragePoolType.FiberChannel) {
snapshotDetails.put(DiskTO.IQN, snapshotInfo.getPath());
} else {
snapshotDetails.put(DiskTO.IQN, getSnapshotProperty(snapshotId, DiskTO.IQN));
@ -2851,6 +2890,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
Map<String, String> srcDetails = getVolumeDetails(srcVolumeInfo);
Map<String, String> destDetails = getVolumeDetails(destVolumeInfo);
_volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore());
MigrateVolumeCommand migrateVolumeCommand = new MigrateVolumeCommand(srcVolumeInfo.getTO(), destVolumeInfo.getTO(),
srcDetails, destDetails, StorageManager.KvmStorageOfflineMigrationWait.value());
@ -2893,18 +2934,18 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
StoragePoolVO storagePoolVO = _storagePoolDao.findById(srcVolumeInfo.getPoolId());
Map<String, String> srcDetails = getVolumeDetails(srcVolumeInfo);
CopyVolumeCommand copyVolumeCommand = new CopyVolumeCommand(srcVolumeInfo.getId(), destVolumeInfo.getPath(), storagePoolVO,
destVolumeInfo.getDataStore().getUri(), true, StorageManager.KvmStorageOfflineMigrationWait.value(), true);
copyVolumeCommand.setSrcData(srcVolumeInfo.getTO());
copyVolumeCommand.setSrcDetails(srcDetails);
handleQualityOfServiceForVolumeMigration(srcVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION);
if (srcVolumeDetached) {
_volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore());
}
CopyVolumeCommand copyVolumeCommand = new CopyVolumeCommand(srcVolumeInfo.getId(), destVolumeInfo.getPath(), storagePoolVO,
destVolumeInfo.getDataStore().getUri(), true, StorageManager.KvmStorageOfflineMigrationWait.value(), true);
copyVolumeCommand.setSrcData(srcVolumeInfo.getTO());
copyVolumeCommand.setSrcDetails(srcDetails);
CopyVolumeAnswer copyVolumeAnswer = (CopyVolumeAnswer)agentManager.send(hostVO.getId(), copyVolumeCommand);
if (copyVolumeAnswer == null || !copyVolumeAnswer.getResult()) {
@ -2976,18 +3017,19 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
srcData = cacheData;
}
CopyCommand copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
try {
CopyCommand copyCommand = null;
if (Snapshot.LocationType.PRIMARY.equals(locationType)) {
_volumeService.grantAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore());
Map<String, String> srcDetails = getSnapshotDetails(snapshotInfo);
copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
copyCommand.setOptions(srcDetails);
}
} else {
_volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore());
copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
}
Map<String, String> destDetails = getVolumeDetails(volumeInfo);

View File

@ -476,19 +476,19 @@ public class KvmNonManagedStorageSystemDataMotionTest {
@Test
public void testVerifyLiveMigrationMapForKVM() {
kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap, host2);
kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap);
}
@Test(expected = CloudRuntimeException.class)
public void testVerifyLiveMigrationMapForKVMNotExistingSource() {
when(primaryDataStoreDao.findById(POOL_1_ID)).thenReturn(null);
kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap, host2);
kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap);
}
@Test(expected = CloudRuntimeException.class)
public void testVerifyLiveMigrationMapForKVMNotExistingDest() {
when(primaryDataStoreDao.findById(POOL_2_ID)).thenReturn(null);
kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap, host2);
kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap);
}
@Test(expected = CloudRuntimeException.class)
@ -497,7 +497,7 @@ public class KvmNonManagedStorageSystemDataMotionTest {
when(pool1.getId()).thenReturn(POOL_1_ID);
when(pool2.getId()).thenReturn(POOL_2_ID);
lenient().when(pool2.isManaged()).thenReturn(false);
kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap, host2);
kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap);
}
@Test

View File

@ -23,7 +23,6 @@ import static org.junit.Assert.assertFalse;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.MockitoAnnotations.initMocks;
import java.util.HashMap;
@ -48,7 +47,6 @@ import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;
import org.mockito.verification.VerificationMode;
import com.cloud.agent.api.MigrateCommand;
import com.cloud.host.HostVO;
@ -62,7 +60,6 @@ import com.cloud.storage.VolumeVO;
import java.util.AbstractMap;
import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
@ -372,72 +369,4 @@ public class StorageSystemDataMotionStrategyTest {
assertFalse(strategy.isStoragePoolTypeInList(StoragePoolType.SharedMountPoint, listTypes));
}
@Test
public void validateAddSourcePoolToPoolsMapDestinationPoolIsManaged() {
Mockito.doReturn(true).when(destinationStoragePoolVoMock).isManaged();
strategy.addSourcePoolToPoolsMap(mapStringStoragePoolTypeMock, sourceStoragePoolVoMock, destinationStoragePoolVoMock);
Mockito.verify(destinationStoragePoolVoMock).isManaged();
Mockito.verifyNoMoreInteractions(mapStringStoragePoolTypeMock, sourceStoragePoolVoMock, destinationStoragePoolVoMock);
}
@Test
public void validateAddSourcePoolToPoolsMapDestinationPoolIsNotNFS() {
List<StoragePoolType> storagePoolTypes = new LinkedList<>(Arrays.asList(StoragePoolType.values()));
storagePoolTypes.remove(StoragePoolType.NetworkFilesystem);
Mockito.doReturn(false).when(destinationStoragePoolVoMock).isManaged();
storagePoolTypes.forEach(poolType -> {
Mockito.doReturn(poolType).when(destinationStoragePoolVoMock).getPoolType();
strategy.addSourcePoolToPoolsMap(mapStringStoragePoolTypeMock, sourceStoragePoolVoMock, destinationStoragePoolVoMock);
});
VerificationMode times = Mockito.times(storagePoolTypes.size());
Mockito.verify(destinationStoragePoolVoMock, times).isManaged();
Mockito.verify(destinationStoragePoolVoMock, times).getPoolType();
Mockito.verifyNoMoreInteractions(mapStringStoragePoolTypeMock, sourceStoragePoolVoMock, destinationStoragePoolVoMock);
}
@Test
public void validateAddSourcePoolToPoolsMapMapContainsKey() {
Mockito.doReturn(false).when(destinationStoragePoolVoMock).isManaged();
Mockito.doReturn(StoragePoolType.NetworkFilesystem).when(destinationStoragePoolVoMock).getPoolType();
Mockito.doReturn("").when(sourceStoragePoolVoMock).getUuid();
Mockito.doReturn(true).when(mapStringStoragePoolTypeMock).containsKey(Mockito.anyString());
strategy.addSourcePoolToPoolsMap(mapStringStoragePoolTypeMock, sourceStoragePoolVoMock, destinationStoragePoolVoMock);
Mockito.verify(destinationStoragePoolVoMock, never()).getScope();
Mockito.verify(destinationStoragePoolVoMock).isManaged();
Mockito.verify(destinationStoragePoolVoMock).getPoolType();
Mockito.verify(sourceStoragePoolVoMock).getUuid();
Mockito.verify(mapStringStoragePoolTypeMock).containsKey(Mockito.anyString());
Mockito.verifyNoMoreInteractions(mapStringStoragePoolTypeMock, sourceStoragePoolVoMock, destinationStoragePoolVoMock);
}
@Test
public void validateAddSourcePoolToPoolsMapMapDoesNotContainsKey() {
List<StoragePoolType> storagePoolTypes = new LinkedList<>(Arrays.asList(StoragePoolType.values()));
Mockito.doReturn(false).when(destinationStoragePoolVoMock).isManaged();
Mockito.doReturn(StoragePoolType.NetworkFilesystem).when(destinationStoragePoolVoMock).getPoolType();
Mockito.doReturn("").when(sourceStoragePoolVoMock).getUuid();
Mockito.doReturn(false).when(mapStringStoragePoolTypeMock).containsKey(Mockito.anyString());
Mockito.doReturn(null).when(mapStringStoragePoolTypeMock).put(Mockito.anyString(), Mockito.any());
storagePoolTypes.forEach(poolType -> {
Mockito.doReturn(poolType).when(sourceStoragePoolVoMock).getPoolType();
strategy.addSourcePoolToPoolsMap(mapStringStoragePoolTypeMock, sourceStoragePoolVoMock, destinationStoragePoolVoMock);
});
VerificationMode times = Mockito.times(storagePoolTypes.size());
Mockito.verify(destinationStoragePoolVoMock, never()).getScope();
Mockito.verify(destinationStoragePoolVoMock, times).isManaged();
Mockito.verify(destinationStoragePoolVoMock, times).getPoolType();
Mockito.verify(sourceStoragePoolVoMock, times).getUuid();
Mockito.verify(mapStringStoragePoolTypeMock, times).containsKey(Mockito.anyString());
Mockito.verify(sourceStoragePoolVoMock, times).getPoolType();
Mockito.verify(mapStringStoragePoolTypeMock, times).put(Mockito.anyString(), Mockito.any());
Mockito.verifyNoMoreInteractions(mapStringStoragePoolTypeMock, sourceStoragePoolVoMock, destinationStoragePoolVoMock);
}
}

View File

@ -202,7 +202,7 @@ public class ImageStoreProviderManagerImpl implements ImageStoreProviderManager,
// No store with space found
logger.error(String.format("Can't find an image storage in zone with less than %d usage",
Math.round(_statsCollector.getImageStoreCapacityThreshold()*100)));
Math.round(_statsCollector.getImageStoreCapacityThreshold() * 100)));
return null;
}

View File

@ -247,6 +247,14 @@ public interface GenericDao<T, ID extends Serializable> {
int expungeList(List<ID> ids);
/**
* Delete the entity beans specified by the search criteria with a given limit
* @param sc Search criteria
* @param limit Maximum number of rows that will be affected
* @return Number of rows deleted
*/
int expunge(SearchCriteria<T> sc, long limit);
/**
* expunge the removed rows.
*/

View File

@ -1236,6 +1236,12 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
}
// FIXME: Does not work for joins.
@Override
public int expunge(final SearchCriteria<T> sc, long limit) {
Filter filter = new Filter(limit);
return expunge(sc, filter);
}
@Override
public int expunge(final SearchCriteria<T> sc, final Filter filter) {
if (sc == null) {

View File

@ -102,7 +102,9 @@ public class DefaultModuleDefinitionSet implements ModuleDefinitionSet {
logger.debug(String.format("Trying to obtain module [%s] context.", moduleDefinitionName));
ApplicationContext context = getApplicationContext(moduleDefinitionName);
try {
if (context.containsBean("moduleStartup")) {
if (context == null) {
logger.warn(String.format("Application context not found for module definition [%s]", moduleDefinitionName));
} else if (context.containsBean("moduleStartup")) {
Runnable runnable = context.getBean("moduleStartup", Runnable.class);
logger.info(String.format("Starting module [%s].", moduleDefinitionName));
runnable.run();

View File

@ -120,7 +120,9 @@ public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements API
}
if (accountRole.getRoleType() == RoleType.Admin && accountRole.getId() == RoleType.Admin.getId()) {
logger.info(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", account));
if (logger.isTraceEnabled()) {
logger.trace(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", account));
}
return true;
}

View File

@ -72,7 +72,9 @@ public class ProjectRoleBasedApiAccessChecker extends AdapterBase implements AP
Project project = CallContext.current().getProject();
if (project == null) {
logger.warn(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning APIs [%s] for user [%s] as allowed.", apiNames, user));
if (logger.isTraceEnabled()) {
logger.trace(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning APIs [%s] for user [%s] as allowed.", apiNames, user));
}
return apiNames;
}
@ -110,8 +112,10 @@ public class ProjectRoleBasedApiAccessChecker extends AdapterBase implements AP
Project project = CallContext.current().getProject();
if (project == null) {
logger.warn(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning API [%s] for user [%s] as allowed.", apiCommandName,
if (logger.isTraceEnabled()) {
logger.trace(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning API [%s] for user [%s] as allowed.", apiCommandName,
user));
}
return true;
}

View File

@ -3797,29 +3797,29 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
public List<String> getAllVmNames(final Connect conn) {
final ArrayList<String> la = new ArrayList<String>();
final ArrayList<String> domainNames = new ArrayList<String>();
try {
final String names[] = conn.listDefinedDomains();
for (int i = 0; i < names.length; i++) {
la.add(names[i]);
domainNames.add(names[i]);
}
} catch (final LibvirtException e) {
LOGGER.warn("Failed to list Defined domains", e);
logger.warn("Failed to list defined domains", e);
}
int[] ids = null;
try {
ids = conn.listDomains();
} catch (final LibvirtException e) {
LOGGER.warn("Failed to list domains", e);
return la;
logger.warn("Failed to list domains", e);
return domainNames;
}
Domain dm = null;
for (int i = 0; i < ids.length; i++) {
try {
dm = conn.domainLookupByID(ids[i]);
la.add(dm.getName());
domainNames.add(dm.getName());
} catch (final LibvirtException e) {
LOGGER.warn("Unable to get vms", e);
} finally {
@ -3833,7 +3833,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
}
return la;
return domainNames;
}
private HashMap<String, HostVmStateReportEntry> getHostVmStateReport() {
@ -5379,20 +5379,31 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
/*
Scp volume from remote host to local directory
*/
public String copyVolume(String srcIp, String username, String password, String localDir, String remoteFile, String tmpPath) {
try {
public String copyVolume(String srcIp, String username, String password, String localDir, String remoteFile, String tmpPath, int timeoutInSecs) {
String outputFile = UUID.randomUUID().toString();
try {
StringBuilder command = new StringBuilder("qemu-img convert -O qcow2 ");
command.append(remoteFile);
command.append(" "+tmpPath);
command.append(" " + tmpPath);
command.append(outputFile);
logger.debug("Converting remoteFile: "+remoteFile);
SshHelper.sshExecute(srcIp, 22, username, null, password, command.toString());
logger.debug("Copying remoteFile to: "+localDir);
SshHelper.scpFrom(srcIp, 22, username, null, password, localDir, tmpPath+outputFile);
logger.debug("Successfully copyied remoteFile to: "+localDir+"/"+outputFile);
logger.debug(String.format("Converting remote disk file: %s, output file: %s%s (timeout: %d secs)", remoteFile, tmpPath, outputFile, timeoutInSecs));
SshHelper.sshExecute(srcIp, 22, username, null, password, command.toString(), timeoutInSecs * 1000);
logger.debug("Copying converted remote disk file " + outputFile + " to: " + localDir);
SshHelper.scpFrom(srcIp, 22, username, null, password, localDir, tmpPath + outputFile);
logger.debug("Successfully copied converted remote disk file to: " + localDir + "/" + outputFile);
return outputFile;
} catch (Exception e) {
try {
String deleteRemoteConvertedFileCmd = String.format("rm -f %s%s", tmpPath, outputFile);
SshHelper.sshExecute(srcIp, 22, username, null, password, deleteRemoteConvertedFileCmd);
} catch (Exception ignored) {
}
try {
FileUtils.deleteQuietly(new File(localDir + "/" + outputFile));
} catch (Exception ignored) {
}
throw new RuntimeException(e);
}
}

View File

@ -43,7 +43,6 @@ public final class LibvirtCopyRemoteVolumeCommandWrapper extends CommandWrapper<
@Override
public Answer execute(final CopyRemoteVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) {
String result = null;
String srcIp = command.getRemoteIp();
String username = command.getUsername();
String password = command.getPassword();
@ -53,23 +52,25 @@ public final class LibvirtCopyRemoteVolumeCommandWrapper extends CommandWrapper<
KVMStoragePoolManager poolMgr = libvirtComputingResource.getStoragePoolMgr();
KVMStoragePool pool = poolMgr.getStoragePool(storageFilerTO.getType(), storageFilerTO.getUuid());
String dstPath = pool.getLocalPath();
int timeoutInSecs = command.getWait();
try {
if (storageFilerTO.getType() == Storage.StoragePoolType.Filesystem ||
storageFilerTO.getType() == Storage.StoragePoolType.NetworkFilesystem) {
String filename = libvirtComputingResource.copyVolume(srcIp, username, password, dstPath, srcFile, tmpPath);
logger.debug("Volume Copy Successful");
String filename = libvirtComputingResource.copyVolume(srcIp, username, password, dstPath, srcFile, tmpPath, timeoutInSecs);
logger.debug("Volume " + srcFile + " copy successful, copied to file: " + filename);
final KVMPhysicalDisk vol = pool.getPhysicalDisk(filename);
final String path = vol.getPath();
long size = getVirtualSizeFromFile(path);
return new CopyRemoteVolumeAnswer(command, "", filename, size);
} else {
return new Answer(command, false, "Unsupported Storage Pool");
String msg = "Unsupported storage pool type: " + storageFilerTO.getType().toString() + ", only local and NFS pools are supported";
return new Answer(command, false, msg);
}
} catch (final Exception e) {
logger.error("Error while copying file from remote host: "+ e.getMessage());
return new Answer(command, false, result);
logger.error("Error while copying volume file from remote host: " + e.getMessage(), e);
String msg = "Failed to copy volume due to: " + e.getMessage();
return new Answer(command, false, msg);
}
}

View File

@ -47,37 +47,38 @@ public final class LibvirtGetRemoteVmsCommandWrapper extends CommandWrapper<GetR
@Override
public Answer execute(final GetRemoteVmsCommand command, final LibvirtComputingResource libvirtComputingResource) {
String hypervisorURI = "qemu+tcp://" + command.getRemoteIp() + "/system";
String remoteIp = command.getRemoteIp();
String hypervisorURI = "qemu+tcp://" + remoteIp + "/system";
HashMap<String, UnmanagedInstanceTO> unmanagedInstances = new HashMap<>();
try {
Connect conn = LibvirtConnection.getConnection(hypervisorURI);
final List<String> allVmNames = libvirtComputingResource.getAllVmNames(conn);
logger.info(String.format("Found %d VMs on the remote host %s", allVmNames.size(), remoteIp));
for (String name : allVmNames) {
final Domain domain = libvirtComputingResource.getDomain(conn, name);
final DomainInfo.DomainState ps = domain.getInfo().state;
final VirtualMachine.PowerState state = libvirtComputingResource.convertToPowerState(ps);
logger.debug("VM " + domain.getName() + " - powerstate: " + ps + ", state: " + state.toString());
logger.debug(String.format("Remote VM %s - powerstate: %s, state: %s", domain.getName(), ps.toString(), state.toString()));
if (state == VirtualMachine.PowerState.PowerOff) {
try {
UnmanagedInstanceTO instance = getUnmanagedInstance(libvirtComputingResource, domain, conn);
unmanagedInstances.put(instance.getName(), instance);
} catch (Exception e) {
logger.error("Couldn't fetch VM " + domain.getName() + " details, due to: " + e.getMessage(), e);
logger.error("Couldn't fetch remote VM " + domain.getName() + " details, due to: " + e.getMessage(), e);
}
}
domain.free();
}
logger.debug("Found " + unmanagedInstances.size() + " stopped VMs on host " + command.getRemoteIp());
logger.debug("Found " + unmanagedInstances.size() + " stopped VMs on remote host " + remoteIp);
return new GetRemoteVmsAnswer(command, "", unmanagedInstances);
} catch (final LibvirtException e) {
logger.error("Failed to list stopped VMs on remote host " + command.getRemoteIp() + ", due to: " + e.getMessage(), e);
logger.error("Failed to list stopped VMs on remote host " + remoteIp + ", due to: " + e.getMessage(), e);
if (e.getMessage().toLowerCase().contains("connection refused")) {
return new Answer(command, false, "Unable to connect to remote host " + command.getRemoteIp() + ", please check the libvirtd tcp connectivity and retry");
return new Answer(command, false, "Unable to connect to remote host " + remoteIp + ", please check the libvirtd tcp connectivity and retry");
}
return new Answer(command, false, "Unable to list stopped VMs on remote host " + command.getRemoteIp() + ", due to: " + e.getMessage());
return new Answer(command, false, "Unable to list stopped VMs on remote host " + remoteIp + ", due to: " + e.getMessage());
}
}
@ -103,8 +104,8 @@ public final class LibvirtGetRemoteVmsCommandWrapper extends CommandWrapper<GetR
return instance;
} catch (Exception e) {
logger.debug("Unable to retrieve unmanaged instance info, due to: " + e.getMessage(), e);
throw new CloudRuntimeException("Unable to retrieve unmanaged instance info, due to: " + e.getMessage());
logger.debug("Unable to retrieve remote unmanaged instance info, due to: " + e.getMessage(), e);
throw new CloudRuntimeException("Unable to retrieve remote unmanaged instance info, due to: " + e.getMessage());
}
}

View File

@ -300,15 +300,27 @@ public class LibvirtMigrateVolumeCommandWrapper extends CommandWrapper<MigrateVo
(destVolumeObjectTO.getPath() != null ? destVolumeObjectTO.getPath() : UUID.randomUUID().toString());
try {
storagePoolManager.connectPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath, srcDetails);
KVMStoragePool sourceStoragePool = storagePoolManager.getStoragePool(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid());
if (!sourceStoragePool.connectPhysicalDisk(srcPath, srcDetails)) {
return new MigrateVolumeAnswer(command, false, "Unable to connect source volume on hypervisor", srcPath);
}
KVMPhysicalDisk srcPhysicalDisk = storagePoolManager.getPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath);
if (srcPhysicalDisk == null) {
return new MigrateVolumeAnswer(command, false, "Unable to get handle to source volume on hypervisor", srcPath);
}
KVMStoragePool destPrimaryStorage = storagePoolManager.getStoragePool(destPrimaryDataStore.getPoolType(), destPrimaryDataStore.getUuid());
storagePoolManager.connectPhysicalDisk(destPrimaryDataStore.getPoolType(), destPrimaryDataStore.getUuid(), destPath, destDetails);
if (!destPrimaryStorage.connectPhysicalDisk(destPath, destDetails)) {
return new MigrateVolumeAnswer(command, false, "Unable to connect destination volume on hypervisor", srcPath);
}
storagePoolManager.copyPhysicalDisk(srcPhysicalDisk, destPath, destPrimaryStorage, command.getWaitInMillSeconds());
KVMPhysicalDisk newDiskCopy = storagePoolManager.copyPhysicalDisk(srcPhysicalDisk, destPath, destPrimaryStorage, command.getWaitInMillSeconds());
if (newDiskCopy == null) {
return new MigrateVolumeAnswer(command, false, "Copy command failed to return handle to copied physical disk", destPath);
}
}
catch (Exception ex) {
return new MigrateVolumeAnswer(command, false, ex.getMessage(), null);

View File

@ -16,12 +16,35 @@
// under the License.
package com.cloud.hypervisor.kvm.storage;
import java.net.InetAddress;
import java.net.UnknownHostException;
import org.apache.log4j.Logger;
import com.cloud.storage.Storage;
import com.cloud.utils.exception.CloudRuntimeException;
public class FiberChannelAdapter extends MultipathSCSIAdapterBase {
private Logger LOGGER = Logger.getLogger(getClass());
private String hostname = null;
private String hostnameFq = null;
public FiberChannelAdapter() {
LOGGER.info("Loaded FiberChannelAdapter for StorageLayer");
// get the hostname - we need this to compare to connid values
try {
InetAddress inetAddress = InetAddress.getLocalHost();
hostname = inetAddress.getHostName(); // basic hostname
if (hostname.indexOf(".") > 0) {
hostname = hostname.substring(0, hostname.indexOf(".")); // strip off domain
}
hostnameFq = inetAddress.getCanonicalHostName(); // fully qualified hostname
LOGGER.info("Loaded FiberChannelAdapter for StorageLayer on host [" + hostname + "]");
} catch (UnknownHostException e) {
LOGGER.error("Error getting hostname", e);
}
}
@Override
@ -76,6 +99,11 @@ public class FiberChannelAdapter extends MultipathSCSIAdapterBase {
address = value;
} else if (key.equals("connid")) {
connectionId = value;
} else if (key.startsWith("connid.")) {
String inHostname = key.substring(7);
if (inHostname != null && (inHostname.equals(this.hostname) || inHostname.equals(this.hostnameFq))) {
connectionId = value;
}
}
}
}

View File

@ -134,6 +134,10 @@ import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
import com.cloud.utils.storage.S3.S3Utils;
import com.cloud.vm.VmDetailConstants;
import org.apache.cloudstack.utils.cryptsetup.KeyFile;
import org.apache.cloudstack.utils.qemu.QemuImageOptions;
import org.apache.cloudstack.utils.qemu.QemuObject.EncryptFormat;
import java.util.ArrayList;
public class KVMStorageProcessor implements StorageProcessor {
protected Logger logger = LogManager.getLogger(getClass());
@ -267,7 +271,7 @@ public class KVMStorageProcessor implements StorageProcessor {
Map<String, String> details = primaryStore.getDetails();
String path = details != null ? details.get("managedStoreTarget") : null;
String path = derivePath(primaryStore, destData, details);
if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) {
logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid());
@ -327,6 +331,16 @@ public class KVMStorageProcessor implements StorageProcessor {
}
}
private String derivePath(PrimaryDataStoreTO primaryStore, DataTO destData, Map<String, String> details) {
String path = null;
if (primaryStore.getPoolType() == StoragePoolType.FiberChannel) {
path = destData.getPath();
} else {
path = details != null ? details.get("managedStoreTarget") : null;
}
return path;
}
// this is much like PrimaryStorageDownloadCommand, but keeping it separate. copies template direct to root disk
private KVMPhysicalDisk templateToPrimaryDownload(final String templateUrl, final KVMStoragePool primaryPool, final String volUuid, final Long size, final int timeout) {
final int index = templateUrl.lastIndexOf("/");
@ -406,7 +420,7 @@ public class KVMStorageProcessor implements StorageProcessor {
vol = templateToPrimaryDownload(templatePath, primaryPool, volume.getUuid(), volume.getSize(), cmd.getWaitInMillSeconds());
} if (storagePoolMgr.supportsPhysicalDiskCopy(primaryPool.getType())) {
Map<String, String> details = primaryStore.getDetails();
String path = details != null ? details.get("managedStoreTarget") : null;
String path = derivePath(primaryStore, destData, details);
if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath, details)) {
logger.warn("Failed to connect base template volume at path: " + templatePath + ", in storage pool id: " + primaryStore.getUuid());
@ -1047,7 +1061,7 @@ public class KVMStorageProcessor implements StorageProcessor {
srcVolume.clearPassphrase();
if (isCreatedFromVmSnapshot) {
logger.debug("Ignoring removal of vm snapshot on primary as this snapshot is created from vm snapshot");
} else if (primaryPool.getType() != StoragePoolType.RBD) {
} else if (primaryPool != null && primaryPool.getType() != StoragePoolType.RBD) {
deleteSnapshotOnPrimary(cmd, snapshot, primaryPool);
}
@ -1748,7 +1762,7 @@ public class KVMStorageProcessor implements StorageProcessor {
snapshotPath = getSnapshotPathInPrimaryStorage(primaryPool.getLocalPath(), snapshotName);
String diskLabel = takeVolumeSnapshot(resource.getDisks(conn, vmName), snapshotName, diskPath, vm);
String convertResult = convertBaseFileToSnapshotFileInPrimaryStorageDir(primaryPool, diskPath, snapshotPath, volume, cmd.getWait());
String convertResult = convertBaseFileToSnapshotFileInPrimaryStorageDir(primaryPool, disk, snapshotPath, volume, cmd.getWait());
mergeSnapshotIntoBaseFile(vm, diskLabel, diskPath, snapshotName, volume, conn);
@ -1817,7 +1831,7 @@ public class KVMStorageProcessor implements StorageProcessor {
}
} else {
snapshotPath = getSnapshotPathInPrimaryStorage(primaryPool.getLocalPath(), snapshotName);
String convertResult = convertBaseFileToSnapshotFileInPrimaryStorageDir(primaryPool, diskPath, snapshotPath, volume, cmd.getWait());
String convertResult = convertBaseFileToSnapshotFileInPrimaryStorageDir(primaryPool, disk, snapshotPath, volume, cmd.getWait());
validateConvertResult(convertResult, snapshotPath);
}
}
@ -1940,26 +1954,43 @@ public class KVMStorageProcessor implements StorageProcessor {
* @param snapshotPath Path to convert the base file;
* @return null if the conversion occurs successfully or an error message that must be handled.
*/
protected String convertBaseFileToSnapshotFileInPrimaryStorageDir(KVMStoragePool primaryPool, String baseFile, String snapshotPath, VolumeObjectTO volume, int wait) {
try {
logger.debug(String.format("Trying to convert volume [%s] (%s) to snapshot [%s].", volume, baseFile, snapshotPath));
protected String convertBaseFileToSnapshotFileInPrimaryStorageDir(KVMStoragePool primaryPool,
KVMPhysicalDisk baseFile, String snapshotPath, VolumeObjectTO volume, int wait) {
try (KeyFile srcKey = new KeyFile(volume.getPassphrase())) {
logger.debug(
String.format("Trying to convert volume [%s] (%s) to snapshot [%s].", volume, baseFile, snapshotPath));
primaryPool.createFolder(TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR);
convertTheBaseFileToSnapshot(baseFile, snapshotPath, wait, srcKey);
} catch (QemuImgException | LibvirtException | IOException ex) {
return String.format("Failed to convert %s snapshot of volume [%s] to [%s] due to [%s].", volume, baseFile,
snapshotPath, ex.getMessage());
}
QemuImgFile srcFile = new QemuImgFile(baseFile);
logger.debug(String.format("Converted volume [%s] (from path \"%s\") to snapshot [%s].", volume, baseFile,
snapshotPath));
return null;
}
private void convertTheBaseFileToSnapshot(KVMPhysicalDisk baseFile, String snapshotPath, int wait, KeyFile srcKey)
throws LibvirtException, QemuImgException {
List<QemuObject> qemuObjects = new ArrayList<>();
Map<String, String> options = new HashMap<>();
QemuImageOptions qemuImageOpts = new QemuImageOptions(baseFile.getPath());
if (srcKey.isSet()) {
String srcKeyName = "sec0";
qemuObjects.add(QemuObject.prepareSecretForQemuImg(baseFile.getFormat(), EncryptFormat.LUKS,
srcKey.toString(), srcKeyName, options));
qemuImageOpts = new QemuImageOptions(baseFile.getFormat(), baseFile.getPath(), srcKeyName);
}
QemuImgFile srcFile = new QemuImgFile(baseFile.getPath());
srcFile.setFormat(PhysicalDiskFormat.QCOW2);
QemuImgFile destFile = new QemuImgFile(snapshotPath);
destFile.setFormat(PhysicalDiskFormat.QCOW2);
QemuImg q = new QemuImg(wait);
q.convert(srcFile, destFile);
logger.debug(String.format("Converted volume [%s] (from path \"%s\") to snapshot [%s].", volume, baseFile, snapshotPath));
return null;
} catch (QemuImgException | LibvirtException ex) {
return String.format("Failed to convert %s snapshot of volume [%s] to [%s] due to [%s].", volume, baseFile, snapshotPath, ex.getMessage());
}
q.convert(srcFile, destFile, options, qemuObjects, qemuImageOpts, null, true);
}
/**
@ -2467,8 +2498,7 @@ public class KVMStorageProcessor implements StorageProcessor {
if (!storagePoolMgr.connectPhysicalDisk(destPrimaryStore.getPoolType(), destPrimaryStore.getUuid(), destVolumePath, destPrimaryStore.getDetails())) {
logger.warn("Failed to connect dest volume at path: " + destVolumePath + ", in storage pool id: " + destPrimaryStore.getUuid());
}
String managedStoreTarget = destPrimaryStore.getDetails() != null ? destPrimaryStore.getDetails().get("managedStoreTarget") : null;
destVolumeName = managedStoreTarget != null ? managedStoreTarget : destVolumePath;
destVolumeName = derivePath(destPrimaryStore, destData, destPrimaryStore.getDetails());
} else {
final String volumeName = UUID.randomUUID().toString();
destVolumeName = volumeName + "." + destFormat.getFileExtension();

View File

@ -273,6 +273,16 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
}
}
private void checkNetfsStoragePoolMounted(String uuid) {
String targetPath = _mountPoint + File.separator + uuid;
int mountpointResult = Script.runSimpleBashScriptForExitValue("mountpoint -q " + targetPath);
if (mountpointResult != 0) {
String errMsg = String.format("libvirt failed to mount storage pool %s at %s", uuid, targetPath);
logger.error(errMsg);
throw new CloudRuntimeException(errMsg);
}
}
private StoragePool createNetfsStoragePool(PoolType fsType, Connect conn, String uuid, String host, String path) throws LibvirtException {
String targetPath = _mountPoint + File.separator + uuid;
LibvirtStoragePoolDef spd = new LibvirtStoragePoolDef(fsType, uuid, uuid, host, path, targetPath);
@ -699,6 +709,10 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
sp.create(0);
}
if (type == StoragePoolType.NetworkFilesystem) {
checkNetfsStoragePoolMounted(name);
}
return getStoragePool(name);
} catch (LibvirtException e) {
String error = e.toString();
@ -763,10 +777,10 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
if (e.toString().contains("exit status 16")) {
String targetPath = _mountPoint + File.separator + uuid;
logger.error("deleteStoragePool removed pool from libvirt, but libvirt had trouble unmounting the pool. Trying umount location " + targetPath +
"again in a few seconds");
" again in a few seconds");
String result = Script.runSimpleBashScript("sleep 5 && umount " + targetPath);
if (result == null) {
logger.error("Succeeded in unmounting " + targetPath);
logger.info("Succeeded in unmounting " + targetPath);
return true;
}
logger.error("Failed to unmount " + targetPath);

View File

@ -21,18 +21,15 @@ import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import com.cloud.storage.Storage;
@ -44,7 +41,6 @@ import com.cloud.utils.script.Script;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.libvirt.LibvirtException;
import org.joda.time.Duration;
public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
@ -56,6 +52,14 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
*/
static byte[] CLEANUP_LOCK = new byte[0];
/**
* List of supported OUI's (needed for path-based cleanup logic on disconnects after live migrations)
*/
static String[] SUPPORTED_OUI_LIST = {
"0002ac", // HPE Primera 3PAR
"24a937" // Pure Flasharray
};
/**
* Property keys and defaults
*/
@ -83,6 +87,7 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
* Initialize static program-wide configurations and background jobs
*/
static {
long cleanupFrequency = CLEANUP_FREQUENCY_SECS.getFinalValue() * 1000;
boolean cleanupEnabled = CLEANUP_ENABLED.getFinalValue();
@ -97,16 +102,13 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
throw new Error("Unable to find the disconnectVolume.sh script");
}
resizeScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), resizeScript);
if (resizeScript == null) {
throw new Error("Unable to find the resizeVolume.sh script");
}
copyScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), copyScript);
if (copyScript == null) {
throw new Error("Unable to find the copyVolume.sh script");
}
resizeScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), resizeScript);
if (cleanupEnabled) {
cleanupScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), cleanupScript);
if (cleanupScript == null) {
@ -138,9 +140,6 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
public abstract boolean isStoragePoolTypeSupported(Storage.StoragePoolType type);
/**
* We expect WWN values in the volumePath so need to convert it to an actual physical path
*/
public abstract AddressInfo parseAndValidatePath(String path);
@Override
@ -152,6 +151,7 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
return null;
}
// we expect WWN values in the volumePath so need to convert it to an actual physical path
AddressInfo address = parseAndValidatePath(volumePath);
return getPhysicalDisk(address, pool);
}
@ -187,15 +187,23 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
if (StringUtils.isEmpty(volumePath)) {
LOGGER.error("Unable to connect physical disk due to insufficient data - volume path is undefined");
throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data - volume path is underfined");
return false;
}
if (pool == null) {
LOGGER.error("Unable to connect physical disk due to insufficient data - pool is not set");
throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data - pool is not set");
return false;
}
// we expect WWN values in the volumePath so need to convert it to an actual physical path
AddressInfo address = this.parseAndValidatePath(volumePath);
// validate we have a connection id - we can't proceed without that
if (address.getConnectionId() == null) {
LOGGER.error("Unable to connect volume with address [" + address.getPath() + "] of the storage pool: " + pool.getUuid() + " - connection id is not set in provided path");
return false;
}
int waitTimeInSec = diskWaitTimeSecs;
if (details != null && details.containsKey(StorageManager.STORAGE_POOL_DISK_WAIT.toString())) {
String waitTime = details.get(StorageManager.STORAGE_POOL_DISK_WAIT.toString());
@ -208,31 +216,62 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
@Override
public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool) {
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumePath,pool) called with args (%s, %s) START", volumePath, pool.getUuid()));
if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDisk(volumePath,pool) called with args (%s, %s) START", volumePath, pool.getUuid()));
AddressInfo address = this.parseAndValidatePath(volumePath);
if (address.getAddress() == null) {
if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDisk(volumePath,pool) returning FALSE, volume path has no address field", volumePath, pool.getUuid()));
return false;
}
ScriptResult result = runScript(disconnectScript, 60000L, address.getAddress().toLowerCase());
if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult());
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumePath,pool) called with args (%s, %s) COMPLETE [rc=%s]", volumePath, pool.getUuid(), result.getResult())); return true;
if (result.getExitCode() != 0) {
LOGGER.warn(String.format("Disconnect failed for path [%s] with return code [%s]", address.getAddress().toLowerCase(), result.getExitCode()));
}
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("multipath flush output: " + result.getResult());
LOGGER.debug(String.format("disconnectPhysicalDisk(volumePath,pool) called with args (%s, %s) COMPLETE [rc=%s]", volumePath, pool.getUuid(), result.getResult()));
}
return (result.getExitCode() == 0);
}
@Override
public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) {
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumeToDisconnect) called with arg bag [not implemented]:") + " " + volumeToDisconnect);
LOGGER.debug(String.format("disconnectPhysicalDisk(volumeToDisconnect) called with arg bag [not implemented]:") + " " + volumeToDisconnect);
return false;
}
@Override
public boolean disconnectPhysicalDiskByPath(String localPath) {
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) STARTED", localPath));
ScriptResult result = runScript(disconnectScript, 60000L, localPath.replace("/dev/mapper/3", ""));
if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult());
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode())); return true;
if (localPath == null) {
return false;
}
if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) START", localPath));
if (localPath.startsWith("/dev/mapper/")) {
String multipathName = localPath.replace("/dev/mapper/3", "");
// this ensures we only disconnect multipath devices supported by this driver
for (String oui: SUPPORTED_OUI_LIST) {
if (multipathName.length() > 1 && multipathName.substring(2).startsWith(oui)) {
ScriptResult result = runScript(disconnectScript, 60000L, multipathName);
if (result.getExitCode() != 0) {
LOGGER.warn(String.format("Disconnect failed for path [%s] with return code [%s]", multipathName, result.getExitCode()));
}
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("multipath flush output: " + result.getResult());
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode()));
}
return (result.getExitCode() == 0);
}
}
}
if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) returning FALSE, volume path is not a multipath volume: %s", localPath));
return false;
}
@Override
public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.ImageFormat format) {
LOGGER.info(String.format("deletePhysicalDisk(uuid,pool,format) called with args (%s, %s, %s) [not implemented]", uuid, pool.getUuid(), format.toString()));
return true;
return false;
}
@Override
@ -276,15 +315,9 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
return true;
}
/**
* Validate inputs and return the source file for a template copy
* @param templateFilePath
* @param destTemplatePath
* @param destPool
* @param format
* @return
*/
File createTemplateFromDirectDownloadFileValidate(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format) {
@Override
public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) {
if (StringUtils.isAnyEmpty(templateFilePath, destTemplatePath) || destPool == null) {
LOGGER.error("Unable to create template from direct download template file due to insufficient data");
throw new CloudRuntimeException("Unable to create template from direct download template file due to insufficient data");
@ -297,57 +330,18 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
throw new CloudRuntimeException("Direct download template file " + templateFilePath + " does not exist on this host");
}
if (destTemplatePath == null || destTemplatePath.isEmpty()) {
LOGGER.error("Failed to create template, target template disk path not provided");
throw new CloudRuntimeException("Target template disk path not provided");
}
if (this.isStoragePoolTypeSupported(destPool.getType())) {
throw new CloudRuntimeException("Unsupported storage pool type: " + destPool.getType().toString());
}
if (Storage.ImageFormat.RAW.equals(format) && Storage.ImageFormat.QCOW2.equals(format)) {
LOGGER.error("Failed to create template, unsupported template format: " + format.toString());
throw new CloudRuntimeException("Unsupported template format: " + format.toString());
}
return sourceFile;
}
String extractSourceTemplateIfNeeded(File sourceFile, String templateFilePath) {
String srcTemplateFilePath = templateFilePath;
if (isTemplateExtractable(templateFilePath)) {
srcTemplateFilePath = sourceFile.getParent() + "/" + UUID.randomUUID().toString();
LOGGER.debug("Extract the downloaded template " + templateFilePath + " to " + srcTemplateFilePath);
String extractCommand = getExtractCommandForDownloadedFile(templateFilePath, srcTemplateFilePath);
Script.runSimpleBashScript(extractCommand);
Script.runSimpleBashScript("rm -f " + templateFilePath);
}
return srcTemplateFilePath;
}
QemuImg.PhysicalDiskFormat deriveImgFileFormat(Storage.ImageFormat format) {
if (format == Storage.ImageFormat.RAW) {
return QemuImg.PhysicalDiskFormat.RAW;
} else if (format == Storage.ImageFormat.QCOW2) {
return QemuImg.PhysicalDiskFormat.QCOW2;
} else {
return QemuImg.PhysicalDiskFormat.RAW;
}
}
@Override
public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) {
File sourceFile = createTemplateFromDirectDownloadFileValidate(templateFilePath, destTemplatePath, destPool, format);
LOGGER.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString());
KVMPhysicalDisk sourceDisk = destPool.getPhysicalDisk(sourceFile.getAbsolutePath());
KVMPhysicalDisk sourceDisk = destPool.getPhysicalDisk(templateFilePath);
return copyPhysicalDisk(sourceDisk, destTemplatePath, destPool, timeout, null, null, Storage.ProvisioningType.THIN);
}
@Override
public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout,
byte[] srcPassphrase, byte[] dstPassphrase, Storage.ProvisioningType provisioningType) {
if (StringUtils.isEmpty(name) || disk == null || destPool == null) {
LOGGER.error("Unable to copy physical disk due to insufficient data");
throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data");
}
validateForDiskCopy(disk, name, destPool);
LOGGER.info("Copying FROM source physical disk " + disk.getPath() + ", size: " + disk.getSize() + ", virtualsize: " + disk.getVirtualSize()+ ", format: " + disk.getFormat());
KVMPhysicalDisk destDisk = destPool.getPhysicalDisk(name);
@ -367,60 +361,34 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
LOGGER.info("Copying TO destination physical disk " + destDisk.getPath() + ", size: " + destDisk.getSize() + ", virtualsize: " + destDisk.getVirtualSize()+ ", format: " + destDisk.getFormat());
QemuImgFile srcFile = new QemuImgFile(disk.getPath(), disk.getFormat());
QemuImgFile destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat());
LOGGER.debug("Starting COPY from source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath());
LOGGER.debug("Starting COPY from source path " + srcFile.getFileName() + " to target volume path: " + destDisk.getPath());
ScriptResult result = runScript(copyScript, timeout, destDisk.getFormat().toString().toLowerCase(), srcFile.getFileName(), destFile.getFileName());
int rc = result.getExitCode();
if (rc != 0) {
throw new CloudRuntimeException("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + rc + " - " + result.getResult());
}
LOGGER.debug("Successfully converted source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath() + " " + result.getResult());
LOGGER.debug("Successfully converted source volume at " + srcFile.getFileName() + " to destination volume: " + destDisk.getPath() + " " + result.getResult());
return destDisk;
}
void validateForDiskCopy(KVMPhysicalDisk disk, String name, KVMStoragePool destPool) {
if (StringUtils.isEmpty(name) || disk == null || destPool == null) {
LOGGER.error("Unable to copy physical disk due to insufficient data");
throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data");
}
}
/**
* Copy a disk path to another disk path using QemuImg command
* @param disk
* @param destDisk
* @param name
* @param timeout
*/
void qemuCopy(KVMPhysicalDisk disk, KVMPhysicalDisk destDisk, String name, int timeout) {
QemuImg qemu;
try {
qemu = new QemuImg(timeout);
} catch (LibvirtException | QemuImgException e) {
throw new CloudRuntimeException (e);
}
QemuImgFile srcFile = null;
QemuImgFile destFile = null;
try {
srcFile = new QemuImgFile(disk.getPath(), disk.getFormat());
destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat());
LOGGER.debug("Starting copy from source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath());
qemu.convert(srcFile, destFile, true);
LOGGER.debug("Successfully converted source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath());
} catch (QemuImgException | LibvirtException e) {
try {
Map<String, String> srcInfo = qemu.info(srcFile);
LOGGER.debug("Source disk info: " + Arrays.asList(srcInfo));
} catch (Exception ignored) {
LOGGER.warn("Unable to get info from source disk: " + disk.getName());
}
String errMsg = String.format("Unable to convert/copy from %s to %s, due to: %s", disk.getName(), name, ((StringUtils.isEmpty(e.getMessage())) ? "an unknown error" : e.getMessage()));
LOGGER.error(errMsg);
throw new CloudRuntimeException(errMsg, e);
private static final ScriptResult runScript(String script, long timeout, String...args) {
ScriptResult result = new ScriptResult();
Script cmd = new Script(script, Duration.millis(timeout), LOGGER);
cmd.add(args);
OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
String output = cmd.execute(parser);
// its possible the process never launches which causes an NPE on getExitValue below
if (output != null && output.contains("Unable to execute the command")) {
result.setResult(output);
result.setExitCode(-1);
return result;
}
result.setResult(output);
result.setExitCode(cmd.getExitValue());
return result;
}
@Override
@ -461,25 +429,9 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
}
}
private static final ScriptResult runScript(String script, long timeout, String...args) {
ScriptResult result = new ScriptResult();
Script cmd = new Script(script, Duration.millis(timeout), LOGGER);
cmd.add(args);
OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
String output = cmd.execute(parser);
// its possible the process never launches which causes an NPE on getExitValue below
if (output != null && output.contains("Unable to execute the command")) {
result.setResult(output);
result.setExitCode(-1);
return result;
}
result.setResult(output);
result.setExitCode(cmd.getExitValue());
return result;
}
boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) {
LOGGER.debug("Waiting for the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid() + " to become available for " + waitTimeInSec + " secs");
long scriptTimeoutSecs = 30; // how long to wait for each script execution to run
long maxTries = 10; // how many max retries to attempt the script
long waitTimeInMillis = waitTimeInSec * 1000; // how long overall to wait
@ -557,40 +509,6 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
return false;
}
void runConnectScript(String lun, AddressInfo address) {
try {
ProcessBuilder builder = new ProcessBuilder(connectScript, lun, address.getAddress());
Process p = builder.start();
int rc = p.waitFor();
StringBuffer output = new StringBuffer();
if (rc == 0) {
BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()));
String line = null;
while ((line = input.readLine()) != null) {
output.append(line);
output.append(" ");
}
} else {
LOGGER.warn("Failure discovering LUN via " + connectScript);
BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream()));
String line = null;
while ((line = error.readLine()) != null) {
LOGGER.warn("error --> " + line);
}
}
} catch (IOException | InterruptedException e) {
throw new CloudRuntimeException("Problem performing scan on SCSI hosts", e);
}
}
void sleep(long sleepTimeMs) {
try {
Thread.sleep(sleepTimeMs);
} catch (Exception ex) {
// don't do anything
}
}
long getPhysicalDiskSize(String diskPath) {
if (StringUtils.isEmpty(diskPath)) {
return 0;

View File

@ -26,19 +26,10 @@ import com.cloud.hypervisor.kvm.resource.wrapper.LibvirtUtilitiesHelper;
import com.cloud.storage.template.TemplateConstants;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import javax.naming.ConfigurationException;
import com.cloud.utils.script.Script;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.utils.qemu.QemuImageOptions;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
@ -59,6 +50,17 @@ import org.mockito.MockitoAnnotations;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;
import javax.naming.ConfigurationException;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
@RunWith(MockitoJUnitRunner.class)
public class KVMStorageProcessorTest {
@ -259,40 +261,48 @@ public class KVMStorageProcessorTest {
}
@Test
public void convertBaseFileToSnapshotFileInPrimaryStorageDirTestFailToConvertWithQemuImgExceptionReturnErrorMessage() throws Exception {
String baseFile = "baseFile";
String snapshotPath = "snapshotPath";
public void convertBaseFileToSnapshotFileInPrimaryStorageDirTestFailToConvertWithQemuImgExceptionReturnErrorMessage() throws QemuImgException {
KVMPhysicalDisk baseFile = Mockito.mock(KVMPhysicalDisk.class);
String errorMessage = "error";
String expectedResult = String.format("Failed to convert %s snapshot of volume [%s] to [%s] due to [%s].", volumeObjectToMock, baseFile, snapshotPath, errorMessage);
KVMStoragePool primaryPoolMock = Mockito.mock(KVMStoragePool.class);
KVMPhysicalDisk baseFileMock = Mockito.mock(KVMPhysicalDisk.class);
VolumeObjectTO volumeMock = Mockito.mock(VolumeObjectTO.class);
QemuImgFile srcFileMock = Mockito.mock(QemuImgFile.class);
QemuImgFile destFileMock = Mockito.mock(QemuImgFile.class);
QemuImg qemuImgMock = Mockito.mock(QemuImg.class);
Mockito.doReturn(true).when(kvmStoragePoolMock).createFolder(Mockito.anyString());
try (MockedConstruction<QemuImg> ignored = Mockito.mockConstruction(QemuImg.class, (mock,context) -> {
Mockito.doThrow(new QemuImgException(errorMessage)).when(mock).convert(Mockito.any(QemuImgFile.class), Mockito.any(QemuImgFile.class));
})) {
String result = storageProcessorSpy.convertBaseFileToSnapshotFileInPrimaryStorageDir(kvmStoragePoolMock, baseFile, snapshotPath, volumeObjectToMock, 1);
Assert.assertEquals(expectedResult, result);
Mockito.when(baseFileMock.getPath()).thenReturn("/path/to/baseFile");
Mockito.when(primaryPoolMock.createFolder(Mockito.anyString())).thenReturn(true);
try (MockedConstruction<Script> scr = Mockito.mockConstruction(Script.class, ((mock, context) -> {
Mockito.doReturn("").when(mock).execute();
}));
MockedConstruction<QemuImg> qemu = Mockito.mockConstruction(QemuImg.class, ((mock, context) -> {
Mockito.lenient().doThrow(new QemuImgException(errorMessage)).when(mock).convert(Mockito.any(QemuImgFile.class), Mockito.any(QemuImgFile.class), Mockito.any(Map.class),
Mockito.any(List.class), Mockito.any(QemuImageOptions.class),Mockito.nullable(String.class), Mockito.any(Boolean.class));
}))) {
String test = storageProcessor.convertBaseFileToSnapshotFileInPrimaryStorageDir(primaryPoolMock, baseFileMock, "/path/to/snapshot", volumeMock, 0);
Assert.assertNotNull(test);
}
}
@Test
public void convertBaseFileToSnapshotFileInPrimaryStorageDirTestFailToConvertWithLibvirtExceptionReturnErrorMessage() throws Exception {
String baseFile = "baseFile";
KVMPhysicalDisk baseFile = Mockito.mock(KVMPhysicalDisk.class);
String snapshotPath = "snapshotPath";
String errorMessage = "null";
String expectedResult = String.format("Failed to convert %s snapshot of volume [%s] to [%s] due to [%s].", volumeObjectToMock, baseFile, snapshotPath, errorMessage);
QemuImg qemuImg = Mockito.mock(QemuImg.class);
Mockito.doReturn(true).when(kvmStoragePoolMock).createFolder(Mockito.anyString());
try (MockedConstruction<QemuImg> ignored = Mockito.mockConstruction(QemuImg.class, (mock,context) -> {
Mockito.doThrow(LibvirtException.class).when(mock).convert(Mockito.any(QemuImgFile.class), Mockito.any(QemuImgFile.class));
try (MockedConstruction<QemuImg> ignored = Mockito.mockConstructionWithAnswer(QemuImg.class, invocation -> {
throw Mockito.mock(LibvirtException.class);
})) {
String result = storageProcessorSpy.convertBaseFileToSnapshotFileInPrimaryStorageDir(kvmStoragePoolMock, baseFile, snapshotPath, volumeObjectToMock, 1);
Assert.assertEquals(expectedResult, result);
Assert.assertNotNull(result);
}
}
@Test
public void convertBaseFileToSnapshotFileInPrimaryStorageDirTestConvertSuccessReturnNull() throws Exception {
String baseFile = "baseFile";
KVMPhysicalDisk baseFile = Mockito.mock(KVMPhysicalDisk.class);
String snapshotPath = "snapshotPath";
Mockito.doReturn(true).when(kvmStoragePoolMock).createFolder(Mockito.anyString());

View File

@ -135,16 +135,13 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager {
}
if (vm == null) {
final int vncPort = 0;
if (vncPort < 0) {
return "Unable to allocate VNC port";
}
vm = new MockVMVO();
}
vm.setCpu(cpuHz);
vm.setMemory(ramSize);
vm.setPowerState(PowerState.PowerOn);
vm.setName(vmName);
vm.setVncPort(vncPort);
vm.setVncPort(0);
vm.setHostId(host.getId());
vm.setBootargs(bootArgs);
if (vmName.startsWith("s-")) {
@ -169,24 +166,6 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager {
txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
txn.close();
}
} else {
if (vm.getPowerState() == PowerState.PowerOff) {
vm.setPowerState(PowerState.PowerOn);
txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB);
try {
txn.start();
_mockVmDao.update(vm.getId(), (MockVMVO)vm);
txn.commit();
} catch (final Exception ex) {
txn.rollback();
throw new CloudRuntimeException("unable to update vm " + vm.getName(), ex);
} finally {
txn.close();
txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
txn.close();
}
}
}
if (vm.getPowerState() == PowerState.PowerOn && vmName.startsWith("s-")) {
String prvIp = null;

View File

@ -560,7 +560,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
@Override
public Pair<String, Long> getSecondaryStorageStoreUrlAndId(long dcId) {
String secUrl = null;
Long secId = null;
DataStore secStore = _dataStoreMgr.getImageStoreWithFreeCapacity(dcId);
@ -570,18 +569,17 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
}
if (secUrl == null) {
// we are using non-NFS image store, then use cache storage instead
logger.info("Secondary storage is not NFS, we need to use staging storage");
logger.info("Secondary storage is either not having free capacity or not NFS, then use cache/staging storage instead");
DataStore cacheStore = _dataStoreMgr.getImageCacheStore(dcId);
if (cacheStore != null) {
secUrl = cacheStore.getUri();
secId = cacheStore.getId();
} else {
logger.warn("No staging storage is found when non-NFS secondary storage is used");
logger.warn("No cache/staging storage found when NFS secondary storage with free capacity not available or non-NFS secondary storage is used");
}
}
return new Pair<String, Long>(secUrl, secId);
return new Pair<>(secUrl, secId);
}
@Override
@ -597,13 +595,12 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
}
if (urlIdList.isEmpty()) {
// we are using non-NFS image store, then use cache storage instead
logger.info("Secondary storage is not NFS, we need to use staging storage");
logger.info("Secondary storage is either not having free capacity or not NFS, then use cache/staging storage instead");
DataStore cacheStore = _dataStoreMgr.getImageCacheStore(dcId);
if (cacheStore != null) {
urlIdList.add(new Pair<>(cacheStore.getUri(), cacheStore.getId()));
} else {
logger.warn("No staging storage is found when non-NFS secondary storage is used");
logger.warn("No cache/staging storage found when NFS secondary storage with free capacity not available or non-NFS secondary storage is used");
}
}

View File

@ -48,6 +48,7 @@ import java.util.stream.Collectors;
import javax.naming.ConfigurationException;
import javax.xml.datatype.XMLGregorianCalendar;
import com.cloud.capacity.CapacityManager;
import com.cloud.hypervisor.vmware.mo.HostDatastoreBrowserMO;
import com.vmware.vim25.FileInfo;
import com.vmware.vim25.FileQueryFlags;
@ -2277,15 +2278,15 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
// attach ISO (for patching of system VM)
Pair<String, Long> secStoreUrlAndId = mgr.getSecondaryStorageStoreUrlAndId(Long.parseLong(_dcId));
String secStoreUrl = secStoreUrlAndId.first();
Long secStoreId = secStoreUrlAndId.second();
if (secStoreUrl == null) {
String msg = "secondary storage for dc " + _dcId + " is not ready yet?";
String msg = String.format("NFS secondary or cache storage of dc %s either doesn't have enough capacity (has reached %d%% usage threshold) or not ready yet, or non-NFS secondary storage is used",
_dcId, Math.round(CapacityManager.SecondaryStorageCapacityThreshold.value() * 100));
throw new Exception(msg);
}
ManagedObjectReference morSecDs = prepareSecondaryDatastoreOnHost(secStoreUrl);
if (morSecDs == null) {
String msg = "Failed to prepare secondary storage on host, secondary store url: " + secStoreUrl;
String msg = "Failed to prepare secondary storage on host, NFS secondary or cache store url: " + secStoreUrl + " in dc "+ _dcId;
throw new Exception(msg);
}
DatastoreMO secDsMo = new DatastoreMO(hyperHost.getContext(), morSecDs);
@ -4613,15 +4614,15 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
List<Pair<String, Long>> secStoreUrlAndIdList = mgr.getSecondaryStorageStoresUrlAndIdList(Long.parseLong(_dcId));
for (Pair<String, Long> secStoreUrlAndId : secStoreUrlAndIdList) {
String secStoreUrl = secStoreUrlAndId.first();
Long secStoreId = secStoreUrlAndId.second();
if (secStoreUrl == null) {
String msg = String.format("Secondary storage for dc %s is not ready yet?", _dcId);
String msg = String.format("NFS secondary or cache storage of dc %s either doesn't have enough capacity (has reached %d%% usage threshold) or not ready yet, or non-NFS secondary storage is used",
_dcId, Math.round(CapacityManager.SecondaryStorageCapacityThreshold.value() * 100));
throw new Exception(msg);
}
ManagedObjectReference morSecDs = prepareSecondaryDatastoreOnHost(secStoreUrl);
if (morSecDs == null) {
String msg = "Failed to prepare secondary storage on host, secondary store url: " + secStoreUrl;
String msg = "Failed to prepare secondary storage on host, NFS secondary or cache store url: " + secStoreUrl + " in dc "+ _dcId;
throw new Exception(msg);
}
}
@ -7343,14 +7344,14 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
VmwareManager mgr = targetHyperHost.getContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME);
Pair<String, Long> secStoreUrlAndId = mgr.getSecondaryStorageStoreUrlAndId(Long.parseLong(_dcId));
String secStoreUrl = secStoreUrlAndId.first();
Long secStoreId = secStoreUrlAndId.second();
if (secStoreUrl == null) {
String msg = "secondary storage for dc " + _dcId + " is not ready yet?";
String msg = String.format("NFS secondary or cache storage of dc %s either doesn't have enough capacity (has reached %d%% usage threshold) or not ready yet, or non-NFS secondary storage is used",
_dcId, Math.round(CapacityManager.SecondaryStorageCapacityThreshold.value() * 100));
throw new Exception(msg);
}
ManagedObjectReference morSecDs = prepareSecondaryDatastoreOnSpecificHost(secStoreUrl, targetHyperHost);
if (morSecDs == null) {
throw new Exception(String.format("Failed to prepare secondary storage on host, secondary store url: %s", secStoreUrl));
throw new Exception(String.format("Failed to prepare secondary storage on host, NFS secondary or cache store url: %s in dc %s", secStoreUrl, _dcId));
}
}

View File

@ -96,6 +96,9 @@ public class KubernetesServiceHelperImpl extends AdapterBase implements Kubernet
KubernetesCluster kubernetesCluster = kubernetesClusterDao.findById(vmMapVO.getClusterId());
String msg = "Instance is a part of a Kubernetes cluster";
if (kubernetesCluster != null) {
if (KubernetesCluster.ClusterType.ExternalManaged.equals(kubernetesCluster.getClusterType())) {
return;
}
msg += String.format(": %s", kubernetesCluster.getName());
}
msg += ". Use Instance delete option from Kubernetes cluster details or scale API for " +

View File

@ -58,14 +58,30 @@ public class KubernetesServiceHelperImplTest {
}
@Test(expected = CloudRuntimeException.class)
public void testCheckVmCanBeDestroyedInCluster() {
public void testCheckVmCanBeDestroyedInCloudManagedCluster() {
UserVm vm = Mockito.mock(UserVm.class);
Mockito.when(vm.getId()).thenReturn(1L);
Mockito.when(vm.getUserVmType()).thenReturn(UserVmManager.CKS_NODE);
KubernetesClusterVmMapVO map = Mockito.mock(KubernetesClusterVmMapVO.class);
Mockito.when(map.getClusterId()).thenReturn(1L);
Mockito.when(kubernetesClusterVmMapDao.findByVmId(1L)).thenReturn(map);
Mockito.when(kubernetesClusterDao.findById(1L)).thenReturn(Mockito.mock(KubernetesClusterVO.class));
KubernetesClusterVO kubernetesCluster = Mockito.mock(KubernetesClusterVO.class);
Mockito.when(kubernetesClusterDao.findById(1L)).thenReturn(kubernetesCluster);
Mockito.when(kubernetesCluster.getClusterType()).thenReturn(KubernetesCluster.ClusterType.CloudManaged);
kubernetesServiceHelper.checkVmCanBeDestroyed(vm);
}
@Test
public void testCheckVmCanBeDestroyedInExternalManagedCluster() {
    // A CKS node that belongs to an externally managed cluster must be destroyable
    // without raising an exception.
    final long clusterId = 1L;
    UserVm userVmMock = Mockito.mock(UserVm.class);
    Mockito.when(userVmMock.getId()).thenReturn(clusterId);
    Mockito.when(userVmMock.getUserVmType()).thenReturn(UserVmManager.CKS_NODE);
    // Map the VM to a cluster and stub the cluster lookup chain.
    KubernetesClusterVmMapVO vmMapMock = Mockito.mock(KubernetesClusterVmMapVO.class);
    Mockito.when(vmMapMock.getClusterId()).thenReturn(clusterId);
    Mockito.when(kubernetesClusterVmMapDao.findByVmId(1L)).thenReturn(vmMapMock);
    KubernetesClusterVO clusterMock = Mockito.mock(KubernetesClusterVO.class);
    Mockito.when(kubernetesClusterDao.findById(1L)).thenReturn(clusterMock);
    Mockito.when(clusterMock.getClusterType()).thenReturn(KubernetesCluster.ClusterType.ExternalManaged);
    // Should return silently for ExternalManaged clusters (no exception expected).
    kubernetesServiceHelper.checkVmCanBeDestroyed(userVmMock);
}
}

View File

@ -104,7 +104,10 @@ public class ShutdownManagerImpl extends ManagerBase implements ShutdownManager,
this.shutdownTask = null;
}
this.shutdownTask = new ShutdownTask(this);
timer.scheduleAtFixedRate(shutdownTask, 0, 30L * 1000);
long period = 30L * 1000;
long delay = period / 2;
logger.debug(String.format("Scheduling shutdown task with delay: %d and period: %d", delay, period));
timer.scheduleAtFixedRate(shutdownTask, delay, period);
}
@Override

View File

@ -56,3 +56,44 @@ This provides instructions of which provider implementation class to load when t
## Build and Deploy the Jar
Once you have built the new jar, start the CloudStack Management Server or, if it is a standalone jar, add it to the classpath before starting. You should then have a new storage provider of the designated name once CloudStack finishes loading
all configured modules.
### Test Cases
The following test cases should be run against configured installations of each storage array in a working Cloudstack installation.
1. Create New Primera Storage Pool for Zone
2. Create New Primera Storage Pool for Cluster
3. Update Primera Storage Pool for Zone
4. Update Primera Storage Pool for Cluster
5. Create VM with Root Disk using Primera pool
6. Create VM with Root and Data Disk using Primera pool
7. Create VM with Root Disk using NFS and Data Disk on Primera pool
8. Create VM with Root Disk on Primera Pool and Data Disk on NFS
9. Snapshot root disk with VM using Primera Pool for root disk
10. Snapshot data disk with VM using Primera Pool for data disk
11. Snapshot VM (non-memory) with root and data disk using Primera pool
12. Snapshot VM (non-memory) with root disk using Primera pool and data disk using NFS
13. Snapshot VM (non-memory) with root disk using NFS pool and data disk using Primera pool
14. Create new template from previous snapshot root disk on Primera pool
15. Create new volume from previous snapshot root disk on Primera pool
16. Create new volume from previous snapshot data disk on Primera pool
17. Create new VM using template created from Primera root snapshot and using Primera as root volume pool
18. Create new VM using template created from Primera root snapshot and using NFS as root volume pool
19. Delete previously created Primera snapshot
20. Delete previously created Primera volume while attached to a running VM (should fail)
21. Delete previously created Primera volume while attached to a VM that is not running (should fail)
22. Detach a Primera volume from a non-running VM (should work)
23. Attach a Primera volume to a running VM (should work)
24. Attach a Primera volume to a non-running VM (should work)
25. Create a 'thin' Disk Offering tagged for Primera pool and provision and attach a data volume to a VM using this offering (ttpv=true, reduce=false)
26. Create a 'sparse' Disk Offering tagged for Primera pool and provision and attach a data volume to a VM using this offering (ttpv=false, reduce=true)
27. Create a 'fat' Disk Offering and tagged for Primera pool and provision and attach a data volume to a VM using this offering (should fail as 'fat' not supported)
28. Perform volume migration of root volume from Primera pool to NFS pool on stopped VM
29. Perform volume migration of root volume from NFS pool to Primera pool on stopped VM
30. Perform volume migration of data volume from Primera pool to NFS pool on stopped VM
31. Perform volume migration of data volume from NFS pool to Primera pool on stopped VM
32. Perform VM data migration for a VM with 1 or more data volumes from all volumes on Primera pool to all volumes on NFS pool
33. Perform VM data migration for a VM with 1 or more data volumes from all volumes on NFS pool to all volumes on Primera pool
34. Perform live migration of a VM with a Primera root disk
35. Perform live migration of a VM with a Primera data disk and NFS root disk
36. Perform live migration of a VM with a Primera root disk and NFS data disk
37. Perform volume migration between 2 Primera pools on the same backend Primera IP address
38. Perform volume migration between 2 Primera pools on different Primera IP address

View File

@ -69,14 +69,14 @@ public interface ProviderAdapter {
* @param request
* @return
*/
public String attach(ProviderAdapterContext context, ProviderAdapterDataObject request);
public String attach(ProviderAdapterContext context, ProviderAdapterDataObject request, String hostname);
/**
* Detach the host from the storage context
* @param context
* @param request
*/
public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request);
public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request, String hostname);
/**
* Delete the provided volume/object
@ -154,4 +154,22 @@ public interface ProviderAdapter {
* @return
*/
public boolean canAccessHost(ProviderAdapterContext context, String hostname);
/**
* Returns true if the provider allows direct attach/connection of snapshots to a host
* @return
*/
public boolean canDirectAttachSnapshot();
/**
* Given a ProviderAdapterDataObject, return a map of connection IDs to connection values. Generally
* this would be used to return a map of hostnames and the VLUN ID for the attachment associated with
* that hostname. If the provider is using a hostgroup/hostset model where the ID is assigned in common
* across all hosts in the group, then the map MUST contain a single entry with host key set as a wildcard
* character (exactly '*').
* @param dataIn
* @return
*/
public Map<String, String> getConnectionIdMap(ProviderAdapterDataObject dataIn);
}

View File

@ -19,6 +19,10 @@ package org.apache.cloudstack.storage.datastore.adapter;
import java.util.Map;
/**
 * Factory contract used to construct {@link ProviderAdapter} instances for a
 * particular storage provider implementation.
 */
public interface ProviderAdapterFactory {
    /**
     * The unique name of the storage provider this factory serves.
     *
     * @return the provider name
     */
    public String getProviderName();

    /**
     * Creates a new adapter instance connected to the given endpoint.
     *
     * @param url the provider management endpoint URL
     * @param details provider-specific configuration key/value pairs
     * @return a new {@link ProviderAdapter}
     */
    public ProviderAdapter create(String url, Map<String, String> details);

    /**
     * Whether this adapter type supports attaching snapshots directly to hosts.
     * NOTE(review): declared as {@code Object} rather than {@code Boolean};
     * callers invoke {@code toString()} on the result — confirm before narrowing
     * the return type, as that would break existing implementers.
     *
     * @return a value whose string form is "true" or "false"
     */
    public Object canDirectAttachSnapshot();
}

View File

@ -21,7 +21,6 @@ public class ProviderVolumeNamer {
private static final String SNAPSHOT_PREFIX = "snap";
private static final String VOLUME_PREFIX = "vol";
private static final String TEMPLATE_PREFIX = "tpl";
/** Simple method to allow sharing storage setup, primarily in lab/testing environment */
private static final String ENV_PREFIX = System.getProperty("adaptive.storage.provider.envIdentifier");
public static String generateObjectName(ProviderAdapterContext context, ProviderAdapterDataObject obj) {

View File

@ -31,6 +31,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
@ -42,6 +43,7 @@ import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterConstants;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterContext;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDataObject;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDiskOffering;
import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory;
import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot;
import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume;
import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStats;
@ -52,10 +54,12 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.provider.AdaptivePrimaryDatastoreAdapterFactoryMap;
import org.apache.cloudstack.storage.image.store.TemplateObject;
import org.apache.cloudstack.storage.snapshot.SnapshotObject;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.storage.volume.VolumeObject;
import org.apache.cloudstack.storage.snapshot.SnapshotObject;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.to.DataObjectType;
@ -72,7 +76,6 @@ import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.ResizeVolumePayload;
import com.cloud.storage.SnapshotVO;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.StoragePool;
import com.cloud.storage.VMTemplateStoragePoolVO;
import com.cloud.storage.VMTemplateVO;
@ -134,6 +137,8 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
DomainDao _domainDao;
@Inject
VolumeService _volumeService;
@Inject
VolumeDataFactory volumeDataFactory;
private AdaptivePrimaryDatastoreAdapterFactoryMap _adapterFactoryMap = null;
@ -143,9 +148,54 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
@Override
public DataTO getTO(DataObject data) {
    // Build a transfer object whose path embeds the current connection info
    // (address plus per-host connection IDs) for the attached hosts; see getPath().
    DataTO to = null;
    if (data.getType() == DataObjectType.VOLUME) {
        // Volumes: wrap and refresh the path with live connection info.
        VolumeObjectTO vto = new VolumeObjectTO((VolumeObject)data);
        vto.setPath(getPath(data));
        to = vto;
    } else if (data.getType() == DataObjectType.TEMPLATE) {
        // Templates stored on this provider get the same path treatment.
        TemplateObjectTO tto = new TemplateObjectTO((TemplateObject)data);
        tto.setPath(getPath(data));
        to = tto;
    } else if (data.getType() == DataObjectType.SNAPSHOT) {
        SnapshotObjectTO sto = new SnapshotObjectTO((SnapshotObject)data);
        sto.setPath(getPath(data));
        to = sto;
    } else {
        // Any other object type falls back to the parent driver's behavior.
        to = super.getTO(data);
    }
    return to;
}
/*
 * For the given data object, return the path with current connection info. If a snapshot
 * object is passed, we will determine if a temporary volume is available for that
 * snapshot object and return that connection info instead.
 */
String getPath(DataObject data) {
    // Resolve the backing storage pool and its provider-specific configuration.
    StoragePoolVO storagePool = _storagePoolDao.findById(data.getDataStore().getId());
    Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
    ProviderAdapter api = getAPI(storagePool, details);

    ProviderAdapterDataObject dataIn = newManagedDataObject(data, storagePool);

    // No external name means the object is not yet associated with the external
    // provider, so there is no path to report.
    if (dataIn.getExternalName() == null) {
        return null;
    }

    ProviderAdapterContext context = newManagedVolumeContext(data);
    // Map of connection IDs per host (presumably hostname -> VLUN ID — confirm
    // against the provider's getConnectionIdMap contract).
    Map<String,String> connIdMap = api.getConnectionIdMap(dataIn);
    ProviderVolume volume = api.getVolume(context, dataIn);

    // Only an object that actually exists on the provider yields a path.
    String finalPath = null;
    if (volume != null) {
        finalPath = generatePathInfo(volume, connIdMap);
    }

    return finalPath;
}
@Override
public DataStoreTO getStoreTO(DataStore store) {
return null;
@ -218,11 +268,8 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
dataIn.setExternalName(volume.getExternalName());
dataIn.setExternalUuid(volume.getExternalUuid());
// add the volume to the host set
String connectionId = api.attach(context, dataIn);
// update the cloudstack metadata about the volume
persistVolumeOrTemplateData(storagePool, details, dataObject, volume, connectionId);
persistVolumeOrTemplateData(storagePool, details, dataObject, volume, null);
result = new CreateCmdResult(dataObject.getUuid(), new Answer(null));
result.setSuccess(true);
@ -289,6 +336,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
ProviderAdapterContext context = newManagedVolumeContext(destdata);
ProviderAdapterDataObject sourceIn = newManagedDataObject(srcdata, storagePool);
ProviderAdapterDataObject destIn = newManagedDataObject(destdata, storagePool);
outVolume = api.copy(context, sourceIn, destIn);
// populate this data - it may be needed later
@ -303,17 +351,9 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
api.resize(context, destIn, destdata.getSize());
}
String connectionId = api.attach(context, destIn);
String finalPath;
// format: type=fiberwwn; address=<address>; connid=<connid>
if (connectionId != null) {
finalPath = String.format("type=%s; address=%s; connid=%s", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase(), connectionId);
} else {
finalPath = String.format("type=%s; address=%s;", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase());
}
persistVolumeData(storagePool, details, destdata, outVolume, connectionId);
// initial volume info does not have connection map yet. That is added when grantAccess is called later.
String finalPath = generatePathInfo(outVolume, null);
persistVolumeData(storagePool, details, destdata, outVolume, null);
logger.info("Copy completed from [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]");
VolumeObjectTO voto = new VolumeObjectTO();
@ -443,6 +483,66 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
}
/**
 * Grants the given host access to the data object by attaching it on the
 * storage provider, then refreshes the persisted path/connection metadata.
 *
 * @param dataObject the volume/template/snapshot to expose
 * @param host the host that should gain access
 * @param dataStore the data store the object resides on
 * @return true on success
 * @throws CloudRuntimeException wrapping any failure from the provider
 */
public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) {
    logger.debug("Granting host " + host.getName() + " access to volume " + dataObject.getUuid());
    try {
        StoragePoolVO storagePool = _storagePoolDao.findById(dataObject.getDataStore().getId());
        Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
        ProviderAdapter api = getAPI(storagePool, details);

        ProviderAdapterContext context = newManagedVolumeContext(dataObject);
        ProviderAdapterDataObject sourceIn = newManagedDataObject(dataObject, storagePool);
        // Attach on the provider side for this specific host.
        api.attach(context, sourceIn, host.getName());

        // Re-read the volume and its per-host connection IDs, then rewrite the
        // persisted path so it reflects the new attachment (informational).
        ProviderVolume vol = api.getVolume(context, sourceIn);
        ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool);
        Map<String,String> connIdMap = api.getConnectionIdMap(dataIn);
        persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap);

        logger.info("Granted host " + host.getName() + " access to volume " + dataObject.getUuid());
        return true;
    } catch (Throwable e) {
        // Wrap everything (including Errors) so the caller sees a single failure type.
        String msg = "Error granting host " + host.getName() + " access to volume " + dataObject.getUuid() + ":" + e.getMessage();
        logger.error(msg);
        throw new CloudRuntimeException(msg, e);
    }
}
/**
 * Revokes the given host's access to the data object by detaching it on the
 * storage provider, then refreshes the persisted path/connection metadata.
 * A null dataObject, host, or dataStore is treated as a no-op.
 *
 * @param dataObject the volume/template/snapshot to detach
 * @param host the host losing access
 * @param dataStore the data store the object resides on
 * @throws CloudRuntimeException wrapping any failure from the provider
 */
public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) {
    // nothing to do if any of the inputs is null
    if (dataObject == null || host == null || dataStore == null) {
        return;
    }

    logger.debug("Revoking access for host " + host.getName() + " to volume " + dataObject.getUuid());
    try {
        StoragePoolVO storagePool = _storagePoolDao.findById(dataObject.getDataStore().getId());
        Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId());
        ProviderAdapter api = getAPI(storagePool, details);

        ProviderAdapterContext context = newManagedVolumeContext(dataObject);
        ProviderAdapterDataObject sourceIn = newManagedDataObject(dataObject, storagePool);
        // Detach on the provider side for this specific host.
        api.detach(context, sourceIn, host.getName());

        // Re-read the volume and its remaining per-host connection IDs, then
        // rewrite the persisted path so it reflects the detachment (informational).
        ProviderVolume vol = api.getVolume(context, sourceIn);
        ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool);
        Map<String,String> connIdMap = api.getConnectionIdMap(dataIn);
        persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap);

        logger.info("Revoked access for host " + host.getName() + " to volume " + dataObject.getUuid());
    } catch (Throwable e) {
        // Wrap everything (including Errors) so the caller sees a single failure type.
        String msg = "Error revoking access for host " + host.getName() + " to volume " + dataObject.getUuid() + ":" + e.getMessage();
        logger.error(msg);
        throw new CloudRuntimeException(msg, e);
    }
}
@Override
public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo,
QualityOfServiceState qualityOfServiceState) {
@ -493,15 +593,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
// add the snapshot to the host group (needed for copying to non-provider storage
// to create templates, etc)
String connectionId = null;
String finalAddress = outSnapshot.getAddress();
if (outSnapshot.canAttachDirectly()) {
connectionId = api.attach(context, inSnapshotDO);
if (connectionId != null) {
finalAddress = finalAddress + "::" + connectionId;
}
}
snapshotTO.setPath(finalAddress);
snapshotTO.setName(outSnapshot.getName());
snapshotTO.setHypervisorType(HypervisorType.KVM);
@ -632,10 +724,12 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString()); // set to false because it causes weird behavior when copying templates to root volumes
mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString());
// indicates the datastore can create temporary volumes for use when copying
// data from a snapshot
mapCapabilities.put("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT", Boolean.TRUE.toString());
ProviderAdapterFactory factory = _adapterFactoryMap.getFactory(this.getProviderName());
if (factory != null) {
mapCapabilities.put("CAN_DIRECT_ATTACH_SNAPSHOT", factory.canDirectAttachSnapshot().toString());
} else {
mapCapabilities.put("CAN_DIRECT_ATTACH_SNAPSHOT", Boolean.FALSE.toString());
}
return mapCapabilities;
}
@ -668,6 +762,11 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
return true;
}
@Override
public boolean requiresAccessForMigration(DataObject dataObject) {
    // Always true for this driver: presumably the target host must be granted
    // access (attach) before data can be migrated — confirm against the
    // driver interface contract.
    return true;
}
/**
 * Returns the name of the storage provider backing this driver instance.
 *
 * @return the configured provider name
 */
public String getProviderName() {
    return providerName;
}
@ -716,8 +815,13 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
object.setType(ProviderAdapterDataObject.Type.VOLUME);
ProviderVolumeStats stats = api.getVolumeStats(context, object);
Long provisionedSizeInBytes = stats.getActualUsedInBytes();
Long allocatedSizeInBytes = stats.getAllocatedInBytes();
Long provisionedSizeInBytes = null;
Long allocatedSizeInBytes = null;
if (stats != null) {
provisionedSizeInBytes = stats.getActualUsedInBytes();
allocatedSizeInBytes = stats.getAllocatedInBytes();
}
if (provisionedSizeInBytes == null || allocatedSizeInBytes == null) {
return null;
}
@ -735,31 +839,19 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
}
void persistVolumeOrTemplateData(StoragePoolVO storagePool, Map<String, String> storagePoolDetails,
DataObject dataObject, ProviderVolume volume, String connectionId) {
DataObject dataObject, ProviderVolume volume, Map<String,String> connIdMap) {
if (dataObject.getType() == DataObjectType.VOLUME) {
persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connectionId);
persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connIdMap);
} else if (dataObject.getType() == DataObjectType.TEMPLATE) {
persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connectionId);
persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connIdMap);
}
}
void persistVolumeData(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject,
ProviderVolume managedVolume, String connectionId) {
ProviderVolume managedVolume, Map<String,String> connIdMap) {
VolumeVO volumeVO = _volumeDao.findById(dataObject.getId());
// if its null check if the storage provider returned one that is already set
if (connectionId == null) {
connectionId = managedVolume.getExternalConnectionId();
}
String finalPath;
// format: type=fiberwwn; address=<address>; connid=<connid>
if (connectionId != null) {
finalPath = String.format("type=%s; address=%s; connid=%s", managedVolume.getAddressType().toString(), managedVolume.getAddress().toLowerCase(), connectionId);
} else {
finalPath = String.format("type=%s; address=%s;", managedVolume.getAddressType().toString(), managedVolume.getAddress().toLowerCase());
}
String finalPath = generatePathInfo(managedVolume, connIdMap);
volumeVO.setPath(finalPath);
volumeVO.setFormat(ImageFormat.RAW);
volumeVO.setPoolId(storagePool.getId());
@ -784,25 +876,31 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
}
void persistTemplateData(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject,
ProviderVolume volume, String connectionId) {
ProviderVolume volume, Map<String,String> connIdMap) {
TemplateInfo templateInfo = (TemplateInfo) dataObject;
VMTemplateStoragePoolVO templatePoolRef = _vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(),
templateInfo.getId(), null);
// template pool ref doesn't have a details object so we'll save:
// 1. external name ==> installPath
// 2. address ==> local download path
if (connectionId == null) {
templatePoolRef.setInstallPath(String.format("type=%s; address=%s", volume.getAddressType().toString(),
volume.getAddress().toLowerCase()));
} else {
templatePoolRef.setInstallPath(String.format("type=%s; address=%s; connid=%s", volume.getAddressType().toString(),
volume.getAddress().toLowerCase(), connectionId));
}
templatePoolRef.setInstallPath(generatePathInfo(volume, connIdMap));
templatePoolRef.setLocalDownloadPath(volume.getExternalName());
templatePoolRef.setTemplateSize(volume.getAllocatedSizeInBytes());
_vmTemplatePoolDao.update(templatePoolRef.getId(), templatePoolRef);
}
/**
 * Builds the path string persisted for a managed volume or template.
 * Format: "type=<addressType>; address=<address>; providerName=<externalName>; providerID=<externalUuid>;"
 * followed by one " connid.<host>=<lun>;" entry per connection in connIdMap.
 *
 * @param volume    provider-side volume whose address info is encoded
 * @param connIdMap optional map of host name -> lun id for every host (or the
 *                  hostset, keyed as "*") the volume is connected to; may be null
 * @return the formatted path string
 */
String generatePathInfo(ProviderVolume volume, Map<String,String> connIdMap) {
    // NOTE(review): the "providerName"/"providerID" keys carry the provider-side
    // external name/uuid — confirm downstream parsers expect these labels.
    StringBuilder finalPath = new StringBuilder(String.format("type=%s; address=%s; providerName=%s; providerID=%s;",
        volume.getAddressType().toString(), volume.getAddress().toLowerCase(), volume.getExternalName(), volume.getExternalUuid()));
    // if a map was provided, append the connection IDs. iterate entrySet (rather
    // than keySet+get) and use StringBuilder to avoid per-iteration allocations.
    if (connIdMap != null && !connIdMap.isEmpty()) {
        for (Map.Entry<String, String> entry : connIdMap.entrySet()) {
            finalPath.append(String.format(" connid.%s=%s;", entry.getKey(), entry.getValue()));
        }
    }
    return finalPath.toString();
}
ProviderAdapterContext newManagedVolumeContext(DataObject obj) {
ProviderAdapterContext ctx = new ProviderAdapterContext();
if (obj instanceof VolumeInfo) {
@ -899,4 +997,8 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
dataIn.setType(ProviderAdapterDataObject.Type.valueOf(data.getType().toString()));
return dataIn;
}
/**
 * Indicates that access must be granted each time a volume is used
 * (e.g. on attach), not just once at creation time.
 */
public boolean volumesRequireGrantAccessWhenUsed() {
    return true;
}
}

View File

@ -190,7 +190,6 @@ public class AdaptiveDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle
parameters.setName(dsName);
parameters.setProviderName(providerName);
parameters.setManaged(true);
parameters.setCapacityBytes(capacityBytes);
parameters.setUsedBytes(0);
parameters.setCapacityIops(capacityIops);
parameters.setHypervisorType(HypervisorType.KVM);
@ -224,7 +223,7 @@ public class AdaptiveDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle
// if we have user-provided capacity bytes, validate they do not exceed the managed storage capacity bytes
ProviderVolumeStorageStats stats = api.getManagedStorageStats();
if (capacityBytes != null && capacityBytes != 0) {
if (capacityBytes != null && capacityBytes != 0 && stats != null) {
if (stats.getCapacityInBytes() > 0) {
if (stats.getCapacityInBytes() < capacityBytes) {
throw new InvalidParameterValueException("Capacity bytes provided exceeds the capacity of the storage endpoint: provided by user: " + capacityBytes + ", storage capacity from storage provider: " + stats.getCapacityInBytes());
@ -234,8 +233,8 @@ public class AdaptiveDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle
}
// if we have no user-provided capacity bytes, use the ones provided by storage
else {
if (stats.getCapacityInBytes() <= 0) {
throw new InvalidParameterValueException("Capacity bytes note available from the storage provider, user provided capacity bytes must be specified");
if (stats == null || stats.getCapacityInBytes() <= 0) {
throw new InvalidParameterValueException("Capacity bytes not available from the storage provider, user provided capacity bytes must be specified");
}
parameters.setCapacityBytes(stats.getCapacityInBytes());
}
@ -384,8 +383,8 @@ public class AdaptiveDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle
* Update the storage pool configuration
*/
@Override
public void updateStoragePool(StoragePool storagePool, Map<String, String> details) {
_adapterFactoryMap.updateAPI(storagePool.getUuid(), storagePool.getStorageProviderName(), details);
public void updateStoragePool(StoragePool storagePool, Map<String, String> newDetails) {
_adapterFactoryMap.updateAPI(storagePool.getUuid(), storagePool.getStorageProviderName(), newDetails);
}
/**

View File

@ -132,4 +132,8 @@ public class AdaptivePrimaryDatastoreAdapterFactoryMap {
logger.debug("Creating new ProviderAdapter object for endpoint: " + providerName + "@" + url);
return api;
}
/**
 * Returns the registered ProviderAdapterFactory for the given provider name,
 * or null when no factory is registered under that name.
 */
public ProviderAdapterFactory getFactory(String providerName) {
    return this.factoryMap.get(providerName);
}
}

View File

@ -55,6 +55,8 @@ public class AdaptivePrimaryHostListener implements HypervisorHostListener {
if (storagePoolHost == null) {
storagePoolHost = new StoragePoolHostVO(poolId, hostId, "");
storagePoolHostDao.persist(storagePoolHost);
} else {
return false;
}
return true;
}

View File

@ -23,9 +23,9 @@ import java.net.URL;
import java.security.KeyManagementException;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.net.ssl.HostnameVerifier;
@ -110,7 +110,8 @@ public class FlashArrayAdapter implements ProviderAdapter {
}
@Override
public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, ProviderAdapterDiskOffering offering, long size) {
public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterDataObject dataObject,
ProviderAdapterDiskOffering offering, long size) {
FlashArrayVolume request = new FlashArrayVolume();
request.setExternalName(
pod + "::" + ProviderVolumeNamer.generateObjectName(context, dataObject));
@ -129,30 +130,50 @@ public class FlashArrayAdapter implements ProviderAdapter {
* cluster (depending on Cloudstack Storage Pool configuration)
*/
@Override
public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) {
public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, String hostname) {
// should not happen but double check for sanity
if (dataObject.getType() == ProviderAdapterDataObject.Type.SNAPSHOT) {
throw new RuntimeException("This storage provider does not support direct attachments of snapshots to hosts");
}
String volumeName = normalizeName(pod, dataObject.getExternalName());
try {
FlashArrayList<FlashArrayConnection> list = POST("/connections?host_group_names=" + hostgroup + "&volume_names=" + volumeName, null, new TypeReference<FlashArrayList<FlashArrayConnection>> () { });
FlashArrayList<FlashArrayConnection> list = null;
FlashArrayHost host = getHost(hostname);
if (host != null) {
list = POST("/connections?host_names=" + host.getName() + "&volume_names=" + volumeName, null,
new TypeReference<FlashArrayList<FlashArrayConnection>>() {
});
}
if (list == null || list.getItems() == null || list.getItems().size() == 0) {
throw new RuntimeException("Volume attach did not return lun information");
}
FlashArrayConnection connection = (FlashArrayConnection)this.getFlashArrayItem(list);
FlashArrayConnection connection = (FlashArrayConnection) this.getFlashArrayItem(list);
if (connection.getLun() == null) {
throw new RuntimeException("Volume attach missing lun field");
}
return ""+connection.getLun();
return "" + connection.getLun();
} catch (Throwable e) {
// the volume is already attached. happens in some scenarios where orchestration creates the volume before copying to it
// the volume is already attached. happens in some scenarios where orchestration
// creates the volume before copying to it
if (e.toString().contains("Connection already exists")) {
FlashArrayList<FlashArrayConnection> list = GET("/connections?volume_names=" + volumeName,
new TypeReference<FlashArrayList<FlashArrayConnection>>() {
});
if (list != null && list.getItems() != null) {
return ""+list.getItems().get(0).getLun();
for (FlashArrayConnection conn : list.getItems()) {
if (conn.getHost() != null && conn.getHost().getName() != null &&
(conn.getHost().getName().equals(hostname) || conn.getHost().getName().equals(hostname.substring(0, hostname.indexOf('.')))) &&
conn.getLun() != null) {
return "" + conn.getLun();
}
}
throw new RuntimeException("Volume lun is not found in existing connection");
} else {
throw new RuntimeException("Volume lun is not found in existing connection");
}
@ -163,23 +184,42 @@ public class FlashArrayAdapter implements ProviderAdapter {
}
@Override
public void detach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) {
public void detach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, String hostname) {
String volumeName = normalizeName(pod, dataObject.getExternalName());
// hostname is always provided by cloudstack, but we will detach from hostgroup
// if this pool is configured to use hostgroup for attachments
if (hostgroup != null) {
DELETE("/connections?host_group_names=" + hostgroup + "&volume_names=" + volumeName);
}
FlashArrayHost host = getHost(hostname);
if (host != null) {
DELETE("/connections?host_names=" + host.getName() + "&volume_names=" + volumeName);
}
}
@Override
public void delete(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) {
// public void deleteVolume(String volumeNamespace, String volumeName) {
// first make sure we are disconnected
removeVlunsAll(context, pod, dataObject.getExternalName());
String fullName = normalizeName(pod, dataObject.getExternalName());
FlashArrayVolume volume = new FlashArrayVolume();
volume.setDestroyed(true);
// rename as we delete so it doesn't conflict if the template or volume is ever recreated
// pure keeps the volume(s) around in a Destroyed bucket for a period of time post delete
String timestamp = new SimpleDateFormat("yyyyMMddHHmmss").format(new java.util.Date());
volume.setExternalName(fullName + "-" + timestamp);
try {
PATCH("/volumes?names=" + fullName, volume, new TypeReference<FlashArrayList<FlashArrayVolume>>() {
});
// now delete it with new name
volume.setDestroyed(true);
PATCH("/volumes?names=" + fullName + "-" + timestamp, volume, new TypeReference<FlashArrayList<FlashArrayVolume>>() {
});
} catch (CloudRuntimeException e) {
if (e.toString().contains("Volume does not exist")) {
return;
@ -206,8 +246,6 @@ public class FlashArrayAdapter implements ProviderAdapter {
return null;
}
populateConnectionId(volume);
return volume;
} catch (Exception e) {
// assume any exception is a not found. Flash returns 400's for most errors
@ -218,7 +256,7 @@ public class FlashArrayAdapter implements ProviderAdapter {
@Override
public ProviderVolume getVolumeByAddress(ProviderAdapterContext context, AddressType addressType, String address) {
// public FlashArrayVolume getVolumeByWwn(String wwn) {
if (address == null ||addressType == null) {
if (address == null || addressType == null) {
throw new RuntimeException("Invalid search criteria provided for getVolumeByAddress");
}
@ -243,13 +281,11 @@ public class FlashArrayAdapter implements ProviderAdapter {
return null;
}
volume = (FlashArrayVolume)this.getFlashArrayItem(list);
volume = (FlashArrayVolume) this.getFlashArrayItem(list);
if (volume != null && volume.getAddress() == null) {
return null;
}
populateConnectionId(volume);
return volume;
} catch (Exception e) {
// assume any exception is a not found. Flash returns 400's for most errors
@ -257,32 +293,6 @@ public class FlashArrayAdapter implements ProviderAdapter {
}
}
private void populateConnectionId(FlashArrayVolume volume) {
// we need to see if there is a connection (lun) associated with this volume.
// note we assume 1 lun for the hostgroup associated with this object
FlashArrayList<FlashArrayConnection> list = null;
try {
list = GET("/connections?volume_names=" + volume.getExternalName(),
new TypeReference<FlashArrayList<FlashArrayConnection>>() {
});
} catch (CloudRuntimeException e) {
// this means there is no attachment associated with this volume on the array
if (e.toString().contains("Bad Request")) {
return;
}
}
if (list != null && list.getItems() != null) {
for (FlashArrayConnection conn: list.getItems()) {
if (conn.getHostGroup() != null && conn.getHostGroup().getName().equals(this.hostgroup)) {
volume.setExternalConnectionId(""+conn.getLun());
break;
}
}
}
}
@Override
public void resize(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, long newSizeInBytes) {
// public void resizeVolume(String volumeNamespace, String volumeName, long
@ -300,7 +310,8 @@ public class FlashArrayAdapter implements ProviderAdapter {
* @return
*/
@Override
public ProviderSnapshot snapshot(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject, ProviderAdapterDataObject targetDataObject) {
public ProviderSnapshot snapshot(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject,
ProviderAdapterDataObject targetDataObject) {
// public FlashArrayVolume snapshotVolume(String volumeNamespace, String
// volumeName, String snapshotName) {
FlashArrayList<FlashArrayVolume> list = POST(
@ -355,11 +366,12 @@ public class FlashArrayAdapter implements ProviderAdapter {
}
@Override
public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject, ProviderAdapterDataObject destDataObject) {
public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject,
ProviderAdapterDataObject destDataObject) {
// private ManagedVolume copy(ManagedVolume sourceVolume, String destNamespace,
// String destName) {
if (sourceDataObject == null || sourceDataObject.getExternalName() == null
||sourceDataObject.getType() == null) {
|| sourceDataObject.getType() == null) {
throw new RuntimeException("Provided volume has no external source information");
}
@ -425,12 +437,6 @@ public class FlashArrayAdapter implements ProviderAdapter {
@Override
public void validate() {
login();
// check if hostgroup and pod from details really exist - we will
// require a distinct configuration object/connection object for each type
if (this.getHostgroup(hostgroup) == null) {
throw new RuntimeException("Hostgroup [" + hostgroup + "] not found in FlashArray at [" + url
+ "], please validate configuration");
}
if (this.getVolumeNamespace(pod) == null) {
throw new RuntimeException(
@ -478,40 +484,36 @@ public class FlashArrayAdapter implements ProviderAdapter {
throw new RuntimeException("Unable to validate host access because a hostname was not provided");
}
List<String> members = getHostgroupMembers(hostgroup);
// check for fqdn and shortname combinations. this assumes there is at least a shortname match in both the storage array and cloudstack
// hostname configuration
String shortname;
if (hostname.indexOf('.') > 0) {
shortname = hostname.substring(0, (hostname.indexOf('.')));
} else {
shortname = hostname;
}
for (String member : members) {
// exact match (short or long names)
if (member.equals(hostname)) {
FlashArrayHost host = getHost(hostname);
if (host != null) {
return true;
}
// primera has short name and cloudstack had long name
if (member.equals(shortname)) {
return true;
}
// member has long name but cloudstack had shortname
if (member.indexOf('.') > 0) {
if (member.substring(0, (member.indexOf('.'))).equals(shortname)) {
return true;
}
}
}
return false;
}
/**
 * Looks up a host entry on the FlashArray by name.  Tries the name exactly as
 * given first; if that returns nothing (or the call throws), retries with the
 * short name (portion before the first '.') to tolerate FQDN vs short-name
 * mismatches between CloudStack and the array configuration.
 *
 * @param hostname CloudStack host name (may be an FQDN)
 * @return the matching FlashArrayHost, or null if not found under either name
 */
private FlashArrayHost getHost(String hostname) {
    FlashArrayList<FlashArrayHost> list = null;
    try {
        list = GET("/hosts?names=" + hostname,
            new TypeReference<FlashArrayList<FlashArrayHost>>() {
            });
    } catch (Exception e) {
        // intentionally swallowed: a failed exact-name lookup falls through to
        // the short-name retry below.  NOTE(review): consider trace-logging e.
    }
    if (list == null) {
        if (hostname.indexOf('.') > 0) {
            // retry with short hostname
            list = GET("/hosts?names=" + hostname.substring(0, (hostname.indexOf('.'))),
                new TypeReference<FlashArrayList<FlashArrayHost>>() {
                });
        }
    }
    return (FlashArrayHost) getFlashArrayItem(list);
}
private String getAccessToken() {
refreshSession(false);
return accessToken;
}
@ -528,13 +530,21 @@ public class FlashArrayAdapter implements ProviderAdapter {
}
} catch (Exception e) {
// retry frequently but not every request to avoid DDOS on storage API
logger.warn("Failed to refresh FlashArray API key for " + username + "@" + url + ", will retry in 5 seconds",
logger.warn(
"Failed to refresh FlashArray API key for " + username + "@" + url + ", will retry in 5 seconds",
e);
keyExpiration = System.currentTimeMillis() + (5 * 1000);
}
}
private void validateLoginInfo(String urlStr) {
/**
* Login to the array and get an access token
*/
private void login() {
username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY);
password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY);
String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY);
URL urlFull;
try {
urlFull = new URL(urlStr);
@ -572,15 +582,6 @@ public class FlashArrayAdapter implements ProviderAdapter {
}
}
hostgroup = connectionDetails.get(FlashArrayAdapter.HOSTGROUP);
if (hostgroup == null) {
hostgroup = queryParms.get(FlashArrayAdapter.HOSTGROUP);
if (hostgroup == null) {
throw new RuntimeException(
FlashArrayAdapter.STORAGE_POD + " paramater/option required to configure this storage pool");
}
}
apiLoginVersion = connectionDetails.get(FlashArrayAdapter.API_LOGIN_VERSION);
if (apiLoginVersion == null) {
apiLoginVersion = queryParms.get(FlashArrayAdapter.API_LOGIN_VERSION);
@ -597,6 +598,12 @@ public class FlashArrayAdapter implements ProviderAdapter {
}
}
// retrieve for legacy purposes. if set, we'll remove any connections to hostgroup we find and use the host
hostgroup = connectionDetails.get(FlashArrayAdapter.HOSTGROUP);
if (hostgroup == null) {
hostgroup = queryParms.get(FlashArrayAdapter.HOSTGROUP);
}
String connTimeoutStr = connectionDetails.get(FlashArrayAdapter.CONNECT_TIMEOUT_MS);
if (connTimeoutStr == null) {
connTimeoutStr = queryParms.get(FlashArrayAdapter.CONNECT_TIMEOUT_MS);
@ -652,16 +659,7 @@ public class FlashArrayAdapter implements ProviderAdapter {
} else {
skipTlsValidation = true;
}
}
/**
* Login to the array and get an access token
*/
private void login() {
username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY);
password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY);
String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY);
validateLoginInfo(urlStr);
CloseableHttpResponse response = null;
try {
HttpPost request = new HttpPost(url + "/" + apiLoginVersion + "/auth/apitoken");
@ -750,7 +748,13 @@ public class FlashArrayAdapter implements ProviderAdapter {
if (list != null && list.getItems() != null) {
for (FlashArrayConnection conn : list.getItems()) {
DELETE("/connections?host_group_names=" + conn.getHostGroup().getName() + "&volume_names=" + volumeName);
if (hostgroup != null && conn.getHostGroup() != null && conn.getHostGroup().getName() != null) {
DELETE("/connections?host_group_names=" + conn.getHostGroup().getName() + "&volume_names="
+ volumeName);
break;
} else if (conn.getHost() != null && conn.getHost().getName() != null) {
DELETE("/connections?host_names=" + conn.getHost().getName() + "&volume_names=" + volumeName);
}
}
}
}
@ -763,32 +767,12 @@ public class FlashArrayAdapter implements ProviderAdapter {
}
private FlashArrayPod getVolumeNamespace(String name) {
FlashArrayList<FlashArrayPod> list = GET("/pods?names=" + name, new TypeReference<FlashArrayList<FlashArrayPod>>() {
FlashArrayList<FlashArrayPod> list = GET("/pods?names=" + name,
new TypeReference<FlashArrayList<FlashArrayPod>>() {
});
return (FlashArrayPod) getFlashArrayItem(list);
}
private FlashArrayHostgroup getHostgroup(String name) {
FlashArrayList<FlashArrayHostgroup> list = GET("/host-groups?name=" + name,
new TypeReference<FlashArrayList<FlashArrayHostgroup>>() {
});
return (FlashArrayHostgroup) getFlashArrayItem(list);
}
private List<String> getHostgroupMembers(String groupname) {
FlashArrayGroupMemberReferenceList list = GET("/hosts/host-groups?group_names=" + groupname,
new TypeReference<FlashArrayGroupMemberReferenceList>() {
});
if (list == null || list.getItems().size() == 0) {
return null;
}
List<String> hostnames = new ArrayList<String>();
for (FlashArrayGroupMemberReference ref : list.getItems()) {
hostnames.add(ref.getMember().getName());
}
return hostnames;
}
private FlashArrayVolume getSnapshot(String snapshotName) {
FlashArrayList<FlashArrayVolume> list = GET("/volume-snapshots?names=" + snapshotName,
new TypeReference<FlashArrayList<FlashArrayVolume>>() {
@ -857,7 +841,8 @@ public class FlashArrayAdapter implements ProviderAdapter {
}
return null;
} catch (UnsupportedOperationException | IOException e) {
throw new CloudRuntimeException("Error processing response from FlashArray [" + url + path + "]", e);
throw new CloudRuntimeException("Error processing response from FlashArray [" + url + path + "]",
e);
}
} else if (statusCode == 400) {
try {
@ -1084,4 +1069,39 @@ public class FlashArrayAdapter implements ProviderAdapter {
}
return sizeInBytes;
}
/**
 * Returns a map of host name -> LUN id for each current connection of the
 * given volume on the array.  Snapshots always yield an empty map because
 * FlashArray does not support mapping a snapshot directly to a host.  Lookup
 * errors are treated as "no connections" and also yield an empty map.
 *
 * @param dataIn volume (or snapshot) to query connections for
 * @return host-to-lun map; never null, possibly empty
 */
@Override
public Map<String, String> getConnectionIdMap(ProviderAdapterDataObject dataIn) {
    Map<String, String> map = new HashMap<String, String>();
    // flasharray doesn't let you directly map a snapshot to a host, so we'll just return an empty map
    if (dataIn.getType() == ProviderAdapterDataObject.Type.SNAPSHOT) {
        return map;
    }
    try {
        FlashArrayList<FlashArrayConnection> list = GET("/connections?volume_names=" + dataIn.getExternalName(),
            new TypeReference<FlashArrayList<FlashArrayConnection>>() {
            });
        if (list != null && list.getItems() != null) {
            for (FlashArrayConnection conn : list.getItems()) {
                // only connections tied to a specific host carry a usable lun mapping
                if (conn.getHost() != null) {
                    map.put(conn.getHost().getName(), "" + conn.getLun());
                }
            }
        }
    } catch (Exception e) {
        // flasharray returns a 400 if the volume doesn't exist, so we'll just return an empty object.
        if (logger.isTraceEnabled()) {
            logger.trace("Error getting connection map for volume [" + dataIn.getExternalName() + "]: " + e.toString(), e);
        }
    }
    return map;
}
/**
 * FlashArray snapshots cannot be attached directly to a host; a temporary
 * volume must be created from the snapshot instead.
 */
@Override
public boolean canDirectAttachSnapshot() {
    return false;
}
}

View File

@ -33,4 +33,9 @@ public class FlashArrayAdapterFactory implements ProviderAdapterFactory {
return new FlashArrayAdapter(url, details);
}
/**
 * FlashArray snapshots cannot be attached directly to a host.
 * NOTE(review): the factory interface declares an Object return here and this
 * relies on autoboxing to Boolean — confirm callers only use toString()/equals.
 */
@Override
public Object canDirectAttachSnapshot() {
    return false;
}
}

View File

@ -0,0 +1,46 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.flasharray;
import java.util.List;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayHost {

    /** Host name as registered on the FlashArray. */
    @JsonProperty("name")
    private String name;

    /** WWNs configured for this host entry. */
    @JsonProperty("wwns")
    private List<String> wwns;

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public List<String> getWwns() {
        return wwns;
    }

    public void setWwns(List<String> wwns) {
        this.wwns = wwns;
    }
}

View File

@ -83,7 +83,7 @@ public class FlashArrayVolume implements ProviderSnapshot {
@JsonIgnore
public String getPodName() {
if (pod != null) {
return pod.getName();
return pod.name;
} else {
return null;
}
@ -129,7 +129,7 @@ public class FlashArrayVolume implements ProviderSnapshot {
}
public void setPodName(String podname) {
FlashArrayVolumePod pod = new FlashArrayVolumePod();
pod.setName(podname);
pod.name = podname;
this.pod = pod;
}
@Override

View File

@ -24,20 +24,7 @@ import com.fasterxml.jackson.annotation.JsonProperty;
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FlashArrayVolumePod {
@JsonProperty("id")
private String id;
public String id;
@JsonProperty("name")
private String name;
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String name;
}

View File

@ -24,7 +24,6 @@ import java.security.KeyManagementException;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.net.ssl.HostnameVerifier;
@ -74,7 +73,7 @@ public class PrimeraAdapter implements ProviderAdapter {
public static final String TASK_WAIT_TIMEOUT_MS = "taskWaitTimeoutMs";
private static final long KEY_TTL_DEFAULT = (1000 * 60 * 14);
private static final long CONNECT_TIMEOUT_MS_DEFAULT = 600000;
private static final long CONNECT_TIMEOUT_MS_DEFAULT = 60 * 1000;
private static final long TASK_WAIT_TIMEOUT_MS_DEFAULT = 10 * 60 * 1000;
public static final long BYTES_IN_MiB = 1048576;
@ -107,18 +106,11 @@ public class PrimeraAdapter implements ProviderAdapter {
this.refreshSession(true);
}
/**
* Validate that the hostgroup and pod from the details data exists. Each
* configuration object/connection needs a distinct set of these 2 things.
*/
@Override
public void validate() {
login();
if (this.getHostset(hostset) == null) {
throw new RuntimeException("Hostgroup [" + hostset + "] not found in FlashArray at [" + url
+ "], please validate configuration");
}
// check if hostgroup and pod from details really exist - we will
// require a distinct configuration object/connection object for each type
if (this.getCpg(cpg) == null) {
throw new RuntimeException(
"Pod [" + cpg + "] not found in FlashArray at [" + url + "], please validate configuration");
@ -127,6 +119,15 @@ public class PrimeraAdapter implements ProviderAdapter {
@Override
public void disconnect() {
logger.info("PrimeraAdapter:disconnect(): closing session");
try {
_client.close();
} catch (IOException e) {
logger.warn("PrimeraAdapter:refreshSession(): Error closing client connection", e);
} finally {
_client = null;
keyExpiration = -1;
}
return;
}
@ -177,10 +178,15 @@ public class PrimeraAdapter implements ProviderAdapter {
}
@Override
public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataIn) {
public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataIn, String hostname) {
assert dataIn.getExternalName() != null : "External name not provided internally on volume attach";
PrimeraHostset.PrimeraHostsetVLUNRequest request = new PrimeraHostset.PrimeraHostsetVLUNRequest();
request.setHostname("set:" + hostset);
PrimeraHost host = getHost(hostname);
if (host == null) {
throw new RuntimeException("Unable to find host " + hostname + " on storage provider");
}
request.setHostname(host.getName());
request.setVolumeName(dataIn.getExternalName());
request.setAutoLun(true);
// auto-lun returned here: Location: /api/v1/vluns/test_vv02,252,mysystem,2:2:4
@ -195,12 +201,36 @@ public class PrimeraAdapter implements ProviderAdapter {
return toks[1];
}
@Override
/**
* This detaches ALL vlun's for the provided volume name IF they are associated to this hostset
* @param context
* @param request
*/
public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request) {
detach(context, request, null);
}
@Override
public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request, String hostname) {
// we expect to only be attaching one hostset to the vluns, so on detach we'll
// remove ALL vluns we find.
assert request.getExternalName() != null : "External name not provided internally on volume detach";
removeAllVluns(request.getExternalName());
PrimeraVlunList list = getVluns(request.getExternalName());
if (list != null && list.getMembers().size() > 0) {
list.getMembers().forEach(vlun -> {
// remove any hostset from old code if configured
if (hostset != null && vlun.getHostname() != null && vlun.getHostname().equals("set:" + hostset)) {
removeVlun(request.getExternalName(), vlun.getLun(), vlun.getHostname());
}
if (hostname != null) {
if (vlun.getHostname().equals(hostname) || vlun.getHostname().equals(hostname.split("\\.")[0])) {
removeVlun(request.getExternalName(), vlun.getLun(), vlun.getHostname());
}
}
});
}
}
public void removeVlun(String name, Integer lunid, String hostString) {
@ -209,20 +239,7 @@ public class PrimeraAdapter implements ProviderAdapter {
DELETE("/vluns/" + name + "," + lunid + "," + hostString);
}
/**
* Removes all vluns - this should only be done when you are sure the volume is no longer in use
* @param name
*/
public void removeAllVluns(String name) {
PrimeraVlunList list = getVolumeHostsets(name);
if (list != null && list.getMembers() != null) {
for (PrimeraVlun vlun: list.getMembers()) {
removeVlun(vlun.getVolumeName(), vlun.getLun(), vlun.getHostname());
}
}
}
public PrimeraVlunList getVolumeHostsets(String name) {
/**
 * Lists all VLUNs (host/volume mappings) for the given volume name.  The query
 * string is a pre-encoded WSAPI filter: "volumeName EQ <name>" (%22 = quote,
 * %20 = space).  NOTE(review): the volume name itself is not URL-encoded —
 * assumes generated external names contain no reserved characters; confirm.
 */
public PrimeraVlunList getVluns(String name) {
    String query = "%22volumeName%20EQ%20" + name + "%22";
    return GET("/vluns?query=" + query, new TypeReference<PrimeraVlunList>() {});
}
@ -232,7 +249,7 @@ public class PrimeraAdapter implements ProviderAdapter {
assert request.getExternalName() != null : "External name not provided internally on volume delete";
// first remove vluns (take volumes from vluns) from hostset
removeAllVluns(request.getExternalName());
detach(context, request);
DELETE("/volumes/" + request.getExternalName());
}
@ -421,6 +438,7 @@ public class PrimeraAdapter implements ProviderAdapter {
if (cpgobj == null || cpgobj.getTotalSpaceMiB() == 0) {
return null;
}
Long capacityBytes = 0L;
if (cpgobj.getsDGrowth() != null) {
capacityBytes = cpgobj.getsDGrowth().getLimitMiB() * PrimeraAdapter.BYTES_IN_MiB;
@ -454,73 +472,59 @@ public class PrimeraAdapter implements ProviderAdapter {
@Override
public boolean canAccessHost(ProviderAdapterContext context, String hostname) {
PrimeraHostset hostset = getHostset(this.hostset);
List<String> members = hostset.getSetmembers();
// check for fqdn and shortname combinations. this assumes there is at least a shortname match in both the storage array and cloudstack
// hostname configuration
String shortname;
if (hostname.indexOf('.') > 0) {
shortname = hostname.substring(0, (hostname.indexOf('.')));
} else {
shortname = hostname;
}
for (String member: members) {
// exact match (short or long names)
if (member.equals(hostname)) {
// check that the array has the host configured
PrimeraHost host = this.getHost(hostname);
if (host != null) {
// if hostset is configured we'll additionally check if the host is in it (legacy/original behavior)
return true;
}
// primera has short name and cloudstack had long name
if (member.equals(shortname)) {
return true;
}
// member has long name but cloudstack had shortname
int index = member.indexOf(".");
if (index > 0) {
if (member.substring(0, (member.indexOf('.'))).equals(shortname)) {
return true;
}
}
}
return false;
}
/**
 * Fetches a host record by name from the array. When the exact name is not
 * found and the name looks fully-qualified, retries once using the short
 * (pre-dot) form of the name.
 *
 * @param name host name, short or fully-qualified
 * @return the host, or null when neither name form is known to the array
 */
private PrimeraHost getHost(String name) {
    PrimeraHost host = GET("/hosts/" + name, new TypeReference<PrimeraHost>() { });
    if (host != null) {
        return host;
    }
    int dotIndex = name.indexOf('.');
    if (dotIndex > 0) {
        return getHost(name.substring(0, dotIndex));
    }
    return null;
}
/**
 * Fetches a CPG (common provisioning group) record by name from the array.
 *
 * @param name the CPG name
 * @return the CPG object, or null when the GET returns no body
 */
private PrimeraCpg getCpg(String name) {
    return GET("/cpgs/" + name, new TypeReference<PrimeraCpg>() {
    });
}
/**
 * Fetches a hostset record by name from the array.
 *
 * @param name the hostset name
 * @return the hostset object, or null when the GET returns no body
 */
private PrimeraHostset getHostset(String name) {
    return GET("/hostsets/" + name, new TypeReference<PrimeraHostset>() {
    });
}
/**
 * Returns the current WSAPI session key, refreshing the session first if it
 * is close to (or past) expiration.
 */
private String getSessionKey() {
    refreshSession(false);
    return key;
}
private synchronized void refreshSession(boolean force) {
private synchronized String refreshSession(boolean force) {
try {
if (force || keyExpiration < System.currentTimeMillis()) {
if (force || keyExpiration < (System.currentTimeMillis()-15000)) {
// close client to force connection reset on appliance -- not doing this can result in NotAuthorized error...guessing
_client.close();;
_client = null;
disconnect();
login();
keyExpiration = System.currentTimeMillis() + keyTtl;
logger.debug("PrimeraAdapter:refreshSession(): session created or refreshed with key=" + key + ", expiration=" + keyExpiration);
} else {
if (logger.isTraceEnabled()) {
logger.trace("PrimeraAdapter:refreshSession(): using existing session key=" + key + ", expiration=" + keyExpiration);
}
}
} catch (Exception e) {
// retry frequently but not every request to avoid DDOS on storage API
logger.warn("Failed to refresh Primera API key for " + username + "@" + url + ", will retry in 5 seconds", e);
keyExpiration = System.currentTimeMillis() + (5*1000);
}
return key;
}
/**
* Login to the array and get an access token
*/
private void login() {
username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY);
password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY);
String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY);
private void validateLoginInfo(String urlStr) {
URL urlFull;
try {
urlFull = new URL(urlStr);
@ -554,7 +558,7 @@ public class PrimeraAdapter implements ProviderAdapter {
cpg = queryParms.get(PrimeraAdapter.CPG);
if (cpg == null) {
throw new RuntimeException(
PrimeraAdapter.CPG + " paramater/option required to configure this storage pool");
PrimeraAdapter.CPG + " parameter/option required to configure this storage pool");
}
}
@ -567,13 +571,10 @@ public class PrimeraAdapter implements ProviderAdapter {
}
}
// if this is null, we will use direct-to-host vlunids (preferred)
hostset = connectionDetails.get(PrimeraAdapter.HOSTSET);
if (hostset == null) {
hostset = queryParms.get(PrimeraAdapter.HOSTSET);
if (hostset == null) {
throw new RuntimeException(
PrimeraAdapter.HOSTSET + " paramater/option required to configure this storage pool");
}
}
String connTimeoutStr = connectionDetails.get(PrimeraAdapter.CONNECT_TIMEOUT_MS);
@ -630,16 +631,7 @@ public class PrimeraAdapter implements ProviderAdapter {
} else {
skipTlsValidation = true;
}
}
/**
* Login to the array and get an access token
*/
private void login() {
username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY);
password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY);
String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY);
validateLoginInfo(urlStr);
CloseableHttpResponse response = null;
try {
HttpPost request = new HttpPost(url + "/credentials");
@ -653,6 +645,9 @@ public class PrimeraAdapter implements ProviderAdapter {
if (statusCode == 200 | statusCode == 201) {
PrimeraKey keyobj = mapper.readValue(response.getEntity().getContent(), PrimeraKey.class);
key = keyobj.getKey();
// Set the key expiration to x minutes from now
this.keyExpiration = System.currentTimeMillis() + keyTtl;
logger.info("PrimeraAdapter:login(): successful, new session: New key=" + key + ", expiration=" + this.keyExpiration);
} else if (statusCode == 401 || statusCode == 403) {
throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username
+ "] failed, unable to retrieve session token");
@ -713,15 +708,15 @@ public class PrimeraAdapter implements ProviderAdapter {
private <T> T POST(String path, Object input, final TypeReference<T> type) {
CloseableHttpResponse response = null;
try {
this.refreshSession(false);
String session_key = this.refreshSession(false);
HttpPost request = new HttpPost(url + path);
request.addHeader("Content-Type", "application/json");
request.addHeader("Accept", "application/json");
request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey());
request.addHeader("X-HP3PAR-WSAPI-SessionKey", session_key);
try {
String data = mapper.writeValueAsString(input);
request.setEntity(new StringEntity(data));
logger.debug("POST data: " + request.getEntity());
if (logger.isTraceEnabled()) logger.trace("POST data: " + request.getEntity());
} catch (UnsupportedEncodingException | JsonProcessingException e) {
throw new RuntimeException(
"Error processing request payload to [" + url + "] for path [" + path + "]", e);
@ -798,10 +793,11 @@ public class PrimeraAdapter implements ProviderAdapter {
CloseableHttpResponse response = null;
try {
this.refreshSession(false);
String session_key = this.refreshSession(false);
HttpPut request = new HttpPut(url + path);
request.addHeader("Content-Type", "application/json");
request.addHeader("Accept", "application/json");
request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey());
request.addHeader("X-HP3PAR-WSAPI-SessionKey", session_key);
String data = mapper.writeValueAsString(input);
request.setEntity(new StringEntity(data));
@ -851,10 +847,11 @@ public class PrimeraAdapter implements ProviderAdapter {
CloseableHttpResponse response = null;
try {
this.refreshSession(false);
String session_key = this.refreshSession(false);
HttpGet request = new HttpGet(url + path);
request.addHeader("Content-Type", "application/json");
request.addHeader("Accept", "application/json");
request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey());
request.addHeader("X-HP3PAR-WSAPI-SessionKey", session_key);
CloseableHttpClient client = getClient();
response = (CloseableHttpResponse) client.execute(request);
@ -893,10 +890,11 @@ public class PrimeraAdapter implements ProviderAdapter {
CloseableHttpResponse response = null;
try {
this.refreshSession(false);
String session_key = this.refreshSession(false);
HttpDelete request = new HttpDelete(url + path);
request.addHeader("Content-Type", "application/json");
request.addHeader("Accept", "application/json");
request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey());
request.addHeader("X-HP3PAR-WSAPI-SessionKey", session_key);
CloseableHttpClient client = getClient();
response = (CloseableHttpResponse) client.execute(request);
@ -927,5 +925,22 @@ public class PrimeraAdapter implements ProviderAdapter {
}
}
/**
 * Builds a hostname -&gt; LUN-id map for every VLUN currently exporting the
 * given volume. Returns an empty map when the volume has no exports.
 *
 * @param dataIn the volume whose external (array-side) name is queried
 * @return map of host name to LUN id rendered as a decimal string
 */
@Override
public Map<String, String> getConnectionIdMap(ProviderAdapterDataObject dataIn) {
    Map<String, String> connectionIds = new HashMap<>();
    PrimeraVlunList vluns = getVluns(dataIn.getExternalName());
    if (vluns == null || vluns.getMembers() == null) {
        return connectionIds;
    }
    for (PrimeraVlun vlun : vluns.getMembers()) {
        connectionIds.put(vlun.getHostname(), String.valueOf(vlun.getLun()));
    }
    return connectionIds;
}
/**
 * Always reports true: this adapter allows snapshots to be attached
 * directly, without an intermediate volume copy.
 */
@Override
public boolean canDirectAttachSnapshot() {
    return true;
}
}

View File

@ -33,4 +33,9 @@ public class PrimeraAdapterFactory implements ProviderAdapterFactory {
return new PrimeraAdapter(url, details);
}
/**
 * Always reports true: snapshots may be attached directly.
 *
 * NOTE(review): this override returns {@code Object} while the adapter's
 * own canDirectAttachSnapshot() returns {@code boolean} -- confirm the
 * ProviderAdapterFactory interface really declares {@code Object} here, or
 * tighten the return type to {@code boolean} for consistency.
 */
@Override
public Object canDirectAttachSnapshot() {
    return true;
}
}

View File

@ -0,0 +1,56 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import java.util.List;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
// Jackson DTO for a WSAPI "host" record (e.g. GET /hosts/{name}).
// Property names must match the WSAPI JSON fields exactly -- do not rename.
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraHost {
    // array-assigned numeric host id
    private Integer id;
    // host name as registered on the array
    private String name;
    // FC initiator paths registered for this host
    private List<PrimeraPort> fcPaths;
    // optional descriptor block (IP address, OS)
    private PrimeraHostDescriptor descriptors;
    public Integer getId() {
        return id;
    }
    public void setId(Integer id) {
        this.id = id;
    }
    public String getName() {
        return name;
    }
    public void setName(String name) {
        this.name = name;
    }
    public List<PrimeraPort> getFcPaths() {
        return fcPaths;
    }
    public void setFcPaths(List<PrimeraPort> fcPaths) {
        this.fcPaths = fcPaths;
    }
    public PrimeraHostDescriptor getDescriptors() {
        return descriptors;
    }
    public void setDescriptors(PrimeraHostDescriptor descriptors) {
        this.descriptors = descriptors;
    }
}

View File

@ -0,0 +1,40 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
// Jackson DTO for the "descriptors" sub-object of a WSAPI host record.
// Field names (including the capitalized IPAddr) mirror the WSAPI JSON --
// do not rename.
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraHostDescriptor {
    // host management IP address as recorded on the array
    private String IPAddr = null;
    // operating system string as recorded on the array
    private String os = null;
    public String getIPAddr() {
        return IPAddr;
    }
    public void setIPAddr(String iPAddr) {
        IPAddr = iPAddr;
    }
    public String getOs() {
        return os;
    }
    public void setOs(String os) {
        this.os = os;
    }
}

View File

@ -34,105 +34,115 @@ public class PrimeraHostset {
private String uuid;
private Map<String, Object> additionalProperties = new LinkedHashMap<String, Object>();
public String getComment() {
return comment;
}
public void setComment(String comment) {
this.comment = comment;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public List<String> getSetmembers() {
return setmembers;
}
public void setSetmembers(List<String> setmembers) {
this.setmembers = setmembers;
}
public String getUuid() {
return uuid;
}
public void setUuid(String uuid) {
this.uuid = uuid;
}
public Map<String, Object> getAdditionalProperties() {
return additionalProperties;
}
public void setAdditionalProperties(Map<String, Object> additionalProperties) {
this.additionalProperties = additionalProperties;
}
// adds members to a hostset
public static class PrimeraHostsetVLUNRequest {
private String volumeName;
private Boolean autoLun = true;
private Integer lun = 0;
private Integer maxAutoLun = 0;
/**
* This can be a single hostname OR the set of hosts in the format
* "set:<hostset>".
* For the purposes of this driver, its expected that the predominate usecase is
* to use
* a hostset that is aligned with a CloudStack Cluster.
*/
// hostset format: "set:<hostset>"
private String hostname;
public String getVolumeName() {
return volumeName;
}
public void setVolumeName(String volumeName) {
this.volumeName = volumeName;
}
public Boolean getAutoLun() {
return autoLun;
}
public void setAutoLun(Boolean autoLun) {
this.autoLun = autoLun;
}
public Integer getLun() {
return lun;
}
public void setLun(Integer lun) {
this.lun = lun;
}
public Integer getMaxAutoLun() {
return maxAutoLun;
}
public void setMaxAutoLun(Integer maxAutoLun) {
this.maxAutoLun = maxAutoLun;
}
public String getHostname() {
return hostname;
}
public void setHostname(String hostname) {
this.hostname = hostname;
}

View File

@ -0,0 +1,40 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
// Jackson DTO for one FC path entry of a WSAPI host record.
// Property names must match the WSAPI JSON fields exactly.
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraPort {
    // initiator world-wide name
    private String wwn;
    // physical port position (node/slot/cardPort) on the array
    private PrimeraPortPos portPos;
    public String getWwn() {
        return wwn;
    }
    public void setWwn(String wwn) {
        this.wwn = wwn;
    }
    public PrimeraPortPos getPortPos() {
        return portPos;
    }
    public void setPortPos(PrimeraPortPos portPos) {
        this.portPos = portPos;
    }
}

View File

@ -0,0 +1,47 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.adapter.primera;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
// Jackson DTO for a WSAPI port position (node/slot/cardPort triple).
// Property names must match the WSAPI JSON fields exactly.
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraPortPos {
    // port number on the card
    private Integer cardPort;
    // controller node number
    private Integer node;
    // PCI slot number
    private Integer slot;
    public Integer getCardPort() {
        return cardPort;
    }
    public void setCardPort(Integer cardPort) {
        this.cardPort = cardPort;
    }
    public Integer getNode() {
        return node;
    }
    public void setNode(Integer node) {
        this.node = node;
    }
    public Integer getSlot() {
        return slot;
    }
    public void setSlot(Integer slot) {
        this.slot = slot;
    }
}

View File

@ -35,7 +35,7 @@ public class PrimeraVolumeCopyRequestParameters {
private String snapCPG = null;
private Boolean skipZero = null;
private Boolean saveSnapshot = null;
/** 1=HIGH, 2=MED, 3=LOW */
// 1=HIGH, 2=MED, 3=LOW
private Integer priority = null;
public String getDestVolume() {
return destVolume;

View File

@ -22,10 +22,7 @@ import com.fasterxml.jackson.annotation.JsonInclude;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class PrimeraVolumePromoteRequest {
/**
* Defines action for the request as described at https://support.hpe.com/hpesc/public/docDisplay?docId=a00114827en_us&page=v25706371.html
*/
private Integer action = 4;
private Integer action = 4; // PROMOTE_VIRTUAL_COPY, https://support.hpe.com/hpesc/public/docDisplay?docId=a00114827en_us&page=v25706371.html
private Boolean online = true;
private Integer priority = 2; // MEDIUM
private Boolean allowRemoteCopyParent = true;

View File

@ -22,12 +22,22 @@ package com.cloud.hypervisor.kvm.resource.wrapper;
import static com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor.SP_LOG;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.utils.cryptsetup.KeyFile;
import org.apache.cloudstack.utils.qemu.QemuImageOptions;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.cloudstack.utils.qemu.QemuObject;
import org.apache.cloudstack.utils.qemu.QemuObject.EncryptFormat;
import org.apache.commons.io.FileUtils;
import com.cloud.agent.api.storage.StorPoolBackupSnapshotCommand;
@ -57,28 +67,24 @@ public final class StorPoolBackupSnapshotCommandWrapper extends CommandWrapper<S
SP_LOG("StorpoolBackupSnapshotCommandWrapper.execute: src=" + src.getPath() + "dst=" + dst.getPath());
StorPoolStorageAdaptor.attachOrDetachVolume("attach", "snapshot", src.getPath());
srcPath = src.getPath();
final QemuImgFile srcFile = new QemuImgFile(srcPath, PhysicalDiskFormat.RAW);
long size = 0;
String srcKeyName = "sec0";
String destKeyName = "sec1";
List<QemuObject> qemuObjects = new ArrayList<>();
Map<String, String> options = new HashMap<>();
QemuImageOptions qemuImageOpts = new QemuImageOptions(srcPath);
final QemuImg qemu = new QemuImg(cmd.getWaitInMillSeconds());
final DataStoreTO dstDataStore = dst.getDataStore();
if (!(dstDataStore instanceof NfsTO)) {
return new CopyCmdAnswer("Backup Storpool snapshot: Only NFS secondary supported at present!");
}
secondaryPool = storagePoolMgr.getStoragePoolByURI(dstDataStore.getUrl());
try (KeyFile srcKey = new KeyFile(src.getVolume().getPassphrase())) {
final String dstDir = secondaryPool.getLocalPath() + File.separator + dst.getPath();
FileUtils.forceMkdir(new File(dstDir));
final String dstPath = dstDir + File.separator + dst.getName();
final QemuImgFile dstFile = new QemuImgFile(dstPath, PhysicalDiskFormat.QCOW2);
final QemuImg qemu = new QemuImg(cmd.getWaitInMillSeconds());
qemu.convert(srcFile, dstFile);
SP_LOG("StorpoolBackupSnapshotCommandWrapper srcFileFormat=%s, dstFileFormat=%s", srcFile.getFormat(), dstFile.getFormat());
final File snapFile = new File(dstPath);
final long size = snapFile.exists() ? snapFile.length() : 0;
size = convertSnapshot(srcPath, secondaryPool, dst, srcKeyName, qemuObjects, options, qemuImageOpts,
qemu, srcKey);
}
final SnapshotObjectTO snapshot = new SnapshotObjectTO();
snapshot.setPath(dst.getPath() + File.separator + dst.getName());
@ -104,4 +110,31 @@ public final class StorPoolBackupSnapshotCommandWrapper extends CommandWrapper<S
}
}
}
/**
 * Converts an attached StorPool snapshot (a raw block device at srcPath)
 * into a QCOW2 file on the NFS secondary storage pool and returns the
 * resulting file size in bytes.
 *
 * @param srcPath       path of the attached raw snapshot device
 * @param secondaryPool NFS secondary storage pool receiving the backup
 * @param dst           destination snapshot descriptor (relative path + name)
 * @param srcKeyName    qemu secret name used for the source passphrase
 * @param qemuObjects   qemu --object list; a LUKS secret is appended when the
 *                      source volume is encrypted
 * @param options       extra qemu-img options, also populated by the secret helper
 * @param qemuImageOpts image options for the source; rebuilt with the secret
 *                      name when encryption is in play
 * @param qemu          qemu-img wrapper configured with the command timeout
 * @param srcKey        key file holding the source volume passphrase (may be unset)
 * @return size in bytes of the produced file, or 0 if it does not exist
 * @throws IOException       if the destination directory cannot be created
 * @throws QemuImgException  if the qemu-img conversion fails
 */
private long convertSnapshot(String srcPath, KVMStoragePool secondaryPool, final SnapshotObjectTO dst,
        String srcKeyName, List<QemuObject> qemuObjects, Map<String, String> options,
        QemuImageOptions qemuImageOpts, final QemuImg qemu, KeyFile srcKey) throws IOException, QemuImgException {
    long size;
    final QemuImgFile srcFile = new QemuImgFile(srcPath, PhysicalDiskFormat.RAW);
    // destination directory lives under the secondary pool's local mount point
    final String dstDir = secondaryPool.getLocalPath() + File.separator + dst.getPath();
    FileUtils.forceMkdir(new File(dstDir));
    final String dstPath = dstDir + File.separator + dst.getName();
    final QemuImgFile dstFile = new QemuImgFile(dstPath, PhysicalDiskFormat.QCOW2);
    if (srcKey.isSet()) {
        // encrypted source: register the passphrase as a qemu secret and keep
        // the destination encrypted (LUKS) rather than writing plain QCOW2
        qemuObjects.add(QemuObject.prepareSecretForQemuImg(PhysicalDiskFormat.RAW, EncryptFormat.LUKS,
                srcKey.toString(), srcKeyName, options));
        qemuImageOpts = new QemuImageOptions(PhysicalDiskFormat.RAW, srcPath, srcKeyName);
        dstFile.setFormat(PhysicalDiskFormat.LUKS);
    }
    qemuImageOpts.setImageOptsFlag(true);
    qemu.convert(srcFile, dstFile, options, qemuObjects, qemuImageOpts, null, true);
    SP_LOG("StorpoolBackupSnapshotCommandWrapper srcFileFormat=%s, dstFileFormat=%s", srcFile.getFormat(), dstFile.getFormat());
    final File snapFile = new File(dstPath);
    size = snapFile.exists() ? snapFile.length() : 0;
    return size;
}
}

View File

@ -66,6 +66,11 @@ public class OAuth2UserAuthenticator extends AdapterBase implements UserAuthenti
final String[] provider = (String[])requestParameters.get(ApiConstants.PROVIDER);
final String[] emailArray = (String[])requestParameters.get(ApiConstants.EMAIL);
final String[] secretCodeArray = (String[])requestParameters.get(ApiConstants.SECRET_CODE);
if (provider == null) {
return new Pair<Boolean, ActionOnFailedAuthentication>(false, null);
}
String oauthProvider = ((provider == null) ? null : provider[0]);
String email = ((emailArray == null) ? null : emailArray[0]);
String secretCode = ((secretCodeArray == null) ? null : secretCodeArray[0]);

View File

@ -22,7 +22,7 @@ OUTPUT_FILE=${3:?"Output file/path is required"}
echo "$(date): qemu-img convert -n -p -W -t none -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE}"
qemu-img convert -n -p -W -t none -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE} && {
qemu-img convert -n -p -W -t writeback -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE} && {
# if its a block device make sure we flush caches before exiting
lsblk ${OUTPUT_FILE} >/dev/null 2>&1 && {
blockdev --flushbufs ${OUTPUT_FILE}

View File

@ -294,10 +294,11 @@ public class VolumeJoinDaoImpl extends GenericDaoBaseWithTagInformation<VolumeJo
ServiceOffering serviceOffering = null;
if (computeOnlyDiskOffering != null) {
serviceOffering = ApiDBUtils.findServiceOfferingByComputeOnlyDiskOffering(volume.getDiskOfferingId(), false);
}
if (serviceOffering == null) {
// Check again for removed ones
serviceOffering = ApiDBUtils.findServiceOfferingByComputeOnlyDiskOffering(volume.getDiskOfferingId(), true);
}
}
return serviceOffering;
}

View File

@ -139,6 +139,7 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.userdata.UserDataManager;
import org.apache.cloudstack.utils.jsinterpreter.TagAsRuleHelper;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import org.apache.cloudstack.vm.UnmanagedVMsManager;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang3.EnumUtils;
@ -564,6 +565,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
configValuesForValidation.add(StorageManager.STORAGE_POOL_CLIENT_TIMEOUT.key());
configValuesForValidation.add(StorageManager.STORAGE_POOL_CLIENT_MAX_CONNECTIONS.key());
configValuesForValidation.add(UserDataManager.VM_USERDATA_MAX_LENGTH_STRING);
configValuesForValidation.add(UnmanagedVMsManager.RemoteKvmInstanceDisksCopyTimeout.key());
}
private void weightBasedParametersForValidation() {

View File

@ -116,7 +116,7 @@ public class AgentBasedConsoleProxyManager extends ManagerBase implements Consol
_consoleProxyPort = NumbersUtil.parseInt(value, ConsoleProxyManager.DEFAULT_PROXY_VNC_PORT);
}
value = configs.get("consoleproxy.sslEnabled");
value = configs.get(ConsoleProxySslEnabled.key());
if (value != null && value.equalsIgnoreCase("true")) {
_sslEnabled = true;
}

View File

@ -213,7 +213,7 @@ public abstract class AgentHookBase implements AgentHook {
byte[] ksBits = null;
String consoleProxyUrlDomain = _configDao.getValue(Config.ConsoleProxyUrlDomain.key());
String consoleProxySslEnabled = _configDao.getValue("consoleproxy.sslEnabled");
String consoleProxySslEnabled = _configDao.getValue(ConsoleProxyManager.ConsoleProxySslEnabled.key());
if (!StringUtils.isEmpty(consoleProxyUrlDomain) && !StringUtils.isEmpty(consoleProxySslEnabled)
&& consoleProxySslEnabled.equalsIgnoreCase("true")) {
ksBits = _ksMgr.getKeystoreBits(ConsoleProxyManager.CERTIFICATE_NAME, ConsoleProxyManager.CERTIFICATE_NAME, storePassword);

View File

@ -36,6 +36,9 @@ public interface ConsoleProxyManager extends Manager, ConsoleProxyService {
String ALERT_SUBJECT = "proxy-alert";
String CERTIFICATE_NAME = "CPVMCertificate";
ConfigKey<Boolean> ConsoleProxySslEnabled = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Boolean.class, "consoleproxy.sslEnabled", "false",
"Enable SSL for console proxy", false);
ConfigKey<Boolean> NoVncConsoleDefault = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Boolean.class, "novnc.console.default", "true",
"If true, noVNC console will be default console for virtual machines", true);

View File

@ -1118,7 +1118,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy
Map<String, String> configs = configurationDao.getConfiguration("management-server", params);
String value = configs.get("consoleproxy.sslEnabled");
String value = configs.get(ConsoleProxySslEnabled.key());
if (value != null && value.equalsIgnoreCase("true")) {
sslEnabled = true;
}
@ -1607,7 +1607,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy
@Override
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey<?>[] { NoVncConsoleDefault, NoVncConsoleSourceIpCheckEnabled };
return new ConfigKey<?>[] { ConsoleProxySslEnabled, NoVncConsoleDefault, NoVncConsoleSourceIpCheckEnabled };
}
protected ConsoleProxyStatus parseJsonToConsoleProxyStatus(String json) throws JsonParseException {

View File

@ -72,7 +72,7 @@ public class StaticConsoleProxyManager extends AgentBasedConsoleProxyManager imp
_ip = "127.0.0.1";
}
String value = (String)params.get("consoleproxy.sslEnabled");
String value = (String)params.get(ConsoleProxySslEnabled.key());
if (value != null && value.equalsIgnoreCase("true")) {
_sslEnabled = true;
}

View File

@ -62,6 +62,7 @@ import java.net.URI;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.UUID;
@ -70,6 +71,10 @@ import static com.cloud.configuration.ConfigurationManagerImpl.ADD_HOST_ON_SERVI
public abstract class LibvirtServerDiscoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter {
private final int _waitTime = 5; /* wait for 5 minutes */
private final static HashSet<String> COMPATIBLE_HOST_OSES = new HashSet<>(Arrays.asList("Rocky", "Rocky Linux",
"Red", "Red Hat Enterprise Linux", "Oracle", "Oracle Linux Server", "AlmaLinux"));
private String _kvmPrivateNic;
private String _kvmPublicNic;
private String _kvmGuestNic;
@ -468,7 +473,7 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements
_hostDao.loadDetails(oneHost);
String hostOsInCluster = oneHost.getDetail("Host.OS");
String hostOs = ssCmd.getHostDetails().get("Host.OS");
if (!hostOsInCluster.equalsIgnoreCase(hostOs)) {
if (!isHostOsCompatibleWithOtherHost(hostOsInCluster, hostOs)) {
String msg = String.format("host: %s with hostOS, \"%s\"into a cluster, in which there are \"%s\" hosts added", firstCmd.getPrivateIpAddress(), hostOs, hostOsInCluster);
if (hostOs != null && hostOs.startsWith(hostOsInCluster)) {
logger.warn(String.format("Adding %s. This may or may not be ok!", msg));
@ -483,6 +488,17 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements
return _resourceMgr.fillRoutingHostVO(host, ssCmd, getHypervisorType(), host.getDetails(), null);
}
/**
 * Decides whether a host running {@code hostOs} may join a cluster whose
 * existing hosts report {@code hostOsInCluster}. Identical OS names
 * (case-insensitive) are always compatible; otherwise both names must appear
 * in the COMPATIBLE_HOST_OSES allow-list.
 * Note: the allow-list lookup itself is case-sensitive, matching the exact
 * strings hosts report.
 *
 * @param hostOsInCluster OS string reported by hosts already in the cluster
 * @param hostOs          OS string reported by the host being added
 * @return true when the two OS strings are considered compatible
 */
protected boolean isHostOsCompatibleWithOtherHost(String hostOsInCluster, String hostOs) {
    boolean sameOs = hostOsInCluster.equalsIgnoreCase(hostOs);
    if (sameOs) {
        return true;
    }
    boolean bothAllowListed = COMPATIBLE_HOST_OSES.contains(hostOsInCluster)
            && COMPATIBLE_HOST_OSES.contains(hostOs);
    if (bothAllowListed) {
        logger.info(String.format("The host OS (%s) is compatible with the existing host OS (%s) in the cluster.", hostOs, hostOsInCluster));
    }
    return bothAllowListed;
}
@Override
public HostVO createHostVOForDirectConnectAgent(HostVO host, StartupCommand[] startup, ServerResource resource, Map<String, String> details, List<String> hostTags) {
// TODO Auto-generated method stub

View File

@ -129,7 +129,7 @@ public class PublicNetworkGuru extends AdapterBase implements NetworkGuru {
if (vm.getType().equals(VirtualMachine.Type.ConsoleProxy) || vm.getType().equals(VirtualMachine.Type.SecondaryStorageVm)) {
forSystemVms = true;
}
PublicIp ip = _ipAddrMgr.assignPublicIpAddress(dc.getId(), null, vm.getOwner(), VlanType.VirtualNetwork, null, null, false, forSystemVms);
PublicIp ip = _ipAddrMgr.assignPublicIpAddress(dc.getId(), null, vm.getOwner(), VlanType.VirtualNetwork, null, null, forSystemVms, forSystemVms);
nic.setIPv4Address(ip.getAddress().toString());
nic.setIPv4Gateway(ip.getGateway());
nic.setIPv4Netmask(ip.getNetmask());

View File

@ -5151,22 +5151,51 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
return new Pair<>(result.first(), result.second());
}
/**
 * Resolves the HypervisorCapabilitiesVO row to update, addressed either by
 * its ID or by the (hypervisor, version) pair -- never both at once.
 *
 * @param id                capabilities row ID, or null when addressing by type/version
 * @param hypervisorStr     hypervisor name, required (with version) when id is null
 * @param hypervisorVersion hypervisor version, required (with name) when id is null
 * @return the matching capabilities row
 * @throws InvalidParameterValueException when the selectors are missing,
 *         conflicting, name an unknown hypervisor, or match no row
 */
protected HypervisorCapabilitiesVO getHypervisorCapabilitiesForUpdate(final Long id, final String hypervisorStr, final String hypervisorVersion) {
    if (id == null && StringUtils.isAllEmpty(hypervisorStr, hypervisorVersion)) {
        throw new InvalidParameterValueException("Either ID or hypervisor and hypervisor version must be specified");
    }
    if (id != null) {
        // ID addressing is exclusive of type/version addressing
        if (!StringUtils.isAllBlank(hypervisorStr, hypervisorVersion)) {
            throw new InvalidParameterValueException("ID can not be specified together with hypervisor and hypervisor version");
        }
        final HypervisorCapabilitiesVO byId = _hypervisorCapabilitiesDao.findById(id, true);
        if (byId != null) {
            return byId;
        }
        final InvalidParameterValueException ex = new InvalidParameterValueException("unable to find the hypervisor capabilities for specified id");
        ex.addProxyObject(id.toString(), "Id");
        throw ex;
    }
    if (StringUtils.isAnyBlank(hypervisorStr, hypervisorVersion)) {
        throw new InvalidParameterValueException("Hypervisor and hypervisor version must be specified together");
    }
    final HypervisorType hypervisorType = HypervisorType.getType(hypervisorStr);
    if (HypervisorType.None == hypervisorType) {
        throw new InvalidParameterValueException("Invalid hypervisor specified");
    }
    final HypervisorCapabilitiesVO byTypeAndVersion = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(hypervisorType, hypervisorVersion);
    if (byTypeAndVersion != null) {
        return byTypeAndVersion;
    }
    final InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find the hypervisor capabilities for specified hypervisor and hypervisor version");
    ex.addProxyObject(hypervisorStr, "hypervisor");
    ex.addProxyObject(hypervisorVersion, "hypervisorVersion");
    throw ex;
}
@Override
public HypervisorCapabilities updateHypervisorCapabilities(UpdateHypervisorCapabilitiesCmd cmd) {
final Long id = cmd.getId();
Long id = cmd.getId();
final String hypervisorStr = cmd.getHypervisor();
final String hypervisorVersion = cmd.getHypervisorVersion();
final Boolean securityGroupEnabled = cmd.getSecurityGroupEnabled();
final Long maxGuestsLimit = cmd.getMaxGuestsLimit();
final Integer maxDataVolumesLimit = cmd.getMaxDataVolumesLimit();
final Boolean storageMotionSupported = cmd.getStorageMotionSupported();
final Integer maxHostsPerClusterLimit = cmd.getMaxHostsPerClusterLimit();
final Boolean vmSnapshotEnabled = cmd.getVmSnapshotEnabled();
HypervisorCapabilitiesVO hpvCapabilities = _hypervisorCapabilitiesDao.findById(id, true);
if (hpvCapabilities == null) {
final InvalidParameterValueException ex = new InvalidParameterValueException("unable to find the hypervisor capabilities for specified id");
ex.addProxyObject(id.toString(), "Id");
throw ex;
}
HypervisorCapabilitiesVO hpvCapabilities = getHypervisorCapabilitiesForUpdate(id, hypervisorStr, hypervisorVersion);
final boolean updateNeeded = securityGroupEnabled != null || maxGuestsLimit != null ||
maxDataVolumesLimit != null || storageMotionSupported != null || maxHostsPerClusterLimit != null ||
@ -5174,7 +5203,14 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
if (!updateNeeded) {
return hpvCapabilities;
}
if (StringUtils.isNotBlank(hypervisorVersion) && !hpvCapabilities.getHypervisorVersion().equals(hypervisorVersion)) {
logger.debug(String.format("Hypervisor capabilities for hypervisor: %s and version: %s does not exist, creating a copy from the parent version: %s for update.", hypervisorStr, hypervisorVersion, hpvCapabilities.getHypervisorVersion()));
HypervisorCapabilitiesVO copy = new HypervisorCapabilitiesVO(hpvCapabilities);
copy.setHypervisorVersion(hypervisorVersion);
hpvCapabilities = _hypervisorCapabilitiesDao.persist(copy);
}
id = hpvCapabilities.getId();
hpvCapabilities = _hypervisorCapabilitiesDao.createForUpdate(id);
if (securityGroupEnabled != null) {

View File

@ -113,9 +113,6 @@ import com.cloud.org.Cluster;
import com.cloud.resource.ResourceManager;
import com.cloud.resource.ResourceState;
import com.cloud.serializer.GsonHelper;
import com.cloud.server.StatsCollector.AbstractStatsCollector;
import com.cloud.server.StatsCollector.AutoScaleMonitor;
import com.cloud.server.StatsCollector.StorageCollector;
import com.cloud.storage.ImageStoreDetailsUtil;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage;
@ -285,6 +282,11 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
protected static ConfigKey<Boolean> vmStatsCollectUserVMOnly = new ConfigKey<>("Advanced", Boolean.class, "vm.stats.user.vm.only", "false",
"When set to 'false' stats for system VMs will be collected otherwise stats collection will be done only for user VMs", true);
protected static ConfigKey<Long> vmStatsRemoveBatchSize = new ConfigKey<>("Advanced", Long.class, "vm.stats.remove.batch.size", "0", "Indicates the" +
" limit applied to delete vm_stats entries while running the clean-up task. With this, ACS will run the delete query, applying the limit, as many times as necessary" +
" to delete all entries older than the value defined in vm.stats.max.retention.time. This is advised when retaining several days of records, which can lead to slowness" +
" on the delete query. Zero (0) means that no limit will be applied, therefore, the query will run once and without limit, keeping the default behavior.", true);
protected static ConfigKey<Boolean> vmDiskStatsRetentionEnabled = new ConfigKey<>("Advanced", Boolean.class, "vm.disk.stats.retention.enabled", "false",
"When set to 'true' stats for VM disks will be stored in the database otherwise disk stats will not be stored", true);
@ -1963,7 +1965,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
logger.trace("Removing older VM stats records.");
Date now = new Date();
Date limit = DateUtils.addMinutes(now, -maxRetentionTime);
vmStatsDao.removeAllByTimestampLessThan(limit);
vmStatsDao.removeAllByTimestampLessThan(limit, vmStatsRemoveBatchSize.value());
}
/**
@ -2137,7 +2139,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
@Override
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey<?>[] {vmDiskStatsInterval, vmDiskStatsIntervalMin, vmNetworkStatsInterval, vmNetworkStatsIntervalMin, StatsTimeout, statsOutputUri,
return new ConfigKey<?>[] {vmDiskStatsInterval, vmDiskStatsIntervalMin, vmNetworkStatsInterval, vmNetworkStatsIntervalMin, StatsTimeout, statsOutputUri, vmStatsRemoveBatchSize,
vmStatsIncrementMetrics, vmStatsMaxRetentionTime, vmStatsCollectUserVMOnly, vmDiskStatsRetentionEnabled, vmDiskStatsMaxRetentionTime,
MANAGEMENT_SERVER_STATUS_COLLECTION_INTERVAL,
DATABASE_SERVER_STATUS_COLLECTION_INTERVAL,

View File

@ -1055,7 +1055,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
created = false;
VolumeInfo vol = volFactory.getVolume(cmd.getEntityId());
vol.stateTransit(Volume.Event.DestroyRequested);
throw new CloudRuntimeException("Failed to create volume: " + volume.getId(), e);
throw new CloudRuntimeException("Failed to create volume: " + volume.getUuid(), e);
} finally {
if (!created) {
logger.trace("Decrementing volume resource count for account id=" + volume.getAccountId() + " as volume failed to create on the backend");
@ -3319,6 +3319,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
}
DiskOfferingVO newDiskOffering = retrieveAndValidateNewDiskOffering(cmd);
// if no new disk offering was provided, and match is required, default to the offering of the
// original volume. otherwise it falls through with no check and the target volume may
// not work correctly in some scenarios with the target provider. Administrator
// can disable this flag dynamically for certain bulk migration scenarios if required.
if (newDiskOffering == null && Boolean.TRUE.equals(MatchStoragePoolTagsWithDiskOffering.value())) {
newDiskOffering = diskOffering;
}
validateConditionsToReplaceDiskOfferingOfVolume(vol, newDiskOffering, destPool);
if (vm != null) {
@ -3404,14 +3411,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
Account caller = CallContext.current().getCallingAccount();
DataCenter zone = null;
Volume volume = _volsDao.findById(cmd.getId());
if (volume != null) {
if (volume == null) {
throw new InvalidParameterValueException(String.format("Provided volume id is not valid: %s", cmd.getId()));
}
zone = _dcDao.findById(volume.getDataCenterId());
}
_accountMgr.checkAccess(caller, newDiskOffering, zone);
DiskOfferingVO currentDiskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
if (VolumeApiServiceImpl.MatchStoragePoolTagsWithDiskOffering.valueIn(zone.getId()) && !doesNewDiskOfferingHasTagsAsOldDiskOffering(currentDiskOffering, newDiskOffering)) {
throw new InvalidParameterValueException(String.format("Existing disk offering storage tags of the volume %s does not contain in the new disk offering %s ", volume.getUuid(), newDiskOffering.getUuid()));
}
return newDiskOffering;
}
@ -3496,6 +3501,18 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
return doesTargetStorageSupportDiskOffering(destPool, targetStoreTags);
}
public static boolean doesNewDiskOfferingHasTagsAsOldDiskOffering(DiskOfferingVO oldDO, DiskOfferingVO newDO) {
String[] oldDOStorageTags = oldDO.getTagsArray();
String[] newDOStorageTags = newDO.getTagsArray();
if (oldDOStorageTags.length == 0) {
return true;
}
if (newDOStorageTags.length == 0) {
return false;
}
return CollectionUtils.isSubCollection(Arrays.asList(oldDOStorageTags), Arrays.asList(newDOStorageTags));
}
@Override
public boolean doesTargetStorageSupportDiskOffering(StoragePool destPool, String diskOfferingTags) {
Pair<List<String>, Boolean> storagePoolTags = getStoragePoolTags(destPool);
@ -3525,18 +3542,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
return result;
}
public static boolean doesNewDiskOfferingHasTagsAsOldDiskOffering(DiskOfferingVO oldDO, DiskOfferingVO newDO) {
String[] oldDOStorageTags = oldDO.getTagsArray();
String[] newDOStorageTags = newDO.getTagsArray();
if (oldDOStorageTags.length == 0) {
return true;
}
if (newDOStorageTags.length == 0) {
return false;
}
return CollectionUtils.isSubCollection(Arrays.asList(oldDOStorageTags), Arrays.asList(newDOStorageTags));
}
/**
* Returns a {@link Pair}, where the first value is the list of the StoragePool tags, and the second value is whether the returned tags are to be interpreted as a rule,
* or a normal list of tags.

View File

@ -188,6 +188,7 @@ import com.cloud.user.AccountManager;
import com.cloud.user.AccountService;
import com.cloud.user.AccountVO;
import com.cloud.user.ResourceLimitService;
import com.cloud.user.User;
import com.cloud.user.UserData;
import com.cloud.user.dao.AccountDao;
import com.cloud.uservm.UserVm;
@ -1446,6 +1447,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
// Input validation
final Long id = cmd.getId();
final Account caller = CallContext.current().getCallingAccount();
final User user = CallContext.current().getCallingUser();
List<String> accountNames = cmd.getAccountNames();
List<Long> projectIds = cmd.getProjectIds();
Boolean isFeatured = cmd.isFeatured();
@ -1515,9 +1517,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
}
if (owner.getType() == Account.Type.PROJECT) {
// Currently project owned templates cannot be shared outside project but is available to all users within project by default.
throw new InvalidParameterValueException("Update template permissions is an invalid operation on template " + template.getName() +
". Project owned templates cannot be shared outside template.");
// if it is a project owned template/iso, the user must at least have access to be allowed to share it.
_accountMgr.checkAccess(user, template);
}
// check configuration parameter(allow.public.user.templates) value for

View File

@ -6510,6 +6510,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
+ " hypervisors: [%s].", hypervisorType, HYPERVISORS_THAT_CAN_DO_STORAGE_MIGRATION_ON_NON_USER_VMS));
}
List<VolumeVO> vols = _volsDao.findByInstance(vm.getId());
if (vols.size() > 1 &&
!(HypervisorType.VMware.equals(hypervisorType) || HypervisorType.KVM.equals(hypervisorType))) {
throw new InvalidParameterValueException("Data disks attached to the vm, can not migrate. Need to detach data disks first");
}
// Check that Vm does not have VM Snapshots
if (_vmSnapshotDao.findByVm(vmId).size() > 0) {
throw new InvalidParameterValueException("VM's disk cannot be migrated, please remove all the VM Snapshots for this VM");
@ -7391,10 +7397,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
if (template == null) {
throw new InvalidParameterValueException(String.format("Template for VM: %s cannot be found", vm.getUuid()));
}
if (!template.isPublicTemplate()) {
Account templateOwner = _accountMgr.getAccount(template.getAccountId());
_accountMgr.checkAccess(newAccount, null, true, templateOwner);
}
_accountMgr.checkAccess(newAccount, AccessType.UseEntry, true, template);
// VV 5: check the new account can create vm in the domain
DomainVO domain = _domainDao.findById(cmd.getDomainId());

View File

@ -93,7 +93,9 @@ public class SnapshotHelper {
*/
public void expungeTemporarySnapshot(boolean kvmSnapshotOnlyInPrimaryStorage, SnapshotInfo snapInfo) {
if (!kvmSnapshotOnlyInPrimaryStorage) {
if (snapInfo != null) {
logger.trace(String.format("Snapshot [%s] is not a temporary backup to create a volume from snapshot. Not expunging it.", snapInfo.getId()));
}
return;
}

View File

@ -796,13 +796,20 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
}
}
copyRemoteVolumeCommand.setTempPath(tmpPath);
int copyTimeout = UnmanagedVMsManager.RemoteKvmInstanceDisksCopyTimeout.value();
if (copyTimeout <= 0) {
copyTimeout = Integer.valueOf(UnmanagedVMsManager.RemoteKvmInstanceDisksCopyTimeout.defaultValue());
}
int copyTimeoutInSecs = copyTimeout * 60;
copyRemoteVolumeCommand.setWait(copyTimeoutInSecs);
logger.error(String.format("Initiating copy remote volume %s from %s, timeout %d secs", path, remoteUrl, copyTimeoutInSecs));
Answer answer = agentManager.easySend(dest.getHost().getId(), copyRemoteVolumeCommand);
if (!(answer instanceof CopyRemoteVolumeAnswer)) {
throw new CloudRuntimeException("Error while copying volume");
throw new CloudRuntimeException("Error while copying volume of remote instance: " + answer.getDetails());
}
CopyRemoteVolumeAnswer copyRemoteVolumeAnswer = (CopyRemoteVolumeAnswer) answer;
if(!copyRemoteVolumeAnswer.getResult()) {
throw new CloudRuntimeException("Error while copying volume");
throw new CloudRuntimeException("Unable to copy volume of remote instance");
}
diskProfile.setSize(copyRemoteVolumeAnswer.getSize());
DiskProfile profile = volumeManager.updateImportedVolume(type, diskOffering, vm, template, deviceId,
@ -815,7 +822,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
Volume.Type type, VirtualMachineTemplate template,
Long deviceId, Long hostId, String diskPath, DiskProfile diskProfile) {
List<StoragePoolVO> storagePools = primaryDataStoreDao.findLocalStoragePoolsByHostAndTags(hostId, null);
if(storagePools.size() < 1) {
throw new CloudRuntimeException("Local Storage not found for host");
}
@ -828,7 +834,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
return new Pair<>(profile, storagePool);
}
private Pair<DiskProfile, StoragePool> importKVMSharedDisk(VirtualMachine vm, DiskOffering diskOffering,
Volume.Type type, VirtualMachineTemplate template,
Long deviceId, Long poolId, String diskPath, DiskProfile diskProfile) {
@ -840,7 +845,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
return new Pair<>(profile, storagePool);
}
private Pair<DiskProfile, StoragePool> importDisk(UnmanagedInstanceTO.Disk disk, VirtualMachine vm, Cluster cluster, DiskOffering diskOffering,
Volume.Type type, String name, Long diskSize, Long minIops, Long maxIops, VirtualMachineTemplate template,
Account owner, Long deviceId) {
@ -2368,7 +2372,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Import failed for Vm: %s. Suitable deployment destination not found", userVm.getInstanceName()));
}
Map<Volume, StoragePool> storage = dest.getStorageForDisks();
Volume volume = volumeDao.findById(diskProfile.getVolumeId());
StoragePool storagePool = storage.get(volume);
@ -2388,7 +2391,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
}
diskProfile.setSize(checkVolumeAnswer.getSize());
List<Pair<DiskProfile, StoragePool>> diskProfileStoragePoolList = new ArrayList<>();
try {
long deviceId = 1L;
@ -2409,7 +2411,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
return userVm;
}
private NetworkVO getDefaultNetwork(DataCenter zone, Account owner, boolean selectAny) throws InsufficientCapacityException, ResourceAllocationException {
NetworkVO defaultNetwork = null;
@ -2467,7 +2468,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
return defaultNetwork;
}
// TODO: add unit-test coverage for listVmsForImport
public ListResponse<UnmanagedInstanceResponse> listVmsForImport(ListVmsForImportCmd cmd) {
final Account caller = CallContext.current().getCallingAccount();
if (caller.getType() != Account.Type.ADMIN) {
@ -2507,8 +2507,8 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
private HashMap<String, UnmanagedInstanceTO> getRemoteVmsOnKVMHost(long zoneId, String remoteHostUrl, String username, String password) {
//ToDo: add option to list one Vm by name
List<HostVO> hosts = resourceManager.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.KVM, zoneId);
if(hosts.size() < 1) {
throw new CloudRuntimeException("No hosts available for VM import");
if (hosts.size() < 1) {
throw new CloudRuntimeException("No hosts available to list VMs on remote host " + remoteHostUrl);
}
HostVO host = hosts.get(0);
GetRemoteVmsCommand getRemoteVmsCommand = new GetRemoteVmsCommand(remoteHostUrl, username, password);
@ -2536,6 +2536,9 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
@Override
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey<?>[]{UnmanageVMPreserveNic};
return new ConfigKey<?>[]{
UnmanageVMPreserveNic,
RemoteKvmInstanceDisksCopyTimeout
};
}
}

View File

@ -0,0 +1,54 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.hypervisor.kvm.discoverer;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;
@RunWith(MockitoJUnitRunner.class)
public class LibvirtServerDiscovererTest {

    @Spy
    private LibvirtServerDiscoverer libvirtServerDiscoverer;

    /**
     * Exercises {@code isHostOsCompatibleWithOtherHost} with representative
     * host-OS name pairs: RHEL-family distributions (Rocky, RHEL, Oracle,
     * AlmaLinux) are expected to be mutually compatible, while unrelated
     * OS families (Windows, SUSE) are not.
     */
    @Test
    public void validateCompatibleOses() {
        checkCompatibility("Rocky Linux", "Rocky Linux", true);
        checkCompatibility("Rocky", "Rocky Linux", true);
        checkCompatibility("Red", "Red Hat Enterprise Linux", true);
        checkCompatibility("Oracle", "Oracle Linux Server", true);
        checkCompatibility("Rocky Linux", "Red Hat Enterprise Linux", true);
        checkCompatibility("AlmaLinux", "Red Hat Enterprise Linux", true);
        checkCompatibility("Windows", "Rocky Linux", false);
        checkCompatibility("SUSE", "Rocky Linux", false);
    }

    // Asserts the discoverer's compatibility verdict for a single OS-name pair.
    private void checkCompatibility(String hostOsInCluster, String hostOs, boolean expected) {
        Assert.assertEquals(expected, libvirtServerDiscoverer.isHostOsCompatibleWithOtherHost(hostOsInCluster, hostOs));
    }
}

View File

@ -28,7 +28,7 @@ import com.cloud.user.VmDiskStatisticsVO;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VmStats;
import com.cloud.vm.VmStatsVO;
import com.cloud.vm.dao.VmStatsDao;
import com.cloud.vm.dao.VmStatsDaoImpl;
import com.google.gson.Gson;
import com.tngtech.java.junit.dataprovider.DataProvider;
import com.tngtech.java.junit.dataprovider.DataProviderRunner;
@ -81,7 +81,7 @@ public class StatsCollectorTest {
private static final String DEFAULT_DATABASE_NAME = "cloudstack";
@Mock
VmStatsDao vmStatsDaoMock = Mockito.mock(VmStatsDao.class);
VmStatsDaoImpl vmStatsDaoMock;
@Mock
VmStatsEntry statsForCurrentIterationMock;
@ -304,7 +304,7 @@ public class StatsCollectorTest {
statsCollector.cleanUpVirtualMachineStats();
Mockito.verify(vmStatsDaoMock, Mockito.never()).removeAllByTimestampLessThan(Mockito.any());
Mockito.verify(vmStatsDaoMock, Mockito.never()).removeAllByTimestampLessThan(Mockito.any(), Mockito.anyLong());
}
@Test
@ -313,7 +313,7 @@ public class StatsCollectorTest {
statsCollector.cleanUpVirtualMachineStats();
Mockito.verify(vmStatsDaoMock).removeAllByTimestampLessThan(Mockito.any());
Mockito.verify(vmStatsDaoMock).removeAllByTimestampLessThan(Mockito.any(), Mockito.anyLong());
}
@Test

View File

@ -210,6 +210,11 @@ public class MockUsageEventDao implements UsageEventDao{
return 0;
}
@Override
public int expunge(SearchCriteria<UsageEventVO> sc, long limit) {
return 0;
}
@Override
public void expunge() {

View File

@ -1043,7 +1043,7 @@ class TestCpuCapServiceOfferings(cloudstackTestCase):
#Get host CPU usage from top command before and after VM consuming 100% CPU
find_pid_cmd = "ps -ax | grep '%s' | head -1 | awk '{print $1}'" % self.vm.id
pid = ssh_host.execute(find_pid_cmd)[0]
cpu_usage_cmd = "top -b n 1 p %s | tail -1 | awk '{print $9}'" % pid
cpu_usage_cmd = "top -b -n 1 -p %s | tail -1 | awk '{print $9}'" % pid
host_cpu_usage_before_str = ssh_host.execute(cpu_usage_cmd)[0]
host_cpu_usage_before = round(float(host_cpu_usage_before_str))

View File

@ -2347,6 +2347,7 @@ export default {
args.domainid = store.getters.project?.id ? null : this.owner.domainid
args.projectid = store.getters.project?.id || this.owner.projectid
args.templatefilter = templateFilter
args.projectid = -1
args.details = 'all'
args.showicon = 'true'
args.id = this.templateId
@ -2369,6 +2370,7 @@ export default {
}
args.zoneid = _.get(this.zone, 'id')
args.isoFilter = isoFilter
args.projectid = -1
args.bootable = true
args.showicon = 'true'
args.id = this.isoId

View File

@ -39,12 +39,18 @@ import com.cloud.utils.Pair;
public class SshHelper {
private static final int DEFAULT_CONNECT_TIMEOUT = 180000;
private static final int DEFAULT_KEX_TIMEOUT = 60000;
private static final int DEFAULT_WAIT_RESULT_TIMEOUT = 120000;
protected static Logger LOGGER = LogManager.getLogger(SshHelper.class);
public static Pair<Boolean, String> sshExecute(String host, int port, String user, File pemKeyFile, String password, String command) throws Exception {
return sshExecute(host, port, user, pemKeyFile, password, command, DEFAULT_CONNECT_TIMEOUT, DEFAULT_KEX_TIMEOUT, 120000);
return sshExecute(host, port, user, pemKeyFile, password, command, DEFAULT_CONNECT_TIMEOUT, DEFAULT_KEX_TIMEOUT, DEFAULT_WAIT_RESULT_TIMEOUT);
}
public static Pair<Boolean, String> sshExecute(String host, int port, String user, File pemKeyFile, String password, String command, int waitResultTimeoutInMs) throws Exception {
return sshExecute(host, port, user, pemKeyFile, password, command, DEFAULT_CONNECT_TIMEOUT, DEFAULT_KEX_TIMEOUT, waitResultTimeoutInMs);
}
public static void scpTo(String host, int port, String user, File pemKeyFile, String password, String remoteTargetDirectory, String localFile, String fileMode)