Merge branch 'main' into nsx-integration
commit 053521077c
@@ -19,7 +19,7 @@ default_stages: [commit, push]
default_language_version:
# force all unspecified Python hooks to run python3
python: python3
minimum_pre_commit_version: "2.18.0"
minimum_pre_commit_version: "2.17.0"
repos:
- repo: meta
hooks:
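Contributors can check whether their locally installed hook runner already satisfies the relaxed lower bound before running the hooks; the two commands below are standard pre-commit usage and are shown here only as a reminder, not as part of the change:

    pre-commit --version
    pre-commit run --all-files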
@@ -87,7 +87,8 @@ public class CreateNetworkOfferingCmd extends BaseCmd {
@Parameter(name = ApiConstants.SPECIFY_VLAN, type = CommandType.BOOLEAN, description = "true if network offering supports vlans")
private Boolean specifyVlan;

@Parameter(name = ApiConstants.AVAILABILITY, type = CommandType.STRING, description = "the availability of network offering. Default value is Optional")
@Parameter(name = ApiConstants.AVAILABILITY, type = CommandType.STRING, description = "the availability of network offering. The default value is Optional. "
+ " Another value is Required, which will make it as the default network offering for new networks ")
private String availability;

@Parameter(name = ApiConstants.NETWORKRATE, type = CommandType.INTEGER, description = "data transfer rate in megabits per second allowed")
@ -55,7 +55,7 @@ public class UpdateNetworkOfferingCmd extends BaseCmd {
|
||||
private String displayText;
|
||||
|
||||
@Parameter(name = ApiConstants.AVAILABILITY, type = CommandType.STRING, description = "the availability of network offering."
|
||||
+ " Default value is Required for Guest Virtual network offering; Optional for Guest Direct network offering")
|
||||
+ " The value is Required makes this network offering default for Guest Virtual Networks. Only one network offering can have the value Required ")
|
||||
private String availability;
|
||||
|
||||
@Parameter(name = ApiConstants.SORT_KEY, type = CommandType.INTEGER, description = "sort key of the network offering, integer")
|
||||
|
||||
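As an illustration of the documented semantics (not part of the diff; the UUID is a placeholder and all other parameters are omitted), an API client could promote an existing offering to the default for new networks with a request along the lines of:

    command=updateNetworkOffering&id=<offering-uuid>&availability=Required

Per the description above, only one offering can carry the Required value at a time.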
@@ -29,6 +29,10 @@ db.cloud.driver=@DBDRIVER@
db.cloud.port=3306
db.cloud.name=cloud

# Connection URI to the database "cloud". When this property is set, only the following properties will be used along with it: db.cloud.maxActive, db.cloud.maxIdle, db.cloud.maxWait, db.cloud.username, db.cloud.password, db.cloud.driver, db.cloud.validationQuery, db.cloud.isolation.level. Other properties will be ignored.
db.cloud.uri=

# CloudStack database tuning parameters
db.cloud.maxActive=250
db.cloud.maxIdle=30
@@ -61,6 +65,10 @@ db.usage.driver=@DBDRIVER@
db.usage.port=3306
db.usage.name=cloud_usage

# Connection URI to the database "usage". When this property is set, only the following properties will be used along with it: db.usage.maxActive, db.cloud.maxIdle, db.cloud.maxWait, db.usage.username, db.usage.password, db.usage.driver, db.usage.validationQuery, db.usage.isolation.level. Other properties will be ignored.
db.usage.uri=

# usage database tuning parameters
db.usage.maxActive=100
db.usage.maxIdle=30
@@ -79,6 +87,9 @@ db.simulator.maxIdle=30
db.simulator.maxWait=10000
db.simulator.autoReconnect=true

# Connection URI to the database "simulator". When this property is set, only the following properties will be used along with it: db.simulator.host, db.simulator.port, db.simulator.name, db.simulator.autoReconnect. Other properties will be ignored.
db.simulator.uri=

# High Availability And Cluster Properties
db.ha.enabled=false
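For illustration only (the host, port and flags below are hypothetical, not taken from the diff), a populated override that makes the properties listed in the comments above the only other ones consulted could look like:

    db.cloud.uri=jdbc:mysql://10.1.1.1:3306/cloud?autoReconnect=true&useSSL=true

The scheme prefix (here jdbc:mysql) matters, because the management server derives the JDBC driver to load from the first two colon-separated segments of the URI.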
@@ -842,6 +842,12 @@
<overWrite>false</overWrite>
<outputDirectory>${project.build.directory}/lib</outputDirectory>
</artifactItem>
<artifactItem>
<groupId>com.linbit.linstor.api</groupId>
<artifactId>java-linstor</artifactId>
<overWrite>false</overWrite>
<outputDirectory>${project.build.directory}/lib</outputDirectory>
</artifactItem>
<artifactItem>
<groupId>org.bouncycastle</groupId>
<artifactId>bctls-jdk15on</artifactId>
@@ -885,6 +891,7 @@
<exclude>mysql:mysql-connector-java</exclude>
<exclude>org.apache.cloudstack:cloud-plugin-storage-volume-storpool</exclude>
<exclude>org.apache.cloudstack:cloud-plugin-storage-volume-linstor</exclude>
<exclude>com.linbit.linstor.api:java-linstor</exclude>
</excludes>
</artifactSet>
<transformers>
@@ -581,7 +581,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
allocate(vmInstanceName, template, serviceOffering, new DiskOfferingInfo(diskOffering), new ArrayList<>(), networks, plan, hyperType, null, null);
}

private VirtualMachineGuru getVmGuru(final VirtualMachine vm) {
VirtualMachineGuru getVmGuru(final VirtualMachine vm) {
if(vm != null) {
return _vmGurus.get(vm.getType());
}
@@ -1457,6 +1457,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
}
if (canRetry) {
try {
conditionallySetPodToDeployIn(vm);
changeState(vm, Event.OperationFailed, null, work, Step.Done);
} catch (final NoTransitionException e) {
throw new ConcurrentOperationException(e.getMessage());
@@ -1512,7 +1513,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
}
String networkName = String.format("D%s-A%s-Z%s", domain.getId(), acc.getId(), zone.getId());
if (Objects.isNull(networkVO.getVpcId())) {
networkName += "-S"+networkVO.getId();
networkName += "-S" + networkVO.getId();
} else {
VpcVO vpc = vpcDao.findById(networkVO.getVpcId());
if (Objects.isNull(vpc)) {
@@ -1523,6 +1524,24 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
networkToNetworkNameMap.put(networkVO.getId(), networkName);
}

/**
* Setting pod id to null can result in migration of Volumes across pods. This is not desirable for VMs which
* have a volume in Ready state (happens when a VM is shutdown and started again).
* So, we set it to null only when
* migration of VM across cluster is enabled
* Or, volumes are still in allocated state for that VM (happens when VM is Starting/deployed for the first time)
*/
private void conditionallySetPodToDeployIn(VMInstanceVO vm) {
if (MIGRATE_VM_ACROSS_CLUSTERS.valueIn(vm.getDataCenterId()) || areAllVolumesAllocated(vm.getId())) {
vm.setPodIdToDeployIn(null);
}
}

boolean areAllVolumesAllocated(long vmId) {
final List<VolumeVO> vols = _volsDao.findByInstance(vmId);
return CollectionUtils.isEmpty(vols) || vols.stream().allMatch(v -> Volume.State.Allocated.equals(v.getState()));
}

private void logBootModeParameters(Map<VirtualMachineProfile.Param, Object> params) {
if (params == null) {
return;
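A short illustrative walk-through of the helper added above (the scenarios are hypothetical; the behaviour is the one the javadoc describes):

    // case 1: MIGRATE_VM_ACROSS_CLUSTERS is disabled for the zone and the stopped VM still has a ROOT volume in Ready state
    //         -> areAllVolumesAllocated(vm.getId()) is false, the pod id is kept and the volume is not migrated to another pod
    // case 2: the VM is being deployed for the first time, so all of its volumes are still in Allocated state
    //         -> areAllVolumesAllocated(vm.getId()) is true, the pod id is cleared and the planner is free to pick any pod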
@@ -19,9 +19,12 @@ package com.cloud.vm;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyLong;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.when;
@@ -47,6 +50,23 @@ import com.cloud.network.vpc.VpcVO;
import com.cloud.network.vpc.dao.VpcDao;
import com.cloud.user.AccountVO;
import com.cloud.user.dao.AccountDao;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterDetailsVO;
import com.cloud.dc.Pod;
import com.cloud.deploy.DeployDestination;
import com.cloud.deploy.DeploymentPlanningManager;
import com.cloud.hypervisor.HypervisorGuruManager;
import com.cloud.org.Cluster;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.user.Account;
import com.cloud.user.User;
import com.cloud.utils.Journal;
import com.cloud.utils.Pair;
import com.cloud.utils.Ternary;
import com.cloud.utils.db.EntityManager;
import com.cloud.utils.fsm.StateMachine2;
import com.cloud.vm.dao.UserVmDetailsDao;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
@@ -58,10 +78,11 @@ import org.junit.runner.RunWith;
import org.mockito.InOrder;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.MockedStatic;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;
import org.mockito.stubbing.Answer;
import org.mockito.runners.MockitoJUnitRunner;

import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Command;
@@ -100,13 +121,14 @@ import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachine.State;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.VMInstanceDao;
import org.springframework.test.util.ReflectionTestUtils;

@RunWith(MockitoJUnitRunner.class)
public class VirtualMachineManagerImplTest {

@Spy
@InjectMocks
private VirtualMachineManagerImpl virtualMachineManagerImpl;
private VirtualMachineManagerImpl virtualMachineManagerImpl = new VirtualMachineManagerImpl();
@Mock
private AgentManager agentManagerMock;
@Mock
@@ -177,6 +199,20 @@ public class VirtualMachineManagerImplTest {
private DataCenterDao dcDao;
@Mock
private VpcDao vpcDao;
@Mock
private EntityManager _entityMgr;
@Mock
private DeploymentPlanningManager _dpMgr;
@Mock
private HypervisorGuruManager _hvGuruMgr;
@Mock
private ClusterDetailsDao _clusterDetailsDao;
@Mock
private UserVmDetailsDao userVmDetailsDao;
@Mock
private ItWorkDao _workDao;
@Mock
protected StateMachine2<State, VirtualMachine.Event, VirtualMachine> _stateMachine;

@Before
public void setup() {
@ -455,7 +491,7 @@ public class VirtualMachineManagerImplTest {
|
||||
HashMap<Long, Long> userDefinedVolumeToStoragePoolMap = new HashMap<>();
|
||||
userDefinedVolumeToStoragePoolMap.put(volumeMockId, storagePoolVoMockId);
|
||||
|
||||
Mockito.doNothing().when(virtualMachineManagerImpl).executeManagedStorageChecksWhenTargetStoragePoolProvided(Mockito.any(StoragePoolVO.class), Mockito.any(VolumeVO.class), Mockito.any(StoragePoolVO.class));
|
||||
Mockito.doNothing().when(virtualMachineManagerImpl).executeManagedStorageChecksWhenTargetStoragePoolProvided(any(StoragePoolVO.class), any(VolumeVO.class), any(StoragePoolVO.class));
|
||||
Mockito.doReturn(null).when(storagePoolHostDaoMock).findByPoolHost(storagePoolVoMockId, hostMockId);
|
||||
|
||||
virtualMachineManagerImpl.buildMapUsingUserInformation(virtualMachineProfileMock, hostMock, userDefinedVolumeToStoragePoolMap);
|
||||
@ -467,8 +503,8 @@ public class VirtualMachineManagerImplTest {
|
||||
HashMap<Long, Long> userDefinedVolumeToStoragePoolMap = Mockito.spy(new HashMap<>());
|
||||
userDefinedVolumeToStoragePoolMap.put(volumeMockId, storagePoolVoMockId);
|
||||
|
||||
Mockito.doNothing().when(virtualMachineManagerImpl).executeManagedStorageChecksWhenTargetStoragePoolProvided(Mockito.any(StoragePoolVO.class), Mockito.any(VolumeVO.class),
|
||||
Mockito.any(StoragePoolVO.class));
|
||||
Mockito.doNothing().when(virtualMachineManagerImpl).executeManagedStorageChecksWhenTargetStoragePoolProvided(any(StoragePoolVO.class), any(VolumeVO.class),
|
||||
any(StoragePoolVO.class));
|
||||
Mockito.doReturn(Mockito.mock(StoragePoolHostVO.class)).when(storagePoolHostDaoMock).findByPoolHost(storagePoolVoMockId, hostMockId);
|
||||
|
||||
Map<Volume, StoragePool> volumeToPoolObjectMap = virtualMachineManagerImpl.buildMapUsingUserInformation(virtualMachineProfileMock, hostMock, userDefinedVolumeToStoragePoolMap);
|
||||
@ -504,7 +540,7 @@ public class VirtualMachineManagerImplTest {
|
||||
virtualMachineManagerImpl.executeManagedStorageChecksWhenTargetStoragePoolNotProvided(hostMock, storagePoolVoMock, volumeVoMock);
|
||||
|
||||
Mockito.verify(storagePoolVoMock).isManaged();
|
||||
Mockito.verify(storagePoolHostDaoMock, Mockito.times(0)).findByPoolHost(Mockito.anyLong(), Mockito.anyLong());
|
||||
Mockito.verify(storagePoolHostDaoMock, Mockito.times(0)).findByPoolHost(anyLong(), anyLong());
|
||||
}
|
||||
|
||||
@Test
|
||||
@ -528,15 +564,15 @@ public class VirtualMachineManagerImplTest {
|
||||
|
||||
@Test
|
||||
public void getCandidateStoragePoolsToMigrateLocalVolumeTestLocalVolume() {
|
||||
Mockito.doReturn(Mockito.mock(DiskOfferingVO.class)).when(diskOfferingDaoMock).findById(Mockito.anyLong());
|
||||
Mockito.doReturn(Mockito.mock(DiskOfferingVO.class)).when(diskOfferingDaoMock).findById(anyLong());
|
||||
|
||||
Mockito.doReturn(true).when(storagePoolVoMock).isLocal();
|
||||
|
||||
List<StoragePool> poolListMock = new ArrayList<>();
|
||||
poolListMock.add(storagePoolVoMock);
|
||||
|
||||
Mockito.doReturn(poolListMock).when(storagePoolAllocatorMock).allocateToPool(Mockito.any(DiskProfile.class), Mockito.any(VirtualMachineProfile.class), Mockito.any(DeploymentPlan.class),
|
||||
Mockito.any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL));
|
||||
Mockito.doReturn(poolListMock).when(storagePoolAllocatorMock).allocateToPool(any(DiskProfile.class), any(VirtualMachineProfile.class), any(DeploymentPlan.class),
|
||||
any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL));
|
||||
|
||||
List<StoragePool> poolList = virtualMachineManagerImpl.getCandidateStoragePoolsToMigrateLocalVolume(virtualMachineProfileMock, dataCenterDeploymentMock, volumeVoMock);
|
||||
|
||||
@ -546,15 +582,15 @@ public class VirtualMachineManagerImplTest {
|
||||
|
||||
@Test
|
||||
public void getCandidateStoragePoolsToMigrateLocalVolumeTestCrossClusterMigration() {
|
||||
Mockito.doReturn(Mockito.mock(DiskOfferingVO.class)).when(diskOfferingDaoMock).findById(Mockito.anyLong());
|
||||
Mockito.doReturn(Mockito.mock(DiskOfferingVO.class)).when(diskOfferingDaoMock).findById(anyLong());
|
||||
|
||||
Mockito.doReturn(false).when(storagePoolVoMock).isLocal();
|
||||
|
||||
List<StoragePool> poolListMock = new ArrayList<>();
|
||||
poolListMock.add(storagePoolVoMock);
|
||||
|
||||
Mockito.doReturn(poolListMock).when(storagePoolAllocatorMock).allocateToPool(Mockito.any(DiskProfile.class), Mockito.any(VirtualMachineProfile.class), Mockito.any(DeploymentPlan.class),
|
||||
Mockito.any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL));
|
||||
Mockito.doReturn(poolListMock).when(storagePoolAllocatorMock).allocateToPool(any(DiskProfile.class), any(VirtualMachineProfile.class), any(DeploymentPlan.class),
|
||||
any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL));
|
||||
|
||||
Mockito.doReturn(true).when(virtualMachineManagerImpl).isStorageCrossClusterMigration(clusterMockId, storagePoolVoMock);
|
||||
List<StoragePool> poolList = virtualMachineManagerImpl.getCandidateStoragePoolsToMigrateLocalVolume(virtualMachineProfileMock, dataCenterDeploymentMock, volumeVoMock);
|
||||
@ -565,15 +601,15 @@ public class VirtualMachineManagerImplTest {
|
||||
|
||||
@Test
|
||||
public void getCandidateStoragePoolsToMigrateLocalVolumeTestWithinClusterMigration() {
|
||||
Mockito.doReturn(Mockito.mock(DiskOfferingVO.class)).when(diskOfferingDaoMock).findById(Mockito.anyLong());
|
||||
Mockito.doReturn(Mockito.mock(DiskOfferingVO.class)).when(diskOfferingDaoMock).findById(anyLong());
|
||||
|
||||
Mockito.doReturn(false).when(storagePoolVoMock).isLocal();
|
||||
|
||||
List<StoragePool> poolListMock = new ArrayList<>();
|
||||
poolListMock.add(storagePoolVoMock);
|
||||
|
||||
Mockito.doReturn(poolListMock).when(storagePoolAllocatorMock).allocateToPool(Mockito.any(DiskProfile.class), Mockito.any(VirtualMachineProfile.class), Mockito.any(DeploymentPlan.class),
|
||||
Mockito.any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL));
|
||||
Mockito.doReturn(poolListMock).when(storagePoolAllocatorMock).allocateToPool(any(DiskProfile.class), any(VirtualMachineProfile.class), any(DeploymentPlan.class),
|
||||
any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL));
|
||||
|
||||
Mockito.doReturn(false).when(virtualMachineManagerImpl).isStorageCrossClusterMigration(clusterMockId, storagePoolVoMock);
|
||||
List<StoragePool> poolList = virtualMachineManagerImpl.getCandidateStoragePoolsToMigrateLocalVolume(virtualMachineProfileMock, dataCenterDeploymentMock, volumeVoMock);
|
||||
@ -593,33 +629,33 @@ public class VirtualMachineManagerImplTest {
|
||||
|
||||
virtualMachineManagerImpl.setStoragePoolAllocators(storagePoolAllocatorsMock);
|
||||
|
||||
Mockito.doReturn(Mockito.mock(DiskOfferingVO.class)).when(diskOfferingDaoMock).findById(Mockito.anyLong());
|
||||
Mockito.doReturn(Mockito.mock(DiskOfferingVO.class)).when(diskOfferingDaoMock).findById(anyLong());
|
||||
|
||||
Mockito.doReturn(false).when(storagePoolVoMock).isLocal();
|
||||
|
||||
List<StoragePool> poolListMock = new ArrayList<>();
|
||||
poolListMock.add(storagePoolVoMock);
|
||||
|
||||
Mockito.doReturn(poolListMock).when(storagePoolAllocatorMock).allocateToPool(Mockito.any(DiskProfile.class), Mockito.any(VirtualMachineProfile.class), Mockito.any(DeploymentPlan.class),
|
||||
Mockito.any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL));
|
||||
Mockito.doReturn(poolListMock).when(storagePoolAllocatorMock).allocateToPool(any(DiskProfile.class), any(VirtualMachineProfile.class), any(DeploymentPlan.class),
|
||||
any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL));
|
||||
|
||||
Mockito.doReturn(null).when(storagePoolAllocatorMock2).allocateToPool(Mockito.any(DiskProfile.class), Mockito.any(VirtualMachineProfile.class), Mockito.any(DeploymentPlan.class),
|
||||
Mockito.any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL));
|
||||
Mockito.doReturn(null).when(storagePoolAllocatorMock2).allocateToPool(any(DiskProfile.class), any(VirtualMachineProfile.class), any(DeploymentPlan.class),
|
||||
any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL));
|
||||
|
||||
Mockito.doReturn(new ArrayList<>()).when(storagePoolAllocatorMock3).allocateToPool(Mockito.any(DiskProfile.class), Mockito.any(VirtualMachineProfile.class), Mockito.any(DeploymentPlan.class),
|
||||
Mockito.any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL));
|
||||
Mockito.doReturn(new ArrayList<>()).when(storagePoolAllocatorMock3).allocateToPool(any(DiskProfile.class), any(VirtualMachineProfile.class), any(DeploymentPlan.class),
|
||||
any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL));
|
||||
|
||||
Mockito.doReturn(false).when(virtualMachineManagerImpl).isStorageCrossClusterMigration(clusterMockId, storagePoolVoMock);
|
||||
List<StoragePool> poolList = virtualMachineManagerImpl.getCandidateStoragePoolsToMigrateLocalVolume(virtualMachineProfileMock, dataCenterDeploymentMock, volumeVoMock);
|
||||
|
||||
Assert.assertTrue(poolList.isEmpty());
|
||||
|
||||
Mockito.verify(storagePoolAllocatorMock).allocateToPool(Mockito.any(DiskProfile.class), Mockito.any(VirtualMachineProfile.class), Mockito.any(DeploymentPlan.class),
|
||||
Mockito.any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL));
|
||||
Mockito.verify(storagePoolAllocatorMock2).allocateToPool(Mockito.any(DiskProfile.class), Mockito.any(VirtualMachineProfile.class), Mockito.any(DeploymentPlan.class),
|
||||
Mockito.any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL));
|
||||
Mockito.verify(storagePoolAllocatorMock3).allocateToPool(Mockito.any(DiskProfile.class), Mockito.any(VirtualMachineProfile.class), Mockito.any(DeploymentPlan.class),
|
||||
Mockito.any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL));
|
||||
Mockito.verify(storagePoolAllocatorMock).allocateToPool(any(DiskProfile.class), any(VirtualMachineProfile.class), any(DeploymentPlan.class),
|
||||
any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL));
|
||||
Mockito.verify(storagePoolAllocatorMock2).allocateToPool(any(DiskProfile.class), any(VirtualMachineProfile.class), any(DeploymentPlan.class),
|
||||
any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL));
|
||||
Mockito.verify(storagePoolAllocatorMock3).allocateToPool(any(DiskProfile.class), any(VirtualMachineProfile.class), any(DeploymentPlan.class),
|
||||
any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL));
|
||||
}
|
||||
|
||||
@Test(expected = CloudRuntimeException.class)
|
||||
@ -708,8 +744,8 @@ public class VirtualMachineManagerImplTest {
|
||||
HashMap<Volume, StoragePool> volumeToPoolObjectMap = new HashMap<>();
|
||||
|
||||
Mockito.doReturn(ScopeType.CLUSTER).when(storagePoolVoMock).getScope();
|
||||
Mockito.doNothing().when(virtualMachineManagerImpl).executeManagedStorageChecksWhenTargetStoragePoolNotProvided(Mockito.any(), Mockito.any(), Mockito.any());
|
||||
Mockito.doReturn(false).when(virtualMachineManagerImpl).isStorageCrossClusterMigration(Mockito.anyLong(), Mockito.any());
|
||||
Mockito.doNothing().when(virtualMachineManagerImpl).executeManagedStorageChecksWhenTargetStoragePoolNotProvided(any(), any(), any());
|
||||
Mockito.doReturn(false).when(virtualMachineManagerImpl).isStorageCrossClusterMigration(anyLong(), any());
|
||||
|
||||
virtualMachineManagerImpl.createStoragePoolMappingsForVolumes(virtualMachineProfileMock, dataCenterDeploymentMock, volumeToPoolObjectMap, allVolumes);
|
||||
|
||||
@ -732,7 +768,7 @@ public class VirtualMachineManagerImplTest {
|
||||
|
||||
Mockito.doReturn(volumesNotMapped).when(virtualMachineManagerImpl).findVolumesThatWereNotMappedByTheUser(virtualMachineProfileMock, volumeToPoolObjectMap);
|
||||
Mockito.doNothing().when(virtualMachineManagerImpl).createStoragePoolMappingsForVolumes(Mockito.eq(virtualMachineProfileMock),
|
||||
Mockito.any(DataCenterDeployment.class), Mockito.eq(volumeToPoolObjectMap), Mockito.eq(volumesNotMapped));
|
||||
any(DataCenterDeployment.class), Mockito.eq(volumeToPoolObjectMap), Mockito.eq(volumesNotMapped));
|
||||
|
||||
Map<Volume, StoragePool> mappingVolumeAndStoragePool = virtualMachineManagerImpl.createMappingVolumeAndStoragePool(virtualMachineProfileMock, hostMock, new HashMap<>());
|
||||
|
||||
@ -742,7 +778,7 @@ public class VirtualMachineManagerImplTest {
|
||||
inOrder.verify(virtualMachineManagerImpl).buildMapUsingUserInformation(Mockito.eq(virtualMachineProfileMock), Mockito.eq(hostMock), Mockito.anyMapOf(Long.class, Long.class));
|
||||
inOrder.verify(virtualMachineManagerImpl).findVolumesThatWereNotMappedByTheUser(virtualMachineProfileMock, volumeToPoolObjectMap);
|
||||
inOrder.verify(virtualMachineManagerImpl).createStoragePoolMappingsForVolumes(Mockito.eq(virtualMachineProfileMock),
|
||||
Mockito.any(DataCenterDeployment.class), Mockito.eq(volumeToPoolObjectMap), Mockito.eq(volumesNotMapped));
|
||||
any(DataCenterDeployment.class), Mockito.eq(volumeToPoolObjectMap), Mockito.eq(volumesNotMapped));
|
||||
}
|
||||
|
||||
@Test
|
||||
@ -796,11 +832,11 @@ public class VirtualMachineManagerImplTest {
|
||||
|
||||
private void prepareAndTestIsRootVolumeOnLocalStorage(ScopeType scope, boolean expected) {
|
||||
StoragePoolVO storagePoolVoMock = Mockito.mock(StoragePoolVO.class);
|
||||
Mockito.doReturn(storagePoolVoMock).when(storagePoolDaoMock).findById(Mockito.anyLong());
|
||||
Mockito.doReturn(storagePoolVoMock).when(storagePoolDaoMock).findById(anyLong());
|
||||
Mockito.doReturn(scope).when(storagePoolVoMock).getScope();
|
||||
List<VolumeVO> mockedVolumes = new ArrayList<>();
|
||||
mockedVolumes.add(volumeVoMock);
|
||||
Mockito.doReturn(mockedVolumes).when(volumeDaoMock).findByInstanceAndType(Mockito.anyLong(), Mockito.any());
|
||||
Mockito.doReturn(mockedVolumes).when(volumeDaoMock).findByInstanceAndType(anyLong(), any());
|
||||
|
||||
boolean result = virtualMachineManagerImpl.isRootVolumeOnLocalStorage(0l);
|
||||
|
||||
@ -828,7 +864,7 @@ public class VirtualMachineManagerImplTest {
|
||||
}
|
||||
|
||||
private void prepareAndRunCheckIfNewOfferingStorageScopeMatchesStoragePool(boolean isRootOnLocal, boolean isOfferingUsingLocal) {
|
||||
Mockito.doReturn(isRootOnLocal).when(virtualMachineManagerImpl).isRootVolumeOnLocalStorage(Mockito.anyLong());
|
||||
Mockito.doReturn(isRootOnLocal).when(virtualMachineManagerImpl).isRootVolumeOnLocalStorage(anyLong());
|
||||
Mockito.doReturn("vmInstanceMockedToString").when(vmInstanceMock).toString();
|
||||
Mockito.doReturn(isOfferingUsingLocal).when(diskOfferingMock).isUseLocalStorage();
|
||||
virtualMachineManagerImpl.checkIfNewOfferingStorageScopeMatchesStoragePool(vmInstanceMock, diskOfferingMock);
|
||||
@ -924,7 +960,8 @@ public class VirtualMachineManagerImplTest {
|
||||
VirtualMachine.Type.User, 1L, HypervisorType.KVM, 1L, 1L, 1L,
|
||||
1L, false, false);
|
||||
|
||||
VirtualMachineTO vmTO = new VirtualMachineTO() {};
|
||||
VirtualMachineTO vmTO = new VirtualMachineTO() {
|
||||
};
|
||||
UserVmJoinVO userVm = new UserVmJoinVO();
|
||||
NetworkVO networkVO = mock(NetworkVO.class);
|
||||
AccountVO accountVO = mock(AccountVO.class);
|
||||
@ -958,4 +995,189 @@ public class VirtualMachineManagerImplTest {
|
||||
assertEquals(vmTO.getNetworkIdToNetworkNameMap().size(), 1);
|
||||
assertEquals(vmTO.getNetworkIdToNetworkNameMap().get(5L), "D3-A2-Z1-V4-S5");
|
||||
}
|
||||
|
||||
public void testOrchestrateStartNonNullPodId() throws Exception {
|
||||
VMInstanceVO vmInstance = new VMInstanceVO();
|
||||
ReflectionTestUtils.setField(vmInstance, "id", 1L);
|
||||
ReflectionTestUtils.setField(vmInstance, "uuid", "vm-uuid");
|
||||
ReflectionTestUtils.setField(vmInstance, "serviceOfferingId", 2L);
|
||||
ReflectionTestUtils.setField(vmInstance, "instanceName", "myVm");
|
||||
ReflectionTestUtils.setField(vmInstance, "hostId", 2L);
|
||||
ReflectionTestUtils.setField(vmInstance, "type", VirtualMachine.Type.User);
|
||||
ReflectionTestUtils.setField(vmInstance, "dataCenterId", 1L);
|
||||
ReflectionTestUtils.setField(vmInstance, "hypervisorType", HypervisorType.KVM);
|
||||
|
||||
VirtualMachineGuru vmGuru = mock(VirtualMachineGuru.class);
|
||||
|
||||
User user = mock(User.class);
|
||||
|
||||
Account account = mock(Account.class);
|
||||
|
||||
ReservationContext ctx = mock(ReservationContext.class);
|
||||
|
||||
ItWorkVO work = mock(ItWorkVO.class);
|
||||
|
||||
ServiceOfferingVO serviceOffering = mock(ServiceOfferingVO.class);
|
||||
|
||||
VirtualMachineTemplate template = mock(VirtualMachineTemplate.class);
|
||||
when(template.isDeployAsIs()).thenReturn(false);
|
||||
|
||||
DataCenterDeployment plan = mock(DataCenterDeployment.class);
|
||||
when(plan.getDataCenterId()).thenReturn(1L);
|
||||
when(plan.getPodId()).thenReturn(1L);
|
||||
|
||||
Map<VirtualMachineProfile.Param, Object> params = new HashMap<>();
|
||||
|
||||
DeploymentPlanner planner = mock(DeploymentPlanner.class);
|
||||
|
||||
when(vmInstanceDaoMock.findByUuid("vm-uuid")).thenReturn(vmInstance);
|
||||
|
||||
doReturn(vmGuru).when(virtualMachineManagerImpl).getVmGuru(vmInstance);
|
||||
|
||||
Ternary<VMInstanceVO, ReservationContext, ItWorkVO> start = new Ternary<>(vmInstance, ctx, work);
|
||||
Mockito.doReturn(start).when(virtualMachineManagerImpl).changeToStartState(vmGuru, vmInstance, user, account);
|
||||
|
||||
when(ctx.getJournal()).thenReturn(Mockito.mock(Journal.class));
|
||||
|
||||
when(serviceOfferingDaoMock.findById(vmInstance.getId(), vmInstance.getServiceOfferingId())).thenReturn(serviceOffering);
|
||||
|
||||
when(_entityMgr.findByIdIncludingRemoved(VirtualMachineTemplate.class, vmInstance.getTemplateId())).thenReturn(template);
|
||||
|
||||
Host destHost = mock(Host.class);
|
||||
Pod destPod = mock(Pod.class);
|
||||
DeployDestination dest = mock(DeployDestination.class);
|
||||
when(dest.getHost()).thenReturn(destHost);
|
||||
when(dest.getPod()).thenReturn(destPod);
|
||||
when(dest.getCluster()).thenReturn(mock(Cluster.class));
|
||||
when(destHost.getId()).thenReturn(1L);
|
||||
when(destPod.getId()).thenReturn(2L);
|
||||
when(_dpMgr.planDeployment(any(VirtualMachineProfileImpl.class), any(DataCenterDeployment.class), any(ExcludeList.class), any(DeploymentPlanner.class))).thenReturn(dest);
|
||||
|
||||
doNothing().when(virtualMachineManagerImpl).checkIfTemplateNeededForCreatingVmVolumes(vmInstance);
|
||||
|
||||
when(_workDao.updateStep(any(), any())).thenReturn(true);
|
||||
when(_stateMachine.transitTo(vmInstance, VirtualMachine.Event.OperationRetry, new Pair(vmInstance.getHostId(), 1L), vmInstanceDaoMock)).thenThrow(new CloudRuntimeException("Error while transitioning"));
|
||||
when(_stateMachine.transitTo(vmInstance, VirtualMachine.Event.OperationFailed, new Pair(vmInstance.getHostId(), null), vmInstanceDaoMock)).thenReturn(true);
|
||||
|
||||
|
||||
Cluster cluster = mock(Cluster.class);
|
||||
when(dest.getCluster()).thenReturn(cluster);
|
||||
ClusterDetailsVO cluster_detail_cpu = mock(ClusterDetailsVO.class);
|
||||
ClusterDetailsVO cluster_detail_ram = mock(ClusterDetailsVO.class);
|
||||
when(cluster.getId()).thenReturn(1L);
|
||||
when(_clusterDetailsDao.findDetail(1L, VmDetailConstants.CPU_OVER_COMMIT_RATIO)).thenReturn(cluster_detail_cpu);
|
||||
when(_clusterDetailsDao.findDetail(1L, VmDetailConstants.MEMORY_OVER_COMMIT_RATIO)).thenReturn(cluster_detail_ram);
|
||||
when(userVmDetailsDao.findDetail(anyLong(), Mockito.anyString())).thenReturn(null);
|
||||
when(cluster_detail_cpu.getValue()).thenReturn("1.0");
|
||||
when(cluster_detail_ram.getValue()).thenReturn("1.0");
|
||||
doReturn(false).when(virtualMachineManagerImpl).areAllVolumesAllocated(Mockito.anyLong());
|
||||
|
||||
CallContext callContext = mock(CallContext.class);
|
||||
when(callContext.getCallingAccount()).thenReturn(account);
|
||||
when(callContext.getCallingUser()).thenReturn(user);
|
||||
try (MockedStatic<CallContext> ignored = Mockito.mockStatic(CallContext.class)) {
|
||||
when(CallContext.current()).thenReturn(callContext);
|
||||
|
||||
try {
|
||||
virtualMachineManagerImpl.orchestrateStart("vm-uuid", params, plan, planner);
|
||||
} catch (CloudRuntimeException e) {
|
||||
assertEquals(e.getMessage(), "Error while transitioning");
|
||||
}
|
||||
}
|
||||
|
||||
assertEquals(vmInstance.getPodIdToDeployIn(), (Long) destPod.getId());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testOrchestrateStartNullPodId() throws Exception {
|
||||
VMInstanceVO vmInstance = new VMInstanceVO();
|
||||
ReflectionTestUtils.setField(vmInstance, "id", 1L);
|
||||
ReflectionTestUtils.setField(vmInstance, "uuid", "vm-uuid");
|
||||
ReflectionTestUtils.setField(vmInstance, "serviceOfferingId", 2L);
|
||||
ReflectionTestUtils.setField(vmInstance, "instanceName", "myVm");
|
||||
ReflectionTestUtils.setField(vmInstance, "hostId", 2L);
|
||||
ReflectionTestUtils.setField(vmInstance, "type", VirtualMachine.Type.User);
|
||||
ReflectionTestUtils.setField(vmInstance, "dataCenterId", 1L);
|
||||
ReflectionTestUtils.setField(vmInstance, "hypervisorType", HypervisorType.KVM);
|
||||
|
||||
VirtualMachineGuru vmGuru = mock(VirtualMachineGuru.class);
|
||||
|
||||
User user = mock(User.class);
|
||||
|
||||
Account account = mock(Account.class);
|
||||
|
||||
ReservationContext ctx = mock(ReservationContext.class);
|
||||
|
||||
ItWorkVO work = mock(ItWorkVO.class);
|
||||
|
||||
ServiceOfferingVO serviceOffering = mock(ServiceOfferingVO.class);
|
||||
|
||||
VirtualMachineTemplate template = mock(VirtualMachineTemplate.class);
|
||||
when(template.isDeployAsIs()).thenReturn(false);
|
||||
|
||||
DataCenterDeployment plan = mock(DataCenterDeployment.class);
|
||||
when(plan.getDataCenterId()).thenReturn(1L);
|
||||
when(plan.getPodId()).thenReturn(1L);
|
||||
|
||||
Map<VirtualMachineProfile.Param, Object> params = new HashMap<>();
|
||||
|
||||
DeploymentPlanner planner = mock(DeploymentPlanner.class);
|
||||
|
||||
when(vmInstanceDaoMock.findByUuid("vm-uuid")).thenReturn(vmInstance);
|
||||
|
||||
doReturn(vmGuru).when(virtualMachineManagerImpl).getVmGuru(vmInstance);
|
||||
|
||||
Ternary<VMInstanceVO, ReservationContext, ItWorkVO> start = new Ternary<>(vmInstance, ctx, work);
|
||||
Mockito.doReturn(start).when(virtualMachineManagerImpl).changeToStartState(vmGuru, vmInstance, user, account);
|
||||
|
||||
when(ctx.getJournal()).thenReturn(Mockito.mock(Journal.class));
|
||||
|
||||
when(serviceOfferingDaoMock.findById(vmInstance.getId(), vmInstance.getServiceOfferingId())).thenReturn(serviceOffering);
|
||||
|
||||
when(_entityMgr.findByIdIncludingRemoved(VirtualMachineTemplate.class, vmInstance.getTemplateId())).thenReturn(template);
|
||||
|
||||
Host destHost = mock(Host.class);
|
||||
Pod destPod = mock(Pod.class);
|
||||
DeployDestination dest = mock(DeployDestination.class);
|
||||
when(dest.getHost()).thenReturn(destHost);
|
||||
when(dest.getPod()).thenReturn(destPod);
|
||||
when(dest.getCluster()).thenReturn(mock(Cluster.class));
|
||||
when(destHost.getId()).thenReturn(1L);
|
||||
when(destPod.getId()).thenReturn(2L);
|
||||
when(_dpMgr.planDeployment(any(VirtualMachineProfileImpl.class), any(DataCenterDeployment.class), any(ExcludeList.class), any(DeploymentPlanner.class))).thenReturn(dest);
|
||||
|
||||
doNothing().when(virtualMachineManagerImpl).checkIfTemplateNeededForCreatingVmVolumes(vmInstance);
|
||||
|
||||
when(_workDao.updateStep(any(), any())).thenReturn(true);
|
||||
when(_stateMachine.transitTo(vmInstance, VirtualMachine.Event.OperationRetry, new Pair(vmInstance.getHostId(), 1L), vmInstanceDaoMock)).thenThrow(new CloudRuntimeException("Error while transitioning"));
|
||||
when(_stateMachine.transitTo(vmInstance, VirtualMachine.Event.OperationFailed, new Pair(vmInstance.getHostId(), null), vmInstanceDaoMock)).thenReturn(true);
|
||||
|
||||
|
||||
Cluster cluster = mock(Cluster.class);
|
||||
when(dest.getCluster()).thenReturn(cluster);
|
||||
ClusterDetailsVO cluster_detail_cpu = mock(ClusterDetailsVO.class);
|
||||
ClusterDetailsVO cluster_detail_ram = mock(ClusterDetailsVO.class);
|
||||
when(cluster.getId()).thenReturn(1L);
|
||||
when(_clusterDetailsDao.findDetail(1L, VmDetailConstants.CPU_OVER_COMMIT_RATIO)).thenReturn(cluster_detail_cpu);
|
||||
when(_clusterDetailsDao.findDetail(1L, VmDetailConstants.MEMORY_OVER_COMMIT_RATIO)).thenReturn(cluster_detail_ram);
|
||||
when(userVmDetailsDao.findDetail(anyLong(), Mockito.anyString())).thenReturn(null);
|
||||
when(cluster_detail_cpu.getValue()).thenReturn("1.0");
|
||||
when(cluster_detail_ram.getValue()).thenReturn("1.0");
|
||||
doReturn(true).when(virtualMachineManagerImpl).areAllVolumesAllocated(Mockito.anyLong());
|
||||
|
||||
CallContext callContext = mock(CallContext.class);
|
||||
when(callContext.getCallingAccount()).thenReturn(account);
|
||||
when(callContext.getCallingUser()).thenReturn(user);
|
||||
try (MockedStatic<CallContext> ignored = Mockito.mockStatic(CallContext.class)) {
|
||||
when(CallContext.current()).thenReturn(callContext);
|
||||
|
||||
try {
|
||||
virtualMachineManagerImpl.orchestrateStart("vm-uuid", params, plan, planner);
|
||||
} catch (CloudRuntimeException e) {
|
||||
assertEquals(e.getMessage(), "Error while transitioning");
|
||||
}
|
||||
}
|
||||
|
||||
assertNull(vmInstance.getPodIdToDeployIn());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1 @@
mock-maker-inline
@@ -53,6 +53,11 @@
<artifactId>cloud-utils</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.mariadb.jdbc</groupId>
<artifactId>mariadb-java-client</artifactId>
<version>3.1.4</version>
</dependency>
</dependencies>
<build>
<plugins>
@@ -38,6 +38,7 @@ public class DriverLoader {
DRIVERS.put("jdbc:mysql", "com.mysql.cj.jdbc.Driver");
DRIVERS.put("jdbc:postgresql", "org.postgresql.Driver");
DRIVERS.put("jdbc:h2", "org.h2.Driver");
DRIVERS.put("jdbc:mariadb", "org.mariadb.jdbc.Driver");

LOADED_DRIVERS = new ArrayList<String>();
}
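A brief illustration of what the new entry enables (the URI below is hypothetical; only the jdbc:mariadb prefix and the driver class come from this hunk): callers such as TransactionLegacy derive the driver key from the first two segments of the connection URI, so a URI like jdbc:mariadb://db-host:3306/cloud now resolves to a loadable driver:

    DriverLoader.loadDriver("jdbc:mariadb");   // loads org.mariadb.jdbc.Driver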
@@ -38,6 +38,7 @@ import org.apache.commons.dbcp2.DriverManagerConnectionFactory;
import org.apache.commons.dbcp2.PoolableConnection;
import org.apache.commons.dbcp2.PoolableConnectionFactory;
import org.apache.commons.dbcp2.PoolingDataSource;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.pool2.ObjectPool;
import org.apache.commons.pool2.impl.GenericObjectPool;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
@@ -1001,7 +1002,7 @@ public class TransactionLegacy implements Closeable {
private static DataSource s_ds;
private static DataSource s_usageDS;
private static DataSource s_simulatorDS;
private static boolean s_dbHAEnabled;
protected static boolean s_dbHAEnabled;

static {
// Initialize with assumed db.properties file
@@ -1032,11 +1033,6 @@ public class TransactionLegacy implements Closeable {
final long cloudMaxWait = Long.parseLong(dbProps.getProperty("db.cloud.maxWait"));
final String cloudUsername = dbProps.getProperty("db.cloud.username");
final String cloudPassword = dbProps.getProperty("db.cloud.password");
final String cloudHost = dbProps.getProperty("db.cloud.host");
final String cloudDriver = dbProps.getProperty("db.cloud.driver");
final int cloudPort = Integer.parseInt(dbProps.getProperty("db.cloud.port"));
final String cloudDbName = dbProps.getProperty("db.cloud.name");
final boolean cloudAutoReconnect = Boolean.parseBoolean(dbProps.getProperty("db.cloud.autoReconnect"));
final String cloudValidationQuery = dbProps.getProperty("db.cloud.validationQuery");
final String cloudIsolationLevel = dbProps.getProperty("db.cloud.isolation.level");
@@ -1059,16 +1055,6 @@ public class TransactionLegacy implements Closeable {
final boolean cloudTestWhileIdle = Boolean.parseBoolean(dbProps.getProperty("db.cloud.testWhileIdle"));
final long cloudTimeBtwEvictionRunsMillis = Long.parseLong(dbProps.getProperty("db.cloud.timeBetweenEvictionRunsMillis"));
final long cloudMinEvcitableIdleTimeMillis = Long.parseLong(dbProps.getProperty("db.cloud.minEvictableIdleTimeMillis"));
final boolean cloudPoolPreparedStatements = Boolean.parseBoolean(dbProps.getProperty("db.cloud.poolPreparedStatements"));
final String url = dbProps.getProperty("db.cloud.url.params");

String cloudDbHAParams = null;
String cloudReplicas = null;
if (s_dbHAEnabled) {
cloudDbHAParams = getDBHAParams("cloud", dbProps);
cloudReplicas = dbProps.getProperty("db.cloud.replicas");
s_logger.info("The replicas configured for Cloud Data base is/are : " + cloudReplicas);
}

final boolean useSSL = Boolean.parseBoolean(dbProps.getProperty("db.cloud.useSSL"));
if (useSSL) {
@@ -1078,13 +1064,12 @@ public class TransactionLegacy implements Closeable {
System.setProperty("javax.net.ssl.trustStorePassword", dbProps.getProperty("db.cloud.trustStorePassword"));
}

final String cloudConnectionUri = cloudDriver + "://" + cloudHost + (s_dbHAEnabled ? "," + cloudReplicas : "") + ":" + cloudPort + "/" + cloudDbName +
"?autoReconnect=" + cloudAutoReconnect + (url != null ? "&" + url : "") + (useSSL ? "&useSSL=true" : "") +
(s_dbHAEnabled ? "&" + cloudDbHAParams : "") + (s_dbHAEnabled ? "&loadBalanceStrategy=" + loadBalanceStrategy : "");
DriverLoader.loadDriver(cloudDriver);
Pair<String, String> cloudUriAndDriver = getConnectionUriAndDriver(dbProps, loadBalanceStrategy, useSSL, "cloud");

DriverLoader.loadDriver(cloudUriAndDriver.second());

// Default Data Source for CloudStack
s_ds = createDataSource(cloudConnectionUri, cloudUsername, cloudPassword, cloudMaxActive, cloudMaxIdle, cloudMaxWait,
s_ds = createDataSource(cloudUriAndDriver.first(), cloudUsername, cloudPassword, cloudMaxActive, cloudMaxIdle, cloudMaxWait,
cloudTimeBtwEvictionRunsMillis, cloudMinEvcitableIdleTimeMillis, cloudTestWhileIdle, cloudTestOnBorrow,
cloudValidationQuery, isolationLevel);
@@ -1094,20 +1079,13 @@ public class TransactionLegacy implements Closeable {
final long usageMaxWait = Long.parseLong(dbProps.getProperty("db.usage.maxWait"));
final String usageUsername = dbProps.getProperty("db.usage.username");
final String usagePassword = dbProps.getProperty("db.usage.password");
final String usageHost = dbProps.getProperty("db.usage.host");
final String usageDriver = dbProps.getProperty("db.usage.driver");
final int usagePort = Integer.parseInt(dbProps.getProperty("db.usage.port"));
final String usageDbName = dbProps.getProperty("db.usage.name");
final boolean usageAutoReconnect = Boolean.parseBoolean(dbProps.getProperty("db.usage.autoReconnect"));
final String usageUrl = dbProps.getProperty("db.usage.url.params");

final String usageConnectionUri = usageDriver + "://" + usageHost + (s_dbHAEnabled ? "," + dbProps.getProperty("db.cloud.replicas") : "") + ":" + usagePort +
"/" + usageDbName + "?autoReconnect=" + usageAutoReconnect + (usageUrl != null ? "&" + usageUrl : "") +
(s_dbHAEnabled ? "&" + getDBHAParams("usage", dbProps) : "") + (s_dbHAEnabled ? "&loadBalanceStrategy=" + loadBalanceStrategy : "");
DriverLoader.loadDriver(usageDriver);
Pair<String, String> usageUriAndDriver = getConnectionUriAndDriver(dbProps, loadBalanceStrategy, useSSL, "usage");

DriverLoader.loadDriver(usageUriAndDriver.second());

// Data Source for usage server
s_usageDS = createDataSource(usageConnectionUri, usageUsername, usagePassword,
s_usageDS = createDataSource(usageUriAndDriver.first(), usageUsername, usagePassword,
usageMaxActive, usageMaxIdle, usageMaxWait, null, null, null, null,
null, isolationLevel);
@@ -1118,14 +1096,28 @@ public class TransactionLegacy implements Closeable {
final long simulatorMaxWait = Long.parseLong(dbProps.getProperty("db.simulator.maxWait"));
final String simulatorUsername = dbProps.getProperty("db.simulator.username");
final String simulatorPassword = dbProps.getProperty("db.simulator.password");
final String simulatorHost = dbProps.getProperty("db.simulator.host");
final String simulatorDriver = dbProps.getProperty("db.simulator.driver");
final int simulatorPort = Integer.parseInt(dbProps.getProperty("db.simulator.port"));
final String simulatorDbName = dbProps.getProperty("db.simulator.name");
final boolean simulatorAutoReconnect = Boolean.parseBoolean(dbProps.getProperty("db.simulator.autoReconnect"));

final String simulatorConnectionUri = simulatorDriver + "://" + simulatorHost + ":" + simulatorPort + "/" + simulatorDbName + "?autoReconnect=" +
simulatorAutoReconnect;
String simulatorDriver;
String simulatorConnectionUri;
String simulatorUri = dbProps.getProperty("db.simulator.uri");

if (StringUtils.isEmpty(simulatorUri)) {
simulatorDriver = dbProps.getProperty("db.simulator.driver");
final int simulatorPort = Integer.parseInt(dbProps.getProperty("db.simulator.port"));
final String simulatorDbName = dbProps.getProperty("db.simulator.name");
final boolean simulatorAutoReconnect = Boolean.parseBoolean(dbProps.getProperty("db.simulator.autoReconnect"));
final String simulatorHost = dbProps.getProperty("db.simulator.host");

simulatorConnectionUri = simulatorDriver + "://" + simulatorHost + ":" + simulatorPort + "/" + simulatorDbName + "?autoReconnect=" +
simulatorAutoReconnect;
} else {
s_logger.warn("db.simulator.uri was set, ignoring the following properties on db.properties: [db.simulator.driver, db.simulator.host, db.simulator.port, "
+ "db.simulator.name, db.simulator.autoReconnect].");
String[] splitUri = simulatorUri.split(":");
simulatorDriver = String.format("%s:%s", splitUri[0], splitUri[1]);
simulatorConnectionUri = simulatorUri;
}

DriverLoader.loadDriver(simulatorDriver);

s_simulatorDS = createDataSource(simulatorConnectionUri, simulatorUsername, simulatorPassword,
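A worked example of the new branch, using a hypothetical value:

    db.simulator.uri=jdbc:mysql://sim-host:3306/simulator   ->   simulatorDriver = "jdbc:mysql", URI used verbatim

The split on ':' takes the first two segments as the driver key, and the individual db.simulator.host/port/name/autoReconnect properties are ignored with the warning shown above.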
@@ -1143,6 +1135,85 @@
}
}

protected static Pair<String, String> getConnectionUriAndDriver(Properties dbProps, String loadBalanceStrategy, boolean useSSL, String schema) {
String connectionUri;
String driver;
String propertyUri = dbProps.getProperty(String.format("db.%s.uri", schema));

if (StringUtils.isEmpty(propertyUri)) {
driver = dbProps.getProperty(String.format("db.%s.driver", schema));
connectionUri = getPropertiesAndBuildConnectionUri(dbProps, loadBalanceStrategy, driver, useSSL, schema);
} else {
s_logger.warn(String.format("db.%s.uri was set, ignoring the following properties for schema %s of db.properties: [host, port, name, driver, autoReconnect, url.params,"
+ " replicas, ha.loadBalanceStrategy, ha.enable, failOverReadOnly, reconnectAtTxEnd, autoReconnectForPools, secondsBeforeRetrySource, queriesBeforeRetrySource, "
+ "initialTimeout].", schema, schema));

String[] splitUri = propertyUri.split(":");
driver = String.format("%s:%s", splitUri[0], splitUri[1]);

connectionUri = propertyUri;
}
s_logger.info(String.format("Using the following URI to connect to %s database [%s].", schema, connectionUri));
return new Pair<>(connectionUri, driver);
}

protected static String getPropertiesAndBuildConnectionUri(Properties dbProps, String loadBalanceStrategy, String driver, boolean useSSL, String schema) {
String host = dbProps.getProperty(String.format("db.%s.host", schema));
int port = Integer.parseInt(dbProps.getProperty(String.format("db.%s.port", schema)));
String dbName = dbProps.getProperty(String.format("db.%s.name", schema));
boolean autoReconnect = Boolean.parseBoolean(dbProps.getProperty(String.format("db.%s.autoReconnect", schema)));
String urlParams = dbProps.getProperty(String.format("db.%s.url.params", schema));

String replicas = null;
String dbHaParams = null;
if (s_dbHAEnabled) {
dbHaParams = getDBHAParams(schema, dbProps);
replicas = dbProps.getProperty(String.format("db.%s.replicas", schema));
s_logger.info(String.format("The replicas configured for %s data base are %s.", schema, replicas));
}

return buildConnectionUri(loadBalanceStrategy, driver, useSSL, host, replicas, port, dbName, autoReconnect, urlParams, dbHaParams);
}

protected static String buildConnectionUri(String loadBalanceStrategy, String driver, boolean useSSL, String host, String replicas, int port, String dbName, boolean autoReconnect,
String urlParams, String dbHaParams) {

StringBuilder connectionUri = new StringBuilder();
connectionUri.append(driver);
connectionUri.append("://");
connectionUri.append(host);

if (s_dbHAEnabled) {
connectionUri.append(",");
connectionUri.append(replicas);
}

connectionUri.append(":");
connectionUri.append(port);
connectionUri.append("/");
connectionUri.append(dbName);
connectionUri.append("?autoReconnect=");
connectionUri.append(autoReconnect);

if (urlParams != null) {
connectionUri.append("&");
connectionUri.append(urlParams);
}

if (useSSL) {
connectionUri.append("&useSSL=true");
}

if (s_dbHAEnabled) {
connectionUri.append("&");
connectionUri.append(dbHaParams);
connectionUri.append("&loadBalanceStrategy=");
connectionUri.append(loadBalanceStrategy);
}

return connectionUri.toString();
}

/**
* Creates a data source
*/
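To make the assembled shape concrete, a hedged walk-through of buildConnectionUri with hypothetical arguments (HA disabled, SSL enabled, driver "jdbc:mysql", host "db1", port 3306, name "cloud", autoReconnect true, urlParams "serverTimezone=UTC") would yield:

    jdbc:mysql://db1:3306/cloud?autoReconnect=true&serverTimezone=UTC&useSSL=true

With s_dbHAEnabled set, the replica list is appended to the host and the HA parameters plus loadBalanceStrategy are appended to the query string; the TransactionLegacyTest hunk that follows exercises both shapes.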
@ -0,0 +1,117 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.utils.db;
|
||||
|
||||
import com.cloud.utils.Pair;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
|
||||
import java.util.Properties;
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
|
||||
public class TransactionLegacyTest {
|
||||
|
||||
Properties properties;
|
||||
|
||||
@Before
|
||||
public void setup(){
|
||||
properties = new Properties();
|
||||
properties.setProperty("db.cloud.host", "host");
|
||||
properties.setProperty("db.cloud.port", "5555");
|
||||
properties.setProperty("db.cloud.name", "name");
|
||||
properties.setProperty("db.cloud.autoReconnect", "false");
|
||||
properties.setProperty("db.cloud.url.params", "someParams");
|
||||
TransactionLegacy.s_dbHAEnabled = false;
|
||||
}
|
||||
@Test
|
||||
public void getConnectionUriAndDriverTestWithoutUri() {
|
||||
properties.setProperty("db.cloud.uri", "");
|
||||
properties.setProperty("db.cloud.driver", "driver");
|
||||
|
||||
Pair<String, String> result = TransactionLegacy.getConnectionUriAndDriver(properties, null, false, "cloud");
|
||||
|
||||
Assert.assertEquals("driver://host:5555/name?autoReconnect=false&someParams", result.first());
|
||||
Assert.assertEquals("driver", result.second());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getConnectionUriAndDriverTestWithUri() {
|
||||
properties.setProperty("db.cloud.uri", "jdbc:driver:myFavoriteUri");
|
||||
|
||||
Pair<String, String> result = TransactionLegacy.getConnectionUriAndDriver(properties, null, false, "cloud");
|
||||
|
||||
Assert.assertEquals("jdbc:driver:myFavoriteUri", result.first());
|
||||
Assert.assertEquals("jdbc:driver", result.second());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getPropertiesAndBuildConnectionUriTestDbHaDisabled() {
|
||||
String result = TransactionLegacy.getPropertiesAndBuildConnectionUri(properties, "strat", "driver", true, "cloud");
|
||||
|
||||
Assert.assertEquals("driver://host:5555/name?autoReconnect=false&someParams&useSSL=true", result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getPropertiesAndBuildConnectionUriTestDbHaEnabled() {
|
||||
TransactionLegacy.s_dbHAEnabled = true;
|
||||
properties.setProperty("db.cloud.failOverReadOnly", "true");
|
||||
properties.setProperty("db.cloud.reconnectAtTxEnd", "false");
|
||||
properties.setProperty("db.cloud.autoReconnectForPools", "true");
|
||||
properties.setProperty("db.cloud.secondsBeforeRetrySource", "25");
|
||||
properties.setProperty("db.cloud.queriesBeforeRetrySource", "105");
|
||||
properties.setProperty("db.cloud.initialTimeout", "1000");
|
||||
properties.setProperty("db.cloud.replicas", "second_host");
|
||||
|
||||
String result = TransactionLegacy.getPropertiesAndBuildConnectionUri(properties, "strat", "driver", true, "cloud");
|
||||
|
||||
Assert.assertEquals("driver://host,second_host:5555/name?autoReconnect=false&someParams&useSSL=true&failOverReadOnly=true&reconnectAtTxEnd=false&autoReconnectFor"
|
||||
+ "Pools=true&secondsBeforeRetrySource=25&queriesBeforeRetrySource=105&initialTimeout=1000&loadBalanceStrategy=strat", result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void buildConnectionUriTestDbHaDisabled() {
|
||||
String result = TransactionLegacy.buildConnectionUri(null, "driver", false, "host", null, 5555, "cloud", false, null, null);
|
||||
|
||||
Assert.assertEquals("driver://host:5555/cloud?autoReconnect=false", result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void buildConnectionUriTestDbHaEnabled() {
|
||||
TransactionLegacy.s_dbHAEnabled = true;
|
||||
|
||||
String result = TransactionLegacy.buildConnectionUri("strat", "driver", false, "host", "second_host", 5555, "cloud", false, null, "dbHaParams");
|
||||
|
||||
Assert.assertEquals("driver://host,second_host:5555/cloud?autoReconnect=false&dbHaParams&loadBalanceStrategy=strat", result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void buildConnectionUriTestUrlParamsNotNull() {
|
||||
String result = TransactionLegacy.buildConnectionUri(null, "driver", false, "host", null, 5555, "cloud", false, "urlParams", null);
|
||||
|
||||
Assert.assertEquals("driver://host:5555/cloud?autoReconnect=false&urlParams", result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void buildConnectionUriTestUseSslTrue() {
|
||||
String result = TransactionLegacy.buildConnectionUri(null, "driver", true, "host", null, 5555, "cloud", false, null, null);
|
||||
|
||||
Assert.assertEquals("driver://host:5555/cloud?autoReconnect=false&useSSL=true", result);
|
||||
}
|
||||
}
|
||||
@@ -145,7 +145,7 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd {
description = "root disk size in GB for each node")
private Long nodeRootDiskSize;

@Parameter(name = ApiConstants.CLUSTER_TYPE, type = CommandType.STRING, required = true, description = "type of the cluster: CloudManaged, ExternalManaged", since="4.19.0")
@Parameter(name = ApiConstants.CLUSTER_TYPE, type = CommandType.STRING, description = "type of the cluster: CloudManaged, ExternalManaged. The default value is CloudManaged.", since="4.19.0")
private String clusterType;

/////////////////////////////////////////////////////
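Because the parameter is no longer required, a caller that omits clustertype now gets the CloudManaged default described above. A minimal sketch of how such a default is typically resolved in the accessor (illustrative only; this accessor is not part of the hunk shown):

    public String getClusterType() {
        // treat a missing parameter as the documented default
        return clusterType == null ? "CloudManaged" : clusterType;
    }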
@@ -43,6 +43,12 @@
<artifactId>cloud-plugin-hypervisor-kvm</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-storage-snapshot</artifactId>
<version>${project.version}</version>
<scope>compile</scope>
</dependency>
</dependencies>
<build>
<plugins>
@@ -0,0 +1,28 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.api.storage;

import com.cloud.agent.api.to.DataTO;
import org.apache.cloudstack.storage.command.CopyCommand;

public class LinstorBackupSnapshotCommand extends CopyCommand
{
public LinstorBackupSnapshotCommand(DataTO srcData, DataTO destData, int timeout, boolean executeInSequence)
{
super(srcData, destData, timeout, executeInSequence);
}
}
@ -0,0 +1,28 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.api.storage;

import com.cloud.agent.api.to.DataTO;
import org.apache.cloudstack.storage.command.CopyCommand;

public class LinstorRevertBackupSnapshotCommand extends CopyCommand
{
public LinstorRevertBackupSnapshotCommand(DataTO srcData, DataTO destData, int timeout, boolean executeInSequence)
{
super(srcData, destData, timeout, executeInSequence);
}
}
@ -0,0 +1,167 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.kvm.resource.wrapper;

import java.io.File;
import java.io.IOException;

import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.NfsTO;
import com.cloud.api.storage.LinstorBackupSnapshotCommand;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.storage.Storage;
import com.cloud.utils.script.Script;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.commons.io.FileUtils;
import org.apache.log4j.Logger;
import org.joda.time.Duration;
import org.libvirt.LibvirtException;

@ResourceWrapper(handles = LinstorBackupSnapshotCommand.class)
public final class LinstorBackupSnapshotCommandWrapper
extends CommandWrapper<LinstorBackupSnapshotCommand, CopyCmdAnswer, LibvirtComputingResource>
{
private static final Logger s_logger = Logger.getLogger(LinstorBackupSnapshotCommandWrapper.class);

// Toggle the ZFS "snapdev" property so the snapshot's zvol block device is exposed (visible) or hidden again.
private String zfsSnapdev(boolean hide, String zfsUrl) {
Script script = new Script("/usr/bin/zfs", Duration.millis(5000));
script.add("set");
script.add("snapdev=" + (hide ? "hidden" : "visible"));
script.add(zfsUrl.substring(6)); // cutting zfs://
return script.execute();
}

private String qemuShrink(String path, long sizeByte, long timeout) {
Script qemuImg = new Script("qemu-img", Duration.millis(timeout));
qemuImg.add("resize");
qemuImg.add("--shrink");
qemuImg.add(path);
qemuImg.add("" + sizeByte);
return qemuImg.execute();
}

static void cleanupSecondaryPool(final KVMStoragePool secondaryPool) {
if (secondaryPool != null) {
try {
secondaryPool.delete();
} catch (final Exception e) {
s_logger.debug("Failed to delete secondary storage", e);
}
}
}

private String convertImageToQCow2(
final String srcPath,
final SnapshotObjectTO dst,
final KVMStoragePool secondaryPool,
int waitMilliSeconds
)
throws LibvirtException, QemuImgException, IOException
{
final String dstDir = secondaryPool.getLocalPath() + File.separator + dst.getPath();
FileUtils.forceMkdir(new File(dstDir));

final String dstPath = dstDir + File.separator + dst.getName();
final QemuImgFile srcFile = new QemuImgFile(srcPath, QemuImg.PhysicalDiskFormat.RAW);
final QemuImgFile dstFile = new QemuImgFile(dstPath, QemuImg.PhysicalDiskFormat.QCOW2);

// NOTE: the qemu img will also contain the drbd metadata at the end
final QemuImg qemu = new QemuImg(waitMilliSeconds);
qemu.convert(srcFile, dstFile);
s_logger.info("Backup snapshot " + srcFile + " to " + dstPath);
return dstPath;
}

private SnapshotObjectTO setCorrectSnapshotSize(final SnapshotObjectTO dst, final String dstPath) {
final File snapFile = new File(dstPath);
final long size = snapFile.exists() ? snapFile.length() : 0;

final SnapshotObjectTO snapshot = new SnapshotObjectTO();
snapshot.setPath(dst.getPath() + File.separator + dst.getName());
snapshot.setPhysicalSize(size);
return snapshot;
}

@Override
public CopyCmdAnswer execute(LinstorBackupSnapshotCommand cmd, LibvirtComputingResource serverResource)
{
s_logger.debug("LinstorBackupSnapshotCommandWrapper: " + cmd.getSrcTO().getPath() + " -> " + cmd.getDestTO().getPath());
final SnapshotObjectTO src = (SnapshotObjectTO) cmd.getSrcTO();
final SnapshotObjectTO dst = (SnapshotObjectTO) cmd.getDestTO();
KVMStoragePool secondaryPool = null;
final KVMStoragePoolManager storagePoolMgr = serverResource.getStoragePoolMgr();
KVMStoragePool linstorPool = storagePoolMgr.getStoragePool(Storage.StoragePoolType.Linstor, src.getDataStore().getUuid());
boolean zfsHidden = false;
String srcPath = src.getPath();

if (linstorPool == null) {
return new CopyCmdAnswer("Unable to get linstor storage pool from destination volume.");
}

final DataStoreTO dstDataStore = dst.getDataStore();
if (!(dstDataStore instanceof NfsTO)) {
return new CopyCmdAnswer("Backup Linstor snapshot: Only NFS secondary supported at present!");
}

try
{
// provide the linstor snapshot block device
// on lvm thin this should already be there in /dev/mapper/vg-snapshotname
// on zfs we need to unhide the snapshot block device
s_logger.info("Src: " + srcPath + " | " + src.getName());
if (srcPath.startsWith("zfs://")) {
zfsHidden = true;
if (zfsSnapdev(false, srcPath) != null) {
return new CopyCmdAnswer("Unable to unhide zfs snapshot device.");
}
srcPath = "/dev/" + srcPath.substring(6);
}

secondaryPool = storagePoolMgr.getStoragePoolByURI(dstDataStore.getUrl());

String dstPath = convertImageToQCow2(srcPath, dst, secondaryPool, cmd.getWaitInMillSeconds());

// resize to real volume size, cutting off the drbd metadata
String result = qemuShrink(dstPath, src.getVolume().getSize(), cmd.getWaitInMillSeconds());
if (result != null) {
return new CopyCmdAnswer("qemu-img shrink failed: " + result);
}
s_logger.info("Backup shrunk " + dstPath + " to actual size " + src.getVolume().getSize());

SnapshotObjectTO snapshot = setCorrectSnapshotSize(dst, dstPath);
return new CopyCmdAnswer(snapshot);
} catch (final Exception e) {
final String error = String.format("Failed to backup snapshot with id [%s] with a pool %s, due to %s",
cmd.getSrcTO().getId(), cmd.getSrcTO().getDataStore().getUuid(), e.getMessage());
s_logger.error(error);
return new CopyCmdAnswer(cmd, e);
} finally {
cleanupSecondaryPool(secondaryPool);
if (zfsHidden) {
zfsSnapdev(true, src.getPath());
}
}
}
}
@ -0,0 +1,92 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.kvm.resource.wrapper;

import java.io.File;

import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.api.storage.LinstorRevertBackupSnapshotCommand;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.storage.Storage;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.log4j.Logger;
import org.libvirt.LibvirtException;

@ResourceWrapper(handles = LinstorRevertBackupSnapshotCommand.class)
public final class LinstorRevertBackupSnapshotCommandWrapper
extends CommandWrapper<LinstorRevertBackupSnapshotCommand, CopyCmdAnswer, LibvirtComputingResource>
{
private static final Logger s_logger = Logger.getLogger(LinstorRevertBackupSnapshotCommandWrapper.class);

private void convertQCow2ToRAW(final String srcPath, final String dstPath, int waitMilliSeconds)
throws LibvirtException, QemuImgException
{
final QemuImgFile srcQemuFile = new QemuImgFile(
srcPath, QemuImg.PhysicalDiskFormat.QCOW2);
final QemuImg qemu = new QemuImg(waitMilliSeconds);
final QemuImgFile dstFile = new QemuImgFile(dstPath, QemuImg.PhysicalDiskFormat.RAW);
qemu.convert(srcQemuFile, dstFile);
}

@Override
public CopyCmdAnswer execute(LinstorRevertBackupSnapshotCommand cmd, LibvirtComputingResource serverResource)
{
s_logger.debug("LinstorRevertBackupSnapshotCommandWrapper: " + cmd.getSrcTO().getPath() + " -> " + cmd.getDestTO().getPath());
final SnapshotObjectTO src = (SnapshotObjectTO) cmd.getSrcTO();
final VolumeObjectTO dst = (VolumeObjectTO) cmd.getDestTO();
KVMStoragePool secondaryPool = null;
final KVMStoragePoolManager storagePoolMgr = serverResource.getStoragePoolMgr();
KVMStoragePool linstorPool = storagePoolMgr.getStoragePool(Storage.StoragePoolType.Linstor, dst.getDataStore().getUuid());

if (linstorPool == null) {
return new CopyCmdAnswer("Unable to get linstor storage pool from destination volume.");
}

try
{
final DataStoreTO srcDataStore = src.getDataStore();
File srcFile = new File(src.getPath());
secondaryPool = storagePoolMgr.getStoragePoolByURI(
srcDataStore.getUrl() + File.separator + srcFile.getParent());

convertQCow2ToRAW(
secondaryPool.getLocalPath() + File.separator + srcFile.getName(),
linstorPool.getPhysicalDisk(dst.getPath()).getPath(),
cmd.getWaitInMillSeconds());

final VolumeObjectTO dstVolume = new VolumeObjectTO();
dstVolume.setPath(dst.getPath());
return new CopyCmdAnswer(dstVolume);
} catch (final Exception e) {
final String error = String.format("Failed to revert snapshot with id [%s] with a pool %s, due to %s",
cmd.getSrcTO().getId(), cmd.getSrcTO().getDataStore().getUuid(), e.getMessage());
s_logger.error(error);
return new CopyCmdAnswer(cmd, e);
} finally {
LinstorBackupSnapshotCommandWrapper.cleanupSecondaryPool(secondaryPool);
}
}
}
@ -28,6 +28,7 @@ import java.util.StringJoiner;

import javax.annotation.Nonnull;

import org.apache.cloudstack.storage.datastore.util.LinstorUtil;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
@ -65,8 +66,8 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
return new DevelopersApi(client);
}

private String getLinstorRscName(String name) {
return "cs-" + name;
private static String getLinstorRscName(String name) {
return LinstorUtil.RSC_PREFIX + name;
}

private String getHostname() {
@ -214,6 +215,7 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, QemuImg.PhysicalDiskFormat format,
Storage.ProvisioningType provisioningType, long size, byte[] passphrase)
{
s_logger.debug(String.format("Linstor.createPhysicalDisk: %s;%s", name, format));
final String rscName = getLinstorRscName(name);
LinstorStoragePool lpool = (LinstorStoragePool) pool;
final DevelopersApi api = getLinstorAPI(pool);
@ -254,6 +256,7 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
throw new CloudRuntimeException("Linstor: viewResources didn't return resources or volumes.");
}
} catch (ApiException apiEx) {
s_logger.error(String.format("Linstor.createPhysicalDisk: ApiException: %s", apiEx.getBestMessage()));
throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
}
}
@ -424,7 +427,7 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
@Override
public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPools, int timeout, byte[] srcPassphrase, byte[] destPassphrase, Storage.ProvisioningType provisioningType)
{
s_logger.debug("Linstor: copyPhysicalDisk");
s_logger.debug(String.format("Linstor.copyPhysicalDisk: %s -> %s", disk.getPath(), name));
final QemuImg.PhysicalDiskFormat sourceFormat = disk.getFormat();
final String sourcePath = disk.getPath();

@ -433,6 +436,7 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
final KVMPhysicalDisk dstDisk = destPools.createPhysicalDisk(
name, QemuImg.PhysicalDiskFormat.RAW, provisioningType, disk.getVirtualSize(), null);

s_logger.debug(String.format("Linstor.copyPhysicalDisk: dstPath: %s", dstDisk.getPath()));
final QemuImgFile destFile = new QemuImgFile(dstDisk.getPath());
destFile.setFormat(dstDisk.getFormat());
destFile.setSize(disk.getVirtualSize());
@ -28,6 +28,7 @@ import com.linbit.linstor.api.model.ResourceDefinitionCloneStarted;
|
||||
import com.linbit.linstor.api.model.ResourceDefinitionCreate;
|
||||
import com.linbit.linstor.api.model.ResourceDefinitionModify;
|
||||
import com.linbit.linstor.api.model.ResourceGroupSpawn;
|
||||
import com.linbit.linstor.api.model.ResourceMakeAvailable;
|
||||
import com.linbit.linstor.api.model.ResourceWithVolumes;
|
||||
import com.linbit.linstor.api.model.Snapshot;
|
||||
import com.linbit.linstor.api.model.SnapshotRestore;
|
||||
@ -43,18 +44,26 @@ import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Optional;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.storage.ResizeVolumeAnswer;
|
||||
import com.cloud.agent.api.storage.ResizeVolumeCommand;
|
||||
import com.cloud.agent.api.to.DataObjectType;
|
||||
import com.cloud.agent.api.to.DataStoreTO;
|
||||
import com.cloud.agent.api.to.DataTO;
|
||||
import com.cloud.agent.api.to.DiskTO;
|
||||
import com.cloud.agent.api.to.StorageFilerTO;
|
||||
import com.cloud.api.storage.LinstorBackupSnapshotCommand;
|
||||
import com.cloud.api.storage.LinstorRevertBackupSnapshotCommand;
|
||||
import com.cloud.configuration.Config;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.host.dao.HostDao;
|
||||
import com.cloud.storage.DataStoreRole;
|
||||
import com.cloud.storage.ResizeVolumePayload;
|
||||
import com.cloud.storage.SnapshotVO;
|
||||
import com.cloud.storage.Storage.StoragePoolType;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.StorageManager;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.VMTemplateStoragePoolVO;
|
||||
@ -67,8 +76,10 @@ import com.cloud.storage.dao.SnapshotDetailsVO;
|
||||
import com.cloud.storage.dao.VMTemplatePoolDao;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.storage.dao.VolumeDetailsDao;
|
||||
import com.cloud.utils.NumbersUtil;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.vm.VirtualMachineManager;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
|
||||
@ -80,10 +91,14 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
|
||||
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
|
||||
import org.apache.cloudstack.storage.RemoteHostEndPoint;
|
||||
import org.apache.cloudstack.storage.command.CommandResult;
|
||||
import org.apache.cloudstack.storage.command.CopyCommand;
|
||||
import org.apache.cloudstack.storage.command.CreateObjectAnswer;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.cloudstack.storage.datastore.util.LinstorConfigurationManager;
|
||||
import org.apache.cloudstack.storage.datastore.util.LinstorUtil;
|
||||
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
|
||||
import org.apache.cloudstack.storage.volume.VolumeObject;
|
||||
@ -98,6 +113,10 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
|
||||
@Inject private SnapshotDao _snapshotDao;
|
||||
@Inject private SnapshotDetailsDao _snapshotDetailsDao;
|
||||
@Inject private StorageManager _storageMgr;
|
||||
@Inject
|
||||
ConfigurationDao _configDao;
|
||||
@Inject
|
||||
private HostDao _hostDao;
|
||||
|
||||
public LinstorPrimaryDataStoreDriverImpl()
|
||||
{
|
||||
@ -109,10 +128,12 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
|
||||
Map<String, String> mapCapabilities = new HashMap<>();
|
||||
|
||||
// Linstor will be restricted to only run on LVM-THIN and ZFS storage pools with ACS
|
||||
// This enables template caching on our primary storage
|
||||
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString());
|
||||
|
||||
// fetch if lvm-thin or ZFS
|
||||
mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString());
|
||||
boolean system_snapshot = !LinstorConfigurationManager.BackupSnapshots.value();
|
||||
mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.toString(system_snapshot));
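// STORAGE_SYSTEM_SNAPSHOT is only advertised when lin.backup.snapshots is disabled; with backups enabled, snapshots are shipped to secondary storage instead of being kept as storage-system snapshots.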
|
||||
|
||||
// CAN_CREATE_VOLUME_FROM_SNAPSHOT see note from CAN_CREATE_VOLUME_FROM_VOLUME
|
||||
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());
|
||||
@ -216,6 +237,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
|
||||
}
|
||||
throw new CloudRuntimeException("Linstor: Unable to delete snapshot: " + rscDefName);
|
||||
}
|
||||
s_logger.info("Linstor: Deleted snapshot " + snapshotName + " for resource " + rscDefName);
|
||||
} catch (ApiException apiEx)
|
||||
{
|
||||
s_logger.error("Linstor: ApiEx - " + apiEx.getMessage());
|
||||
@ -402,27 +424,46 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
|
||||
}
|
||||
}
|
||||
|
||||
private String createResource(VolumeInfo vol, StoragePoolVO storagePoolVO)
|
||||
{
|
||||
DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress());
|
||||
final String rscGrp = storagePoolVO.getUserInfo() != null && !storagePoolVO.getUserInfo().isEmpty() ?
|
||||
private String getRscGrp(StoragePoolVO storagePoolVO) {
|
||||
return storagePoolVO.getUserInfo() != null && !storagePoolVO.getUserInfo().isEmpty() ?
|
||||
storagePoolVO.getUserInfo() : "DfltRscGrp";
|
||||
}
|
||||
|
||||
private String createResourceBase(
|
||||
String rscName, long sizeInBytes, String volName, String vmName, DevelopersApi api, String rscGrp) {
|
||||
ResourceGroupSpawn rscGrpSpawn = new ResourceGroupSpawn();
|
||||
final String rscName = LinstorUtil.RSC_PREFIX + vol.getUuid();
|
||||
rscGrpSpawn.setResourceDefinitionName(rscName);
|
||||
rscGrpSpawn.addVolumeSizesItem(vol.getSize() / 1024);
|
||||
rscGrpSpawn.addVolumeSizesItem(sizeInBytes / 1024);
|
||||
|
||||
try
|
||||
{
|
||||
s_logger.info("Linstor: Spawn resource " + rscName);
|
||||
ApiCallRcList answers = linstorApi.resourceGroupSpawn(rscGrp, rscGrpSpawn);
|
||||
ApiCallRcList answers = api.resourceGroupSpawn(rscGrp, rscGrpSpawn);
|
||||
checkLinstorAnswersThrow(answers);
|
||||
|
||||
applyAuxProps(linstorApi, rscName, vol.getName(), vol.getAttachedVmName());
|
||||
applyAuxProps(api, rscName, volName, vmName);
|
||||
|
||||
return getDeviceName(api, rscName);
|
||||
} catch (ApiException apiEx)
|
||||
{
|
||||
s_logger.error("Linstor: ApiEx - " + apiEx.getMessage());
|
||||
throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
|
||||
}
|
||||
}
|
||||
|
||||
private String createResource(VolumeInfo vol, StoragePoolVO storagePoolVO) {
|
||||
DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress());
|
||||
final String rscGrp = getRscGrp(storagePoolVO);
|
||||
|
||||
final String rscName = LinstorUtil.RSC_PREFIX + vol.getUuid();
|
||||
String deviceName = createResourceBase(
|
||||
rscName, vol.getSize(), vol.getName(), vol.getAttachedVmName(), linstorApi, rscGrp);
|
||||
|
||||
try
|
||||
{
|
||||
applyQoSSettings(storagePoolVO, linstorApi, rscName, vol.getMaxIops());
|
||||
|
||||
return getDeviceName(linstorApi, rscName);
|
||||
return deviceName;
|
||||
} catch (ApiException apiEx)
|
||||
{
|
||||
s_logger.error("Linstor: ApiEx - " + apiEx.getMessage());
|
||||
@ -487,8 +528,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
|
||||
}
|
||||
|
||||
private String createResourceFromSnapshot(long csSnapshotId, String rscName, StoragePoolVO storagePoolVO) {
|
||||
final String rscGrp = storagePoolVO.getUserInfo() != null && !storagePoolVO.getUserInfo().isEmpty() ?
|
||||
storagePoolVO.getUserInfo() : "DfltRscGrp";
|
||||
final String rscGrp = getRscGrp(storagePoolVO);
|
||||
final DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress());
|
||||
|
||||
SnapshotVO snapshotVO = _snapshotDao.findById(csSnapshotId);
|
||||
@ -654,6 +694,59 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
|
||||
}
|
||||
}
|
||||
|
||||
private String revertSnapshotFromImageStore(
|
||||
final SnapshotInfo snapshot,
|
||||
final VolumeInfo volumeInfo,
|
||||
final DevelopersApi linstorApi,
|
||||
final String rscName)
|
||||
throws ApiException {
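// Image-store case: send a LinstorRevertBackupSnapshotCommand to the host holding a diskful replica, which converts the backed-up QCOW2 on secondary storage back onto the raw LINSTOR device.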
|
||||
String resultMsg = null;
|
||||
String value = _configDao.getValue(Config.BackupSnapshotWait.toString());
|
||||
int _backupsnapshotwait = NumbersUtil.parseInt(
|
||||
value, Integer.parseInt(Config.BackupSnapshotWait.getDefaultValue()));
|
||||
|
||||
LinstorRevertBackupSnapshotCommand cmd = new LinstorRevertBackupSnapshotCommand(
|
||||
snapshot.getTO(),
|
||||
volumeInfo.getTO(),
|
||||
_backupsnapshotwait,
|
||||
VirtualMachineManager.ExecuteInSequence.value());
|
||||
|
||||
Optional<RemoteHostEndPoint> optEP = getDiskfullEP(linstorApi, rscName);
|
||||
if (optEP.isPresent()) {
|
||||
Answer answer = optEP.get().sendMessage(cmd);
|
||||
if (!answer.getResult()) {
|
||||
resultMsg = answer.getDetails();
|
||||
}
|
||||
} else {
|
||||
resultMsg = "Unable to get matching Linstor endpoint.";
|
||||
}
|
||||
return resultMsg;
|
||||
}
|
||||
|
||||
private String doRevertSnapshot(final SnapshotInfo snapshot, final VolumeInfo volumeInfo) {
|
||||
final StoragePool pool = (StoragePool) volumeInfo.getDataStore();
|
||||
final DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(pool.getHostAddress());
|
||||
final String rscName = LinstorUtil.RSC_PREFIX + volumeInfo.getUuid();
|
||||
String resultMsg;
|
||||
try {
|
||||
if (snapshot.getDataStore().getRole() == DataStoreRole.Primary) {
|
||||
final String snapName = LinstorUtil.RSC_PREFIX + snapshot.getUuid();
|
||||
|
||||
ApiCallRcList answers = linstorApi.resourceSnapshotRollback(rscName, snapName);
|
||||
resultMsg = checkLinstorAnswers(answers);
|
||||
} else if (snapshot.getDataStore().getRole() == DataStoreRole.Image) {
|
||||
resultMsg = revertSnapshotFromImageStore(snapshot, volumeInfo, linstorApi, rscName);
|
||||
} else {
|
||||
resultMsg = "Linstor: Snapshot revert datastore not supported";
|
||||
}
|
||||
} catch (ApiException apiEx) {
|
||||
s_logger.error("Linstor: ApiEx - " + apiEx.getMessage());
|
||||
resultMsg = apiEx.getBestMessage();
|
||||
}
|
||||
|
||||
return resultMsg;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void revertSnapshot(
|
||||
SnapshotInfo snapshot,
|
||||
@ -670,19 +763,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
|
||||
return;
|
||||
}
|
||||
|
||||
String resultMsg;
|
||||
try {
|
||||
final StoragePool pool = (StoragePool) snapshot.getDataStore();
|
||||
final String rscName = LinstorUtil.RSC_PREFIX + volumeInfo.getUuid();
|
||||
final String snapName = LinstorUtil.RSC_PREFIX + snapshot.getUuid();
|
||||
final DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(pool.getHostAddress());
|
||||
|
||||
ApiCallRcList answers = linstorApi.resourceSnapshotRollback(rscName, snapName);
|
||||
resultMsg = checkLinstorAnswers(answers);
|
||||
} catch (ApiException apiEx) {
|
||||
s_logger.error("Linstor: ApiEx - " + apiEx.getMessage());
|
||||
resultMsg = apiEx.getBestMessage();
|
||||
}
|
||||
String resultMsg = doRevertSnapshot(snapshot, volumeInfo);
|
||||
|
||||
if (callback != null)
|
||||
{
|
||||
@ -692,24 +773,211 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
|
||||
}
|
||||
}
|
||||
|
||||
private static boolean canCopySnapshotCond(DataObject srcData, DataObject dstData) {
|
||||
return srcData.getType() == DataObjectType.SNAPSHOT && dstData.getType() == DataObjectType.SNAPSHOT
|
||||
&& (dstData.getDataStore().getRole() == DataStoreRole.Image
|
||||
|| dstData.getDataStore().getRole() == DataStoreRole.ImageCache);
|
||||
}
|
||||
|
||||
private static boolean canCopyTemplateCond(DataObject srcData, DataObject dstData) {
|
||||
return srcData.getType() == DataObjectType.TEMPLATE && dstData.getType() == DataObjectType.TEMPLATE
|
||||
&& dstData.getDataStore().getRole() == DataStoreRole.Primary
|
||||
&& (srcData.getDataStore().getRole() == DataStoreRole.Image
|
||||
|| srcData.getDataStore().getRole() == DataStoreRole.ImageCache);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean canCopy(DataObject srcData, DataObject destData)
|
||||
public boolean canCopy(DataObject srcData, DataObject dstData)
|
||||
{
|
||||
s_logger.debug("LinstorPrimaryDataStoreDriverImpl.canCopy: " + srcData.getType() + " -> " + dstData.getType());
|
||||
|
||||
if (canCopySnapshotCond(srcData, dstData)) {
|
||||
SnapshotInfo sinfo = (SnapshotInfo) srcData;
|
||||
VolumeInfo volume = sinfo.getBaseVolume();
|
||||
StoragePoolVO storagePool = _storagePoolDao.findById(volume.getPoolId());
|
||||
return storagePool.getStorageProviderName().equals(LinstorUtil.PROVIDER_NAME);
|
||||
} else if (canCopyTemplateCond(srcData, dstData)) {
|
||||
TemplateInfo tInfo = (TemplateInfo) dstData;
|
||||
StoragePoolVO storagePoolVO = _storagePoolDao.findById(dstData.getDataStore().getId());
|
||||
return storagePoolVO != null
|
||||
&& storagePoolVO.getPoolType() == Storage.StoragePoolType.Linstor
|
||||
&& tInfo.getSize() != null;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback<CopyCommandResult> callback)
|
||||
public void copyAsync(DataObject srcData, DataObject dstData, AsyncCompletionCallback<CopyCommandResult> callback)
|
||||
{
|
||||
// as long as canCopy is false, this isn't called
|
||||
s_logger.debug("Linstor: copyAsync with srcdata: " + srcData.getUuid());
|
||||
s_logger.debug("LinstorPrimaryDataStoreDriverImpl.copyAsync: "
|
||||
+ srcData.getType() + " -> " + dstData.getType());
|
||||
|
||||
final CopyCommandResult res;
|
||||
if (canCopySnapshotCond(srcData, dstData)) {
|
||||
String errMsg = null;
|
||||
Answer answer = copySnapshot(srcData, dstData);
|
||||
if (answer != null && !answer.getResult()) {
|
||||
errMsg = answer.getDetails();
|
||||
} else {
|
||||
// delete primary storage snapshot
|
||||
SnapshotInfo sinfo = (SnapshotInfo) srcData;
|
||||
VolumeInfo volume = sinfo.getBaseVolume();
|
||||
deleteSnapshot(
|
||||
srcData.getDataStore(),
|
||||
LinstorUtil.RSC_PREFIX + volume.getUuid(),
|
||||
LinstorUtil.RSC_PREFIX + sinfo.getUuid());
|
||||
}
|
||||
res = new CopyCommandResult(null, answer);
|
||||
res.setResult(errMsg);
|
||||
} else if (canCopyTemplateCond(srcData, dstData)) {
|
||||
Answer answer = copyTemplate(srcData, dstData);
|
||||
res = new CopyCommandResult(null, answer);
|
||||
} else {
|
||||
Answer answer = new Answer(null, false, "noimpl");
|
||||
res = new CopyCommandResult(null, answer);
|
||||
res.setResult("Not implemented yet");
|
||||
}
|
||||
callback.complete(res);
|
||||
}
|
||||
|
||||
private Optional<RemoteHostEndPoint> getLinstorEP(DevelopersApi api, String rscName) throws ApiException {
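// Walk the LINSTOR nodes in random order, pick one that is also a registered CloudStack host, and make the resource available on it so that host's agent can run the copy command.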
|
||||
List<String> linstorNodeNames = LinstorUtil.getLinstorNodeNames(api);
|
||||
Collections.shuffle(linstorNodeNames); // do not always pick the first linstor node
|
||||
|
||||
Host host = null;
|
||||
for (String nodeName : linstorNodeNames) {
|
||||
host = _hostDao.findByName(nodeName);
|
||||
if (host != null) {
|
||||
s_logger.info(String.format("Linstor: Make resource %s available on node %s ...", rscName, nodeName));
|
||||
ApiCallRcList answers = api.resourceMakeAvailableOnNode(rscName, nodeName, new ResourceMakeAvailable());
|
||||
if (!answers.hasError()) {
|
||||
break; // found working host
|
||||
} else {
|
||||
s_logger.error(
|
||||
String.format("Linstor: Unable to make resource %s on node %s available: %s",
|
||||
rscName,
|
||||
nodeName,
|
||||
LinstorUtil.getBestErrorMessage(answers)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (host == null)
|
||||
{
|
||||
s_logger.error("Linstor: Couldn't create a resource on any cloudstack host.");
|
||||
return Optional.empty();
|
||||
}
|
||||
else
|
||||
{
|
||||
return Optional.of(RemoteHostEndPoint.getHypervisorHostEndPoint(host));
|
||||
}
|
||||
}
|
||||
|
||||
private Optional<RemoteHostEndPoint> getDiskfullEP(DevelopersApi api, String rscName) throws ApiException {
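// Resolve the CloudStack host that holds a diskful replica of the given resource; returns empty if no such host is registered.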
|
||||
com.linbit.linstor.api.model.StoragePool linSP =
|
||||
LinstorUtil.getDiskfulStoragePool(api, rscName);
|
||||
if (linSP != null)
|
||||
{
|
||||
Host host = _hostDao.findByName(linSP.getNodeName());
|
||||
if (host == null)
|
||||
{
|
||||
s_logger.error("Linstor: Host '" + linSP.getNodeName() + "' not found.");
|
||||
return Optional.empty();
|
||||
}
|
||||
else
|
||||
{
|
||||
return Optional.of(RemoteHostEndPoint.getHypervisorHostEndPoint(host));
|
||||
}
|
||||
}
|
||||
return Optional.empty();
|
||||
}
|
||||
|
||||
private Answer copyTemplate(DataObject srcData, DataObject dstData) {
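// Spawn a LINSTOR resource sized for the template, then let an agent on a node where the resource is available copy the template data via a CopyCommand.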
|
||||
TemplateInfo tInfo = (TemplateInfo) dstData;
|
||||
final StoragePoolVO pool = _storagePoolDao.findById(dstData.getDataStore().getId());
|
||||
final DevelopersApi api = LinstorUtil.getLinstorAPI(pool.getHostAddress());
|
||||
final String rscName = LinstorUtil.RSC_PREFIX + dstData.getUuid();
|
||||
createResourceBase(
|
||||
LinstorUtil.RSC_PREFIX + dstData.getUuid(),
|
||||
tInfo.getSize(),
|
||||
tInfo.getName(),
|
||||
"",
|
||||
api,
|
||||
getRscGrp(pool));
|
||||
|
||||
int nMaxExecutionMinutes = NumbersUtil.parseInt(
|
||||
_configDao.getValue(Config.SecStorageCmdExecutionTimeMax.key()), 30);
|
||||
CopyCommand cmd = new CopyCommand(
|
||||
srcData.getTO(),
|
||||
dstData.getTO(),
|
||||
nMaxExecutionMinutes * 60 * 1000,
|
||||
VirtualMachineManager.ExecuteInSequence.value());
|
||||
Answer answer;
|
||||
|
||||
try {
|
||||
Optional<RemoteHostEndPoint> optEP = getLinstorEP(api, rscName);
|
||||
if (optEP.isPresent()) {
|
||||
answer = optEP.get().sendMessage(cmd);
|
||||
}
|
||||
else {
|
||||
answer = new Answer(cmd, false, "Unable to get matching Linstor endpoint.");
|
||||
}
|
||||
} catch (ApiException exc) {
|
||||
s_logger.error("copy template failed: ", exc);
|
||||
throw new CloudRuntimeException(exc.getBestMessage());
|
||||
}
|
||||
return answer;
|
||||
}
|
||||
|
||||
protected Answer copySnapshot(DataObject srcData, DataObject destData) {
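// Back up a primary-storage snapshot to secondary storage by sending a LinstorBackupSnapshotCommand to the host with the diskful replica of the base volume.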
|
||||
String value = _configDao.getValue(Config.BackupSnapshotWait.toString());
|
||||
int _backupsnapshotwait = NumbersUtil.parseInt(
|
||||
value, Integer.parseInt(Config.BackupSnapshotWait.getDefaultValue()));
|
||||
|
||||
SnapshotInfo snapshotInfo = (SnapshotInfo)srcData;
|
||||
Boolean snapshotFullBackup = snapshotInfo.getFullBackup();
|
||||
final StoragePoolVO pool = _storagePoolDao.findById(srcData.getDataStore().getId());
|
||||
final DevelopersApi api = LinstorUtil.getLinstorAPI(pool.getHostAddress());
|
||||
boolean fullSnapshot = true;
|
||||
if (snapshotFullBackup != null) {
|
||||
fullSnapshot = snapshotFullBackup;
|
||||
}
|
||||
Map<String, String> options = new HashMap<>();
|
||||
options.put("fullSnapshot", fullSnapshot + "");
|
||||
options.put(SnapshotInfo.BackupSnapshotAfterTakingSnapshot.key(),
|
||||
String.valueOf(SnapshotInfo.BackupSnapshotAfterTakingSnapshot.value()));
|
||||
options.put("volumeSize", snapshotInfo.getBaseVolume().getSize() + "");
|
||||
|
||||
try {
|
||||
CopyCommand cmd = new LinstorBackupSnapshotCommand(
|
||||
srcData.getTO(),
|
||||
destData.getTO(),
|
||||
_backupsnapshotwait,
|
||||
VirtualMachineManager.ExecuteInSequence.value());
|
||||
cmd.setOptions(options);
|
||||
|
||||
Optional<RemoteHostEndPoint> optEP = getDiskfullEP(
|
||||
api, LinstorUtil.RSC_PREFIX + snapshotInfo.getBaseVolume().getUuid());
|
||||
Answer answer;
|
||||
if (optEP.isPresent()) {
|
||||
answer = optEP.get().sendMessage(cmd);
|
||||
} else {
|
||||
answer = new Answer(cmd, false, "Unable to get matching Linstor endpoint.");
|
||||
}
|
||||
return answer;
|
||||
} catch (Exception e) {
|
||||
s_logger.debug("copy snapshot failed: ", e);
|
||||
throw new CloudRuntimeException(e.toString());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback)
|
||||
{
|
||||
// as long as canCopy is false, this isn't called
|
||||
s_logger.debug("Linstor: copyAsync with srcdata: " + srcData.getUuid());
|
||||
s_logger.debug("Linstor: copyAsync with host");
|
||||
copyAsync(srcData, destData, callback);
|
||||
}
|
||||
|
||||
private CreateCmdResult notifyResize(
|
||||
@ -794,6 +1062,23 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
|
||||
s_logger.debug("Linstor: handleQualityOfServiceForVolumeMigration");
|
||||
}
|
||||
|
||||
private Answer createAnswerAndPerstistDetails(DevelopersApi api, SnapshotInfo snapshotInfo, String rscName)
|
||||
throws ApiException {
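// Resolve the snapshot's block-device path on the diskful node and persist it as a snapshot detail so later backup/revert commands can address it directly.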
|
||||
SnapshotObjectTO snapshotTO = (SnapshotObjectTO)snapshotInfo.getTO();
|
||||
com.linbit.linstor.api.model.StoragePool linStoragePool = LinstorUtil.getDiskfulStoragePool(api, rscName);
|
||||
if (linStoragePool == null) {
|
||||
throw new CloudRuntimeException("Linstor: Unable to find storage pool for resource " + rscName);
|
||||
}
|
||||
|
||||
final String path = LinstorUtil.getSnapshotPath(linStoragePool, rscName, LinstorUtil.RSC_PREFIX + snapshotInfo.getUuid());
|
||||
snapshotTO.setPath(path);
|
||||
SnapshotDetailsVO details = new SnapshotDetailsVO(
|
||||
snapshotInfo.getId(), snapshotInfo.getUuid(), path, false);
|
||||
_snapshotDetailsDao.persist(details);
|
||||
|
||||
return new CreateObjectAnswer(snapshotTO);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback<CreateCmdResult> callback)
|
||||
{
|
||||
@ -823,12 +1108,11 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
|
||||
result.setResult(errMsg);
|
||||
} else
|
||||
{
|
||||
s_logger.info(String.format("Successfully took snapshot from %s", rscName));
|
||||
s_logger.info(String.format("Successfully took snapshot %s from %s", snapshot.getName(), rscName));
|
||||
|
||||
SnapshotObjectTO snapshotObjectTo = (SnapshotObjectTO)snapshotInfo.getTO();
|
||||
snapshotObjectTo.setPath(rscName + "-" + snapshotInfo.getName());
|
||||
Answer answer = createAnswerAndPerstistDetails(api, snapshotInfo, rscName);
|
||||
|
||||
result = new CreateCmdResult(null, new CreateObjectAnswer(snapshotObjectTo));
|
||||
result = new CreateCmdResult(null, answer);
|
||||
result.setResult(null);
|
||||
}
|
||||
} catch (ApiException apiExc)
|
||||
|
||||
@ -27,16 +27,16 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
import org.apache.cloudstack.storage.datastore.driver.LinstorPrimaryDataStoreDriverImpl;
import org.apache.cloudstack.storage.datastore.lifecycle.LinstorPrimaryDataStoreLifeCycleImpl;
import org.apache.cloudstack.storage.datastore.util.LinstorUtil;

public class LinstorPrimaryDatastoreProviderImpl implements PrimaryDataStoreProvider {
private final static String PROVIDER_NAME = "Linstor";
protected PrimaryDataStoreDriver driver;
protected HypervisorHostListener listener;
protected DataStoreLifeCycle lifecycle;

@Override
public String getName() {
return PROVIDER_NAME;
return LinstorUtil.PROVIDER_NAME;
}

@Override
@ -0,0 +1,40 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.util;

import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;

public class LinstorConfigurationManager implements Configurable
{
public static final ConfigKey<Boolean> BackupSnapshots = new ConfigKey<>(Boolean.class, "lin.backup.snapshots", "Advanced", "false",
"Backup Linstor primary storage snapshots to secondary storage (deleting ps snapshot), only works on hyperconverged setups.", true, ConfigKey.Scope.Global, null);

public static final ConfigKey<?>[] CONFIG_KEYS = new ConfigKey<?>[] { BackupSnapshots };

@Override
public String getConfigComponentName()
{
return LinstorConfigurationManager.class.getSimpleName();
}

@Override
public ConfigKey<?>[] getConfigKeys()
{
return CONFIG_KEYS;
}
}
@ -20,12 +20,20 @@ import com.linbit.linstor.api.ApiClient;
|
||||
import com.linbit.linstor.api.ApiException;
|
||||
import com.linbit.linstor.api.Configuration;
|
||||
import com.linbit.linstor.api.DevelopersApi;
|
||||
import com.linbit.linstor.api.model.ApiCallRc;
|
||||
import com.linbit.linstor.api.model.ApiCallRcList;
|
||||
import com.linbit.linstor.api.model.Node;
|
||||
import com.linbit.linstor.api.model.ProviderKind;
|
||||
import com.linbit.linstor.api.model.ResourceGroup;
|
||||
import com.linbit.linstor.api.model.ResourceWithVolumes;
|
||||
import com.linbit.linstor.api.model.StoragePool;
|
||||
import com.linbit.linstor.api.model.Volume;
|
||||
|
||||
import javax.annotation.Nonnull;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import org.apache.log4j.Logger;
|
||||
@ -33,6 +41,7 @@ import org.apache.log4j.Logger;
|
||||
public class LinstorUtil {
|
||||
private static final Logger s_logger = Logger.getLogger(LinstorUtil.class);
|
||||
|
||||
public final static String PROVIDER_NAME = "Linstor";
|
||||
public static final String RSC_PREFIX = "cs-";
|
||||
public static final String RSC_GROUP = "resourceGroup";
|
||||
|
||||
@ -47,6 +56,86 @@ public class LinstorUtil {
|
||||
return new DevelopersApi(client);
|
||||
}
|
||||
|
||||
public static String getBestErrorMessage(ApiCallRcList answers) {
|
||||
return answers != null && !answers.isEmpty() ?
|
||||
answers.stream()
|
||||
.filter(ApiCallRc::isError)
|
||||
.findFirst()
|
||||
.map(ApiCallRc::getMessage)
|
||||
.orElse((answers.get(0)).getMessage()) : null;
|
||||
}
|
||||
|
||||
public static List<String> getLinstorNodeNames(@Nonnull DevelopersApi api) throws ApiException
|
||||
{
|
||||
List<Node> nodes = api.nodeList(
|
||||
Collections.emptyList(),
|
||||
Collections.emptyList(),
|
||||
null,
|
||||
null
|
||||
);
|
||||
|
||||
return nodes.stream().map(Node::getName).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
public static com.linbit.linstor.api.model.StoragePool
|
||||
getDiskfulStoragePool(@Nonnull DevelopersApi api, @Nonnull String rscName) throws ApiException
|
||||
{
|
||||
List<ResourceWithVolumes> resources = api.viewResources(
|
||||
Collections.emptyList(),
|
||||
Collections.singletonList(rscName),
|
||||
Collections.emptyList(),
|
||||
Collections.emptyList(),
|
||||
null,
|
||||
null);
|
||||
|
||||
String nodeName = null;
|
||||
String storagePoolName = null;
|
||||
for (ResourceWithVolumes rwv : resources) {
|
||||
if (rwv.getVolumes().isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
Volume vol = rwv.getVolumes().get(0);
|
||||
if (vol.getProviderKind() != ProviderKind.DISKLESS) {
|
||||
nodeName = rwv.getNodeName();
|
||||
storagePoolName = vol.getStoragePoolName();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (nodeName == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
List<com.linbit.linstor.api.model.StoragePool> sps = api.viewStoragePools(
|
||||
Collections.singletonList(nodeName),
|
||||
Collections.singletonList(storagePoolName),
|
||||
Collections.emptyList(),
|
||||
null,
|
||||
null
|
||||
);
|
||||
return !sps.isEmpty() ? sps.get(0) : null;
|
||||
}
|
||||
|
||||
public static String getSnapshotPath(com.linbit.linstor.api.model.StoragePool sp, String rscName, String snapshotName) {
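// Derive the host-visible snapshot path from the backing pool type (dev-mapper name for LVM thin, zfs:// reference for ZFS).
// Worked example with hypothetical names: backing pool "vg0/thinpool", resource "cs-1234-ab", snapshot "cs-5678-cd" gives
// LVM_THIN:  /dev/mapper/vg0-cs--1234--ab_00000_cs--5678--cd
// ZFS:       zfs://vg0/cs-1234-ab_00000@cs-5678-cd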
|
||||
final String suffix = "00000";
|
||||
final String backingPool = sp.getProps().get("StorDriver/StorPoolName");
|
||||
final String path;
|
||||
switch (sp.getProviderKind()) {
|
||||
case LVM_THIN:
|
||||
path = String.format("/dev/mapper/%s-%s_%s_%s",
|
||||
backingPool.split("/")[0], rscName.replace("-", "--"), suffix, snapshotName.replace("-", "--"));
|
||||
break;
|
||||
case ZFS:
|
||||
case ZFS_THIN:
|
||||
path = String.format("zfs://%s/%s_%s@%s", backingPool.split("/")[0], rscName, suffix, snapshotName);
|
||||
break;
|
||||
default:
|
||||
throw new CloudRuntimeException(
|
||||
String.format("Linstor: storage pool type %s doesn't support snapshots.", sp.getProviderKind()));
|
||||
}
|
||||
return path;
|
||||
}
|
||||
|
||||
public static long getCapacityBytes(String linstorUrl, String rscGroupName) {
|
||||
DevelopersApi linstorApi = getLinstorAPI(linstorUrl);
|
||||
try {
|
||||
|
||||
@ -29,4 +29,6 @@

<bean id="linstorPrimaryDataStoreProviderImpl"
class="org.apache.cloudstack.storage.datastore.provider.LinstorPrimaryDatastoreProviderImpl" />
<bean id="linstorConfigManager"
class="org.apache.cloudstack.storage.datastore.util.LinstorConfigurationManager" />
</beans>
pom.xml
@ -699,6 +699,11 @@
<artifactId>xml-apis</artifactId>
<version>2.0.2</version>
</dependency>
<dependency>
<groupId>com.linbit.linstor.api</groupId>
<artifactId>java-linstor</artifactId>
<version>${cs.java-linstor.version}</version>
</dependency>
</dependencies>
</dependencyManagement>
@ -15,10 +15,9 @@
# specific language governing permissions and limitations
# under the License.

# Install the latest version of cloudmonkey
cloudmonkey

# Marvin dependencies are installed via its bundle

pre-commit

# Install the SolidFire SDK for Python
solidfire-sdk-python
@ -4004,12 +4004,8 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C
throw new InvalidParameterException("Only one isolationMethod can be specified for a physical network at this time");
}

if (vnetRange != null) {
// Verify zone type
if (zoneType == NetworkType.Basic || (zoneType == NetworkType.Advanced && zone.isSecurityGroupEnabled())) {
throw new InvalidParameterValueException(
"Can't add vnet range to the physical network in the zone that supports " + zoneType + " network, Security Group enabled: " + zone.isSecurityGroupEnabled());
}
if (vnetRange != null && zoneType == NetworkType.Basic) {
throw new InvalidParameterValueException("Can't add vnet range to the physical network in the Basic zone");
}

BroadcastDomainRange broadcastDomainRange = null;
@ -4132,11 +4128,9 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C
if (zone == null) {
throwInvalidIdException("Zone with id=" + network.getDataCenterId() + " doesn't exist in the system", String.valueOf(network.getDataCenterId()), "dataCenterId");
}
if (newVnetRange != null) {
if (zone.getNetworkType() == NetworkType.Basic || (zone.getNetworkType() == NetworkType.Advanced && zone.isSecurityGroupEnabled())) {
throw new InvalidParameterValueException(
"Can't add vnet range to the physical network in the zone that supports " + zone.getNetworkType() + " network, Security Group enabled: " + zone.isSecurityGroupEnabled());
}

if (newVnetRange != null && zone.getNetworkType() == NetworkType.Basic) {
throw new InvalidParameterValueException("Can't add vnet range to the physical network in the Basic zone");
}

if (tags != null && tags.size() > 1) {
@ -3665,8 +3665,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
isSecurityGroupEnabledNetworkUsed = true;
}

if (!(network.getTrafficType() == TrafficType.Guest && network.getGuestType() == Network.GuestType.Shared)) {
throw new InvalidParameterValueException("Can specify only Shared Guest networks when" + " deploy vm in Advance Security Group enabled zone");
if (network.getTrafficType() != TrafficType.Guest || !Arrays.asList(GuestType.Shared, GuestType.L2).contains(network.getGuestType())) {
throw new InvalidParameterValueException("Can specify only Shared or L2 Guest networks when deploying a VM in an Advanced Security Group enabled zone");
}

_accountMgr.checkAccess(owner, AccessType.UseEntry, false, network);
@ -269,7 +269,7 @@
|
||||
</a-form-item>
|
||||
</a-col>
|
||||
<a-col :md="24" :lg="12" v-if="hyperVMWShow && !deployasis">
|
||||
<a-form-item :label="$t('label.nicadaptertype')" name="nicadaptertype" ref="nicadaptertype">
|
||||
<a-form-item ref="nicAdapterType" name="nicAdapterType" :label="$t('label.nicadaptertype')">
|
||||
<a-select
|
||||
v-model:value="form.nicAdapterType"
|
||||
showSearch
|
||||
@ -812,30 +812,30 @@ export default {
|
||||
|
||||
this.rootDisk.opts = controller
|
||||
},
|
||||
fetchNicAdapterType () {
|
||||
const nicAdapterType = []
|
||||
nicAdapterType.push({
|
||||
fetchNicAdapterTypes () {
|
||||
const nicAdapterTypes = []
|
||||
nicAdapterTypes.push({
|
||||
id: '',
|
||||
description: ''
|
||||
})
|
||||
nicAdapterType.push({
|
||||
nicAdapterTypes.push({
|
||||
id: 'E1000',
|
||||
description: 'E1000'
|
||||
})
|
||||
nicAdapterType.push({
|
||||
nicAdapterTypes.push({
|
||||
id: 'PCNet32',
|
||||
description: 'PCNet32'
|
||||
})
|
||||
nicAdapterType.push({
|
||||
nicAdapterTypes.push({
|
||||
id: 'Vmxnet2',
|
||||
description: 'Vmxnet2'
|
||||
})
|
||||
nicAdapterType.push({
|
||||
nicAdapterTypes.push({
|
||||
id: 'Vmxnet3',
|
||||
description: 'Vmxnet3'
|
||||
})
|
||||
|
||||
this.nicAdapterType.opts = nicAdapterType
|
||||
this.nicAdapterType.opts = nicAdapterTypes
|
||||
},
|
||||
fetchKeyboardType () {
|
||||
const keyboardType = []
|
||||
@ -1001,7 +1001,7 @@ export default {
|
||||
this.resetSelect(arrSelectReset)
|
||||
this.fetchFormat(hyperVisor)
|
||||
this.fetchRootDisk(hyperVisor)
|
||||
this.fetchNicAdapterType()
|
||||
this.fetchNicAdapterTypes()
|
||||
this.fetchKeyboardType()
|
||||
|
||||
this.form.rootDiskControllerType = this.rootDisk.opts.length > 0 ? 'osdefault' : ''
|
||||
@ -1016,10 +1016,10 @@ export default {
|
||||
delete this.form.zoneids
|
||||
}
|
||||
const formRaw = toRaw(this.form)
|
||||
const values = this.handleRemoveFields(formRaw)
|
||||
const formvalues = this.handleRemoveFields(formRaw)
|
||||
let params = {}
|
||||
for (const key in values) {
|
||||
const input = values[key]
|
||||
for (const key in formvalues) {
|
||||
const input = formvalues[key]
|
||||
|
||||
if (input === undefined) {
|
||||
continue
|
||||
|
||||