Merge remote-tracking branch 'apache/4.15'

Abhishek Kumar 2021-04-12 11:43:57 +05:30
commit cce736709e
19 changed files with 333 additions and 192 deletions

View File

@@ -59,6 +59,7 @@ public interface VMSnapshot extends ControlledEntity, Identity, InternalIdentity
         s_fsm.addTransition(Error, Event.ExpungeRequested, Expunging);
         s_fsm.addTransition(Expunging, Event.ExpungeRequested, Expunging);
         s_fsm.addTransition(Expunging, Event.OperationSucceeded, Removed);
+        s_fsm.addTransition(Expunging, Event.OperationFailed, Error);
     }
 }

View File

@@ -85,6 +85,11 @@ public class ListUsageRecordsCmd extends BaseListCmd {
     @Parameter(name = ApiConstants.OLD_FORMAT, type = CommandType.BOOLEAN, description = "Flag to enable description rendered in old format which uses internal database IDs instead of UUIDs. False by default.")
     private Boolean oldFormat;
+    @Parameter(name = ApiConstants.IS_RECURSIVE, type = CommandType.BOOLEAN,
+            description = "Specify if usage records should be fetched recursively per domain. If an account id is passed, records will be limited to that account.",
+            since = "4.15")
+    private Boolean recursive = false;
     /////////////////////////////////////////////////////
     /////////////////// Accessors ///////////////////////
     /////////////////////////////////////////////////////

@@ -153,6 +158,10 @@ public class ListUsageRecordsCmd extends BaseListCmd {
         return oldFormat != null && oldFormat;
     }
+    public Boolean isRecursive() {
+        return recursive;
+    }
     /////////////////////////////////////////////////////
     /////////////// API Implementation///////////////////
     /////////////////////////////////////////////////////
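As an illustrative sketch only (the dates, domain UUID, and authentication parameters below are placeholders, not part of this change), the new flag is expected to surface on the listUsageRecords API roughly as:

    GET /client/api?command=listUsageRecords&startdate=2021-04-01&enddate=2021-04-10&domainid=<domain-uuid>&isrecursive=true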

View File

@@ -55,9 +55,10 @@ public class MigrateVolumeCommand extends Command {
         this.setWait(timeout);
     }
-    public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool sourcePool, StoragePool targetPool) {
+    public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool sourcePool, StoragePool targetPool, String hostGuidInTargetCluster) {
         this(volumeId,volumePath,targetPool, null, Volume.Type.UNKNOWN, -1);
         this.sourcePool = new StorageFilerTO(sourcePool);
+        this.hostGuidInTargetCluster = hostGuidInTargetCluster;
     }
     public MigrateVolumeCommand(DataTO srcData, DataTO destData, Map<String, String> srcDetails, Map<String, String> destDetails, int timeout) {

@@ -69,11 +70,6 @@ public class MigrateVolumeCommand extends Command {
         setWait(timeout);
     }
-    public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool sourcePool, StoragePool targetPool, String targetClusterHost) {
-        this(volumeId, volumePath, sourcePool, targetPool);
-        this.hostGuidInTargetCluster = targetClusterHost;
-    }
     @Override
     public boolean executeInSequence() {
         return true;

@@ -107,6 +103,10 @@ public class MigrateVolumeCommand extends Command {
         return volumeType;
     }
+    public String getHostGuidInTargetCluster() {
+        return hostGuidInTargetCluster;
+    }
     public DataTO getSrcData() {
         return srcData;
     }

@@ -131,10 +131,6 @@ public class MigrateVolumeCommand extends Command {
         return destDetails;
     }
-    public String getHostGuidInTargetCluster() {
-        return hostGuidInTargetCluster;
-    }
     public int getWaitInMillSeconds() {
         return getWait() * 1000;
     }

View File

@@ -43,11 +43,6 @@ import java.util.concurrent.TimeUnit;
 import javax.inject.Inject;
 import javax.naming.ConfigurationException;
-import com.cloud.api.query.dao.DomainRouterJoinDao;
-import com.cloud.api.query.dao.UserVmJoinDao;
-import com.cloud.api.query.vo.DomainRouterJoinVO;
-import com.cloud.api.query.vo.UserVmJoinVO;
-import com.cloud.deployasis.dao.UserVmDeployAsIsDetailsDao;
 import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
 import org.apache.cloudstack.api.ApiConstants;
 import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd;

@@ -131,6 +126,10 @@ import com.cloud.agent.api.to.VirtualMachineTO;
 import com.cloud.agent.manager.Commands;
 import com.cloud.agent.manager.allocator.HostAllocator;
 import com.cloud.alert.AlertManager;
+import com.cloud.api.query.dao.DomainRouterJoinDao;
+import com.cloud.api.query.dao.UserVmJoinDao;
+import com.cloud.api.query.vo.DomainRouterJoinVO;
+import com.cloud.api.query.vo.UserVmJoinVO;
 import com.cloud.capacity.CapacityManager;
 import com.cloud.configuration.Resource.ResourceType;
 import com.cloud.dc.ClusterDetailsDao;

@@ -148,6 +147,7 @@ import com.cloud.deploy.DeploymentPlan;
 import com.cloud.deploy.DeploymentPlanner;
 import com.cloud.deploy.DeploymentPlanner.ExcludeList;
 import com.cloud.deploy.DeploymentPlanningManager;
+import com.cloud.deployasis.dao.UserVmDeployAsIsDetailsDao;
 import com.cloud.event.EventTypes;
 import com.cloud.event.UsageEventUtils;
 import com.cloud.event.UsageEventVO;

@@ -2346,7 +2346,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             String msg = String.format("Resetting lastHost for VM %s(%s)", vm.getInstanceName(), vm.getUuid());
             s_logger.debug(msg);
         }
         vm.setLastHostId(null);
         vm.setPodIdToDeployIn(rootVolumePool.getPodId());
         // OfflineVmwareMigration: a consecutive migration will fail probably (no host not pod)

@@ -2409,7 +2408,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         if (hostId == null) {
             List<VolumeVO> volumes = _volsDao.findByInstanceAndType(vm.getId(), Type.ROOT);
             if (CollectionUtils.isNotEmpty(volumes)) {
-                VolumeVO rootVolume = volumes.get(0);
+                for (VolumeVO rootVolume : volumes) {
                     if (rootVolume.getPoolId() != null) {
                         StoragePoolVO pool = _storagePoolDao.findById(rootVolume.getPoolId());
                         if (pool != null && pool.getClusterId() != null) {

@@ -2417,6 +2416,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                             List<HostVO> hosts = _hostDao.findHypervisorHostInCluster(pool.getClusterId());
                             if (CollectionUtils.isNotEmpty(hosts)) {
                                 hostId = hosts.get(0).getId();
+                                break;
+                            }
                         }
                     }
                 }

@@ -2527,7 +2528,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         return result;
     }
     private void postStorageMigrationCleanup(VMInstanceVO vm, Map<Volume, StoragePool> volumeToPool, HostVO srcHost, Long srcClusterId) throws InsufficientCapacityException {
         StoragePool rootVolumePool = null;
         if (MapUtils.isNotEmpty(volumeToPool)) {

@@ -2549,7 +2549,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         // If VM was cold migrated between clusters belonging to two different VMware DCs,
         // unregister the VM from the source host and cleanup the associated VM files.
         if (vm.getHypervisorType().equals(HypervisorType.VMware)) {
-            afterStorageMigrationVmwareVMcleanup(rootVolumePool, vm, srcHost, srcClusterId);
+            afterStorageMigrationVmwareVMCleanup(rootVolumePool, vm, srcHost, srcClusterId);
         }
     }

@@ -2567,7 +2567,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         }
     }
-    private void afterStorageMigrationVmwareVMcleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId) {
+    private void afterStorageMigrationVmwareVMCleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId) {
         // OfflineVmwareMigration: this should only happen on storage migration, else the guru would already have issued the command
         final Long destClusterId = destPool.getClusterId();
         if (srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId) && srcHost != null) {

View File

@@ -230,11 +230,10 @@ public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshot
             } else {
                 String errMsg = (answer == null) ? null : answer.getDetails();
                 s_logger.error("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + errMsg);
+                processAnswer(vmSnapshotVO, userVm, answer, hostId);
                 throw new CloudRuntimeException("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + errMsg);
             }
-        } catch (OperationTimedoutException e) {
-            throw new CloudRuntimeException("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + e.getMessage());
-        } catch (AgentUnavailableException e) {
+        } catch (OperationTimedoutException | AgentUnavailableException e) {
             throw new CloudRuntimeException("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + e.getMessage());
         }
     }

@@ -254,9 +253,13 @@ public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshot
                     finalizeRevert(vmSnapshot, answer.getVolumeTOs());
                     vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationSucceeded);
                 } else if (as instanceof DeleteVMSnapshotAnswer) {
+                    if (as.getResult()) {
                         DeleteVMSnapshotAnswer answer = (DeleteVMSnapshotAnswer) as;
                         finalizeDelete(vmSnapshot, answer.getVolumeTOs());
                         vmSnapshotDao.remove(vmSnapshot.getId());
+                    } else {
+                        vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed);
+                    }
                 }
             }
         });

View File

@@ -210,7 +210,24 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
         return vmwareVmImplementer.implement(vm, toVirtualMachineTO(vm), getClusterId(vm.getId()));
     }
-    Long getClusterId(long vmId) {
+    private Long getClusterIdFromVmVolume(long vmId) {
+        Long clusterId = null;
+        List<VolumeVO> volumes = _volumeDao.findByInstanceAndType(vmId, Volume.Type.ROOT);
+        if (CollectionUtils.isNotEmpty(volumes)) {
+            for (VolumeVO rootVolume : volumes) {
+                if (rootVolume.getPoolId() != null) {
+                    StoragePoolVO pool = _storagePoolDao.findById(rootVolume.getPoolId());
+                    if (pool != null && pool.getClusterId() != null) {
+                        clusterId = pool.getClusterId();
+                        break;
+                    }
+                }
+            }
+        }
+        return clusterId;
+    }
+    private Long getClusterId(long vmId) {
         Long clusterId = null;
         Long hostId = null;
         VMInstanceVO vm = _vmDao.findById(vmId);

@@ -228,16 +245,7 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
         if (host != null) {
             clusterId = host.getClusterId();
         } else {
-            List<VolumeVO> volumes = _volumeDao.findByInstanceAndType(vmId, Volume.Type.ROOT);
-            if (CollectionUtils.isNotEmpty(volumes)) {
-                VolumeVO rootVolume = volumes.get(0);
-                if (rootVolume.getPoolId() != null) {
-                    StoragePoolVO pool = _storagePoolDao.findById(rootVolume.getPoolId());
-                    if (pool != null && pool.getClusterId() != null) {
-                        clusterId = pool.getClusterId();
-                    }
-                }
-            }
+            clusterId = getClusterIdFromVmVolume(vmId);
         }
         return clusterId;

@@ -1078,14 +1086,37 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
         return null;
     }
+    private boolean isInterClusterMigration(Long srcClusterId, Long destClusterId) {
+        return srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId);
+    }
+    private String getHostGuidInTargetCluster(boolean isInterClusterMigration, Long destClusterId) {
+        String hostGuidInTargetCluster = null;
+        if (isInterClusterMigration) {
+            Host hostInTargetCluster = null;
+            // Without host vMotion might fail between non-shared storages with error similar to,
+            // https://kb.vmware.com/s/article/1003795
+            // As this is offline migration VM won't be started on this host
+            List<HostVO> hosts = _hostDao.findHypervisorHostInCluster(destClusterId);
+            if (CollectionUtils.isNotEmpty(hosts)) {
+                hostInTargetCluster = hosts.get(0);
+            }
+            if (hostInTargetCluster == null) {
+                throw new CloudRuntimeException("Migration failed, unable to find suitable target host for VM placement while migrating between storage pools of different clusters without shared storages");
+            }
+            hostGuidInTargetCluster = hostInTargetCluster.getGuid();
+        }
+        return hostGuidInTargetCluster;
+    }
     @Override
     public List<Command> finalizeMigrate(VirtualMachine vm, Map<Volume, StoragePool> volumeToPool) {
         List<Command> commands = new ArrayList<Command>();
         // OfflineVmwareMigration: specialised migration command
         List<VolumeTO> vols = new ArrayList<>();
         List<Pair<VolumeTO, StorageFilerTO>> volumeToFilerTo = new ArrayList<Pair<VolumeTO, StorageFilerTO>>();
         Long poolClusterId = null;
-        Host hostInTargetCluster = null;
         for (Map.Entry<Volume, StoragePool> entry : volumeToPool.entrySet()) {
             Volume volume = entry.getKey();
             StoragePool pool = entry.getValue();

@@ -1099,21 +1130,9 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
         }
         final Long destClusterId = poolClusterId;
         final Long srcClusterId = getClusterId(vm.getId());
-        final boolean isInterClusterMigration = srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId);
-        if (isInterClusterMigration) {
-            // Without host vMotion might fail between non-shared storages with error similar to,
-            // https://kb.vmware.com/s/article/1003795
-            // As this is offline migration VM won't be started on this host
-            List<HostVO> hosts = _hostDao.findHypervisorHostInCluster(destClusterId);
-            if (CollectionUtils.isNotEmpty(hosts)) {
-                hostInTargetCluster = hosts.get(0);
-            }
-            if (hostInTargetCluster == null) {
-                throw new CloudRuntimeException("Migration failed, unable to find suitable target host for VM placement while migrating between storage pools of different clusters without shared storages");
-            }
-        }
+        final boolean isInterClusterMigration = isInterClusterMigration(destClusterId, srcClusterId);
         MigrateVmToPoolCommand migrateVmToPoolCommand = new MigrateVmToPoolCommand(vm.getInstanceName(),
-                volumeToFilerTo, hostInTargetCluster == null ? null : hostInTargetCluster.getGuid(), true);
+                volumeToFilerTo, getHostGuidInTargetCluster(isInterClusterMigration, destClusterId), true);
         commands.add(migrateVmToPoolCommand);
         // OfflineVmwareMigration: cleanup if needed

View File

@@ -48,8 +48,6 @@ import java.util.stream.Collectors;
 import javax.naming.ConfigurationException;
 import javax.xml.datatype.XMLGregorianCalendar;
-import com.cloud.agent.api.SetupPersistentNetworkAnswer;
-import com.cloud.agent.api.SetupPersistentNetworkCommand;
 import org.apache.cloudstack.api.ApiConstants;
 import org.apache.cloudstack.storage.command.CopyCommand;
 import org.apache.cloudstack.storage.command.StorageSubSystemCommand;

@@ -154,6 +152,8 @@ import com.cloud.agent.api.ScaleVmCommand;
 import com.cloud.agent.api.SetupAnswer;
 import com.cloud.agent.api.SetupCommand;
 import com.cloud.agent.api.SetupGuestNetworkCommand;
+import com.cloud.agent.api.SetupPersistentNetworkAnswer;
+import com.cloud.agent.api.SetupPersistentNetworkCommand;
 import com.cloud.agent.api.StartAnswer;
 import com.cloud.agent.api.StartCommand;
 import com.cloud.agent.api.StartupCommand;

@@ -4511,7 +4511,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
         volumeDeviceKey.put(diskId, volumeId);
     }
-    private ManagedObjectReference getTargetDatastoreMOReference(String destinationPool, VmwareHypervisorHost hyperHost) {
+    private ManagedObjectReference getTargetDatastoreMOReference(String destinationPool,
+                                                                 VmwareHypervisorHost hyperHost) {
         ManagedObjectReference morDs;
         try {
             if (s_logger.isDebugEnabled()) {

@@ -4632,9 +4633,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
             // we need to spawn a worker VM to attach the volume to and move it
             morSourceDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getSourcePool().getUuid());
             sourceDsMo = new DatastoreMO(hyperHost.getContext(), morSourceDS);
-            VmwareHypervisorHost hostInTargetCluster = VmwareHelper.getHostMOFromHostName(getServiceContext(),
+            VmwareHypervisorHost hyperHostInTargetCluster = VmwareHelper.getHostMOFromHostName(getServiceContext(),
                     cmd.getHostGuidInTargetCluster());
-            VmwareHypervisorHost dsHost = hostInTargetCluster == null ? hyperHost : hostInTargetCluster;
+            VmwareHypervisorHost dsHost = hyperHostInTargetCluster == null ? hyperHost : hyperHostInTargetCluster;
             String targetDsName = cmd.getTargetPool().getUuid();
             morDestinationDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(dsHost, targetDsName);
             if(morDestinationDS == null) {

@@ -4649,14 +4650,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
                 isvVolsInvolved = true;
                 vmName = getWorkerName(getServiceContext(), cmd, 0, destinationDsMo);
             }
-            String hardwareVersion = null;
-            if (hostInTargetCluster != null) {
-                Integer sourceHardwareVersion = HypervisorHostHelper.getHostHardwareVersion(hyperHost);
-                Integer destinationHardwareVersion = HypervisorHostHelper.getHostHardwareVersion(dsHost);
-                if (sourceHardwareVersion != null && destinationHardwareVersion != null && !sourceHardwareVersion.equals(destinationHardwareVersion)) {
-                    hardwareVersion = String.valueOf(Math.min(sourceHardwareVersion, destinationHardwareVersion));
-                }
-            }
             // OfflineVmwareMigration: refactor for re-use
             // OfflineVmwareMigration: 1. find data(store)

@@ -4665,7 +4658,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
             s_logger.info("Create worker VM " + vmName);
             // OfflineVmwareMigration: 2. create the worker with access to the data(store)
-            vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, sourceDsMo, vmName, hardwareVersion);
+            vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, sourceDsMo, vmName,
+                    HypervisorHostHelper.getMinimumHostHardwareVersion(hyperHost, hyperHostInTargetCluster));
             if (vmMo == null) {
                 // OfflineVmwareMigration: don't throw a general Exception but think of a specific one
                 throw new CloudRuntimeException("Unable to create a worker VM for volume operation");

View File

@@ -35,6 +35,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.apache.cloudstack.storage.to.VolumeObjectTO;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.log4j.Logger;

@@ -86,20 +87,19 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
     @Override
     public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
-        // OfflineVmwareMigration: return StrategyPriority.HYPERVISOR when destData is in a storage pool in the same vmware-cluster and both are volumes
+        // OfflineVmwareMigration: return StrategyPriority.HYPERVISOR when destData is in a storage pool in the same pod or one of srcData & destData is in a zone-wide pool and both are volumes
         if (isOnVmware(srcData, destData)
                 && isOnPrimary(srcData, destData)
                 && isVolumesOnly(srcData, destData)
+                && isIntraPodOrZoneWideStoreInvolved(srcData, destData)
                 && isDettached(srcData)) {
             if (s_logger.isDebugEnabled()) {
-                String msg = String.format("%s can handle the request because %d(%s) and %d(%s) share the VMware cluster %s (== %s)"
+                String msg = String.format("%s can handle the request because %d(%s) and %d(%s) share the pod"
                         , this.getClass()
                         , srcData.getId()
                         , srcData.getUuid()
                         , destData.getId()
-                        , destData.getUuid()
-                        , storagePoolDao.findById(srcData.getDataStore().getId()).getClusterId()
-                        , storagePoolDao.findById(destData.getDataStore().getId()).getClusterId());
+                        , destData.getUuid());
                 s_logger.debug(msg);
             }
             return StrategyPriority.HYPERVISOR;

@@ -107,6 +107,17 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
         return StrategyPriority.CANT_HANDLE;
     }
+    private boolean isIntraPodOrZoneWideStoreInvolved(DataObject srcData, DataObject destData) {
+        DataStore srcStore = srcData.getDataStore();
+        StoragePoolVO srcPool = storagePoolDao.findById(srcStore.getId());
+        DataStore destStore = destData.getDataStore();
+        StoragePoolVO destPool = storagePoolDao.findById(destStore.getId());
+        if (srcPool.getPodId() != null && destPool.getPodId() != null) {
+            return srcPool.getPodId().equals(destPool.getPodId());
+        }
+        return (ScopeType.ZONE.equals(srcPool.getScope()) || ScopeType.ZONE.equals(destPool.getScope()));
+    }
     private boolean isDettached(DataObject srcData) {
         VolumeVO volume = volDao.findById(srcData.getId());
         return volume.getInstanceId() == null;

@@ -127,30 +138,37 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
                 && HypervisorType.VMware.equals(destData.getTO().getHypervisorType());
     }
-    private boolean isIntraCluster(DataObject srcData, DataObject destData) {
-        DataStore srcStore = srcData.getDataStore();
-        StoragePool srcPool = storagePoolDao.findById(srcStore.getId());
-        DataStore destStore = destData.getDataStore();
-        StoragePool destPool = storagePoolDao.findById(destStore.getId());
-        if (srcPool.getClusterId() != null && destPool.getClusterId() != null) {
-            return srcPool.getClusterId().equals(destPool.getClusterId());
-        }
-        return false;
-    }
-    /**
-     * Ensure that the scope of source and destination storage pools match
-     *
-     * @param srcData
-     * @param destData
-     * @return
-     */
-    private boolean isStoreScopeEqual(DataObject srcData, DataObject destData) {
-        DataStore srcStore = srcData.getDataStore();
-        DataStore destStore = destData.getDataStore();
-        String msg = String.format("Storage scope of source pool is %s and of destination pool is %s", srcStore.getScope().toString(), destStore.getScope().toString());
-        s_logger.debug(msg);
-        return srcStore.getScope().getScopeType() == (destStore.getScope().getScopeType());
-    }
+    private Pair<Long, String> getHostIdForVmAndHostGuidInTargetCluster(DataObject srcData, DataObject destData) {
+        StoragePool sourcePool = (StoragePool) srcData.getDataStore();
+        ScopeType sourceScopeType = srcData.getDataStore().getScope().getScopeType();
+        StoragePool targetPool = (StoragePool) destData.getDataStore();
+        ScopeType targetScopeType = destData.getDataStore().getScope().getScopeType();
+        Long hostId = null;
+        String hostGuidInTargetCluster = null;
+        if (ScopeType.CLUSTER.equals(sourceScopeType)) {
+            // Find Volume source cluster and select any Vmware hypervisor host to attach worker VM
+            hostId = findSuitableHostIdForWorkerVmPlacement(sourcePool.getClusterId());
+            if (hostId == null) {
+                throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in the cluster of storage pool: " + sourcePool.getName());
+            }
+            if (ScopeType.CLUSTER.equals(targetScopeType) && !sourcePool.getClusterId().equals(targetPool.getClusterId())) {
+                // Without host vMotion might fail between non-shared storages with error similar to,
+                // https://kb.vmware.com/s/article/1003795
+                List<HostVO> hosts = hostDao.findHypervisorHostInCluster(targetPool.getClusterId());
+                if (CollectionUtils.isNotEmpty(hosts)) {
+                    hostGuidInTargetCluster = hosts.get(0).getGuid();
+                }
+                if (hostGuidInTargetCluster == null) {
+                    throw new CloudRuntimeException("Offline Migration failed, unable to find suitable target host for worker VM placement while migrating between storage pools of different cluster without shared storages");
+                }
+            }
+        } else if (ScopeType.CLUSTER.equals(targetScopeType)) {
+            hostId = findSuitableHostIdForWorkerVmPlacement(targetPool.getClusterId());
+            if (hostId == null) {
+                throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in the cluster of storage pool: " + targetPool.getName());
+            }
+        }
+        return new Pair<>(hostId, hostGuidInTargetCluster);
+    }
     @Override

@@ -187,41 +205,15 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
             // OfflineVmwareMigration: we shouldn't be here as we would have refused in the canHandle call
             throw new UnsupportedOperationException();
         }
+        Pair<Long, String> hostIdForVmAndHostGuidInTargetCluster = getHostIdForVmAndHostGuidInTargetCluster(srcData, destData);
+        Long hostId = hostIdForVmAndHostGuidInTargetCluster.first();
         StoragePool sourcePool = (StoragePool) srcData.getDataStore();
-        ScopeType sourceScopeType = srcData.getDataStore().getScope().getScopeType();
         StoragePool targetPool = (StoragePool) destData.getDataStore();
-        ScopeType targetScopeType = destData.getDataStore().getScope().getScopeType();
-        Long hostId = null;
-        String hostGuidInTargetCluster = null;
-        if (ScopeType.CLUSTER.equals(sourceScopeType)) {
-            // Find Volume source cluster and select any Vmware hypervisor host to attach worker VM
-            hostId = findSuitableHostIdForWorkerVmPlacement(sourcePool.getClusterId());
-            if (hostId == null) {
-                throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in the cluster of storage pool: " + sourcePool.getName());
-            }
-            if (ScopeType.CLUSTER.equals(targetScopeType) && !sourcePool.getClusterId().equals(targetPool.getClusterId())) {
-                // Without host vMotion might fail between non-shared storages with error similar to,
-                // https://kb.vmware.com/s/article/1003795
-                List<HostVO> hosts = hostDao.findHypervisorHostInCluster(targetPool.getClusterId());
-                if (CollectionUtils.isNotEmpty(hosts)) {
-                    hostGuidInTargetCluster = hosts.get(0).getGuid();
-                }
-                if (hostGuidInTargetCluster == null) {
-                    throw new CloudRuntimeException("Offline Migration failed, unable to find suitable target host for worker VM placement while migrating between storage pools of different cluster without shared storages");
-                }
-            }
-        } else if (ScopeType.CLUSTER.equals(targetScopeType)) {
-            hostId = findSuitableHostIdForWorkerVmPlacement(targetPool.getClusterId());
-            if (hostId == null) {
-                throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in the cluster of storage pool: " + targetPool.getName());
-            }
-        }
         MigrateVolumeCommand cmd = new MigrateVolumeCommand(srcData.getId()
                 , srcData.getTO().getPath()
                 , sourcePool
                 , targetPool
-                , hostGuidInTargetCluster);
+                , hostIdForVmAndHostGuidInTargetCluster.second());
-        // OfflineVmwareMigration: should be ((StoragePool)srcData.getDataStore()).getHypervisor() but that is NULL, so hardcoding
         Answer answer;
         if (hostId != null) {
             answer = agentMgr.easySend(hostId, cmd);

View File

@@ -21,10 +21,14 @@ import java.util.ArrayList;
 import java.util.List;
 import javax.inject.Inject;
+import javax.naming.ConfigurationException;
 import javax.naming.NamingException;
 import javax.naming.ldap.LdapContext;
+import java.util.Map;
 import java.util.UUID;
+import com.cloud.user.AccountManager;
+import com.cloud.utils.component.ComponentLifecycleBase;
 import com.cloud.utils.exception.CloudRuntimeException;
 import org.apache.cloudstack.api.LdapValidator;
 import org.apache.cloudstack.api.command.LDAPConfigCmd;

@@ -42,6 +46,8 @@ import org.apache.cloudstack.api.response.LdapConfigurationResponse;
 import org.apache.cloudstack.api.response.LdapUserResponse;
 import org.apache.cloudstack.api.response.LinkAccountToLdapResponse;
 import org.apache.cloudstack.api.response.LinkDomainToLdapResponse;
+import org.apache.cloudstack.framework.messagebus.MessageBus;
+import org.apache.cloudstack.framework.messagebus.MessageSubscriber;
 import org.apache.cloudstack.ldap.dao.LdapConfigurationDao;
 import org.apache.cloudstack.ldap.dao.LdapTrustMapDao;
 import org.apache.commons.lang.Validate;

@@ -57,7 +63,7 @@ import com.cloud.user.dao.AccountDao;
 import com.cloud.utils.Pair;

 @Component
-public class LdapManagerImpl implements LdapManager, LdapValidator {
+public class LdapManagerImpl extends ComponentLifecycleBase implements LdapManager, LdapValidator {
     private static final Logger LOGGER = Logger.getLogger(LdapManagerImpl.class.getName());

     @Inject

@@ -80,6 +86,9 @@ public class LdapManagerImpl implements LdapManager, LdapValidator {
     @Inject
     LdapTrustMapDao _ldapTrustMapDao;
+    @Inject
+    private MessageBus messageBus;
     public LdapManagerImpl() {
         super();
     }

@@ -93,6 +102,33 @@ public class LdapManagerImpl implements LdapManager, LdapValidator {
         _ldapConfiguration = ldapConfiguration;
     }
+    @Override
+    public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
+        super.configure(name, params);
+        LOGGER.debug("Configuring LDAP Manager");
+        messageBus.subscribe(AccountManager.MESSAGE_REMOVE_ACCOUNT_EVENT, new MessageSubscriber() {
+            @Override
+            public void onPublishMessage(String senderAddress, String subject, Object args) {
+                try {
+                    final Account account = accountDao.findByIdIncludingRemoved((Long) args);
+                    long domainId = account.getDomainId();
+                    LdapTrustMapVO ldapTrustMapVO = _ldapTrustMapDao.findByAccount(domainId, account.getAccountId());
+                    if (ldapTrustMapVO != null) {
+                        String msg = String.format("Removing link between LDAP: %s - type: %s and account: %s on domain: %s",
+                                ldapTrustMapVO.getName(), ldapTrustMapVO.getType().name(), account.getAccountId(), domainId);
+                        LOGGER.debug(msg);
+                        _ldapTrustMapDao.remove(ldapTrustMapVO.getId());
+                    }
+                } catch (final Exception e) {
+                    LOGGER.error("Caught exception while removing account linked to LDAP", e);
+                }
+            }
+        });
+        return true;
+    }
     @Override
     public LdapConfigurationResponse addConfiguration(final LdapAddConfigurationCmd cmd) throws InvalidParameterValueException {
         return addConfigurationInternal(cmd.getHostname(),cmd.getPort(),cmd.getDomainId());

View File

@@ -185,6 +185,7 @@ import com.cloud.vm.VmWorkResizeVolume;
 import com.cloud.vm.VmWorkSerializer;
 import com.cloud.vm.VmWorkTakeVolumeSnapshot;
 import com.cloud.vm.dao.UserVmDao;
+import com.cloud.vm.dao.UserVmDetailsDao;
 import com.cloud.vm.dao.VMInstanceDao;
 import com.cloud.vm.snapshot.VMSnapshotVO;
 import com.cloud.vm.snapshot.dao.VMSnapshotDao;

@@ -228,6 +229,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
     @Inject
     private UserVmDao _userVmDao;
     @Inject
+    private UserVmDetailsDao userVmDetailsDao;
+    @Inject
     private UserVmService _userVmService;
     @Inject
     private VolumeDataStoreDao _volumeStoreDao;

@@ -952,9 +955,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
         HypervisorType hypervisorType = _volsDao.getHypervisorType(volume.getId());
-        if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.XenServer && hypervisorType != HypervisorType.VMware && hypervisorType != HypervisorType.Any
+        if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.XenServer
+                && hypervisorType != HypervisorType.VMware && hypervisorType != HypervisorType.Any
                 && hypervisorType != HypervisorType.None) {
-            throw new InvalidParameterValueException("Hypervisor " + hypervisorType + " does not support rootdisksize override");
+            throw new InvalidParameterValueException("Hypervisor " + hypervisorType + " does not support volume resize");
         }

         if (volume.getState() != Volume.State.Ready && volume.getState() != Volume.State.Allocated) {

View File

@@ -26,6 +26,7 @@ import java.util.TimeZone;
 import javax.inject.Inject;
 import javax.naming.ConfigurationException;
+import com.cloud.domain.Domain;
 import org.apache.cloudstack.api.command.admin.usage.GenerateUsageRecordsCmd;
 import org.apache.cloudstack.api.command.admin.usage.ListUsageRecordsCmd;
 import org.apache.cloudstack.api.command.admin.usage.RemoveRawUsageRecordsCmd;

@@ -200,22 +201,41 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag
             }
         }
-        boolean isAdmin = false;
-        boolean isDomainAdmin = false;
+        boolean ignoreAccountId = false;
+        boolean isDomainAdmin = _accountService.isDomainAdmin(caller.getId());
+        boolean isNormalUser = _accountService.isNormalUser(caller.getId());
         //If accountId couldn't be found using accountName and domainId, get it from userContext
         if (accountId == null) {
             accountId = caller.getId();
             //List records for all the accounts if the caller account is of type admin.
             //If account_id or account_name is explicitly mentioned, list records for the specified account only even if the caller is of type admin
-            if (_accountService.isRootAdmin(caller.getId())) {
-                isAdmin = true;
-            } else if (_accountService.isDomainAdmin(caller.getId())) {
-                isDomainAdmin = true;
-            }
+            ignoreAccountId = _accountService.isRootAdmin(caller.getId());
             s_logger.debug("Account details not available. Using userContext accountId: " + accountId);
         }
+        // Check if a domain admin is allowed to access the requested domain id
+        if (isDomainAdmin) {
+            if (domainId != null) {
+                Account callerAccount = _accountService.getAccount(caller.getId());
+                Domain domain = _domainDao.findById(domainId);
+                _accountService.checkAccess(callerAccount, domain);
+            } else {
+                // Domain admins can only access their own domain's usage records.
+                // Set the domain if not specified.
+                domainId = caller.getDomainId();
+            }
+            if (cmd.getAccountId() != null) {
+                // Check if a domain admin is allowed to access the requested account info.
+                checkDomainAdminAccountAccess(accountId, domainId);
+            }
+        }
+        // By default users do not have access to this API.
+        // Adding checks here in case someone changes the default access.
+        checkUserAccess(cmd, accountId, caller, isNormalUser);
         Date startDate = cmd.getStartDate();
         Date endDate = cmd.getEndDate();
         if (startDate.after(endDate)) {

@@ -234,23 +254,28 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag
         SearchCriteria<UsageVO> sc = _usageDao.createSearchCriteria();
-        if (accountId != -1 && accountId != Account.ACCOUNT_ID_SYSTEM && !isAdmin && !isDomainAdmin) {
-            sc.addAnd("accountId", SearchCriteria.Op.EQ, accountId);
-        }
-        if (isDomainAdmin) {
-            SearchCriteria<DomainVO> sdc = _domainDao.createSearchCriteria();
-            sdc.addOr("path", SearchCriteria.Op.LIKE, _domainDao.findById(caller.getDomainId()).getPath() + "%");
-            List<DomainVO> domains = _domainDao.search(sdc, null);
-            List<Long> domainIds = new ArrayList<Long>();
-            for (DomainVO domain : domains)
-                domainIds.add(domain.getId());
-            sc.addAnd("domainId", SearchCriteria.Op.IN, domainIds.toArray());
-        }
+        if (accountId != -1 && accountId != Account.ACCOUNT_ID_SYSTEM && !ignoreAccountId) {
+            // account exists and either domain on user role
+            // If not recursive and the account belongs to the user/domain admin, or the account was passed in, filter
+            if ((accountId == caller.getId() && !cmd.isRecursive()) || cmd.getAccountId() != null){
+                sc.addAnd("accountId", SearchCriteria.Op.EQ, accountId);
+            }
+        }
         if (domainId != null) {
+            if (cmd.isRecursive()) {
+                SearchCriteria<DomainVO> sdc = _domainDao.createSearchCriteria();
+                sdc.addOr("path", SearchCriteria.Op.LIKE, _domainDao.findById(domainId).getPath() + "%");
+                List<DomainVO> domains = _domainDao.search(sdc, null);
+                List<Long> domainIds = new ArrayList<Long>();
+                for (DomainVO domain : domains) {
+                    domainIds.add(domain.getId());
+                }
+                sc.addAnd("domainId", SearchCriteria.Op.IN, domainIds.toArray());
+            } else {
                 sc.addAnd("domainId", SearchCriteria.Op.EQ, domainId);
+            }
         }
         if (usageType != null) {
             sc.addAnd("usageType", SearchCriteria.Op.EQ, usageType);

@@ -372,6 +397,46 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag
         return new Pair<List<? extends Usage>, Integer>(usageRecords.first(), usageRecords.second());
     }
+    private void checkUserAccess(ListUsageRecordsCmd cmd, Long accountId, Account caller, boolean isNormalUser) {
+        if (isNormalUser) {
+            // A user can only access their own account records
+            if (caller.getId() != accountId) {
+                throw new PermissionDeniedException("Users are only allowed to list usage records for their own account.");
+            }
+            // Users cannot get recursive records
+            if (cmd.isRecursive()) {
+                throw new PermissionDeniedException("Users are not allowed to list usage records recursively.");
+            }
+            // Users cannot get domain records
+            if (cmd.getDomainId() != null) {
+                throw new PermissionDeniedException("Users are not allowed to list usage records for a domain");
+            }
+        }
+    }
+    private void checkDomainAdminAccountAccess(Long accountId, Long domainId) {
+        Account account = _accountService.getAccount(accountId);
+        boolean matchFound = false;
+        if (account.getDomainId() == domainId) {
+            matchFound = true;
+        } else {
+            // Check if the account is in a child domain of this domain admin.
+            List<DomainVO> childDomains = _domainDao.findAllChildren(_domainDao.findById(domainId).getPath(), domainId);
+            for (DomainVO domainVO : childDomains) {
+                if (account.getDomainId() == domainVO.getId()) {
+                    matchFound = true;
+                    break;
+                }
+            }
+        }
+        if (!matchFound) {
+            throw new PermissionDeniedException("Domain admins may only retrieve usage records for accounts in their own domain and child domains.");
+        }
+    }
     @Override
     public TimeZone getUsageTimezone() {
         return _usageTimezone;

View File

@@ -7084,6 +7084,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
             }
             newVols.add(newVol);
+            if (userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.ROOT_DISK_SIZE) == null && !newVol.getSize().equals(template.getSize())) {
+                VolumeVO resizedVolume = (VolumeVO) newVol;
+                resizedVolume.setSize(template.getSize());
+                _volsDao.update(resizedVolume.getId(), resizedVolume);
+            }
             // 1. Save usage event and update resource count for user vm volumes
             _resourceLimitMgr.incrementResourceCount(newVol.getAccountId(), ResourceType.volume, newVol.isDisplay());
             _resourceLimitMgr.incrementResourceCount(newVol.getAccountId(), ResourceType.primary_storage, newVol.isDisplay(), new Long(newVol.getSize()));

View File

@@ -37,7 +37,6 @@ COMMIT
 -A INPUT -i eth0 -p udp -m udp --dport 53 -j ACCEPT
 -A INPUT -i eth0 -p tcp -m tcp --dport 53 -j ACCEPT
 -A INPUT -i eth1 -p tcp -m tcp -m state --state NEW,ESTABLISHED --dport 3922 -j ACCEPT
--A INPUT -i eth0 -p tcp -m tcp -m state --state NEW --dport 80 -j ACCEPT
 -A FORWARD -i eth0 -o eth1 -m state --state RELATED,ESTABLISHED -j ACCEPT
 -A FORWARD -i eth2 -o eth0 -m state --state RELATED,ESTABLISHED -j ACCEPT
 -A FORWARD -i eth0 -o eth0 -m state --state NEW -j ACCEPT

View File

@@ -418,6 +418,8 @@ class CsIP:
             ["filter", "", "-A INPUT -i %s -p tcp -m tcp --dport 53 -s %s -j ACCEPT" % (self.dev, guestNetworkCidr)])
         self.fw.append(
             ["filter", "", "-A INPUT -i %s -p tcp -m tcp --dport 80 -s %s -m state --state NEW -j ACCEPT" % (self.dev, guestNetworkCidr)])
+        self.fw.append(
+            ["filter", "", "-A INPUT -i %s -p tcp -m tcp --dport 443 -s %s -m state --state NEW -j ACCEPT" % (self.dev, guestNetworkCidr)])
         self.fw.append(
             ["filter", "", "-A INPUT -i %s -p tcp -m tcp --dport 8080 -s %s -m state --state NEW -j ACCEPT" % (self.dev, guestNetworkCidr)])
         self.fw.append(

@@ -467,9 +469,10 @@ class CsIP:
             ["filter", "", "-A INPUT -i %s -p udp -m udp --dport 53 -s %s -j ACCEPT" % (self.dev, guestNetworkCidr)])
         self.fw.append(
             ["filter", "", "-A INPUT -i %s -p tcp -m tcp --dport 53 -s %s -j ACCEPT" % (self.dev, guestNetworkCidr)])
         self.fw.append(
             ["filter", "", "-A INPUT -i %s -p tcp -m tcp --dport 80 -s %s -m state --state NEW -j ACCEPT" % (self.dev, guestNetworkCidr)])
+        self.fw.append(
+            ["filter", "", "-A INPUT -i %s -p tcp -m tcp --dport 443 -s %s -m state --state NEW -j ACCEPT" % (self.dev, guestNetworkCidr)])
         self.fw.append(
             ["filter", "", "-A INPUT -i %s -p tcp -m tcp --dport 8080 -s %s -m state --state NEW -j ACCEPT" % (self.dev, guestNetworkCidr)])
         self.fw.append(["mangle", "",

View File

@@ -59,16 +59,6 @@ class CsApache(CsApp):
         file.commit()
         CsHelper.execute2("systemctl restart apache2", False)
-        self.fw.append([
-            "", "front",
-            "-A INPUT -i %s -d %s/32 -p tcp -m tcp -m state --state NEW --dport 80 -j ACCEPT" % (self.dev, self.ip)
-        ])
-        self.fw.append([
-            "", "front",
-            "-A INPUT -i %s -d %s/32 -p tcp -m tcp -m state --state NEW --dport 443 -j ACCEPT" % (self.dev, self.ip)
-        ])

 class CsPasswdSvc():
     """

View File

@@ -1443,6 +1443,11 @@ class TestKVMLiveMigration(cloudstackTestCase):
         if len(self.hosts) < 2:
             self.skipTest("Requires at least two hosts for performing migration related tests")
+        for host in self.hosts:
+            if host.details['Host.OS'] in ['CentOS']:
+                self.skipTest("live migration is not stabily supported on CentOS")
     def tearDown(self):
         try:
             cleanup_resources(self.apiclient, self.cleanup)

View File

@@ -2267,4 +2267,21 @@ public class HypervisorHostHelper {
         version = apiVersionHardwareVersionMap.get(hostApiVersion);
         return version;
     }
+
+    /*
+    Finds minimum host hardware version as String, of two hosts when both of them are not null
+    and hardware version of both hosts is different.
+    Return null otherwise
+     */
+    public static String getMinimumHostHardwareVersion(VmwareHypervisorHost host1, VmwareHypervisorHost host2) {
+        String hardwareVersion = null;
+        if (host1 != null & host2 != null) {
+            Integer host1Version = getHostHardwareVersion(host1);
+            Integer host2Version = getHostHardwareVersion(host2);
+            if (host1Version != null && host2Version != null && !host1Version.equals(host2Version)) {
+                hardwareVersion = String.valueOf(Math.min(host1Version, host2Version));
+            }
+        }
+        return hardwareVersion;
+    }
 }

View File

@@ -16,6 +16,8 @@
 // under the License.
 package com.cloud.hypervisor.vmware.mo;
+import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
 import java.io.BufferedReader;
 import java.io.BufferedWriter;
 import java.io.ByteArrayInputStream;

@@ -39,6 +41,14 @@ import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.log4j.Logger;
+import com.cloud.hypervisor.vmware.mo.SnapshotDescriptor.SnapshotInfo;
+import com.cloud.hypervisor.vmware.util.VmwareContext;
+import com.cloud.hypervisor.vmware.util.VmwareHelper;
+import com.cloud.utils.ActionDelegate;
+import com.cloud.utils.Pair;
+import com.cloud.utils.Ternary;
+import com.cloud.utils.concurrency.NamedThreadFactory;
+import com.cloud.utils.script.Script;
 import com.google.gson.Gson;
 import com.vmware.vim25.ArrayOfManagedObjectReference;
 import com.vmware.vim25.ChoiceOption;

@@ -92,6 +102,7 @@ import com.vmware.vim25.VirtualMachineConfigInfo;
 import com.vmware.vim25.VirtualMachineConfigOption;
 import com.vmware.vim25.VirtualMachineConfigSpec;
 import com.vmware.vim25.VirtualMachineConfigSummary;
+import com.vmware.vim25.VirtualMachineDefinedProfileSpec;
 import com.vmware.vim25.VirtualMachineFileInfo;
 import com.vmware.vim25.VirtualMachineFileLayoutEx;
 import com.vmware.vim25.VirtualMachineMessage;

@@ -106,18 +117,6 @@ import com.vmware.vim25.VirtualMachineSnapshotInfo;
 import com.vmware.vim25.VirtualMachineSnapshotTree;
 import com.vmware.vim25.VirtualSCSIController;
 import com.vmware.vim25.VirtualSCSISharing;
-import com.vmware.vim25.VirtualMachineDefinedProfileSpec;
-import com.cloud.hypervisor.vmware.mo.SnapshotDescriptor.SnapshotInfo;
-import com.cloud.hypervisor.vmware.util.VmwareContext;
-import com.cloud.hypervisor.vmware.util.VmwareHelper;
-import com.cloud.utils.ActionDelegate;
-import com.cloud.utils.Pair;
-import com.cloud.utils.Ternary;
-import com.cloud.utils.concurrency.NamedThreadFactory;
-import com.cloud.utils.script.Script;
-import static com.cloud.utils.NumbersUtil.toHumanReadableSize;

 public class VirtualMachineMO extends BaseMO {
     private static final Logger s_logger = Logger.getLogger(VirtualMachineMO.class);

@@ -460,9 +459,13 @@ public class VirtualMachineMO extends BaseMO {
         return false;
     }
-    public boolean changeDatastore(ManagedObjectReference morDataStore) throws Exception {
+    public boolean changeDatastore(ManagedObjectReference morDataStore, VmwareHypervisorHost targetHost) throws Exception {
         VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec();
         relocateSpec.setDatastore(morDataStore);
+        if (targetHost != null) {
+            relocateSpec.setHost(targetHost.getMor());
+            relocateSpec.setPool(targetHost.getHyperHostOwnerResourcePool());
+        }
         ManagedObjectReference morTask = _context.getService().relocateVMTask(_mor, relocateSpec, null);
View File

@@ -40,6 +40,17 @@ import javax.xml.datatype.XMLGregorianCalendar;
 import org.apache.commons.lang.StringUtils;
 import org.apache.log4j.Logger;
+import com.cloud.hypervisor.vmware.mo.CustomFieldConstants;
+import com.cloud.hypervisor.vmware.mo.DatastoreMO;
+import com.cloud.hypervisor.vmware.mo.DiskControllerType;
+import com.cloud.hypervisor.vmware.mo.HostMO;
+import com.cloud.hypervisor.vmware.mo.LicenseAssignmentManagerMO;
+import com.cloud.hypervisor.vmware.mo.VirtualEthernetCardType;
+import com.cloud.hypervisor.vmware.mo.VirtualMachineMO;
+import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost;
+import com.cloud.utils.Pair;
+import com.cloud.utils.Ternary;
+import com.cloud.utils.exception.ExceptionUtil;
 import com.vmware.vim25.DistributedVirtualSwitchPortConnection;
 import com.vmware.vim25.DynamicProperty;
 import com.vmware.vim25.GuestOsDescriptor;

@@ -56,7 +67,6 @@ import com.vmware.vim25.VirtualCdromRemotePassthroughBackingInfo;
 import com.vmware.vim25.VirtualDevice;
 import com.vmware.vim25.VirtualDeviceBackingInfo;
 import com.vmware.vim25.VirtualDeviceConnectInfo;
-import com.vmware.vim25.VirtualUSBController;
 import com.vmware.vim25.VirtualDisk;
 import com.vmware.vim25.VirtualDiskFlatVer1BackingInfo;
 import com.vmware.vim25.VirtualDiskFlatVer2BackingInfo;

@@ -72,21 +82,10 @@ import com.vmware.vim25.VirtualEthernetCardOpaqueNetworkBackingInfo;
 import com.vmware.vim25.VirtualMachineConfigSpec;
 import com.vmware.vim25.VirtualMachineSnapshotTree;
 import com.vmware.vim25.VirtualPCNet32;
+import com.vmware.vim25.VirtualUSBController;
 import com.vmware.vim25.VirtualVmxnet2;
 import com.vmware.vim25.VirtualVmxnet3;
-import com.cloud.hypervisor.vmware.mo.DiskControllerType;
-import com.cloud.hypervisor.vmware.mo.DatastoreMO;
-import com.cloud.hypervisor.vmware.mo.HostMO;
-import com.cloud.hypervisor.vmware.mo.CustomFieldConstants;
-import com.cloud.hypervisor.vmware.mo.LicenseAssignmentManagerMO;
-import com.cloud.hypervisor.vmware.mo.VirtualEthernetCardType;
-import com.cloud.hypervisor.vmware.mo.VirtualMachineMO;
-import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost;
-import com.cloud.utils.Pair;
-import com.cloud.utils.Ternary;
-import com.cloud.utils.exception.ExceptionUtil;

 public class VmwareHelper {
     @SuppressWarnings("unused")
     private static final Logger s_logger = Logger.getLogger(VmwareHelper.class);