CLOUDSTACK-1608: don't support attach volume between different storage scopes

Edison Su 2013-03-19 14:36:37 -07:00
parent b1a25cf917
commit c60ef79321
8 changed files with 120 additions and 56 deletions
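In short: a data volume can be attached to a VM only when the primary storage of the volume and of the VM's root volume have the same scope type (zone, cluster, or host); if the types match but the scopes themselves differ, the volume has to be moved first. A rough sketch of that rule using the scope classes touched in this commit (the class name ScopeRuleSketch and the literal ids are illustrative only, and the cloud engine API classes are assumed to be on the classpath):

import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;

public class ScopeRuleSketch {
    public static void main(String[] args) {
        Scope zoneWide = new ZoneScope(1L);                // zone-wide primary storage
        Scope clusterWide = new ClusterScope(1L, 1L, 1L);  // cluster-scoped primary storage
        Scope hostLocal = new HostScope(1L);               // local storage bound to one host

        // Different scope types: attaching a volume across these is now rejected.
        System.out.println(zoneWide.getScopeType() == clusterWide.getScopeType()); // false
        // Same scope type and same id: the volume can be attached without a move.
        System.out.println(hostLocal.isSameScope(new HostScope(1L)));              // true
        // Same scope type but a different id: needMoveVolume() below reports a move.
        System.out.println(hostLocal.isSameScope(new HostScope(2L)));              // false
    }
}

The shared isSameScope() logic is the new AbstractScope below; the attach-time enforcement lives in VolumeManagerImpl.needMoveVolume().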

View File

@@ -0,0 +1,30 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.engine.subsystem.api.storage;
public abstract class AbstractScope implements Scope {
@Override
public boolean isSameScope(Scope scope) {
// Same scope type and same scope id; the ids are boxed Longs, so compare them with equals() rather than ==.
return this.getScopeType() == scope.getScopeType() && this.getScopeId() != null && this.getScopeId().equals(scope.getScopeId());
}
}

View File

@@ -19,7 +19,7 @@
package org.apache.cloudstack.engine.subsystem.api.storage;
-public class ClusterScope implements Scope {
+public class ClusterScope extends AbstractScope {
private ScopeType type = ScopeType.CLUSTER;
private Long clusterId;
private Long podId;

View File

@@ -19,7 +19,7 @@
package org.apache.cloudstack.engine.subsystem.api.storage;
-public class HostScope implements Scope {
+public class HostScope extends AbstractScope {
private ScopeType type = ScopeType.HOST;
private Long hostId;
public HostScope(Long hostId) {

View File

@@ -20,5 +20,6 @@ package org.apache.cloudstack.engine.subsystem.api.storage;
public interface Scope {
public ScopeType getScopeType();
public boolean isSameScope(Scope scope);
public Long getScopeId();
}
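With getScopeId() on the Scope interface and the shared isSameScope() in AbstractScope above, a concrete scope only has to expose its type and its id. A minimal sketch of what such a class looks like after this change (illustrative only: the diff shows just the changed declaration of the real HostScope, so the accessor bodies here are assumptions):

package org.apache.cloudstack.engine.subsystem.api.storage;

public class HostScope extends AbstractScope {
    private ScopeType type = ScopeType.HOST;
    private Long hostId;

    public HostScope(Long hostId) {
        this.hostId = hostId;
    }

    @Override
    public ScopeType getScopeType() {
        return type;
    }

    @Override
    public Long getScopeId() {
        return hostId; // this is what AbstractScope.isSameScope() compares
    }
}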

View File

@@ -19,7 +19,7 @@
package org.apache.cloudstack.engine.subsystem.api.storage;
-public class ZoneScope implements Scope {
+public class ZoneScope extends AbstractScope {
private ScopeType type = ScopeType.ZONE;
private Long zoneId;

View File

@@ -0,0 +1,59 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.engine.subsystem.api.storage;
import org.junit.Assert;
import org.junit.Test;
public class ScopeTest {
@Test
public void testZoneScope() {
ZoneScope zoneScope = new ZoneScope(1L);
ZoneScope zoneScope2 = new ZoneScope(1L);
Assert.assertTrue(zoneScope.isSameScope(zoneScope2));
ZoneScope zoneScope3 = new ZoneScope(2L);
Assert.assertFalse(zoneScope.isSameScope(zoneScope3));
}
@Test
public void testClusterScope() {
ClusterScope clusterScope = new ClusterScope(1L, 1L, 1L);
ClusterScope clusterScope2 = new ClusterScope(1L, 1L, 1L);
Assert.assertTrue(clusterScope.isSameScope(clusterScope2));
ClusterScope clusterScope3 = new ClusterScope(2L, 2L, 1L);
Assert.assertFalse(clusterScope.isSameScope(clusterScope3));
}
@Test
public void testHostScope() {
HostScope hostScope = new HostScope(1L);
HostScope hostScope2 = new HostScope(1L);
HostScope hostScope3 = new HostScope(2L);
Assert.assertTrue(hostScope.isSameScope(hostScope2));
Assert.assertFalse(hostScope.isSameScope(hostScope3));
}
}

View File

@@ -29,6 +29,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
+import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
@@ -48,9 +49,11 @@ import org.apache.log4j.Logger;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.storage.VMTemplateStoragePoolVO;
import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.VMTemplatePoolDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.utils.component.ComponentContext;
@@ -74,6 +77,8 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore {
protected DataStoreProvider provider;
@Inject
VMTemplatePoolDao templatePoolDao;
+@Inject
+StoragePoolHostDao poolHostDao;
private VolumeDao volumeDao;
@@ -152,6 +157,12 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore {
vo.getDataCenterId());
} else if (vo.getScope() == ScopeType.ZONE) {
return new ZoneScope(vo.getDataCenterId());
+} else if (vo.getScope() == ScopeType.HOST) {
+List<StoragePoolHostVO> poolHosts = poolHostDao.listByPoolId(vo.getId());
+if (poolHosts.size() > 0) {
+return new HostScope(poolHosts.get(0).getHostId());
+}
+s_logger.debug("Can't find a host entry for local storage pool: " + vo.getId());
}
return null;
}
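The practical effect of the new HOST branch above is that a local storage pool now reports the host it is bound to as its scope, so it can be compared against zone- and cluster-wide stores in the same way. A hedged sketch of that comparison (the helper and class names are made up; dataStoreMgr, getPrimaryDataStore() and getScope() are used as in this commit, and the DataStoreManager type of the manager field is an assumption):

import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;

import com.cloud.storage.VolumeVO;

public class ScopeCheckSketch {
    // True when the two volumes' primary stores share the same storage scope,
    // i.e. the case in which this commit allows an attach without moving the volume.
    static boolean onSameScope(DataStoreManager dataStoreMgr, VolumeVO rootVol, VolumeVO dataVol) {
        Scope rootScope = dataStoreMgr.getPrimaryDataStore(rootVol.getPoolId()).getScope();
        Scope dataScope = dataStoreMgr.getPrimaryDataStore(dataVol.getPoolId()).getScope();
        if (rootScope == null || dataScope == null) {
            // e.g. a local pool with no row in the pool-host table, as logged above
            return false;
        }
        return rootScope.isSameScope(dataScope);
    }
}

The real check in VolumeManagerImpl.needMoveVolume() below is stricter: a scope-type mismatch is rejected outright with a CloudRuntimeException instead of being treated as a move.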

View File

@@ -48,6 +48,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManag
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
@@ -1439,64 +1440,26 @@
}
private boolean needMoveVolume(VolumeVO rootVolumeOfVm, VolumeInfo volume) {
-StoragePoolVO vmRootVolumePool = _storagePoolDao
-.findById(rootVolumeOfVm.getPoolId());
-DiskOfferingVO volumeDiskOffering = _diskOfferingDao
-.findById(volume.getDiskOfferingId());
-String[] volumeTags = volumeDiskOffering.getTagsArray();
-boolean isVolumeOnSharedPool = !volumeDiskOffering
-.getUseLocalStorage();
-StoragePoolVO sourcePool = _storagePoolDao.findById(volume
-.getPoolId());
-List<StoragePoolVO> matchingVMPools = _storagePoolDao
-.findPoolsByTags(vmRootVolumePool.getDataCenterId(),
-vmRootVolumePool.getPodId(),
-vmRootVolumePool.getClusterId(), volumeTags
-);
+DataStore storeForRootVol = this.dataStoreMgr.getPrimaryDataStore(rootVolumeOfVm.getPoolId());
+DataStore storeForDataVol = this.dataStoreMgr.getPrimaryDataStore(volume.getPoolId());
-boolean moveVolumeNeeded = true;
-if (matchingVMPools.size() == 0) {
-String poolType;
-if (vmRootVolumePool.getClusterId() != null) {
-poolType = "cluster";
-} else if (vmRootVolumePool.getPodId() != null) {
-poolType = "pod";
-} else {
-poolType = "zone";
-}
-throw new CloudRuntimeException(
-"There are no storage pools in the VM's " + poolType
-+ " with all of the volume's tags ("
-+ volumeDiskOffering.getTags() + ").");
-} else {
-long sourcePoolId = sourcePool.getId();
-Long sourcePoolDcId = sourcePool.getDataCenterId();
-Long sourcePoolPodId = sourcePool.getPodId();
-Long sourcePoolClusterId = sourcePool.getClusterId();
-for (StoragePoolVO vmPool : matchingVMPools) {
-long vmPoolId = vmPool.getId();
-Long vmPoolDcId = vmPool.getDataCenterId();
-Long vmPoolPodId = vmPool.getPodId();
-Long vmPoolClusterId = vmPool.getClusterId();
-// Moving a volume is not required if storage pools belongs
-// to same cluster in case of shared volume or
-// identical storage pool in case of local
-if (sourcePoolDcId == vmPoolDcId
-&& sourcePoolPodId == vmPoolPodId
-&& sourcePoolClusterId == vmPoolClusterId
-&& (isVolumeOnSharedPool || sourcePoolId == vmPoolId)) {
-moveVolumeNeeded = false;
-break;
-}
-}
-}
+Scope storeForRootStoreScope = storeForRootVol.getScope();
+if (storeForRootStoreScope == null) {
+throw new CloudRuntimeException("Can't get scope of data store: " + storeForRootVol.getId());
+}
-return moveVolumeNeeded;
+Scope storeForDataStoreScope = storeForDataVol.getScope();
+if (storeForDataStoreScope == null) {
+throw new CloudRuntimeException("Can't get scope of data store: " + storeForDataVol.getId());
+}
+if (storeForRootStoreScope.getScopeType() != storeForDataStoreScope.getScopeType()) {
+throw new CloudRuntimeException("Can't move volume between scope: " + storeForDataStoreScope.getScopeType() + " and " + storeForRootStoreScope.getScopeType());
+}
+// A move is needed only when the two stores are not in the same scope.
+return !storeForRootStoreScope.isSameScope(storeForDataStoreScope);
}
private VolumeVO sendAttachVolumeCommand(UserVmVO vm, VolumeVO volume, Long deviceId) {
String errorMsg = "Failed to attach volume: " + volume.getName()
+ " to VM: " + vm.getHostName();