mirror of
https://github.com/apache/cloudstack.git
synced 2025-10-26 08:42:29 +01:00
add xenserver 6.2.0 hotfix support, to optimize vdi copy
add xenserver hot fix Conflicts: api/src/com/cloud/vm/VirtualMachineName.java core/src/com/cloud/host/HostInfo.java core/src/org/apache/cloudstack/storage/to/SnapshotObjectTO.java deps/XenServerJava/src/com/xensource/xenapi/VDI.java engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java engine/storage/src/org/apache/cloudstack/storage/image/db/SnapshotDataStoreDaoImpl.java plugins/hypervisors/xen/src/com/cloud/hypervisor/XenServerGuru.java plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerPoolVms.java plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageProcessor.java server/src/com/cloud/configuration/Config.java
This commit is contained in:
parent
c40d03b417
commit
8caf52c6bc
@ -26,6 +26,19 @@ import com.cloud.dc.Vlan;
|
||||
public class VirtualMachineName {
|
||||
public static final String SEPARATOR = "-";
|
||||
|
||||
public static boolean isValidCloudStackVmName(String name, String instance) {
|
||||
String[] parts = name.split(SEPARATOR);
|
||||
if (parts.length <= 1) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!parts[parts.length - 1].equals(instance)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
public static String getVnetName(long vnetId) {
|
||||
StringBuilder vnet = new StringBuilder();
|
||||
Formatter formatter = new Formatter(vnet);
|
||||
|
||||
@ -21,5 +21,6 @@ public final class HostInfo {
|
||||
public static final String HOST_OS = "Host.OS"; //Fedora, XenServer, Ubuntu, etc
|
||||
public static final String HOST_OS_VERSION = "Host.OS.Version"; //12, 5.5, 9.10, etc
|
||||
public static final String HOST_OS_KERNEL_VERSION = "Host.OS.Kernel.Version"; //linux-2.6.31 etc
|
||||
|
||||
public static final String XS620_SNAPSHOT_HOTFIX = "xs620_snapshot_hotfix";
|
||||
}
|
||||
|
||||
|
||||
@ -16,6 +16,9 @@
|
||||
// under the License.
|
||||
package org.apache.cloudstack.storage.command;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import com.cloud.agent.api.Command;
|
||||
import com.cloud.agent.api.to.DataTO;
|
||||
|
||||
@ -24,6 +27,7 @@ public final class CopyCommand extends Command implements StorageSubSystemComman
|
||||
private DataTO destTO;
|
||||
private DataTO cacheTO;
|
||||
boolean executeInSequence = false;
|
||||
Map<String, String> options = new HashMap<String, String>();
|
||||
|
||||
public CopyCommand(DataTO srcData, DataTO destData, int timeout, boolean executeInSequence) {
|
||||
super();
|
||||
@ -66,4 +70,12 @@ public final class CopyCommand extends Command implements StorageSubSystemComman
|
||||
return this.getWait() * 1000;
|
||||
}
|
||||
|
||||
public void setOptions(Map<String, String> options) {
|
||||
this.options = options;
|
||||
}
|
||||
|
||||
public Map<String, String> getOptions() {
|
||||
return options;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@ -16,8 +16,11 @@
|
||||
// under the License.
|
||||
package org.apache.cloudstack.storage.to;
|
||||
|
||||
import java.util.ArrayList;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.apache.commons.lang.ArrayUtils;
|
||||
|
||||
import com.cloud.agent.api.to.DataObjectType;
|
||||
import com.cloud.agent.api.to.DataStoreTO;
|
||||
@ -34,6 +37,8 @@ public class SnapshotObjectTO implements DataTO {
|
||||
private HypervisorType hypervisorType;
|
||||
private long id;
|
||||
private boolean quiescevm;
|
||||
private String[] parents;
|
||||
|
||||
|
||||
public SnapshotObjectTO() {
|
||||
|
||||
@ -49,9 +54,17 @@ public class SnapshotObjectTO implements DataTO {
|
||||
}
|
||||
|
||||
SnapshotInfo parentSnapshot = snapshot.getParent();
|
||||
ArrayList<String> parentsArry = new ArrayList<String>();
|
||||
if (parentSnapshot != null) {
|
||||
this.parentSnapshotPath = parentSnapshot.getPath();
|
||||
while(parentSnapshot != null) {
|
||||
parentsArry.add(parentSnapshot.getPath());
|
||||
parentSnapshot = parentSnapshot.getParent();
|
||||
}
|
||||
parents = parentsArry.toArray(new String[parentsArry.size()]);
|
||||
ArrayUtils.reverse(parents);
|
||||
}
|
||||
|
||||
this.dataStore = snapshot.getDataStore().getTO();
|
||||
this.setName(snapshot.getName());
|
||||
this.hypervisorType = snapshot.getHypervisorType();
|
||||
@ -139,6 +152,10 @@ public class SnapshotObjectTO implements DataTO {
|
||||
this.quiescevm = quiescevm;
|
||||
}
|
||||
|
||||
public String[] getParents() {
|
||||
return parents;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return new StringBuilder("SnapshotTO[datastore=").append(dataStore).append("|volume=").append(volume).append("|path").append(path).append("]").toString();
|
||||
|
||||
@ -300,4 +300,19 @@ public class Event extends XenAPIObject {
|
||||
return Types.toString(result);
|
||||
}
|
||||
|
||||
public static Map properFrom(Connection c, Set<String> classes, String token, Double timeout) throws BadServerResponse, XenAPIException, XmlRpcException,
|
||||
Types.SessionNotRegistered,
|
||||
Types.EventsLost {
|
||||
String method_call = "event.from";
|
||||
String session = c.getSessionReference();
|
||||
Object[] method_params = {Marshalling.toXMLRPC(session), Marshalling.toXMLRPC(classes), Marshalling.toXMLRPC(token), Marshalling.toXMLRPC(timeout)};
|
||||
Map response = c.dispatch(method_call, method_params);
|
||||
Object result = response.get("Value");
|
||||
Map value = (Map)result;
|
||||
Map<String, Object> from = new HashMap<String, Object>();
|
||||
from.put("token", value.get("token"));
|
||||
from.put("events", Types.toSetOfEventRecord(value.get("events")));
|
||||
return from;
|
||||
}
|
||||
|
||||
}
|
||||
@ -1279,6 +1279,17 @@ public class Types
|
||||
String p1 = ErrorDescription.length > 1 ? ErrorDescription[1] : "";
|
||||
throw new Types.CrlNameInvalid(p1);
|
||||
}
|
||||
if (ErrorDescription[0].equals("VDI_NOT_SPARSE"))
|
||||
{
|
||||
String p1 = ErrorDescription.length > 1 ? ErrorDescription[1] : "";
|
||||
throw new Types.VdiNotSparse(p1);
|
||||
}
|
||||
if (ErrorDescription[0].equals("VDI_TOO_SMALL"))
|
||||
{
|
||||
String p1 = ErrorDescription.length > 1 ? ErrorDescription[1] : "";
|
||||
String p2 = ErrorDescription.length > 2 ? ErrorDescription[2] : "";
|
||||
throw new Types.VdiTooSmall(p1, p2);
|
||||
}
|
||||
if (ErrorDescription[0].equals("HOST_POWER_ON_MODE_DISABLED"))
|
||||
{
|
||||
throw new Types.HostPowerOnModeDisabled();
|
||||
@ -7822,6 +7833,45 @@ public class Types
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* The VDI is too small. Please resize it to at least the minimum size.
|
||||
*/
|
||||
public static class VdiTooSmall extends XenAPIException {
|
||||
public final String vdi;
|
||||
public final String minimumSize;
|
||||
|
||||
/**
|
||||
* Create a new VdiTooSmall
|
||||
*
|
||||
* @param vdi
|
||||
* @param minimumSize
|
||||
*/
|
||||
public VdiTooSmall(String vdi, String minimumSize) {
|
||||
super("The VDI is too small. Please resize it to at least the minimum size.");
|
||||
this.vdi = vdi;
|
||||
this.minimumSize = minimumSize;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* The VDI is not stored using a sparse format. It is not possible to query and manipulate only the changed blocks (or 'block differences' or 'disk deltas') between two VDIs. Please select a VDI which uses a sparse-aware technology such as VHD.
|
||||
*/
|
||||
public static class VdiNotSparse extends XenAPIException {
|
||||
public final String vdi;
|
||||
|
||||
/**
|
||||
* Create a new VdiNotSparse
|
||||
*
|
||||
* @param vdi
|
||||
*/
|
||||
public VdiNotSparse(String vdi) {
|
||||
super("The VDI is not stored using a sparse format. It is not possible to query and manipulate only the changed blocks (or 'block differences' or 'disk deltas') between two VDIs. Please select a VDI which uses a sparse-aware technology such as VHD.");
|
||||
this.vdi = vdi;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* The hosts in this pool are not homogeneous.
|
||||
*/
|
||||
|
||||
@ -1608,6 +1608,29 @@ public class VDI extends XenAPIObject {
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Copy either a full VDI or the block differences between two VDIs into either a fresh VDI or an existing VDI.
|
||||
*
|
||||
* @param sr The destination SR (only required if the destination VDI is not specified
|
||||
* @param baseVdi The base VDI (only required if copying only changed blocks, by default all blocks will be copied)
|
||||
* @param intoVdi The destination VDI to copy blocks into (if omitted then a destination SR must be provided and a fresh VDI will be created)
|
||||
* @return Task
|
||||
*/
|
||||
public Task copyAsync2(Connection c, SR sr, VDI baseVdi, VDI intoVdi) throws
|
||||
BadServerResponse,
|
||||
XenAPIException,
|
||||
XmlRpcException,
|
||||
Types.VdiReadonly,
|
||||
Types.VdiTooSmall,
|
||||
Types.VdiNotSparse {
|
||||
String method_call = "Async.VDI.copy";
|
||||
String session = c.getSessionReference();
|
||||
Object[] method_params = {Marshalling.toXMLRPC(session), Marshalling.toXMLRPC(this.ref), Marshalling.toXMLRPC(sr), Marshalling.toXMLRPC(baseVdi), Marshalling.toXMLRPC(intoVdi)};
|
||||
Map response = c.dispatch(method_call, method_params);
|
||||
Object result = response.get("Value");
|
||||
return Types.toTask(result);
|
||||
}
|
||||
|
||||
/**
|
||||
* Make a fresh VDI in the specified SR and copy the supplied VDI's data to the new disk
|
||||
*
|
||||
|
||||
@ -30,4 +30,6 @@ public interface EndPointSelector {
|
||||
List<EndPoint> selectAll(DataStore store);
|
||||
|
||||
EndPoint select(Scope scope, Long storeId);
|
||||
|
||||
EndPoint selectHypervisorHost(Scope scope);
|
||||
}
|
||||
|
||||
@ -397,10 +397,6 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
||||
ServiceOffering offering, DiskOffering diskOffering, List<StoragePool> avoids, long size, HypervisorType hyperType) {
|
||||
StoragePool pool = null;
|
||||
|
||||
if (diskOffering != null && diskOffering.isCustomized()) {
|
||||
diskOffering.setDiskSize(size);
|
||||
}
|
||||
|
||||
DiskProfile dskCh = null;
|
||||
if (volume.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO != template.getFormat()) {
|
||||
dskCh = createDiskCharacteristics(volume, template, dc, offering);
|
||||
@ -408,6 +404,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
||||
dskCh = createDiskCharacteristics(volume, template, dc, diskOffering);
|
||||
}
|
||||
|
||||
if (diskOffering != null && diskOffering.isCustomized()) {
|
||||
dskCh.setSize(size);
|
||||
}
|
||||
|
||||
dskCh.setHyperType(hyperType);
|
||||
|
||||
final HashSet<StoragePool> avoidPools = new HashSet<StoragePool>(avoids);
|
||||
|
||||
@ -52,4 +52,6 @@ public interface SnapshotDataStoreDao extends GenericDao<SnapshotDataStoreVO, Lo
|
||||
List<SnapshotDataStoreVO> listOnCache(long snapshotId);
|
||||
|
||||
void updateStoreRoleToCache(long storeId);
|
||||
|
||||
SnapshotDataStoreVO findLatestSnapshotForVolume(Long volumeId, DataStoreRole role);
|
||||
}
|
||||
|
||||
@ -18,13 +18,11 @@
|
||||
*/
|
||||
package org.apache.cloudstack.storage.motion;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy;
|
||||
@ -46,6 +44,8 @@ import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
|
||||
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
|
||||
import org.apache.cloudstack.storage.command.CopyCommand;
|
||||
import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.storage.MigrateVolumeAnswer;
|
||||
@ -470,6 +470,14 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
|
||||
int _backupsnapshotwait = NumbersUtil.parseInt(value, Integer.parseInt(Config.BackupSnapshotWait.getDefaultValue()));
|
||||
|
||||
DataObject cacheData = null;
|
||||
SnapshotInfo snapshotInfo = (SnapshotInfo)srcData;
|
||||
Object payload = snapshotInfo.getPayload();
|
||||
Boolean fullSnapshot = true;
|
||||
if (payload != null) {
|
||||
fullSnapshot = (Boolean)payload;
|
||||
}
|
||||
Map<String, String> options = new HashMap<String, String>();
|
||||
options.put("fullSnapshot", fullSnapshot.toString());
|
||||
Answer answer = null;
|
||||
try {
|
||||
if (needCacheStorage(srcData, destData)) {
|
||||
@ -478,6 +486,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
|
||||
|
||||
CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), _backupsnapshotwait, VirtualMachineManager.ExecuteInSequence.value());
|
||||
cmd.setCacheTO(cacheData.getTO());
|
||||
cmd.setOptions(options);
|
||||
EndPoint ep = selector.select(srcData, destData);
|
||||
if (ep == null) {
|
||||
String errMsg = "No remote endpoint to send command, check if host or ssvm is down?";
|
||||
@ -488,6 +497,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
|
||||
}
|
||||
} else {
|
||||
CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), _backupsnapshotwait, VirtualMachineManager.ExecuteInSequence.value());
|
||||
cmd.setOptions(options);
|
||||
EndPoint ep = selector.select(srcData, destData);
|
||||
if (ep == null) {
|
||||
String errMsg = "No remote endpoint to send command, check if host or ssvm is down?";
|
||||
|
||||
@ -22,7 +22,6 @@ import javax.inject.Inject;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
|
||||
@ -104,29 +103,30 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase {
|
||||
|
||||
// determine full snapshot backup or not
|
||||
|
||||
boolean fullBackup = false;
|
||||
|
||||
if (parentSnapshot != null) {
|
||||
int _deltaSnapshotMax = NumbersUtil.parseInt(configDao.getValue("snapshot.delta.max"), SnapshotManager.DELTAMAX);
|
||||
boolean fullBackup = true;
|
||||
SnapshotDataStoreVO parentSnapshotOnBackupStore = snapshotStoreDao.findLatestSnapshotForVolume(snapshot.getVolumeId(), DataStoreRole.Image);
|
||||
if (parentSnapshotOnBackupStore != null) {
|
||||
int _deltaSnapshotMax = NumbersUtil.parseInt(configDao.getValue("snapshot.delta.max"),
|
||||
SnapshotManager.DELTAMAX);
|
||||
int deltaSnap = _deltaSnapshotMax;
|
||||
|
||||
int i;
|
||||
SnapshotDataStoreVO parentSnapshotOnBackupStore = null;
|
||||
for (i = 1; i < deltaSnap; i++) {
|
||||
parentSnapshotOnBackupStore = snapshotStoreDao.findBySnapshot(parentSnapshot.getId(), DataStoreRole.Image);
|
||||
if (parentSnapshotOnBackupStore == null) {
|
||||
break;
|
||||
}
|
||||
Long prevBackupId = parentSnapshotOnBackupStore.getParentSnapshotId();
|
||||
|
||||
for (i = 1; i < deltaSnap; i++) {
|
||||
Long prevBackupId = parentSnapshotOnBackupStore.getParentSnapshotId();
|
||||
if (prevBackupId == 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
parentSnapshotOnBackupStore = snapshotStoreDao.findBySnapshot(prevBackupId, DataStoreRole.Image);
|
||||
if (parentSnapshotOnBackupStore == null) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (i >= deltaSnap) {
|
||||
fullBackup = true;
|
||||
} else {
|
||||
fullBackup = false;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -27,6 +27,7 @@ import java.util.List;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import com.cloud.utils.db.Transaction;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
@ -60,6 +61,7 @@ public class DefaultEndPointSelector implements EndPointSelector {
|
||||
private final String findOneHostOnPrimaryStorage =
|
||||
"select h.id from host h, storage_pool_host_ref s where h.status = 'Up' and h.type = 'Routing' and h.resource_state = 'Enabled' and"
|
||||
+ " h.id = s.host_id and s.pool_id = ? ";
|
||||
private String findOneHypervisorHostInScope = "select h.id from host h where h.status = 'Up' and h.hypervisor_type is not null ";
|
||||
|
||||
protected boolean moveBetweenPrimaryImage(DataStore srcStore, DataStore destStore) {
|
||||
DataStoreRole srcRole = srcStore.getRole();
|
||||
@ -292,4 +294,51 @@ public class DefaultEndPointSelector implements EndPointSelector {
|
||||
}
|
||||
return endPoints;
|
||||
}
|
||||
|
||||
@Override
|
||||
public EndPoint selectHypervisorHost(Scope scope) {
|
||||
StringBuilder sbuilder = new StringBuilder();
|
||||
sbuilder.append(findOneHypervisorHostInScope);
|
||||
if (scope.getScopeType() == ScopeType.ZONE) {
|
||||
sbuilder.append(" and h.data_center_id = ");
|
||||
sbuilder.append(scope.getScopeId());
|
||||
} else if (scope.getScopeType() == ScopeType.CLUSTER) {
|
||||
sbuilder.append(" and h.cluster_id = ");
|
||||
sbuilder.append(scope.getScopeId());
|
||||
}
|
||||
sbuilder.append(" ORDER by rand() limit 1");
|
||||
|
||||
String sql = sbuilder.toString();
|
||||
PreparedStatement pstmt = null;
|
||||
ResultSet rs = null;
|
||||
HostVO host = null;
|
||||
TransactionLegacy txn = TransactionLegacy.currentTxn();
|
||||
|
||||
try {
|
||||
pstmt = txn.prepareStatement(sql);
|
||||
rs = pstmt.executeQuery();
|
||||
while (rs.next()) {
|
||||
long id = rs.getLong(1);
|
||||
host = hostDao.findById(id);
|
||||
}
|
||||
} catch (SQLException e) {
|
||||
s_logger.warn("can't find endpoint", e);
|
||||
} finally {
|
||||
try {
|
||||
if (rs != null) {
|
||||
rs.close();
|
||||
}
|
||||
if (pstmt != null) {
|
||||
pstmt.close();
|
||||
}
|
||||
} catch (SQLException e) {
|
||||
}
|
||||
}
|
||||
|
||||
if (host == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return RemoteHostEndPoint.getHypervisorHostEndPoint(host);
|
||||
}
|
||||
}
|
||||
|
||||
@ -54,8 +54,13 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
|
||||
private SearchBuilder<SnapshotDataStoreVO> snapshotSearch;
|
||||
private SearchBuilder<SnapshotDataStoreVO> storeSnapshotSearch;
|
||||
private SearchBuilder<SnapshotDataStoreVO> snapshotIdSearch;
|
||||
|
||||
private final String parentSearch = "select store_id, store_role, snapshot_id from cloud.snapshot_store_ref where store_id = ? "
|
||||
+ " and store_role = ? and volume_id = ? and state = 'Ready'" + " order by created DESC " + " limit 1";
|
||||
private final String findLatestSnapshot = "select store_id, store_role, snapshot_id from cloud.snapshot_store_ref where " +
|
||||
" store_role = ? and volume_id = ? and state = 'Ready'" +
|
||||
" order by created DESC " +
|
||||
" limit 1";
|
||||
|
||||
@Override
|
||||
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
|
||||
@ -203,6 +208,33 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
|
||||
return findOneBy(sc);
|
||||
}
|
||||
|
||||
@Override
|
||||
public SnapshotDataStoreVO findLatestSnapshotForVolume(Long volumeId, DataStoreRole role) {
|
||||
TransactionLegacy txn = TransactionLegacy.currentTxn();
|
||||
PreparedStatement pstmt = null;
|
||||
ResultSet rs = null;
|
||||
try {
|
||||
pstmt = txn.prepareStatement(findLatestSnapshot);
|
||||
pstmt.setString(1, role.toString());
|
||||
pstmt.setLong(2, volumeId);
|
||||
rs = pstmt.executeQuery();
|
||||
while (rs.next()) {
|
||||
long sid = rs.getLong(1);
|
||||
long snid = rs.getLong(3);
|
||||
return findByStoreSnapshot(role, sid, snid);
|
||||
}
|
||||
} catch (SQLException e) {
|
||||
s_logger.debug("Failed to find parent snapshot: " + e.toString());
|
||||
} finally {
|
||||
try {
|
||||
if (pstmt != null)
|
||||
pstmt.close();
|
||||
} catch (SQLException e) {
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
@DB
|
||||
public SnapshotDataStoreVO findParent(DataStoreRole role, Long storeId, Long volumeId) {
|
||||
|
||||
@ -19,17 +19,30 @@ package com.cloud.hypervisor;
|
||||
import javax.ejb.Local;
|
||||
import javax.inject.Inject;
|
||||
|
||||
import com.cloud.agent.api.to.VirtualMachineTO;
|
||||
import com.cloud.agent.api.Command;
|
||||
import com.cloud.agent.api.to.*;
|
||||
import com.cloud.host.HostInfo;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.host.dao.HostDao;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.storage.GuestOSVO;
|
||||
import com.cloud.storage.dao.GuestOSDao;
|
||||
import com.cloud.template.VirtualMachineTemplate.BootloaderType;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
|
||||
import org.apache.cloudstack.storage.command.CopyCommand;
|
||||
|
||||
@Local(value = HypervisorGuru.class)
|
||||
public class XenServerGuru extends HypervisorGuruBase implements HypervisorGuru {
|
||||
@Inject
|
||||
GuestOSDao _guestOsDao;
|
||||
@Inject
|
||||
EndPointSelector endPointSelector;
|
||||
@Inject
|
||||
HostDao hostDao;
|
||||
|
||||
protected XenServerGuru() {
|
||||
super();
|
||||
@ -60,4 +73,29 @@ public class XenServerGuru extends HypervisorGuruBase implements HypervisorGuru
|
||||
public boolean trackVmHostChange() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Pair<Boolean, Long> getCommandHostDelegation(long hostId, Command cmd) {
|
||||
if (cmd instanceof CopyCommand) {
|
||||
CopyCommand cpyCommand = (CopyCommand)cmd;
|
||||
DataTO srcData = cpyCommand.getSrcTO();
|
||||
DataTO destData = cpyCommand.getDestTO();
|
||||
|
||||
if (srcData.getObjectType() == DataObjectType.SNAPSHOT && destData.getObjectType() == DataObjectType.TEMPLATE) {
|
||||
DataStoreTO srcStore = srcData.getDataStore();
|
||||
DataStoreTO destStore = destData.getDataStore();
|
||||
if (srcStore instanceof NfsTO && destStore instanceof NfsTO) {
|
||||
HostVO host = hostDao.findById(hostId);
|
||||
EndPoint ep = endPointSelector.selectHypervisorHost(new ZoneScope(host.getDataCenterId()));
|
||||
host = hostDao.findById(ep.getId());
|
||||
hostDao.loadDetails(host);
|
||||
boolean snapshotHotFix = Boolean.parseBoolean(host.getDetail(HostInfo.XS620_SNAPSHOT_HOTFIX));
|
||||
if (snapshotHotFix) {
|
||||
return new Pair<Boolean, Long>(Boolean.TRUE, new Long(ep.getId()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return new Pair<Boolean, Long>(Boolean.FALSE, new Long(hostId));
|
||||
}
|
||||
}
|
||||
|
||||
@ -34,13 +34,6 @@ import javax.persistence.EntityExistsException;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.apache.xmlrpc.XmlRpcException;
|
||||
|
||||
import com.xensource.xenapi.Connection;
|
||||
import com.xensource.xenapi.Host;
|
||||
import com.xensource.xenapi.Pool;
|
||||
import com.xensource.xenapi.Session;
|
||||
import com.xensource.xenapi.Types.SessionAuthenticationFailed;
|
||||
import com.xensource.xenapi.Types.XenAPIException;
|
||||
|
||||
import com.cloud.agent.AgentManager;
|
||||
import com.cloud.agent.Listener;
|
||||
import com.cloud.agent.api.AgentControlAnswer;
|
||||
@ -82,6 +75,7 @@ import com.cloud.hypervisor.xen.resource.XenServer602Resource;
|
||||
import com.cloud.hypervisor.xen.resource.XenServer610Resource;
|
||||
import com.cloud.hypervisor.xen.resource.XenServer620Resource;
|
||||
import com.cloud.hypervisor.xen.resource.XenServerConnectionPool;
|
||||
import com.cloud.hypervisor.xen.resource.Xenserver625Resource;
|
||||
import com.cloud.resource.Discoverer;
|
||||
import com.cloud.resource.DiscovererBase;
|
||||
import com.cloud.resource.ResourceManager;
|
||||
@ -98,6 +92,12 @@ import com.cloud.utils.db.QueryBuilder;
|
||||
import com.cloud.utils.db.SearchCriteria.Op;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.utils.exception.HypervisorVersionChangedException;
|
||||
import com.xensource.xenapi.Connection;
|
||||
import com.xensource.xenapi.Host;
|
||||
import com.xensource.xenapi.Pool;
|
||||
import com.xensource.xenapi.Session;
|
||||
import com.xensource.xenapi.Types.SessionAuthenticationFailed;
|
||||
import com.xensource.xenapi.Types.XenAPIException;
|
||||
|
||||
@Local(value = Discoverer.class)
|
||||
public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter {
|
||||
@ -112,6 +112,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
|
||||
protected String _guestNic;
|
||||
protected boolean _setupMultipath;
|
||||
protected String _instance;
|
||||
private String xs620snapshothotfix = "Xenserver-Vdi-Copy-HotFix";
|
||||
|
||||
@Inject
|
||||
protected AlertManager _alertMgr;
|
||||
@ -149,6 +150,11 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
|
||||
}
|
||||
}
|
||||
|
||||
protected boolean xenserverHotFixEnabled() {
|
||||
//temporary fix, we should call xenserver api to get the hot fix is enabled or not.
|
||||
return Boolean.parseBoolean(_configDao.getValue(Config.XenServerHotFix.name()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<? extends ServerResource, Map<String, String>>
|
||||
find(long dcId, Long podId, Long clusterId, URI url, String username, String password, List<String> hostTags) throws DiscoveryException {
|
||||
@ -274,6 +280,8 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
|
||||
hostOS = record.softwareVersion.get("platform_name");
|
||||
}
|
||||
|
||||
//Boolean xs620hotfix = record.tags.contains(xs620snapshothotfix);
|
||||
Boolean xs620hotfix = xenserverHotFixEnabled();
|
||||
String hostOSVer = prodVersion;
|
||||
String hostKernelVer = record.softwareVersion.get("linux");
|
||||
|
||||
@ -303,6 +311,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
|
||||
details.put(HostInfo.HOST_OS_VERSION, hostOSVer);
|
||||
details.put(HostInfo.HOST_OS_KERNEL_VERSION, hostKernelVer);
|
||||
details.put(HostInfo.HYPERVISOR_VERSION, xenVersion);
|
||||
details.put(HostInfo.XS620_SNAPSHOT_HOTFIX, xs620hotfix.toString());
|
||||
|
||||
String privateNetworkLabel = _networkMgr.getDefaultManagementTrafficLabel(dcId, HypervisorType.XenServer);
|
||||
String storageNetworkLabel = _networkMgr.getDefaultStorageTrafficLabel(dcId, HypervisorType.XenServer);
|
||||
@ -452,9 +461,21 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
|
||||
return new XenServer602Resource();
|
||||
else if (prodBrand.equals("XenServer") && prodVersion.equals("6.1.0"))
|
||||
return new XenServer610Resource();
|
||||
else if (prodBrand.equals("XenServer") && prodVersion.equals("6.2.0"))
|
||||
return new XenServer620Resource();
|
||||
else if (prodBrand.equals("XenServer") && prodVersion.equals("5.6.100")) {
|
||||
else if (prodBrand.equals("XenServer") && prodVersion.equals("6.2.0")) {
|
||||
/*
|
||||
Set<String> tags =record.tags;
|
||||
if (tags.contains(xs620snapshothotfix)) {
|
||||
return new Xenserver625Resource();
|
||||
} else {
|
||||
return new XenServer620Resource();
|
||||
}*/
|
||||
boolean hotfix = xenserverHotFixEnabled();
|
||||
if (hotfix) {
|
||||
return new Xenserver625Resource();
|
||||
} else {
|
||||
return new XenServer620Resource();
|
||||
}
|
||||
} else if (prodBrand.equals("XenServer") && prodVersion.equals("5.6.100")) {
|
||||
String prodVersionTextShort = record.softwareVersion.get("product_version_text_short").trim();
|
||||
if ("5.6 SP2".equals(prodVersionTextShort)) {
|
||||
return new XenServer56SP2Resource();
|
||||
@ -607,7 +628,12 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
|
||||
} else if (prodBrand.equals("XenServer") && prodVersion.equals("6.1.0")) {
|
||||
resource = XenServer610Resource.class.getName();
|
||||
} else if (prodBrand.equals("XenServer") && prodVersion.equals("6.2.0")) {
|
||||
resource = XenServer620Resource.class.getName();
|
||||
String hotfix = details.get("Xenserer620HotFix");
|
||||
if (hotfix != null && hotfix.equalsIgnoreCase("Xenserver-Vdi-Copy-HotFix")) {
|
||||
resource = Xenserver625Resource.class.getName();
|
||||
} else {
|
||||
resource = XenServer620Resource.class.getName();
|
||||
}
|
||||
} else if (prodBrand.equals("XenServer") && prodVersion.equals("5.6.100")) {
|
||||
String prodVersionTextShort = details.get("product_version_text_short").trim();
|
||||
if ("5.6 SP2".equals(prodVersionTextShort)) {
|
||||
|
||||
@ -49,6 +49,9 @@ import javax.ejb.Local;
|
||||
import javax.naming.ConfigurationException;
|
||||
import javax.xml.parsers.DocumentBuilderFactory;
|
||||
|
||||
import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
|
||||
import org.apache.cloudstack.storage.to.TemplateObjectTO;
|
||||
import org.apache.cloudstack.storage.to.VolumeObjectTO;
|
||||
import org.apache.commons.codec.binary.Base64;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.apache.xmlrpc.XmlRpcException;
|
||||
@ -102,8 +105,6 @@ import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.AttachIsoCommand;
|
||||
import com.cloud.agent.api.AttachVolumeAnswer;
|
||||
import com.cloud.agent.api.AttachVolumeCommand;
|
||||
import com.cloud.agent.api.BackupSnapshotAnswer;
|
||||
import com.cloud.agent.api.BackupSnapshotCommand;
|
||||
import com.cloud.agent.api.BumpUpPriorityCommand;
|
||||
import com.cloud.agent.api.CheckHealthAnswer;
|
||||
import com.cloud.agent.api.CheckHealthCommand;
|
||||
@ -121,13 +122,9 @@ import com.cloud.agent.api.CleanupNetworkRulesCmd;
|
||||
import com.cloud.agent.api.ClusterSyncAnswer;
|
||||
import com.cloud.agent.api.ClusterSyncCommand;
|
||||
import com.cloud.agent.api.Command;
|
||||
import com.cloud.agent.api.CreatePrivateTemplateFromSnapshotCommand;
|
||||
import com.cloud.agent.api.CreatePrivateTemplateFromVolumeCommand;
|
||||
import com.cloud.agent.api.CreateStoragePoolCommand;
|
||||
import com.cloud.agent.api.CreateVMSnapshotAnswer;
|
||||
import com.cloud.agent.api.CreateVMSnapshotCommand;
|
||||
import com.cloud.agent.api.CreateVolumeFromSnapshotAnswer;
|
||||
import com.cloud.agent.api.CreateVolumeFromSnapshotCommand;
|
||||
import com.cloud.agent.api.DeleteStoragePoolCommand;
|
||||
import com.cloud.agent.api.DeleteVMSnapshotAnswer;
|
||||
import com.cloud.agent.api.DeleteVMSnapshotCommand;
|
||||
@ -147,8 +144,6 @@ import com.cloud.agent.api.HostStatsEntry;
|
||||
import com.cloud.agent.api.HostVmStateReportEntry;
|
||||
import com.cloud.agent.api.MaintainAnswer;
|
||||
import com.cloud.agent.api.MaintainCommand;
|
||||
import com.cloud.agent.api.ManageSnapshotAnswer;
|
||||
import com.cloud.agent.api.ManageSnapshotCommand;
|
||||
import com.cloud.agent.api.MigrateAnswer;
|
||||
import com.cloud.agent.api.MigrateCommand;
|
||||
import com.cloud.agent.api.ModifySshKeysCommand;
|
||||
@ -247,7 +242,6 @@ import com.cloud.agent.api.storage.CopyVolumeAnswer;
|
||||
import com.cloud.agent.api.storage.CopyVolumeCommand;
|
||||
import com.cloud.agent.api.storage.CreateAnswer;
|
||||
import com.cloud.agent.api.storage.CreateCommand;
|
||||
import com.cloud.agent.api.storage.CreatePrivateTemplateAnswer;
|
||||
import com.cloud.agent.api.storage.DestroyCommand;
|
||||
import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer;
|
||||
import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand;
|
||||
@ -262,10 +256,8 @@ import com.cloud.agent.api.to.IpAddressTO;
|
||||
import com.cloud.agent.api.to.NfsTO;
|
||||
import com.cloud.agent.api.to.NicTO;
|
||||
import com.cloud.agent.api.to.PortForwardingRuleTO;
|
||||
import com.cloud.agent.api.to.S3TO;
|
||||
import com.cloud.agent.api.to.StaticNatRuleTO;
|
||||
import com.cloud.agent.api.to.StorageFilerTO;
|
||||
import com.cloud.agent.api.to.SwiftTO;
|
||||
import com.cloud.agent.api.to.VirtualMachineTO;
|
||||
import com.cloud.agent.api.to.VolumeTO;
|
||||
import com.cloud.exception.InternalErrorException;
|
||||
@ -282,7 +274,6 @@ import com.cloud.network.rules.FirewallRule;
|
||||
import com.cloud.resource.ServerResource;
|
||||
import com.cloud.resource.hypervisor.HypervisorResource;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.Storage.ImageFormat;
|
||||
import com.cloud.storage.Storage.StoragePoolType;
|
||||
import com.cloud.storage.Volume;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
@ -303,6 +294,41 @@ import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachine.PowerState;
|
||||
import com.cloud.vm.VirtualMachine.State;
|
||||
import com.cloud.vm.snapshot.VMSnapshot;
|
||||
import com.google.gson.Gson;
|
||||
import com.trilead.ssh2.SCPClient;
|
||||
import com.xensource.xenapi.Bond;
|
||||
import com.xensource.xenapi.Connection;
|
||||
import com.xensource.xenapi.Console;
|
||||
import com.xensource.xenapi.Host;
|
||||
import com.xensource.xenapi.HostCpu;
|
||||
import com.xensource.xenapi.HostMetrics;
|
||||
import com.xensource.xenapi.Network;
|
||||
import com.xensource.xenapi.PBD;
|
||||
import com.xensource.xenapi.PIF;
|
||||
import com.xensource.xenapi.PIF.Record;
|
||||
import com.xensource.xenapi.Pool;
|
||||
import com.xensource.xenapi.SR;
|
||||
import com.xensource.xenapi.Session;
|
||||
import com.xensource.xenapi.Task;
|
||||
import com.xensource.xenapi.Types;
|
||||
import com.xensource.xenapi.Types.BadAsyncResult;
|
||||
import com.xensource.xenapi.Types.BadServerResponse;
|
||||
import com.xensource.xenapi.Types.ConsoleProtocol;
|
||||
import com.xensource.xenapi.Types.IpConfigurationMode;
|
||||
import com.xensource.xenapi.Types.OperationNotAllowed;
|
||||
import com.xensource.xenapi.Types.SrFull;
|
||||
import com.xensource.xenapi.Types.VbdType;
|
||||
import com.xensource.xenapi.Types.VmBadPowerState;
|
||||
import com.xensource.xenapi.Types.VmPowerState;
|
||||
import com.xensource.xenapi.Types.XenAPIException;
|
||||
import com.xensource.xenapi.VBD;
|
||||
import com.xensource.xenapi.VBDMetrics;
|
||||
import com.xensource.xenapi.VDI;
|
||||
import com.xensource.xenapi.VIF;
|
||||
import com.xensource.xenapi.VLAN;
|
||||
import com.xensource.xenapi.VM;
|
||||
import com.xensource.xenapi.VMGuestMetrics;
|
||||
import com.xensource.xenapi.XenAPIObject;
|
||||
|
||||
/**
|
||||
* CitrixResourceBase encapsulates the calls to the XenServer Xapi process
|
||||
@ -537,25 +563,13 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
} else if (clazz == ModifyStoragePoolCommand.class) {
|
||||
return execute((ModifyStoragePoolCommand)cmd);
|
||||
} else if (clazz == DeleteStoragePoolCommand.class) {
|
||||
return execute((DeleteStoragePoolCommand)cmd);
|
||||
} else if (clazz == CopyVolumeCommand.class) {
|
||||
return execute((CopyVolumeCommand)cmd);
|
||||
} else if (clazz == ResizeVolumeCommand.class) {
|
||||
return execute((ResizeVolumeCommand)cmd);
|
||||
return execute((DeleteStoragePoolCommand) cmd);
|
||||
}else if (clazz == ResizeVolumeCommand.class) {
|
||||
return execute((ResizeVolumeCommand) cmd);
|
||||
} else if (clazz == AttachVolumeCommand.class) {
|
||||
return execute((AttachVolumeCommand)cmd);
|
||||
} else if (clazz == AttachIsoCommand.class) {
|
||||
return execute((AttachIsoCommand)cmd);
|
||||
} else if (clazz == ManageSnapshotCommand.class) {
|
||||
return execute((ManageSnapshotCommand)cmd);
|
||||
} else if (clazz == BackupSnapshotCommand.class) {
|
||||
return execute((BackupSnapshotCommand)cmd);
|
||||
} else if (clazz == CreateVolumeFromSnapshotCommand.class) {
|
||||
return execute((CreateVolumeFromSnapshotCommand)cmd);
|
||||
} else if (clazz == CreatePrivateTemplateFromVolumeCommand.class) {
|
||||
return execute((CreatePrivateTemplateFromVolumeCommand)cmd);
|
||||
} else if (clazz == CreatePrivateTemplateFromSnapshotCommand.class) {
|
||||
return execute((CreatePrivateTemplateFromSnapshotCommand)cmd);
|
||||
return execute((AttachIsoCommand) cmd);
|
||||
} else if (clazz == UpgradeSnapshotCommand.class) {
|
||||
return execute((UpgradeSnapshotCommand)cmd);
|
||||
} else if (clazz == GetStorageStatsCommand.class) {
|
||||
@ -1482,6 +1496,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
return vm;
|
||||
}
|
||||
|
||||
|
||||
protected void finalizeVmMetaData(VM vm, Connection conn, VirtualMachineTO vmSpec) throws Exception {
|
||||
|
||||
Map<String, String> details = vmSpec.getDetails();
|
||||
@ -3607,13 +3622,21 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
throw new CloudRuntimeException("Com'on no control domain? What the crap?!#@!##$@");
|
||||
}
|
||||
|
||||
protected void umountSnapshotDir(Connection conn, Long dcId) {
|
||||
try {
|
||||
callHostPlugin(conn, "vmopsSnapshot", "unmountSnapshotsDir", "dcId", dcId.toString());
|
||||
} catch (Exception e) {
|
||||
s_logger.debug("Failed to umount snapshot dir",e);
|
||||
}
|
||||
}
|
||||
|
||||
protected ReadyAnswer execute(ReadyCommand cmd) {
|
||||
Connection conn = getConnection();
|
||||
Long dcId = cmd.getDataCenterId();
|
||||
// Ignore the result of the callHostPlugin. Even if unmounting the
|
||||
// snapshots dir fails, let Ready command
|
||||
// succeed.
|
||||
callHostPlugin(conn, "vmopsSnapshot", "unmountSnapshotsDir", "dcId", dcId.toString());
|
||||
umountSnapshotDir(conn, dcId);
|
||||
|
||||
setupLinkLocalNetwork(conn);
|
||||
// try to destroy CD-ROM device for all system VMs on this host
|
||||
@ -4019,109 +4042,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
}
|
||||
}
|
||||
|
||||
boolean swiftDownload(Connection conn, SwiftTO swift, String container, String rfilename, String dir, String lfilename, Boolean remote) {
|
||||
String result = null;
|
||||
try {
|
||||
result =
|
||||
callHostPluginAsync(conn, "swiftxen", "swift", 60 * 60, "op", "download", "url", swift.getUrl(), "account", swift.getAccount(), "username",
|
||||
swift.getUserName(), "key", swift.getKey(), "rfilename", rfilename, "dir", dir, "lfilename", lfilename, "remote", remote.toString());
|
||||
if (result != null && result.equals("true")) {
|
||||
return true;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("swift download failed due to ", e);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
boolean swiftUpload(Connection conn, SwiftTO swift, String container, String ldir, String lfilename, Boolean isISCSI, int wait) {
|
||||
String result = null;
|
||||
try {
|
||||
result =
|
||||
callHostPluginAsync(conn, "swiftxen", "swift", wait, "op", "upload", "url", swift.getUrl(), "account", swift.getAccount(), "username",
|
||||
swift.getUserName(), "key", swift.getKey(), "container", container, "ldir", ldir, "lfilename", lfilename, "isISCSI", isISCSI.toString());
|
||||
if (result != null && result.equals("true")) {
|
||||
return true;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("swift upload failed due to " + e.toString(), e);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
boolean swiftDelete(Connection conn, SwiftTO swift, String rfilename) {
|
||||
String result = null;
|
||||
try {
|
||||
result =
|
||||
callHostPlugin(conn, "swiftxen", "swift", "op", "delete", "url", swift.getUrl(), "account", swift.getAccount(), "username", swift.getUserName(), "key",
|
||||
swift.getKey(), "rfilename", rfilename);
|
||||
if (result != null && result.equals("true")) {
|
||||
return true;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("swift download failed due to ", e);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
public String swiftBackupSnapshot(Connection conn, SwiftTO swift, String srUuid, String snapshotUuid, String container, Boolean isISCSI, int wait) {
|
||||
String lfilename;
|
||||
String ldir;
|
||||
if (isISCSI) {
|
||||
ldir = "/dev/VG_XenStorage-" + srUuid;
|
||||
lfilename = "VHD-" + snapshotUuid;
|
||||
} else {
|
||||
ldir = "/var/run/sr-mount/" + srUuid;
|
||||
lfilename = snapshotUuid + ".vhd";
|
||||
}
|
||||
swiftUpload(conn, swift, container, ldir, lfilename, isISCSI, wait);
|
||||
return lfilename;
|
||||
}
|
||||
|
||||
protected String backupSnapshot(Connection conn, String primaryStorageSRUuid, Long dcId, Long accountId, Long volumeId, String secondaryStorageMountPath,
|
||||
String snapshotUuid, String prevBackupUuid, Boolean isISCSI, int wait, Long secHostId) {
|
||||
String backupSnapshotUuid = null;
|
||||
|
||||
if (prevBackupUuid == null) {
|
||||
prevBackupUuid = "";
|
||||
}
|
||||
|
||||
// Each argument is put in a separate line for readability.
|
||||
// Using more lines does not harm the environment.
|
||||
String backupUuid = UUID.randomUUID().toString();
|
||||
String results =
|
||||
callHostPluginAsync(conn, "vmopsSnapshot", "backupSnapshot", wait, "primaryStorageSRUuid", primaryStorageSRUuid, "dcId", dcId.toString(), "accountId",
|
||||
accountId.toString(), "volumeId", volumeId.toString(), "secondaryStorageMountPath", secondaryStorageMountPath, "snapshotUuid", snapshotUuid,
|
||||
"prevBackupUuid", prevBackupUuid, "backupUuid", backupUuid, "isISCSI", isISCSI.toString(), "secHostId", secHostId.toString());
|
||||
String errMsg = null;
|
||||
if (results == null || results.isEmpty()) {
|
||||
errMsg =
|
||||
"Could not copy backupUuid: " + backupSnapshotUuid + " of volumeId: " + volumeId + " from primary storage " + primaryStorageSRUuid +
|
||||
" to secondary storage " + secondaryStorageMountPath + " due to null";
|
||||
} else {
|
||||
|
||||
String[] tmp = results.split("#");
|
||||
String status = tmp[0];
|
||||
backupSnapshotUuid = tmp[1];
|
||||
// status == "1" if and only if backupSnapshotUuid != null
|
||||
// So we don't rely on status value but return backupSnapshotUuid as an
|
||||
// indicator of success.
|
||||
if (status != null && status.equalsIgnoreCase("1") && backupSnapshotUuid != null) {
|
||||
s_logger.debug("Successfully copied backupUuid: " + backupSnapshotUuid + " of volumeId: " + volumeId + " to secondary storage");
|
||||
return backupSnapshotUuid;
|
||||
} else {
|
||||
errMsg =
|
||||
"Could not copy backupUuid: " + backupSnapshotUuid + " of volumeId: " + volumeId + " from primary storage " + primaryStorageSRUuid +
|
||||
" to secondary storage " + secondaryStorageMountPath + " due to " + tmp[1];
|
||||
}
|
||||
}
|
||||
String source = backupUuid + ".vhd";
|
||||
killCopyProcess(conn, source);
|
||||
s_logger.warn(errMsg);
|
||||
return null;
|
||||
|
||||
}
|
||||
|
||||
protected String callHostPluginAsync(Connection conn, String plugin, String cmd, int wait, String... params) {
|
||||
int timeout = wait * 1000;
|
||||
Map<String, String> args = new HashMap<String, String>();
|
||||
@ -6273,14 +6193,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
}
|
||||
}
|
||||
|
||||
void destroyVDI(Connection conn, VDI vdi) {
|
||||
try {
|
||||
vdi.destroy(conn);
|
||||
} catch (Exception e) {
|
||||
String msg = "destroy VDI failed due to " + e.toString();
|
||||
s_logger.warn(msg);
|
||||
}
|
||||
}
|
||||
|
||||
public CreateAnswer execute(CreateCommand cmd) {
|
||||
Connection conn = getConnection();
|
||||
@ -6707,54 +6619,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
return new Answer(cmd, true, "Success");
|
||||
}
|
||||
|
||||
public CopyVolumeAnswer execute(final CopyVolumeCommand cmd) {
|
||||
Connection conn = getConnection();
|
||||
String volumeUUID = cmd.getVolumePath();
|
||||
StorageFilerTO poolTO = cmd.getPool();
|
||||
String secondaryStorageURL = cmd.getSecondaryStorageURL();
|
||||
boolean toSecondaryStorage = cmd.toSecondaryStorage();
|
||||
int wait = cmd.getWait();
|
||||
try {
|
||||
URI uri = new URI(secondaryStorageURL);
|
||||
String remoteVolumesMountPath = uri.getHost() + ":" + uri.getPath() + "/volumes/";
|
||||
String volumeFolder = String.valueOf(cmd.getVolumeId()) + "/";
|
||||
String mountpoint = remoteVolumesMountPath + volumeFolder;
|
||||
SR primaryStoragePool = getStorageRepository(conn, poolTO.getUuid());
|
||||
String srUuid = primaryStoragePool.getUuid(conn);
|
||||
if (toSecondaryStorage) {
|
||||
// Create the volume folder
|
||||
if (!createSecondaryStorageFolder(conn, remoteVolumesMountPath, volumeFolder)) {
|
||||
throw new InternalErrorException("Failed to create the volume folder.");
|
||||
}
|
||||
SR secondaryStorage = null;
|
||||
try {
|
||||
// Create a SR for the volume UUID folder
|
||||
secondaryStorage = createNfsSRbyURI(conn, new URI(secondaryStorageURL + "/volumes/" + volumeFolder), false);
|
||||
// Look up the volume on the source primary storage pool
|
||||
VDI srcVolume = getVDIbyUuid(conn, volumeUUID);
|
||||
// Copy the volume to secondary storage
|
||||
VDI destVolume = cloudVDIcopy(conn, srcVolume, secondaryStorage, wait);
|
||||
String destVolumeUUID = destVolume.getUuid(conn);
|
||||
return new CopyVolumeAnswer(cmd, true, null, null, destVolumeUUID);
|
||||
} finally {
|
||||
removeSR(conn, secondaryStorage);
|
||||
}
|
||||
} else {
|
||||
try {
|
||||
String volumePath = mountpoint + "/" + volumeUUID + ".vhd";
|
||||
String uuid = copy_vhd_from_secondarystorage(conn, volumePath, srUuid, wait);
|
||||
return new CopyVolumeAnswer(cmd, true, null, srUuid, uuid);
|
||||
} finally {
|
||||
deleteSecondaryStorageFolder(conn, remoteVolumesMountPath, volumeFolder);
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
String msg = "Catch Exception " + e.getClass().getName() + " due to " + e.toString();
|
||||
s_logger.warn(msg, e);
|
||||
return new CopyVolumeAnswer(cmd, false, msg, null, null);
|
||||
}
|
||||
}
|
||||
|
||||
protected VDI createVdi(SR sr, String vdiNameLabel, Long volumeSize) throws Types.XenAPIException, XmlRpcException {
|
||||
VDI vdi = null;
|
||||
|
||||
@ -7276,140 +7140,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
return SRType.LVMOHBA.equals(type) || SRType.LVMOISCSI.equals(type) || SRType.LVM.equals(type);
|
||||
}
|
||||
|
||||
protected ManageSnapshotAnswer execute(final ManageSnapshotCommand cmd) {
|
||||
Connection conn = getConnection();
|
||||
long snapshotId = cmd.getSnapshotId();
|
||||
String snapshotName = cmd.getSnapshotName();
|
||||
|
||||
// By default assume failure
|
||||
boolean success = false;
|
||||
String cmdSwitch = cmd.getCommandSwitch();
|
||||
String snapshotOp = "Unsupported snapshot command." + cmdSwitch;
|
||||
if (cmdSwitch.equals(ManageSnapshotCommand.CREATE_SNAPSHOT)) {
|
||||
snapshotOp = "create";
|
||||
} else if (cmdSwitch.equals(ManageSnapshotCommand.DESTROY_SNAPSHOT)) {
|
||||
snapshotOp = "destroy";
|
||||
}
|
||||
String details = "ManageSnapshotCommand operation: " + snapshotOp + " Failed for snapshotId: " + snapshotId;
|
||||
String snapshotUUID = null;
|
||||
|
||||
try {
|
||||
if (cmdSwitch.equals(ManageSnapshotCommand.CREATE_SNAPSHOT)) {
|
||||
// Look up the volume
|
||||
String volumeUUID = cmd.getVolumePath();
|
||||
VDI volume = VDI.getByUuid(conn, volumeUUID);
|
||||
|
||||
// Create a snapshot
|
||||
VDI snapshot = volume.snapshot(conn, new HashMap<String, String>());
|
||||
|
||||
if (snapshotName != null) {
|
||||
snapshot.setNameLabel(conn, snapshotName);
|
||||
}
|
||||
// Determine the UUID of the snapshot
|
||||
|
||||
snapshotUUID = snapshot.getUuid(conn);
|
||||
String preSnapshotUUID = cmd.getSnapshotPath();
|
||||
//check if it is a empty snapshot
|
||||
if (preSnapshotUUID != null) {
|
||||
SR sr = volume.getSR(conn);
|
||||
String srUUID = sr.getUuid(conn);
|
||||
String type = sr.getType(conn);
|
||||
Boolean isISCSI = IsISCSI(type);
|
||||
String snapshotParentUUID = getVhdParent(conn, srUUID, snapshotUUID, isISCSI);
|
||||
|
||||
String preSnapshotParentUUID = getVhdParent(conn, srUUID, preSnapshotUUID, isISCSI);
|
||||
if (snapshotParentUUID != null && snapshotParentUUID.equals(preSnapshotParentUUID)) {
|
||||
// this is empty snapshot, remove it
|
||||
snapshot.destroy(conn);
|
||||
snapshotUUID = preSnapshotUUID;
|
||||
}
|
||||
|
||||
}
|
||||
success = true;
|
||||
details = null;
|
||||
} else if (cmd.getCommandSwitch().equals(ManageSnapshotCommand.DESTROY_SNAPSHOT)) {
|
||||
// Look up the snapshot
|
||||
snapshotUUID = cmd.getSnapshotPath();
|
||||
VDI snapshot = getVDIbyUuid(conn, snapshotUUID);
|
||||
|
||||
snapshot.destroy(conn);
|
||||
snapshotUUID = null;
|
||||
success = true;
|
||||
details = null;
|
||||
}
|
||||
} catch (XenAPIException e) {
|
||||
details += ", reason: " + e.toString();
|
||||
s_logger.warn(details, e);
|
||||
} catch (Exception e) {
|
||||
details += ", reason: " + e.toString();
|
||||
s_logger.warn(details, e);
|
||||
}
|
||||
return new ManageSnapshotAnswer(cmd, snapshotId, snapshotUUID, success, details);
|
||||
}
|
||||
|
||||
protected CreatePrivateTemplateAnswer execute(final CreatePrivateTemplateFromVolumeCommand cmd) {
|
||||
Connection conn = getConnection();
|
||||
String secondaryStoragePoolURL = cmd.getSecondaryStorageUrl();
|
||||
String volumeUUID = cmd.getVolumePath();
|
||||
Long accountId = cmd.getAccountId();
|
||||
String userSpecifiedName = cmd.getTemplateName();
|
||||
Long templateId = cmd.getTemplateId();
|
||||
int wait = cmd.getWait();
|
||||
String details = null;
|
||||
SR tmpltSR = null;
|
||||
boolean result = false;
|
||||
String secondaryStorageMountPath = null;
|
||||
String installPath = null;
|
||||
try {
|
||||
URI uri = new URI(secondaryStoragePoolURL);
|
||||
secondaryStorageMountPath = uri.getHost() + ":" + uri.getPath();
|
||||
installPath = "template/tmpl/" + accountId + "/" + templateId;
|
||||
if (!createSecondaryStorageFolder(conn, secondaryStorageMountPath, installPath)) {
|
||||
details = " Filed to create folder " + installPath + " in secondary storage";
|
||||
s_logger.warn(details);
|
||||
return new CreatePrivateTemplateAnswer(cmd, false, details);
|
||||
}
|
||||
|
||||
VDI volume = getVDIbyUuid(conn, volumeUUID);
|
||||
// create template SR
|
||||
URI tmpltURI = new URI(secondaryStoragePoolURL + "/" + installPath);
|
||||
tmpltSR = createNfsSRbyURI(conn, tmpltURI, false);
|
||||
|
||||
// copy volume to template SR
|
||||
VDI tmpltVDI = cloudVDIcopy(conn, volume, tmpltSR, wait);
|
||||
// scan makes XenServer pick up VDI physicalSize
|
||||
tmpltSR.scan(conn);
|
||||
if (userSpecifiedName != null) {
|
||||
tmpltVDI.setNameLabel(conn, userSpecifiedName);
|
||||
}
|
||||
|
||||
String tmpltUUID = tmpltVDI.getUuid(conn);
|
||||
String tmpltFilename = tmpltUUID + ".vhd";
|
||||
long virtualSize = tmpltVDI.getVirtualSize(conn);
|
||||
long physicalSize = tmpltVDI.getPhysicalUtilisation(conn);
|
||||
// create the template.properties file
|
||||
String templatePath = secondaryStorageMountPath + "/" + installPath;
|
||||
result = postCreatePrivateTemplate(conn, templatePath, tmpltFilename, tmpltUUID, userSpecifiedName, null, physicalSize, virtualSize, templateId);
|
||||
if (!result) {
|
||||
throw new CloudRuntimeException("Could not create the template.properties file on secondary storage dir: " + tmpltURI);
|
||||
}
|
||||
installPath = installPath + "/" + tmpltFilename;
|
||||
removeSR(conn, tmpltSR);
|
||||
tmpltSR = null;
|
||||
return new CreatePrivateTemplateAnswer(cmd, true, null, installPath, virtualSize, physicalSize, tmpltUUID, ImageFormat.VHD);
|
||||
} catch (Exception e) {
|
||||
if (tmpltSR != null) {
|
||||
removeSR(conn, tmpltSR);
|
||||
}
|
||||
if (secondaryStorageMountPath != null) {
|
||||
deleteSecondaryStorageFolder(conn, secondaryStorageMountPath, installPath);
|
||||
}
|
||||
details = "Creating template from volume " + volumeUUID + " failed due to " + e.toString();
|
||||
s_logger.error(details, e);
|
||||
}
|
||||
return new CreatePrivateTemplateAnswer(cmd, result, details);
|
||||
}
|
||||
|
||||
protected Answer execute(final UpgradeSnapshotCommand cmd) {
|
||||
|
||||
String secondaryStorageUrl = cmd.getSecondaryStorageUrl();
|
||||
@ -7439,60 +7169,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
return new Answer(cmd, false, "failure");
|
||||
}
|
||||
|
||||
protected CreatePrivateTemplateAnswer execute(final CreatePrivateTemplateFromSnapshotCommand cmd) {
|
||||
Connection conn = getConnection();
|
||||
Long accountId = cmd.getAccountId();
|
||||
Long volumeId = cmd.getVolumeId();
|
||||
String secondaryStorageUrl = cmd.getSecondaryStorageUrl();
|
||||
String backedUpSnapshotUuid = cmd.getSnapshotUuid();
|
||||
Long newTemplateId = cmd.getNewTemplateId();
|
||||
String userSpecifiedName = cmd.getTemplateName();
|
||||
int wait = cmd.getWait();
|
||||
// By default, assume failure
|
||||
String details = null;
|
||||
boolean result = false;
|
||||
String secondaryStorageMountPath = null;
|
||||
String installPath = null;
|
||||
try {
|
||||
URI uri = new URI(secondaryStorageUrl);
|
||||
secondaryStorageMountPath = uri.getHost() + ":" + uri.getPath();
|
||||
installPath = "template/tmpl/" + accountId + "/" + newTemplateId;
|
||||
if (!createSecondaryStorageFolder(conn, secondaryStorageMountPath, installPath)) {
|
||||
details = " Filed to create folder " + installPath + " in secondary storage";
|
||||
s_logger.warn(details);
|
||||
return new CreatePrivateTemplateAnswer(cmd, false, details);
|
||||
}
|
||||
String templatePath = secondaryStorageMountPath + "/" + installPath;
|
||||
// create snapshot SR
|
||||
String filename = backedUpSnapshotUuid;
|
||||
if (!filename.startsWith("VHD-") && !filename.endsWith(".vhd")) {
|
||||
filename = backedUpSnapshotUuid + ".vhd";
|
||||
}
|
||||
String snapshotPath = secondaryStorageMountPath + "/snapshots/" + accountId + "/" + volumeId + "/" + filename;
|
||||
String results = createTemplateFromSnapshot(conn, templatePath, snapshotPath, wait);
|
||||
String[] tmp = results.split("#");
|
||||
String tmpltUuid = tmp[1];
|
||||
long physicalSize = Long.parseLong(tmp[2]);
|
||||
long virtualSize = Long.parseLong(tmp[3]) * 1024 * 1024;
|
||||
String tmpltFilename = tmpltUuid + ".vhd";
|
||||
|
||||
// create the template.properties file
|
||||
result = postCreatePrivateTemplate(conn, templatePath, tmpltFilename, tmpltUuid, userSpecifiedName, null, physicalSize, virtualSize, newTemplateId);
|
||||
if (!result) {
|
||||
throw new CloudRuntimeException("Could not create the template.properties file on secondary storage dir: " + templatePath);
|
||||
}
|
||||
installPath = installPath + "/" + tmpltFilename;
|
||||
return new CreatePrivateTemplateAnswer(cmd, true, null, installPath, virtualSize, physicalSize, tmpltUuid, ImageFormat.VHD);
|
||||
} catch (Exception e) {
|
||||
if (secondaryStorageMountPath != null) {
|
||||
deleteSecondaryStorageFolder(conn, secondaryStorageMountPath, installPath);
|
||||
}
|
||||
details = "Creating template from snapshot " + backedUpSnapshotUuid + " failed due to " + e.toString();
|
||||
s_logger.error(details, e);
|
||||
}
|
||||
return new CreatePrivateTemplateAnswer(cmd, result, details);
|
||||
}
|
||||
|
||||
private boolean destroySnapshotOnPrimaryStorageExceptThis(Connection conn, String volumeUuid, String avoidSnapshotUuid) {
|
||||
try {
|
||||
VDI volume = getVDIbyUuid(conn, volumeUuid);
|
||||
@ -7523,203 +7199,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
return false;
|
||||
}
|
||||
|
||||
protected BackupSnapshotAnswer execute(final BackupSnapshotCommand cmd) {
|
||||
Connection conn = getConnection();
|
||||
String primaryStorageNameLabel = cmd.getPrimaryStoragePoolNameLabel();
|
||||
Long dcId = cmd.getDataCenterId();
|
||||
Long accountId = cmd.getAccountId();
|
||||
Long volumeId = cmd.getVolumeId();
|
||||
String secondaryStorageUrl = cmd.getSecondaryStorageUrl();
|
||||
String snapshotUuid = cmd.getSnapshotUuid(); // not null: Precondition.
|
||||
String prevBackupUuid = cmd.getPrevBackupUuid();
|
||||
String prevSnapshotUuid = cmd.getPrevSnapshotUuid();
|
||||
int wait = cmd.getWait();
|
||||
Long secHostId = cmd.getSecHostId();
|
||||
// By default assume failure
|
||||
String details = null;
|
||||
boolean success = false;
|
||||
String snapshotBackupUuid = null;
|
||||
boolean fullbackup = true;
|
||||
try {
|
||||
SR primaryStorageSR = getSRByNameLabelandHost(conn, primaryStorageNameLabel);
|
||||
if (primaryStorageSR == null) {
|
||||
throw new InternalErrorException("Could not backup snapshot because the primary Storage SR could not be created from the name label: " +
|
||||
primaryStorageNameLabel);
|
||||
}
|
||||
String psUuid = primaryStorageSR.getUuid(conn);
|
||||
Boolean isISCSI = IsISCSI(primaryStorageSR.getType(conn));
|
||||
URI uri = new URI(secondaryStorageUrl);
|
||||
String secondaryStorageMountPath = uri.getHost() + ":" + uri.getPath();
|
||||
VDI snapshotVdi = getVDIbyUuid(conn, snapshotUuid);
|
||||
String snapshotPaUuid = null;
|
||||
if (prevBackupUuid != null) {
|
||||
try {
|
||||
snapshotPaUuid = getVhdParent(conn, psUuid, snapshotUuid, isISCSI);
|
||||
if (snapshotPaUuid != null) {
|
||||
String snashotPaPaPaUuid = getVhdParent(conn, psUuid, snapshotPaUuid, isISCSI);
|
||||
String prevSnashotPaUuid = getVhdParent(conn, psUuid, prevSnapshotUuid, isISCSI);
|
||||
if (snashotPaPaPaUuid != null && prevSnashotPaUuid != null && prevSnashotPaUuid.equals(snashotPaPaPaUuid)) {
|
||||
fullbackup = false;
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
}
|
||||
}
|
||||
|
||||
if (fullbackup) {
|
||||
// the first snapshot is always a full snapshot
|
||||
String folder = "snapshots/" + accountId + "/" + volumeId;
|
||||
if (!createSecondaryStorageFolder(conn, secondaryStorageMountPath, folder)) {
|
||||
details = " Filed to create folder " + folder + " in secondary storage";
|
||||
s_logger.warn(details);
|
||||
return new BackupSnapshotAnswer(cmd, false, details, null, false);
|
||||
}
|
||||
String snapshotMountpoint = secondaryStorageUrl + "/" + folder;
|
||||
SR snapshotSr = null;
|
||||
try {
|
||||
snapshotSr = createNfsSRbyURI(conn, new URI(snapshotMountpoint), false);
|
||||
VDI backedVdi = cloudVDIcopy(conn, snapshotVdi, snapshotSr, wait);
|
||||
snapshotBackupUuid = backedVdi.getUuid(conn);
|
||||
if (cmd.getSwift() != null) {
|
||||
try {
|
||||
swiftBackupSnapshot(conn, cmd.getSwift(), snapshotSr.getUuid(conn), snapshotBackupUuid, "S-" + volumeId.toString(), false, wait);
|
||||
snapshotBackupUuid = snapshotBackupUuid + ".vhd";
|
||||
} finally {
|
||||
deleteSnapshotBackup(conn, dcId, accountId, volumeId, secondaryStorageMountPath, snapshotBackupUuid);
|
||||
}
|
||||
} else if (cmd.getS3() != null) {
|
||||
try {
|
||||
backupSnapshotToS3(conn, cmd.getS3(), snapshotSr.getUuid(conn), snapshotBackupUuid, isISCSI, wait);
|
||||
snapshotBackupUuid = snapshotBackupUuid + ".vhd";
|
||||
} finally {
|
||||
deleteSnapshotBackup(conn, dcId, accountId, volumeId, secondaryStorageMountPath, snapshotBackupUuid);
|
||||
}
|
||||
}
|
||||
success = true;
|
||||
} finally {
|
||||
if (snapshotSr != null) {
|
||||
removeSR(conn, snapshotSr);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
String primaryStorageSRUuid = primaryStorageSR.getUuid(conn);
|
||||
if (cmd.getSwift() != null) {
|
||||
swiftBackupSnapshot(conn, cmd.getSwift(), primaryStorageSRUuid, snapshotPaUuid, "S-" + volumeId.toString(), isISCSI, wait);
|
||||
if (isISCSI) {
|
||||
snapshotBackupUuid = "VHD-" + snapshotPaUuid;
|
||||
} else {
|
||||
snapshotBackupUuid = snapshotPaUuid + ".vhd";
|
||||
}
|
||||
success = true;
|
||||
} else if (cmd.getS3() != null) {
|
||||
backupSnapshotToS3(conn, cmd.getS3(), primaryStorageSRUuid, snapshotPaUuid, isISCSI, wait);
|
||||
} else {
|
||||
snapshotBackupUuid =
|
||||
backupSnapshot(conn, primaryStorageSRUuid, dcId, accountId, volumeId, secondaryStorageMountPath, snapshotUuid, prevBackupUuid, isISCSI, wait,
|
||||
secHostId);
|
||||
success = (snapshotBackupUuid != null);
|
||||
}
|
||||
}
|
||||
String volumeUuid = cmd.getVolumePath();
|
||||
destroySnapshotOnPrimaryStorageExceptThis(conn, volumeUuid, snapshotUuid);
|
||||
if (success) {
|
||||
details = "Successfully backedUp the snapshotUuid: " + snapshotUuid + " to secondary storage.";
|
||||
|
||||
}
|
||||
} catch (XenAPIException e) {
|
||||
details = "BackupSnapshot Failed due to " + e.toString();
|
||||
s_logger.warn(details, e);
|
||||
} catch (Exception e) {
|
||||
details = "BackupSnapshot Failed due to " + e.getMessage();
|
||||
s_logger.warn(details, e);
|
||||
}
|
||||
|
||||
return new BackupSnapshotAnswer(cmd, success, details, snapshotBackupUuid, fullbackup);
|
||||
}
|
||||
|
||||
private boolean
|
||||
backupSnapshotToS3(final Connection connection, final S3TO s3, final String srUuid, final String snapshotUuid, final Boolean iSCSIFlag, final int wait) {
|
||||
|
||||
final String filename = iSCSIFlag ? "VHD-" + snapshotUuid : snapshotUuid + ".vhd";
|
||||
final String dir = (iSCSIFlag ? "/dev/VG_XenStorage-" : "/var/run/sr-mount/") + srUuid;
|
||||
final String key = String.format("/snapshots/%1$s", snapshotUuid);
|
||||
|
||||
try {
|
||||
|
||||
final List<String> parameters = newArrayList(flattenProperties(s3, S3Utils.ClientOptions.class));
|
||||
// https workaround for Introspector bug that does not
|
||||
// recognize Boolean accessor methods ...
|
||||
parameters.addAll(Arrays.asList("operation", "put", "filename", dir + "/" + filename, "iSCSIFlag", iSCSIFlag.toString(), "bucket", s3.getBucketName(), "key",
|
||||
key, "https", s3.isHttps() != null ? s3.isHttps().toString() : "null"));
|
||||
final String result = callHostPluginAsync(connection, "s3xen", "s3", wait, parameters.toArray(new String[parameters.size()]));
|
||||
|
||||
if (result != null && result.equals("true")) {
|
||||
return true;
|
||||
}
|
||||
|
||||
} catch (Exception e) {
|
||||
s_logger.error(String.format("S3 upload failed of snapshot %1$s due to %2$s.", snapshotUuid, e.toString()), e);
|
||||
}
|
||||
|
||||
return false;
|
||||
|
||||
}
|
||||
|
||||
protected CreateVolumeFromSnapshotAnswer execute(final CreateVolumeFromSnapshotCommand cmd) {
|
||||
Connection conn = getConnection();
|
||||
String primaryStorageNameLabel = cmd.getPrimaryStoragePoolNameLabel();
|
||||
Long accountId = cmd.getAccountId();
|
||||
Long volumeId = cmd.getVolumeId();
|
||||
String secondaryStorageUrl = cmd.getSecondaryStorageUrl();
|
||||
String backedUpSnapshotUuid = cmd.getSnapshotUuid();
|
||||
int wait = cmd.getWait();
|
||||
boolean result = false;
|
||||
// Generic error message.
|
||||
String details = null;
|
||||
String volumeUUID = null;
|
||||
SR snapshotSR = null;
|
||||
|
||||
if (secondaryStorageUrl == null) {
|
||||
details += " because the URL passed: " + secondaryStorageUrl + " is invalid.";
|
||||
return new CreateVolumeFromSnapshotAnswer(cmd, result, details, volumeUUID);
|
||||
}
|
||||
try {
|
||||
SR primaryStorageSR = getSRByNameLabelandHost(conn, primaryStorageNameLabel);
|
||||
if (primaryStorageSR == null) {
|
||||
throw new InternalErrorException("Could not create volume from snapshot because the primary Storage SR could not be created from the name label: " +
|
||||
primaryStorageNameLabel);
|
||||
}
|
||||
// Get the absolute path of the snapshot on the secondary storage.
|
||||
URI snapshotURI = new URI(secondaryStorageUrl + "/snapshots/" + accountId + "/" + volumeId);
|
||||
String filename = backedUpSnapshotUuid;
|
||||
if (!filename.startsWith("VHD-") && !filename.endsWith(".vhd")) {
|
||||
filename = backedUpSnapshotUuid + ".vhd";
|
||||
}
|
||||
String snapshotPath = snapshotURI.getHost() + ":" + snapshotURI.getPath() + "/" + filename;
|
||||
String srUuid = primaryStorageSR.getUuid(conn);
|
||||
volumeUUID = copy_vhd_from_secondarystorage(conn, snapshotPath, srUuid, wait);
|
||||
result = true;
|
||||
} catch (XenAPIException e) {
|
||||
details += " due to " + e.toString();
|
||||
s_logger.warn(details, e);
|
||||
} catch (Exception e) {
|
||||
details += " due to " + e.getMessage();
|
||||
s_logger.warn(details, e);
|
||||
} finally {
|
||||
// In all cases, if the temporary SR was created, forget it.
|
||||
if (snapshotSR != null) {
|
||||
removeSR(conn, snapshotSR);
|
||||
}
|
||||
}
|
||||
if (!result) {
|
||||
// Is this logged at a higher level?
|
||||
s_logger.error(details);
|
||||
}
|
||||
|
||||
// In all cases return something.
|
||||
return new CreateVolumeFromSnapshotAnswer(cmd, result, details, volumeUUID);
|
||||
}
|
||||
|
||||
protected VM getVM(Connection conn, String vmName) {
|
||||
// Look up VMs with the specified name
|
||||
Set<VM> vms;
|
||||
@ -7919,27 +7398,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
return parentUuid;
|
||||
}
|
||||
|
||||
protected boolean destroySnapshotOnPrimaryStorage(Connection conn, String snapshotUuid) {
|
||||
// Precondition snapshotUuid != null
|
||||
try {
|
||||
VDI snapshot = getVDIbyUuid(conn, snapshotUuid);
|
||||
if (snapshot == null) {
|
||||
throw new InternalErrorException("Could not destroy snapshot " + snapshotUuid + " because the snapshot VDI was null");
|
||||
}
|
||||
snapshot.destroy(conn);
|
||||
s_logger.debug("Successfully destroyed snapshotUuid: " + snapshotUuid + " on primary storage");
|
||||
return true;
|
||||
} catch (XenAPIException e) {
|
||||
String msg = "Destroy snapshotUuid: " + snapshotUuid + " on primary storage failed due to " + e.toString();
|
||||
s_logger.error(msg, e);
|
||||
} catch (Exception e) {
|
||||
String msg = "Destroy snapshotUuid: " + snapshotUuid + " on primary storage failed due to " + e.getMessage();
|
||||
s_logger.warn(msg, e);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
protected String deleteSnapshotBackup(Connection conn, Long dcId, Long accountId, Long volumeId, String secondaryStorageMountPath, String backupUUID) {
|
||||
|
||||
// If anybody modifies the formatting below again, I'll skin them
|
||||
@ -7950,10 +7408,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
return result;
|
||||
}
|
||||
|
||||
protected boolean deleteSnapshotsDir(Connection conn, Long dcId, Long accountId, Long volumeId, String secondaryStorageMountPath) {
|
||||
return deleteSecondaryStorageFolder(conn, secondaryStorageMountPath, "snapshots" + "/" + accountId.toString() + "/" + volumeId.toString());
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean start() {
|
||||
return true;
|
||||
|
||||
@ -51,6 +51,11 @@ public class XenServerPoolVms {
|
||||
return pv == null ? State.Stopped : pv.second(); // if a VM is absent on the cluster, it is effectively in stopped state.
|
||||
}
|
||||
|
||||
public Ternary<String, State, String> get(String clusterId, String name) {
|
||||
HashMap<String, Ternary<String, State, String>> vms = getClusterVmState(clusterId);
|
||||
return vms.get(name);
|
||||
}
|
||||
|
||||
public void put(String clusterId, String hostUuid, String name, State state, String platform) {
|
||||
HashMap<String, Ternary<String, State, String>> vms = getClusterVmState(clusterId);
|
||||
vms.put(name, new Ternary<String, State, String>(hostUuid, state, platform));
|
||||
|
||||
@ -90,7 +90,7 @@ import com.cloud.utils.storage.encoding.Decoder;
|
||||
public class XenServerStorageProcessor implements StorageProcessor {
|
||||
private static final Logger s_logger = Logger.getLogger(XenServerStorageProcessor.class);
|
||||
protected CitrixResourceBase hypervisorResource;
|
||||
private String BaseMountPointOnHost = "/var/run/cloud_mount";
|
||||
protected String BaseMountPointOnHost = "/var/run/cloud_mount";
|
||||
|
||||
public XenServerStorageProcessor(CitrixResourceBase resource) {
|
||||
hypervisorResource = resource;
|
||||
|
||||
@ -0,0 +1,112 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package com.cloud.hypervisor.xen.resource;
|
||||
|
||||
import java.io.File;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.ejb.Local;
|
||||
|
||||
import org.apache.cloudstack.hypervisor.xenserver.XenServerResourceNewBase;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.agent.api.StartupRoutingCommand;
|
||||
import com.cloud.resource.ServerResource;
|
||||
import com.cloud.storage.resource.StorageSubsystemCommandHandler;
|
||||
import com.cloud.storage.resource.StorageSubsystemCommandHandlerBase;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.utils.script.Script;
|
||||
import com.xensource.xenapi.Connection;
|
||||
|
||||
@Local(value=ServerResource.class)
|
||||
public class Xenserver625Resource extends XenServerResourceNewBase {
|
||||
private static final Logger s_logger = Logger.getLogger(XenServer620Resource.class);
|
||||
|
||||
public Xenserver625Resource() {
|
||||
super();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void fillHostInfo(Connection conn, StartupRoutingCommand cmd) {
|
||||
super.fillHostInfo(conn, cmd);
|
||||
Map<String, String> details = cmd.getHostDetails();
|
||||
details.put("Xenserer620HotFix", "Xenserver-Vdi-Copy-HotFix");
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String getGuestOsType(String stdType, boolean bootFromCD) {
|
||||
return CitrixHelper.getXenServer620GuestOsType(stdType, bootFromCD);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected List<File> getPatchFiles() {
|
||||
List<File> files = new ArrayList<File>();
|
||||
String patch = "scripts/vm/hypervisor/xenserver/xenserver62/patch";
|
||||
String patchfilePath = Script.findScript("", patch);
|
||||
if (patchfilePath == null) {
|
||||
throw new CloudRuntimeException("Unable to find patch file " + patch);
|
||||
}
|
||||
File file = new File(patchfilePath);
|
||||
files.add(file);
|
||||
return files;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getStaticMax(String os, boolean b, long dynamicMinRam, long dynamicMaxRam){
|
||||
long recommendedValue = CitrixHelper.getXenServer620StaticMax(os, b);
|
||||
if(recommendedValue == 0){
|
||||
s_logger.warn("No recommended value found for dynamic max, setting static max and dynamic max equal");
|
||||
return dynamicMaxRam;
|
||||
}
|
||||
long staticMax = Math.min(recommendedValue, 4l * dynamicMinRam); // XS constraint for stability
|
||||
if (dynamicMaxRam > staticMax){ // XS contraint that dynamic max <= static max
|
||||
s_logger.warn("dynamixMax " + dynamicMaxRam + " cant be greater than static max " + staticMax + ", can lead to stability issues. Setting static max as much as dynamic max ");
|
||||
return dynamicMaxRam;
|
||||
}
|
||||
return staticMax;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getStaticMin(String os, boolean b, long dynamicMinRam, long dynamicMaxRam){
|
||||
long recommendedValue = CitrixHelper.getXenServer620StaticMin(os, b);
|
||||
if(recommendedValue == 0){
|
||||
s_logger.warn("No recommended value found for dynamic min");
|
||||
return dynamicMinRam;
|
||||
}
|
||||
|
||||
if(dynamicMinRam < recommendedValue){ // XS contraint that dynamic min > static min
|
||||
s_logger.warn("Vm is set to dynamixMin " + dynamicMinRam + " less than the recommended static min " + recommendedValue + ", could lead to stability issues");
|
||||
}
|
||||
return dynamicMinRam;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected StorageSubsystemCommandHandler getStorageHandler() {
|
||||
XenServerStorageProcessor processor = new Xenserver625StorageProcessor(this);
|
||||
return new StorageSubsystemCommandHandlerBase(processor);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void umountSnapshotDir(Connection conn, Long dcId) {
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
@ -0,0 +1,822 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package com.cloud.hypervisor.xen.resource;
|
||||
|
||||
import java.io.File;
|
||||
import java.net.URI;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.UUID;
|
||||
|
||||
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
|
||||
import org.apache.cloudstack.storage.command.CopyCommand;
|
||||
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
|
||||
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
|
||||
import org.apache.cloudstack.storage.to.TemplateObjectTO;
|
||||
import org.apache.cloudstack.storage.to.VolumeObjectTO;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.to.DataObjectType;
|
||||
import com.cloud.agent.api.to.DataStoreTO;
|
||||
import com.cloud.agent.api.to.DataTO;
|
||||
import com.cloud.agent.api.to.NfsTO;
|
||||
import com.cloud.agent.api.to.S3TO;
|
||||
import com.cloud.agent.api.to.SwiftTO;
|
||||
import com.cloud.exception.InternalErrorException;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.xensource.xenapi.Connection;
|
||||
import com.xensource.xenapi.Host;
|
||||
import com.xensource.xenapi.PBD;
|
||||
import com.xensource.xenapi.SR;
|
||||
import com.xensource.xenapi.Task;
|
||||
import com.xensource.xenapi.Types;
|
||||
import com.xensource.xenapi.VDI;
|
||||
|
||||
public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
|
||||
private static final Logger s_logger = Logger.getLogger(XenServerStorageProcessor.class);
|
||||
|
||||
public Xenserver625StorageProcessor(CitrixResourceBase resource) {
|
||||
super(resource);
|
||||
}
|
||||
protected boolean mountNfs(Connection conn, String remoteDir, String localDir) {
|
||||
if (localDir == null) {
|
||||
localDir = "/var/cloud_mount/" + UUID.nameUUIDFromBytes(remoteDir.getBytes());
|
||||
}
|
||||
String results = hypervisorResource.callHostPluginAsync(conn, "cloud-plugin-storage", "mountNfsSecondaryStorage", 100 * 1000,
|
||||
"localDir", localDir, "remoteDir", remoteDir);
|
||||
if (results == null || results.isEmpty()) {
|
||||
String errMsg = "Could not mount secondary storage " + remoteDir + " on host ";
|
||||
s_logger.warn(errMsg);
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
protected boolean makeDirectory(Connection conn, String path) {
|
||||
String result = hypervisorResource.callHostPlugin(conn, "cloud-plugin-storage", "makeDirectory", "path", path);
|
||||
if (result == null || result.isEmpty()) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
protected SR createFileSR(Connection conn, String path) {
|
||||
SR sr = null;
|
||||
PBD pbd = null;
|
||||
try {
|
||||
Map<String, String> smConfig = new HashMap<String, String>();
|
||||
Host host = Host.getByUuid(conn, hypervisorResource.getHost().uuid);
|
||||
String uuid = UUID.randomUUID().toString();
|
||||
|
||||
sr = SR.introduce(conn,uuid, uuid, uuid, "file", "file", false, smConfig);
|
||||
PBD.Record record = new PBD.Record();
|
||||
record.host = host;
|
||||
record.SR = sr;
|
||||
smConfig.put("location", path);
|
||||
record.deviceConfig = smConfig;
|
||||
pbd = PBD.create(conn, record);
|
||||
pbd.plug(conn);
|
||||
sr.scan(conn);
|
||||
return sr;
|
||||
} catch (Exception e) {
|
||||
try {
|
||||
if (pbd != null) {
|
||||
pbd.destroy(conn);
|
||||
}
|
||||
} catch (Exception e1) {
|
||||
s_logger.debug("Failed to destroy pbd", e);
|
||||
}
|
||||
try {
|
||||
if (sr != null) {
|
||||
sr.forget(conn);
|
||||
}
|
||||
} catch (Exception e2) {
|
||||
s_logger.error("Failed to forget sr", e);
|
||||
}
|
||||
String msg = "createFileSR failed! due to " + e.toString();
|
||||
s_logger.warn(msg, e);
|
||||
throw new CloudRuntimeException(msg, e);
|
||||
}
|
||||
}
|
||||
|
||||
protected SR createFileSr(Connection conn, String remotePath, String dir) {
|
||||
String localDir = "/var/cloud_mount/" + UUID.nameUUIDFromBytes(remotePath.getBytes());
|
||||
mountNfs(conn, remotePath, localDir);
|
||||
SR sr = createFileSR(conn, localDir + "/" + dir);
|
||||
return sr;
|
||||
}
|
||||
|
||||
/**
 * Copies a template from NFS secondary storage into a primary storage SR using
 * the hotfixed VDI copy, then snapshots the copy so the template VDI on the
 * pool is immutable.
 *
 * Only the NfsTO + TEMPLATE combination is handled; anything else falls
 * through to the "not implemented yet" answer.
 */
@Override
public Answer copyTemplateToPrimaryStorage(CopyCommand cmd) {
    DataTO srcData = cmd.getSrcTO();
    DataTO destData = cmd.getDestTO();
    int wait = cmd.getWait();
    DataStoreTO srcStore = srcData.getDataStore();
    Connection conn = hypervisorResource.getConnection();
    SR srcSr = null;
    try {
        if ((srcStore instanceof NfsTO) && (srcData.getObjectType() == DataObjectType.TEMPLATE)) {
            NfsTO srcImageStore = (NfsTO)srcStore;
            TemplateObjectTO srcTemplate = (TemplateObjectTO)srcData;
            String storeUrl = srcImageStore.getUrl();
            URI uri = new URI(storeUrl);
            String volumePath = srcData.getPath();
            volumePath = StringUtils.stripEnd(volumePath, "/");
            String[] splits = volumePath.split("/");
            String volumeDirectory = volumePath;
            // A full path ("template/tmpl/dcid/templateId/templatename") includes the
            // file name; drop the last component to get the template's directory.
            if (splits.length > 4) {
                //"template/tmpl/dcid/templateId/templatename"
                int index = volumePath.lastIndexOf("/");
                volumeDirectory = volumePath.substring(0, index);
            }
            // Mount the template directory as a temporary file SR; it must contain
            // exactly one VDI (the template VHD).
            srcSr = createFileSr(conn, uri.getHost() + ":" + uri.getPath(), volumeDirectory);
            Set<VDI> vdis = srcSr.getVDIs(conn);
            if (vdis.size() != 1) {
                return new CopyCmdAnswer("Can't find template VDI under: " + uri.getHost() + ":" + uri.getPath() + "/" + volumeDirectory);
            }

            VDI srcVdi = vdis.iterator().next();

            PrimaryDataStoreTO destStore = (PrimaryDataStoreTO)destData.getDataStore();
            String poolName = destStore.getUuid();

            // Resolve the destination pool SR by name label; ambiguity is an error.
            SR poolsr = null;
            Set<SR> srs = SR.getByNameLabel(conn, poolName);
            if (srs.size() != 1) {
                String msg = "There are " + srs.size() + " SRs with same name: " + poolName;
                s_logger.warn(msg);
                return new CopyCmdAnswer(msg);
            } else {
                poolsr = srs.iterator().next();
            }
            String pUuid = poolsr.getUuid(conn);
            boolean isISCSI = IsISCSI(poolsr.getType(conn));
            // copyAsync2 is the hotfixed VDI copy (supports base-VDI deltas);
            // here a plain full copy into the pool SR.
            Task task = srcVdi.copyAsync2(conn, poolsr, null, null);
            // poll every 1 seconds ,
            hypervisorResource.waitForTask(conn, task, 1000, wait * 1000);
            hypervisorResource.checkForSuccess(conn, task);
            VDI tmpl = Types.toVDI(task, conn);
            // Snapshot the copied VDI and destroy the writable original, so the
            // template on the pool is an immutable snapshot VDI.
            VDI snapshotvdi = tmpl.snapshot(conn, new HashMap<String, String>());
            snapshotvdi.setNameLabel(conn, "Template " + srcTemplate.getName());
            tmpl.destroy(conn);
            poolsr.scan(conn);
            // NOTE(review): fixed sleep, presumably to let the SR scan settle before
            // reading the VDI uuid — confirm whether this is still required.
            try{
                Thread.sleep(5000);
            } catch (Exception e) {
            }

            TemplateObjectTO newVol = new TemplateObjectTO();
            newVol.setUuid(snapshotvdi.getUuid(conn));
            newVol.setPath(newVol.getUuid());
            newVol.setFormat(Storage.ImageFormat.VHD);
            return new CopyCmdAnswer(newVol);
        }
    }catch (Exception e) {
        String msg = "Catch Exception " + e.getClass().getName() + " for template + " + " due to " + e.toString();
        s_logger.warn(msg, e);
        return new CopyCmdAnswer(msg);
    } finally {
        // Always tear down the temporary file SR over secondary storage.
        if (srcSr != null) {
            hypervisorResource.removeSR(conn, srcSr);
        }
    }
    return new CopyCmdAnswer("not implemented yet");
}
|
||||
|
||||
/**
 * Copies a snapshot VDI from primary storage into a file SR mounted over the
 * secondary storage path, producing an (incremental when prevSnapshotUuid is
 * given) backup VDI there.
 *
 * @param prevBackupUuid   previous backup uuid; normalized to "" when null
 * @param prevSnapshotUuid previous snapshot on primary storage, used as the
 *                         base VDI for a delta copy; null forces a full copy
 * @param wait             task timeout in seconds; 0 means 2 hours
 * @return uuid of the backup VDI created on secondary storage
 * @throws CloudRuntimeException on any failure (original cause attached)
 */
protected String backupSnapshot(Connection conn, String primaryStorageSRUuid, String localMountPoint, String path, String secondaryStorageMountPath, String snapshotUuid, String prevBackupUuid, String prevSnapshotUuid, Boolean isISCSI, int wait) {
    String errMsg = null;
    // NOTE(review): mounted/copied are written but never read in this method.
    boolean mounted = false;
    boolean filesrcreated = false;
    boolean copied = false;
    if (prevBackupUuid == null) {
        prevBackupUuid = "";
    }
    SR ssSR = null;

    String remoteDir = secondaryStorageMountPath;

    try {
        // Mount secondary storage and expose the backup directory as a file SR.
        ssSR = createFileSr(conn, remoteDir, path);
        filesrcreated = true;

        VDI snapshotvdi = VDI.getByUuid(conn, snapshotUuid);
        Task task = null;
        if (wait == 0) {
            wait = 2 * 60 * 60;
        }
        VDI dvdi = null;
        try {
            // When a previous snapshot exists, pass it as the base VDI so
            // copyAsync2 transfers only the delta (the 6.2 hotfix feature).
            VDI previousSnapshotVdi = null;
            if (prevSnapshotUuid != null) {
                previousSnapshotVdi = VDI.getByUuid(conn,prevSnapshotUuid);
            }
            task = snapshotvdi.copyAsync2(conn, ssSR, previousSnapshotVdi, null);
            // poll every 1 seconds ,
            hypervisorResource.waitForTask(conn, task, 1000, wait * 1000);
            hypervisorResource.checkForSuccess(conn, task);
            dvdi = Types.toVDI(task, conn);
            copied = true;
        } finally {
            // Always destroy the task record; failure to do so is logged, not fatal.
            if (task != null) {
                try {
                    task.destroy(conn);
                } catch (Exception e1) {
                    s_logger.warn("unable to destroy task(" + task.toString() + ") on host("
                        + ") due to ", e1);
                }
            }
        }
        String backupUuid = dvdi.getUuid(conn);
        return backupUuid;
    } catch (Exception e) {
        String msg = "Exception in backupsnapshot stage due to " + e.toString();
        s_logger.debug(msg);
        throw new CloudRuntimeException(msg, e);
    } finally {
        // Unplug/remove the temporary file SR regardless of outcome.
        try {
            if (filesrcreated && ssSR != null) {
                hypervisorResource.removeSR(conn, ssSR);
            }
        } catch (Exception e) {
            s_logger.debug("Exception in backupsnapshot cleanup stage due to " + e.toString());
        }
    }
}
|
||||
|
||||
@Override
|
||||
protected String getVhdParent(Connection conn, String primaryStorageSRUuid, String snapshotUuid, Boolean isISCSI) {
|
||||
String parentUuid = hypervisorResource.callHostPlugin(conn, "cloud-plugin-storage", "getVhdParent", "primaryStorageSRUuid", primaryStorageSRUuid,
|
||||
"snapshotUuid", snapshotUuid, "isISCSI", isISCSI.toString());
|
||||
|
||||
if (parentUuid == null || parentUuid.isEmpty() || parentUuid.equalsIgnoreCase("None")) {
|
||||
s_logger.debug("Unable to get parent of VHD " + snapshotUuid + " in SR " + primaryStorageSRUuid);
|
||||
// errString is already logged.
|
||||
return null;
|
||||
}
|
||||
return parentUuid;
|
||||
}
|
||||
|
||||
/**
 * Backs up a snapshot from primary storage to an image store.
 *
 * Destination handling: NFS backups are copied directly via a file SR; Swift
 * and S3 destinations are first staged (full backups onto a cache/file SR)
 * and then uploaded, with the staged copy deleted afterwards.
 * "fullSnapshot" in the command options selects a full copy; otherwise an
 * incremental backup against the previous snapshot/backup is made.
 */
@Override
public Answer backupSnapshot(CopyCommand cmd) {
    Connection conn = hypervisorResource.getConnection();
    DataTO srcData = cmd.getSrcTO();
    DataTO cacheData = cmd.getCacheTO();
    DataTO destData = cmd.getDestTO();
    int wait = cmd.getWait();
    PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)srcData.getDataStore();
    String primaryStorageNameLabel = primaryStore.getUuid();
    String secondaryStorageUrl = null;
    NfsTO cacheStore = null;
    String destPath = null;
    // With a cache store (Swift/S3 case) stage through it; otherwise the
    // destination itself is the NFS store.
    if (cacheData != null) {
        cacheStore = (NfsTO)cacheData.getDataStore();
        secondaryStorageUrl = cacheStore.getUrl();
        destPath = cacheData.getPath();
    } else {
        cacheStore = (NfsTO)destData.getDataStore();
        secondaryStorageUrl = cacheStore.getUrl();
        destPath = destData.getPath();
    }

    SnapshotObjectTO snapshotTO = (SnapshotObjectTO)srcData;
    SnapshotObjectTO snapshotOnImage = (SnapshotObjectTO)destData;
    String snapshotUuid = snapshotTO.getPath();

    String prevBackupUuid = snapshotOnImage.getParentSnapshotPath();
    String prevSnapshotUuid = snapshotTO.getParentSnapshotPath();
    Map<String, String> options = cmd.getOptions();
    // By default assume failure
    String details = null;
    String snapshotBackupUuid = null;
    boolean fullbackup = Boolean.parseBoolean(options.get("fullSnapshot"));
    try {
        SR primaryStorageSR = hypervisorResource.getSRByNameLabelandHost(conn, primaryStorageNameLabel);
        if (primaryStorageSR == null) {
            throw new InternalErrorException("Could not backup snapshot because the primary Storage SR could not be created from the name label: " + primaryStorageNameLabel);
        }
        String psUuid = primaryStorageSR.getUuid(conn);
        Boolean isISCSI = IsISCSI(primaryStorageSR.getType(conn));

        VDI snapshotVdi = getVDIbyUuid(conn, snapshotUuid);
        // NOTE(review): snapshotPaUuid stays null and is still passed to the Swift/S3
        // incremental branches below — confirm those paths are actually exercised.
        String snapshotPaUuid = null;

        URI uri = new URI(secondaryStorageUrl);
        String secondaryStorageMountPath = uri.getHost() + ":" + uri.getPath();
        DataStoreTO destStore = destData.getDataStore();
        String folder = destPath;
        String finalPath = null;

        String localMountPoint = BaseMountPointOnHost + File.separator + UUID.nameUUIDFromBytes(secondaryStorageUrl.getBytes()).toString();
        if (fullbackup) {
            // Full backup: copy the whole snapshot VDI to a file SR over the
            // (cache or destination) NFS store, then optionally upload.
            SR snapshotSr = null;
            try {
                String localDir = "/var/cloud_mount/" + UUID.nameUUIDFromBytes(secondaryStorageMountPath.getBytes());
                mountNfs(conn, secondaryStorageMountPath, localDir);
                boolean result = makeDirectory(conn, localDir + "/" + folder);
                if (!result) {
                    details = " Filed to create folder " + folder + " in secondary storage";
                    s_logger.warn(details);
                    return new CopyCmdAnswer(details);
                }

                snapshotSr = createFileSr(conn, secondaryStorageMountPath, folder);

                Task task = snapshotVdi.copyAsync2(conn, snapshotSr, null, null);
                // poll every 1 seconds ,
                hypervisorResource.waitForTask(conn, task, 1000, wait * 1000);
                hypervisorResource.checkForSuccess(conn, task);
                VDI backedVdi = Types.toVDI(task, conn);
                snapshotBackupUuid = backedVdi.getUuid(conn);

                if( destStore instanceof SwiftTO) {
                    // Upload the staged copy to Swift, then delete the staged file.
                    try {
                        String container = "S-" + snapshotTO.getVolume().getVolumeId().toString();
                        String destSnapshotName = swiftBackupSnapshot(conn, (SwiftTO)destStore, snapshotSr.getUuid(conn), snapshotBackupUuid, container, false, wait);
                        String swiftPath = container + File.separator + destSnapshotName;
                        finalPath = swiftPath;
                    } finally {
                        try {
                            deleteSnapshotBackup(conn, localMountPoint, folder, secondaryStorageMountPath, snapshotBackupUuid);
                        } catch (Exception e) {
                            s_logger.debug("Failed to delete snapshot on cache storages" ,e);
                        }
                    }

                } else if (destStore instanceof S3TO) {
                    // Upload the staged copy to S3, then delete the staged file.
                    try {
                        finalPath = backupSnapshotToS3(conn, (S3TO) destStore, snapshotSr.getUuid(conn), folder, snapshotBackupUuid, isISCSI, wait);
                        if (finalPath == null) {
                            throw new CloudRuntimeException("S3 upload of snapshots " + snapshotBackupUuid + " failed");
                        }
                    } finally {
                        try {
                            deleteSnapshotBackup(conn, localMountPoint, folder, secondaryStorageMountPath, snapshotBackupUuid);
                        } catch (Exception e) {
                            s_logger.debug("Failed to delete snapshot on cache storages" ,e);
                        }
                    }
                    // finalPath = folder + File.separator + snapshotBackupUuid;
                } else {
                    // Plain NFS destination: the staged copy is the backup.
                    finalPath = folder + File.separator + snapshotBackupUuid;
                }

            } finally {
                if( snapshotSr != null) {
                    hypervisorResource.removeSR(conn, snapshotSr);
                }
            }
        } else {
            // Incremental backup.
            String primaryStorageSRUuid = primaryStorageSR.getUuid(conn);
            if( destStore instanceof SwiftTO ) {
                String container = "S-" + snapshotTO.getVolume().getVolumeId().toString();
                snapshotBackupUuid = swiftBackupSnapshot(conn, (SwiftTO)destStore, primaryStorageSRUuid, snapshotPaUuid, "S-" + snapshotTO.getVolume().getVolumeId().toString(), isISCSI, wait);
                finalPath = container + File.separator + snapshotBackupUuid;
            } else if (destStore instanceof S3TO ) {
                finalPath = backupSnapshotToS3(conn, (S3TO) destStore, primaryStorageSRUuid, folder, snapshotPaUuid, isISCSI, wait);
                if (finalPath == null) {
                    throw new CloudRuntimeException("S3 upload of snapshots " + snapshotPaUuid + " failed");
                }
            } else {
                // NFS: delta copy against the previous snapshot via the overload above.
                snapshotBackupUuid = backupSnapshot(conn, primaryStorageSRUuid, localMountPoint, folder,
                    secondaryStorageMountPath, snapshotUuid, prevBackupUuid, prevSnapshotUuid, isISCSI, wait);

                finalPath = folder + File.separator + snapshotBackupUuid;
            }
        }
        // Keep only the just-backed-up snapshot on primary storage.
        String volumeUuid = snapshotTO.getVolume().getPath();
        destroySnapshotOnPrimaryStorageExceptThis(conn, volumeUuid, snapshotUuid);

        SnapshotObjectTO newSnapshot = new SnapshotObjectTO();
        newSnapshot.setPath(finalPath);
        if (fullbackup) {
            newSnapshot.setParentSnapshotPath(null);
        } else {
            newSnapshot.setParentSnapshotPath(prevBackupUuid);
        }
        return new CopyCmdAnswer(newSnapshot);
    } catch (Types.XenAPIException e) {
        details = "BackupSnapshot Failed due to " + e.toString();
        s_logger.warn(details, e);
    } catch (Exception e) {
        details = "BackupSnapshot Failed due to " + e.getMessage();
        s_logger.warn(details, e);
    }

    return new CopyCmdAnswer(details);
}
|
||||
|
||||
/**
 * Creates a template on NFS secondary storage from a primary-storage volume:
 * copies the volume VDI into a file SR over the template's install directory,
 * then writes the template.properties metadata file.
 */
@Override
public Answer createTemplateFromVolume(CopyCommand cmd) {
    Connection conn = hypervisorResource.getConnection();
    VolumeObjectTO volume = (VolumeObjectTO)cmd.getSrcTO();
    TemplateObjectTO template = (TemplateObjectTO)cmd.getDestTO();
    NfsTO destStore = (NfsTO)cmd.getDestTO().getDataStore();
    int wait = cmd.getWait();

    String secondaryStoragePoolURL = destStore.getUrl();
    String volumeUUID = volume.getPath();

    String userSpecifiedName = template.getName();


    String details = null;
    SR tmpltSR = null;
    boolean result = false;
    String secondaryStorageMountPath = null;
    String installPath = null;
    try {
        URI uri = new URI(secondaryStoragePoolURL);
        secondaryStorageMountPath = uri.getHost() + ":" + uri.getPath();
        installPath = template.getPath();
        if( !hypervisorResource.createSecondaryStorageFolder(conn, secondaryStorageMountPath, installPath)) {
            details = " Filed to create folder " + installPath + " in secondary storage";
            s_logger.warn(details);
            return new CopyCmdAnswer(details);
        }

        VDI vol = getVDIbyUuid(conn, volumeUUID);
        // create template SR
        tmpltSR = createFileSr(conn, uri.getHost() + ":" + uri.getPath(), installPath);

        // copy volume to template SR
        Task task = vol.copyAsync2(conn, tmpltSR, null, null);
        // poll every 1 seconds ,
        hypervisorResource.waitForTask(conn, task, 1000, wait * 1000);
        hypervisorResource.checkForSuccess(conn, task);
        VDI tmpltVDI = Types.toVDI(task, conn);
        // scan makes XenServer pick up VDI physicalSize
        tmpltSR.scan(conn);
        if (userSpecifiedName != null) {
            tmpltVDI.setNameLabel(conn, userSpecifiedName);
        }

        String tmpltUUID = tmpltVDI.getUuid(conn);
        String tmpltFilename = tmpltUUID + ".vhd";
        long virtualSize = tmpltVDI.getVirtualSize(conn);
        long physicalSize = tmpltVDI.getPhysicalUtilisation(conn);
        // create the template.properties file
        String templatePath = secondaryStorageMountPath + "/" + installPath;
        result = hypervisorResource.postCreatePrivateTemplate(conn, templatePath, tmpltFilename, tmpltUUID, userSpecifiedName, null, physicalSize, virtualSize, template.getId());
        if (!result) {
            throw new CloudRuntimeException("Could not create the template.properties file on secondary storage dir");
        }
        // From here on installPath is the full path to the VHD file, not the directory.
        installPath = installPath + "/" + tmpltFilename;
        hypervisorResource.removeSR(conn, tmpltSR);
        tmpltSR = null;
        TemplateObjectTO newTemplate = new TemplateObjectTO();
        newTemplate.setPath(installPath);
        newTemplate.setFormat(Storage.ImageFormat.VHD);
        newTemplate.setSize(virtualSize);
        newTemplate.setPhysicalSize(physicalSize);
        newTemplate.setName(tmpltUUID);
        CopyCmdAnswer answer = new CopyCmdAnswer(newTemplate);
        return answer;
    } catch (Exception e) {
        // Cleanup: remove the temporary SR and the (possibly partial) template
        // folder. NOTE(review): if the failure happened after installPath was
        // re-pointed at the VHD file, only that file's path is deleted — confirm
        // deleteSecondaryStorageFolder tolerates this.
        if (tmpltSR != null) {
            hypervisorResource.removeSR(conn, tmpltSR);
        }
        if ( secondaryStorageMountPath != null) {
            hypervisorResource.deleteSecondaryStorageFolder(conn, secondaryStorageMountPath, installPath);
        }
        details = "Creating template from volume " + volumeUUID + " failed due to " + e.toString();
        s_logger.error(details, e);
    }
    return new CopyCmdAnswer(details);
}
|
||||
|
||||
protected String getSnapshotUuid(String snapshotPath) {
|
||||
int index = snapshotPath.lastIndexOf(File.separator);
|
||||
String snapshotUuid = snapshotPath.substring(index + 1);
|
||||
index = snapshotUuid.lastIndexOf(".");
|
||||
if (index != -1) {
|
||||
snapshotUuid = snapshotUuid.substring(0, index);
|
||||
}
|
||||
return snapshotUuid;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Answer createVolumeFromSnapshot(CopyCommand cmd) {
|
||||
Connection conn = hypervisorResource.getConnection();
|
||||
DataTO srcData = cmd.getSrcTO();
|
||||
SnapshotObjectTO snapshot = (SnapshotObjectTO)srcData;
|
||||
DataTO destData = cmd.getDestTO();
|
||||
PrimaryDataStoreTO pool = (PrimaryDataStoreTO)destData.getDataStore();
|
||||
VolumeObjectTO volume = (VolumeObjectTO)destData;
|
||||
DataStoreTO imageStore = srcData.getDataStore();
|
||||
|
||||
if (!(imageStore instanceof NfsTO)) {
|
||||
return new CopyCmdAnswer("unsupported protocol");
|
||||
}
|
||||
|
||||
NfsTO nfsImageStore = (NfsTO)imageStore;
|
||||
String primaryStorageNameLabel = pool.getUuid();
|
||||
String secondaryStorageUrl = nfsImageStore.getUrl();
|
||||
int wait = cmd.getWait();
|
||||
boolean result = false;
|
||||
// Generic error message.
|
||||
String details = null;
|
||||
String volumeUUID = null;
|
||||
|
||||
if (secondaryStorageUrl == null) {
|
||||
details += " because the URL passed: " + secondaryStorageUrl + " is invalid.";
|
||||
return new CopyCmdAnswer(details);
|
||||
}
|
||||
SR srcSr = null;
|
||||
VDI destVdi = null;
|
||||
try {
|
||||
SR primaryStorageSR = hypervisorResource.getSRByNameLabelandHost(conn, primaryStorageNameLabel);
|
||||
if (primaryStorageSR == null) {
|
||||
throw new InternalErrorException("Could not create volume from snapshot because the primary Storage SR could not be created from the name label: "
|
||||
+ primaryStorageNameLabel);
|
||||
}
|
||||
String nameLabel = "cloud-" + UUID.randomUUID().toString();
|
||||
destVdi = createVdi(conn, nameLabel, primaryStorageSR, volume.getSize());
|
||||
volumeUUID = destVdi.getUuid(conn);
|
||||
String snapshotInstallPath = snapshot.getPath();
|
||||
int index = snapshotInstallPath.lastIndexOf(File.separator);
|
||||
String snapshotDirectory = snapshotInstallPath.substring(0, index);
|
||||
String snapshotUuid = getSnapshotUuid(snapshotInstallPath);
|
||||
|
||||
URI uri = new URI(secondaryStorageUrl);
|
||||
srcSr = createFileSr(conn, uri.getHost() + ":" + uri.getPath(), snapshotDirectory);
|
||||
|
||||
String[] parents = snapshot.getParents();
|
||||
List<VDI> snapshotChains = new ArrayList<VDI>();
|
||||
if (parents != null) {
|
||||
for(int i = 0; i < parents.length; i++) {
|
||||
String snChainPath = parents[i];
|
||||
String uuid = getSnapshotUuid(snChainPath);
|
||||
VDI chain = VDI.getByUuid(conn, uuid);
|
||||
snapshotChains.add(chain);
|
||||
}
|
||||
}
|
||||
|
||||
VDI snapshotVdi = VDI.getByUuid(conn, snapshotUuid);
|
||||
snapshotChains.add(snapshotVdi);
|
||||
|
||||
for(VDI snapChain : snapshotChains) {
|
||||
Task task = snapChain.copyAsync2(conn, null, null, destVdi);
|
||||
// poll every 1 seconds ,
|
||||
hypervisorResource.waitForTask(conn, task, 1000, wait * 1000);
|
||||
hypervisorResource.checkForSuccess(conn, task);
|
||||
}
|
||||
|
||||
result = true;
|
||||
destVdi = VDI.getByUuid(conn, volumeUUID);
|
||||
VDI.Record vdir = destVdi.getRecord(conn);
|
||||
VolumeObjectTO newVol = new VolumeObjectTO();
|
||||
newVol.setPath(volumeUUID);
|
||||
newVol.setSize(vdir.virtualSize);
|
||||
return new CopyCmdAnswer(newVol);
|
||||
} catch (Types.XenAPIException e) {
|
||||
details += " due to " + e.toString();
|
||||
s_logger.warn(details, e);
|
||||
} catch (Exception e) {
|
||||
details += " due to " + e.getMessage();
|
||||
s_logger.warn(details, e);
|
||||
} finally {
|
||||
if (srcSr != null) {
|
||||
hypervisorResource.removeSR(conn, srcSr);
|
||||
}
|
||||
if (!result && destVdi != null) {
|
||||
try {
|
||||
destVdi.destroy(conn);
|
||||
} catch (Exception e) {
|
||||
s_logger.debug("destroy dest vdi failed", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!result) {
|
||||
// Is this logged at a higher level?
|
||||
s_logger.error(details);
|
||||
}
|
||||
|
||||
// In all cases return something.
|
||||
return new CopyCmdAnswer(details);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Answer copyVolumeFromPrimaryToSecondary(CopyCommand cmd) {
|
||||
Connection conn = hypervisorResource.getConnection();
|
||||
VolumeObjectTO srcVolume = (VolumeObjectTO)cmd.getSrcTO();
|
||||
VolumeObjectTO destVolume = (VolumeObjectTO)cmd.getDestTO();
|
||||
int wait = cmd.getWait();
|
||||
DataStoreTO destStore = destVolume.getDataStore();
|
||||
|
||||
if (destStore instanceof NfsTO) {
|
||||
SR secondaryStorage = null;
|
||||
try {
|
||||
NfsTO nfsStore = (NfsTO)destStore;
|
||||
URI uri = new URI(nfsStore.getUrl());
|
||||
// Create the volume folder
|
||||
if (!hypervisorResource.createSecondaryStorageFolder(conn, uri.getHost() + ":" + uri.getPath(), destVolume.getPath())) {
|
||||
throw new InternalErrorException("Failed to create the volume folder.");
|
||||
}
|
||||
|
||||
// Create a SR for the volume UUID folder
|
||||
secondaryStorage = createFileSr(conn, uri.getHost() + ":" + uri.getPath(), destVolume.getPath());
|
||||
// Look up the volume on the source primary storage pool
|
||||
VDI srcVdi = getVDIbyUuid(conn, srcVolume.getPath());
|
||||
// Copy the volume to secondary storage
|
||||
Task task = srcVdi.copyAsync2(conn, secondaryStorage, null, null);
|
||||
// poll every 1 seconds ,
|
||||
hypervisorResource.waitForTask(conn, task, 1000, wait * 1000);
|
||||
hypervisorResource.checkForSuccess(conn, task);
|
||||
VDI destVdi = Types.toVDI(task, conn);
|
||||
String destVolumeUUID = destVdi.getUuid(conn);
|
||||
|
||||
VolumeObjectTO newVol = new VolumeObjectTO();
|
||||
newVol.setPath(destVolume.getPath() + File.separator + destVolumeUUID + ".vhd");
|
||||
newVol.setSize(srcVolume.getSize());
|
||||
return new CopyCmdAnswer(newVol);
|
||||
} catch (Exception e) {
|
||||
s_logger.debug("Failed to copy volume to secondary: " + e.toString());
|
||||
return new CopyCmdAnswer("Failed to copy volume to secondary: " + e.toString());
|
||||
} finally {
|
||||
hypervisorResource.removeSR(conn, secondaryStorage);
|
||||
}
|
||||
}
|
||||
return new CopyCmdAnswer("unsupported protocol");
|
||||
}
|
||||
|
||||
/**
 * Copies a volume from the NFS image cache (secondary storage) into a
 * primary storage pool.  The volume's directory on secondary storage is
 * attached as a temporary file SR, the VHD is copied into the primary SR
 * with {@code VDI.copyAsync2}, and the temporary SR is detached again.
 *
 * @param cmd copy command; source must be a VolumeObjectTO on an NfsTO
 *            store, destination a VolumeObjectTO on a primary data store
 * @return a CopyCmdAnswer whose path is the new VDI uuid on success, or
 *         an error-message answer on failure / unsupported store type
 */
@Override
public Answer copyVolumeFromImageCacheToPrimary(CopyCommand cmd) {
    Connection conn = hypervisorResource.getConnection();
    DataTO srcData = cmd.getSrcTO();
    DataTO destData = cmd.getDestTO();
    int wait = cmd.getWait();
    VolumeObjectTO srcVolume = (VolumeObjectTO)srcData;
    VolumeObjectTO destVolume = (VolumeObjectTO)destData;
    PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)destVolume.getDataStore();
    DataStoreTO srcStore = srcVolume.getDataStore();

    if (srcStore instanceof NfsTO) {
        NfsTO nfsStore = (NfsTO)srcStore;
        // Split "<dir>/<uuid>[.ext]" into the directory and the bare uuid.
        String volumePath = srcVolume.getPath();
        int index = volumePath.lastIndexOf("/");
        String volumeDirectory = volumePath.substring(0, index);
        String volumeUuid = volumePath.substring(index + 1);
        index = volumeUuid.indexOf(".");
        if (index != -1) {
            volumeUuid = volumeUuid.substring(0, index);
        }
        URI uri = null;
        try {
            uri = new URI(nfsStore.getUrl());
        } catch (Exception e) {
            return new CopyCmdAnswer(e.toString());
        }
        // Attach the volume's directory on secondary storage as a file SR.
        SR srcSr = createFileSr(conn, uri.getHost() + ":" + uri.getPath(), volumeDirectory);
        try {
            SR primaryStoragePool = hypervisorResource.getStorageRepository(conn, primaryStore.getUuid());
            VDI srcVdi = VDI.getByUuid(conn, volumeUuid);
            Task task = srcVdi.copyAsync2(conn, primaryStoragePool, null, null);
            // Poll every second until done, or give up after the command's wait.
            hypervisorResource.waitForTask(conn, task, 1000, wait * 1000);
            hypervisorResource.checkForSuccess(conn, task);
            VDI destVdi = Types.toVDI(task, conn);
            VolumeObjectTO newVol = new VolumeObjectTO();
            newVol.setPath(destVdi.getUuid(conn));
            newVol.setSize(srcVolume.getSize());

            return new CopyCmdAnswer(newVol);
        } catch (Exception e) {
            String msg = "Catch Exception " + e.getClass().getName() + " due to " + e.toString();
            s_logger.warn(msg, e);
            return new CopyCmdAnswer(e.toString());
        } finally {
            // Always detach the temporary secondary-storage SR.
            if (srcSr != null) {
                hypervisorResource.removeSR(conn, srcSr);
            }
        }
    }

    s_logger.debug("unsupported protocol");
    return new CopyCmdAnswer("unsupported protocol");
}
|
||||
|
||||
/**
 * Creates a template on NFS secondary storage from a snapshot that also
 * lives on NFS secondary storage.  The snapshot's directory is attached
 * as a source file SR; the destination NFS share is mounted locally and a
 * file SR is created over the template folder; the snapshot chain
 * (parents first, the snapshot itself last) is then copied into a new VDI
 * with {@code VDI.copyAsync2}.
 *
 * @param cmd copy command whose source is a SnapshotObjectTO and whose
 *            destination is a TemplateObjectTO, both on NfsTO stores
 * @return a CopyCmdAnswer carrying the new template on success, or an
 *         error-message answer on failure
 */
@Override
public Answer createTemplateFromSnapshot(CopyCommand cmd) {
    Connection conn = hypervisorResource.getConnection();
    DataTO srcData = cmd.getSrcTO();
    DataTO destData = cmd.getDestTO();
    int wait = cmd.getWait();
    SnapshotObjectTO srcObj = (SnapshotObjectTO)srcData;
    TemplateObjectTO destObj = (TemplateObjectTO)destData;
    NfsTO srcStore = (NfsTO)srcObj.getDataStore();
    NfsTO destStore = (NfsTO)destObj.getDataStore();

    URI srcUri = null;
    URI destUri = null;
    try {
        srcUri = new URI(srcStore.getUrl());
        destUri = new URI(destStore.getUrl());
    } catch (Exception e) {
        s_logger.debug("incorrect url", e);
        return new CopyCmdAnswer("incorrect url" + e.toString());
    }

    String srcPath = srcObj.getPath();
    int index = srcPath.lastIndexOf("/");
    String srcDir = srcPath.substring(0, index);
    String destDir = destObj.getPath();
    SR srcSr = null;
    SR destSr = null;
    VDI destVdi = null;
    boolean result = false;
    try {
        // Source: attach the snapshot directory as a read source file SR.
        srcSr = createFileSr(conn, srcUri.getHost() + ":" + srcUri.getPath(), srcDir);

        // Destination: mount the NFS share at a deterministic local path
        // (derived from the share name), create the template folder, and
        // build a file SR over it.
        String destNfsPath = destUri.getHost() + ":" + destUri.getPath();
        String localDir = "/var/cloud_mount/" + UUID.nameUUIDFromBytes(destNfsPath.getBytes());
        mountNfs(conn, destUri.getHost() + ":" + destUri.getPath(), localDir);
        makeDirectory(conn, localDir + "/" + destDir);
        destSr = createFileSR(conn, localDir + "/" + destDir);

        String nameLabel = "cloud-" + UUID.randomUUID().toString();

        // Copy order: all parent deltas first, the snapshot itself last.
        String[] parents = srcObj.getParents();
        List<VDI> snapshotChains = new ArrayList<VDI>();
        if (parents != null) {
            for(int i = 0; i < parents.length; i++) {
                String snChainPath = parents[i];
                String uuid = getSnapshotUuid(snChainPath);
                VDI chain = VDI.getByUuid(conn, uuid);
                snapshotChains.add(chain);
            }
        }
        String snapshotUuid = getSnapshotUuid(srcPath);
        VDI snapshotVdi = VDI.getByUuid(conn, snapshotUuid);
        snapshotChains.add(snapshotVdi);

        // The chain root (first element) carries the full virtual size.
        long templateVirtualSize = snapshotChains.get(0).getVirtualSize(conn);
        destVdi = createVdi(conn, nameLabel, destSr, templateVirtualSize);
        String destVdiUuid = destVdi.getUuid(conn);

        for(VDI snapChain : snapshotChains) {
            // copyAsync2 copies only the delta of each chain member.
            Task task = snapChain.copyAsync2(conn, null, null, destVdi);
            // poll every 1 seconds ,
            hypervisorResource.waitForTask(conn, task, 1000, wait * 1000);
            hypervisorResource.checkForSuccess(conn, task);
        }

        destVdi = VDI.getByUuid(conn, destVdiUuid);
        String templatePath = destDir + "/" + destVdiUuid + ".vhd";
        templatePath = templatePath.replaceAll("//","/");
        TemplateObjectTO newTemplate = new TemplateObjectTO();
        newTemplate.setPath(templatePath);
        newTemplate.setFormat(Storage.ImageFormat.VHD);
        newTemplate.setSize(destVdi.getVirtualSize(conn));
        newTemplate.setPhysicalSize(destVdi.getPhysicalUtilisation(conn));
        newTemplate.setName(destVdiUuid);

        result = true;
        return new CopyCmdAnswer(newTemplate);
    } catch (Exception e) {
        s_logger.error("Failed create template from snapshot", e);
        return new CopyCmdAnswer("Failed create template from snapshot " + e.toString());
    } finally {
        // On failure, destroy the partially written template VDI.
        if (!result) {
            if (destVdi != null) {
                try {
                    destVdi.destroy(conn);
                } catch (Exception e) {
                    s_logger.debug("Clean up left over on dest storage failed: ", e);
                }
            }
        }

        if (destSr != null) {
            hypervisorResource.removeSR(conn, destSr);
        }

        if (srcSr != null) {
            hypervisorResource.removeSR(conn, srcSr);
        }
    }
}
|
||||
|
||||
}
|
||||
@ -0,0 +1,340 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.hypervisor.xenserver;
|
||||
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
import org.apache.xmlrpc.XmlRpcException;
|
||||
|
||||
import com.xensource.xenapi.Connection;
|
||||
import com.xensource.xenapi.Event;
|
||||
import com.xensource.xenapi.Host;
|
||||
import com.xensource.xenapi.Pool;
|
||||
import com.xensource.xenapi.Task;
|
||||
import com.xensource.xenapi.Types;
|
||||
import com.xensource.xenapi.Types.XenAPIException;
|
||||
import com.xensource.xenapi.VM;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.ClusterSyncAnswer;
|
||||
import com.cloud.agent.api.ClusterSyncCommand;
|
||||
import com.cloud.agent.api.StartupCommand;
|
||||
import com.cloud.hypervisor.xen.resource.XenServer610Resource;
|
||||
import com.cloud.utils.Ternary;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachineName;
|
||||
|
||||
/**
|
||||
*
|
||||
 * XenServerResourceNewBase is a base class that encapsulates how
|
||||
* CloudStack should interact with XenServer after a special XenServer
|
||||
* 6.2 hotfix. From here on, every Resource for future versions of
|
||||
* XenServer should use this as the base class. This base class lessens
|
||||
* the amount of load CloudStack places on Xapi because it doesn't use
|
||||
* polling as a means to collect data and figure out task completion.
|
||||
*
|
||||
* This base class differs from CitrixResourceBase in the following ways:
|
||||
* - VM states are detected using Event.from instead of polling. This
|
||||
* increases the number of threads CloudStack uses but the threads
|
||||
* are mostly idle just waiting for events from XenServer.
|
||||
* - stats are collected through the http interface rather than Xapi plugin.
|
||||
* This change may be promoted to CitrixResourceBase as it's also possible
|
||||
* in previous versions of XenServer.
|
||||
 * - Asynchronous task completion is done through Event.from rather than
|
||||
* polling.
|
||||
*
|
||||
*/
|
||||
public class XenServerResourceNewBase extends XenServer610Resource {
|
||||
private static final Logger s_logger = Logger.getLogger(XenServerResourceNewBase.class);
|
||||
protected VmEventListener _listener = null;
|
||||
|
||||
/**
 * Performs the regular resource startup and then, if this host is the
 * pool master, starts a VmEventListener thread that tracks VM state
 * through Xapi's event mechanism instead of polling.  Slave hosts get a
 * listener instance too, but it is never started (isListening() reports
 * false for them).
 *
 * @return the startup commands produced by the superclass
 * @throws CloudRuntimeException if the pool master cannot be determined
 */
@Override
public StartupCommand[] initialize() throws IllegalArgumentException {
    StartupCommand[] cmds = super.initialize();

    Connection conn = getConnection();
    Pool pool;
    try {
        pool = Pool.getByUuid(conn, _host.pool);
        Pool.Record poolr = pool.getRecord(conn);

        // Compare this host's uuid with the pool master's uuid.
        Host.Record masterRecord = poolr.master.getRecord(conn);
        if (_host.uuid.equals(masterRecord.uuid)) {
            _listener = new VmEventListener(true);
            _listener.start();
        } else {
            _listener = new VmEventListener(false);
        }
    } catch (XenAPIException e) {
        throw new CloudRuntimeException("Unable to determine who is the master", e);
    } catch (XmlRpcException e) {
        throw new CloudRuntimeException("Unable to determine who is the master", e);
    }
    return cmds;
}
|
||||
|
||||
protected void waitForTask2(Connection c, Task task, long pollInterval, long timeout) throws XenAPIException, XmlRpcException, TimeoutException {
|
||||
long beginTime = System.currentTimeMillis();
|
||||
if (s_logger.isTraceEnabled()) {
|
||||
s_logger.trace("Task " + task.getNameLabel(c) + " (" + task.getType(c) + ") sent to " + c.getSessionReference() + " is pending completion with a " + timeout +
|
||||
"ms timeout");
|
||||
}
|
||||
Set<String> classes = new HashSet<String>();
|
||||
classes.add("Task/" + task.toString());
|
||||
String token = "";
|
||||
Double t = new Double(timeout / 1000);
|
||||
while (true) {
|
||||
Map<?, ?> map = Event.properFrom(c, classes, token, t);
|
||||
token = (String)map.get("token");
|
||||
@SuppressWarnings("unchecked")
|
||||
Set<Event.Record> events = (Set<Event.Record>)map.get("events");
|
||||
if (events.size() == 0) {
|
||||
String msg = "Async " + timeout / 1000 + " seconds timeout for task " + task.toString();
|
||||
s_logger.warn(msg);
|
||||
task.cancel(c);
|
||||
throw new TimeoutException(msg);
|
||||
}
|
||||
for (Event.Record rec : events) {
|
||||
if (!(rec.snapshot instanceof Task.Record)) {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Skipping over " + rec);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
Task.Record taskRecord = (Task.Record)rec.snapshot;
|
||||
|
||||
if (taskRecord.status != Types.TaskStatusType.PENDING) {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Task is done " + taskRecord.status);
|
||||
}
|
||||
return;
|
||||
} else {
|
||||
s_logger.debug("Task is not done " + taskRecord);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Answer execute(final ClusterSyncCommand cmd) {
|
||||
if (!_listener.isListening()) {
|
||||
return new Answer(cmd);
|
||||
}
|
||||
|
||||
HashMap<String, Ternary<String, VirtualMachine.State, String>> newStates = _listener.getChanges();
|
||||
return new ClusterSyncAnswer(cmd.getClusterId(), newStates);
|
||||
}
|
||||
|
||||
/**
 * Background thread that tracks CloudStack-managed VM state changes by
 * blocking on Xapi's {@code Event.properFrom} instead of polling.  Only
 * the pool master actually runs the thread (see start()); accumulated
 * changes are drained by getChanges() when a ClusterSyncCommand arrives.
 */
protected class VmEventListener extends Thread {
    boolean _stop = false;
    // vm name -> (host uuid, state, unused) accumulated since the last sync.
    HashMap<String, Ternary<String, VirtualMachine.State, String>> _changes = new HashMap<String, Ternary<String, VirtualMachine.State, String>>();
    boolean _isMaster;
    // Xapi event classes subscribed to (only "VM").
    Set<String> _classes;
    // Event continuation token returned by each Event.properFrom call.
    String _token = "";

    public VmEventListener(boolean isMaster) {
        _isMaster = isMaster;
        _classes = new HashSet<String>();
        _classes.add("VM");
    }

    /**
     * Event loop: blocks up to 30s per Event.properFrom call, records each
     * VM event via recordChanges, and keeps retrying on any error until
     * signalStop() is called.
     */
    @Override
    public void run() {
        setName("XS-Listener-" + _host.ip);
        while (!_stop) {
            try {
                Connection conn = getConnection();
                Map<?, ?> results;
                try {
                    results = Event.properFrom(conn, _classes, _token, new Double(30));
                } catch (Exception e) {
                    s_logger.error("Retrying the waiting on VM events due to: ", e);
                    continue;
                }

                _token = (String)results.get("token");
                @SuppressWarnings("unchecked")
                Set<Event.Record> events = (Set<Event.Record>)results.get("events");
                for (Event.Record event : events) {
                    try {
                        if (!(event.snapshot instanceof VM.Record)) {
                            if (s_logger.isDebugEnabled()) {
                                s_logger.debug("The snapshot is not a VM: " + event);
                            }
                            continue;
                        }
                        VM.Record vm = (VM.Record)event.snapshot;

                        // residentOn may be a null reference for halted VMs.
                        String hostUuid = null;
                        if (vm.residentOn != null && !vm.residentOn.toWireString().contains("OpaqueRef:NULL")) {
                            hostUuid = vm.residentOn.getUuid(conn);
                        }
                        recordChanges(conn, vm, hostUuid);
                    } catch (Exception e) {
                        s_logger.error("Skipping over " + event, e);
                    }
                }
            } catch (Throwable th) {
                s_logger.error("Exception caught in eventlistener thread: ", th);
            }
        }
    }

    /**
     * Folds a single VM event into the cluster-wide state map (s_vms) and,
     * when the transition is significant, into _changes for the next sync.
     *
     * @param hostUuid uuid of the host the VM resides on, or null
     */
    protected void recordChanges(Connection conn, VM.Record rec, String hostUuid) {
        String vm = rec.nameLabel;
        if (!VirtualMachineName.isValidCloudStackVmName(vm, _instance)) {
            s_logger.debug("Skipping over VMs that does not conform to CloudStack naming convention: " + vm);
            return;
        }

        VirtualMachine.State currentState = convertToState(rec.powerState);
        if (vm.startsWith("migrating")) {
            s_logger.warn("Skipping " + vm + " because it is migrating.");
            return;
        }

        if (currentState == VirtualMachine.State.Stopped) {
            if (s_logger.isTraceEnabled()) {
                s_logger.trace("Double check the power state to make sure we got the correct state for " + vm);
            }
            currentState = getRealPowerState(conn, vm);
        }

        boolean updateMap = false;
        boolean reportChange = false;

        // NOTE: For now we only record change when the VM is stopped. We don't find out any VMs starting for now.
        synchronized (_cluster.intern()) {
            Ternary<String, VirtualMachine.State, String> oldState = s_vms.get(_cluster, vm);
            if (oldState == null) {
                if (s_logger.isTraceEnabled()) {
                    s_logger.trace("Unable to find " + vm + " from previous map. Assuming it was in Stopped state.");
                }
                oldState = new Ternary<String, VirtualMachine.State, String>(null, VirtualMachine.State.Stopped, null);
            }

            if (s_logger.isTraceEnabled()) {
                s_logger.trace(vm + ": current state=" + currentState + ", previous state=" + oldState);
            }

            // Transient states (Starting/Migrating/Stopping) are owned by the
            // management server; only their completed outcomes update the map,
            // and none of them are reported as out-of-band changes.
            if (oldState.second() == VirtualMachine.State.Starting) {
                if (currentState == VirtualMachine.State.Running) {
                    updateMap = true;
                    reportChange = false;
                } else if (currentState == VirtualMachine.State.Stopped) {
                    updateMap = false;
                    reportChange = false;
                }
            } else if (oldState.second() == VirtualMachine.State.Migrating) {
                updateMap = true;
                reportChange = false;
            } else if (oldState.second() == VirtualMachine.State.Stopping) {
                if (currentState == VirtualMachine.State.Stopped) {
                    updateMap = true;
                    reportChange = false;
                } else if (currentState == VirtualMachine.State.Running) {
                    updateMap = false;
                    reportChange = false;
                }
            } else if (oldState.second() != currentState) {
                updateMap = true;
                reportChange = true;
            } else if (hostUuid != null && !hostUuid.equals(oldState.first())) {
                // Same state but a different host: the VM moved.
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Detecting " + vm + " moved from " + oldState.first() + " to " + hostUuid);
                }
                reportChange = true;
                updateMap = true;
            }

            if (updateMap) {
                s_vms.put(_cluster, hostUuid, vm, currentState);
                if (s_logger.isTraceEnabled()) {
                    s_logger.trace("Updated " + vm + " to [" + hostUuid + ", " + currentState);
                }
            }
            if (reportChange) {
                Ternary<String, VirtualMachine.State, String> change = _changes.get(vm);
                if (hostUuid == null) {
                    // This is really strange code. It looks like the sync
                    // code wants this to be set, which is extremely weird
                    // for VMs that are dead. Why would I want to set the
                    // hostUuid if the VM is stopped.
                    hostUuid = oldState.first();
                    if (hostUuid == null) {
                        hostUuid = _host.uuid;
                    }
                }
                if (change == null) {
                    change = new Ternary<String, VirtualMachine.State, String>(hostUuid, currentState, null);
                } else {
                    change.first(hostUuid);
                    change.second(currentState);
                }
                _changes.put(vm, change);
            }
        }
    }

    /**
     * Only the master actually starts the thread.  The first event batch
     * is fetched and discarded so that only changes after startup are
     * reported.
     */
    @Override
    public void start() {
        if (_isMaster) {
            // Throw away the initial set of events because they're history
            Connection conn = getConnection();
            Map<?, ?> results;
            try {
                results = Event.properFrom(conn, _classes, _token, new Double(30));
            } catch (Exception e) {
                s_logger.error("Retrying the waiting on VM events due to: ", e);
                throw new CloudRuntimeException("Unable to start a listener thread to listen to VM events", e);
            }
            _token = (String)results.get("token");
            s_logger.debug("Starting the event listener thread for " + _host.uuid);
            super.start();
        }
    }

    // True only on the pool master, where the thread is actually running.
    public boolean isListening() {
        return _isMaster;
    }

    /**
     * Atomically drains and returns the accumulated changes, or null when
     * there is nothing to report.
     */
    public HashMap<String, Ternary<String, VirtualMachine.State, String>> getChanges() {
        synchronized (_cluster.intern()) {
            if (_changes.size() == 0) {
                return null;
            }
            HashMap<String, Ternary<String, VirtualMachine.State, String>> diff = _changes;
            _changes = new HashMap<String, Ternary<String, VirtualMachine.State, String>>();
            return diff;
        }
    }

    /** Requests loop exit and interrupts a blocked Event.properFrom call. */
    public void signalStop() {
        _stop = true;
        interrupt();
    }
}
|
||||
|
||||
}
|
||||
301
scripts/vm/hypervisor/xenserver/xenserver62/cloud-plugin-storage
Normal file
301
scripts/vm/hypervisor/xenserver/xenserver62/cloud-plugin-storage
Normal file
@ -0,0 +1,301 @@
|
||||
#!/usr/bin/python
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# Version @VERSION@
|
||||
#
|
||||
# A plugin for executing script needed by vmops cloud
|
||||
|
||||
import os, sys, time
|
||||
import XenAPIPlugin
|
||||
if os.path.exists("/opt/xensource/sm"):
|
||||
sys.path.extend(["/opt/xensource/sm/", "/usr/local/sbin/", "/sbin/"])
|
||||
if os.path.exists("/usr/lib/xcp/sm"):
|
||||
sys.path.extend(["/usr/lib/xcp/sm/", "/usr/local/sbin/", "/sbin/"])
|
||||
|
||||
import SR, VDI, SRCommand, util, lvutil
|
||||
from util import CommandException
|
||||
import vhdutil
|
||||
import shutil
|
||||
import lvhdutil
|
||||
import errno
|
||||
import subprocess
|
||||
import xs_errors
|
||||
import cleanup
|
||||
import stat
|
||||
import random
|
||||
import cloudstack_pluginlib as lib
|
||||
import logging
|
||||
|
||||
lib.setup_logging("/var/log/cloud/vmops.log")
|
||||
|
||||
VHDUTIL = "vhd-util"
|
||||
VHD_PREFIX = 'VHD-'
|
||||
CLOUD_DIR = '/var/run/cloud_mount'
|
||||
|
||||
def echo(fn):
    """Decorator that logs entry/exit of plugin calls to the vmops log.

    FIX: uses functools.wraps so the wrapped function keeps its original
    __name__/__doc__ (the previous version exposed the wrapper's identity
    to introspection and the XenAPI plugin dispatcher).
    """
    import functools

    @functools.wraps(fn)
    def wrapped(*v, **k):
        name = fn.__name__
        logging.debug("#### VMOPS enter %s ####" % name )
        res = fn(*v, **k)
        logging.debug("#### VMOPS exit %s ####" % name )
        return res
    return wrapped
|
||||
|
||||
def getPrimarySRPath(primaryStorageSRUuid, isISCSI):
    # Resolve the on-host path of the primary SR: the LVM volume-group
    # path for iSCSI-backed SRs, the regular SR mount point otherwise.
    if isISCSI:
        return os.path.join(lvhdutil.VG_LOCATION, lvhdutil.VG_PREFIX + primaryStorageSRUuid)
    return os.path.join(SR.MOUNT_BASE, primaryStorageSRUuid)
|
||||
|
||||
def getBackupVHD(UUID):
    # Backup VHDs are named "<uuid>.<default tap extension>".
    return '.'.join([UUID, SR.DEFAULT_TAP])
|
||||
|
||||
def getVHD(UUID, isISCSI):
    # iSCSI (LVM) SRs name volumes "VHD-<uuid>"; file-based SRs use
    # "<uuid>.<default tap extension>".
    if isISCSI:
        return VHD_PREFIX + UUID
    return '.'.join([UUID, SR.DEFAULT_TAP])
|
||||
|
||||
def getIsTrueString(stringValue):
    # Interpret the literal string 'true' as True; anything else
    # (including None, '' and 'True') is False.
    return stringValue == 'true'
|
||||
|
||||
def makeUnavailable(uuid, primarySRPath, isISCSI):
    # Deactivate the LV backing an iSCSI VHD ('lvchange -an'); file-based
    # VHDs have no LV, so there is nothing to do for them.
    if not isISCSI:
        return
    vhdPath = os.path.join(primarySRPath, getVHD(uuid, isISCSI))
    manageAvailability(vhdPath, '-an')
    return
||||
|
||||
def manageAvailability(path, value):
    # Activate ('-ay') or deactivate ('-an') the LVM volume at 'path'.
    # Paths under /var/run/sr-mount belong to file SRs and have no LV,
    # so they are skipped.
    if path.__contains__("/var/run/sr-mount"):
        return
    logging.debug("Setting availability of " + path + " to " + value)
    try:
        cmd = ['/usr/sbin/lvchange', value, path]
        util.pread2(cmd)
    except: #CommandException, (rc, cmdListStr, stderr):
        #errMsg = "CommandException thrown while executing: " + cmdListStr + " with return code: " + str(rc) + " and stderr: " + stderr
        errMsg = "Unexpected exception thrown by lvchange"
        logging.debug(errMsg)
        if value == "-ay":
            # Raise an error only if we are trying to make it available.
            # Just warn if we are trying to make it unavailable after the
            # snapshot operation is done.
            raise xs_errors.XenError(errMsg)
    return
|
||||
|
||||
|
||||
def checkVolumeAvailablility(path):
    # Ensure the LV backing an iSCSI VHD is active and wait for its device
    # symlink to appear (up to 6 checks, 10s apart).  Returns True when the
    # symlink is visible, False otherwise.
    # NOTE(review): the misspelled name is part of the plugin's call surface
    # elsewhere in CloudStack - do not rename.
    try:
        if not isVolumeAvailable(path):
            # The VHD file is not available on XenSever. The volume is probably
            # inactive or detached.
            # Do lvchange -ay to make it available on XenServer
            manageAvailability(path, '-ay')
    except:
        errMsg = "Could not determine status of ISCSI path: " + path
        logging.debug(errMsg)
        raise xs_errors.XenError(errMsg)

    success = False
    i = 0
    while i < 6:
        i = i + 1
        # Check if the vhd is actually visible by checking for the link
        # set isISCSI to true
        success = isVolumeAvailable(path)
        if success:
            logging.debug("Made vhd: " + path + " available and confirmed that it is visible")
            break

        # Sleep for 10 seconds before checking again.
        time.sleep(10)

    # If not visible within 1 min fail
    if not success:
        logging.debug("Could not make vhd: " + path + " available despite waiting for 1 minute. Does it exist?")

    return success
|
||||
|
||||
def isVolumeAvailable(path):
    # Check if iscsi volume is available on this XenServer: an active LVM
    # volume is exposed as a symlink under /dev, so test for a symlink.
    # FIX: previously interpolated 'path' into a bash command line
    # ("if [ -L <path> ] ..."), which both forked a shell per call and was
    # vulnerable to shell injection through the path; os.path.islink
    # performs the same check in-process.
    try:
        return os.path.islink(path)
    except:
        errMsg = "Could not determine status of ISCSI path: " + path
        logging.debug(errMsg)
        raise xs_errors.XenError(errMsg)
|
||||
|
||||
def scanParent(path):
    # Read the parent UUID of an iSCSI (LVM-backed) VHD straight from the
    # LV metadata; the parent need not be attached/visible on this host.
    try:
        lvName = os.path.basename(path)
        vgName = os.path.basename(os.path.dirname(path))
        vhdInfo = vhdutil.getVHDInfoLVM(lvName, lvhdutil.extractUuid, vgName)
        return vhdInfo.parentUuid
    except:
        errMsg = "Could not get vhd parent of " + path
        logging.debug(errMsg)
        raise xs_errors.XenError(errMsg)
|
||||
|
||||
def getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI):
    # Locate the base copy (parent VHD) of a snapshot on primary storage.
    snapshotPath = os.path.join(primarySRPath, getVHD(snapshotUuid, isISCSI))

    if isISCSI:
        # The LV must be active before its VHD header can be scanned.
        checkVolumeAvailablility(snapshotPath)
        baseCopyUuid = scanParent(snapshotPath)
    else:
        baseCopyUuid = getParent(snapshotPath, isISCSI)

    logging.debug("Base copy of snapshotUuid: " + snapshotUuid + " is " + baseCopyUuid)
    return baseCopyUuid
|
||||
|
||||
def getParent(path, isISCSI):
    # Extract the parent UUID from a VHD header, using the LVM-aware or
    # file-based uuid extractor as appropriate for the SR type.
    try:
        extractor = lvhdutil.extractUuid if isISCSI else cleanup.FileVDI.extractUuid
        return vhdutil.getParent(path, extractor)
    except:
        errMsg = "Could not get vhd parent of " + path
        logging.debug(errMsg)
        raise xs_errors.XenError(errMsg)
|
||||
|
||||
def getVhdParent(session, args):
    # XAPI plugin entry point: return the uuid of the base copy (VHD
    # parent) of a snapshot on the given primary storage SR.
    #
    # args: primaryStorageSRUuid, snapshotUuid, isISCSI ("true"/"false")
    logging.debug("getParent with " + str(args))
    try:
        isISCSI = getIsTrueString(args['isISCSI'])

        primarySRPath = getPrimarySRPath(args['primaryStorageSRUuid'], isISCSI)
        logging.debug("primarySRPath: " + primarySRPath)

        return getParentOfSnapshot(args['snapshotUuid'], primarySRPath, isISCSI)
    except:
        logging.debug('getVhdParent', exc_info=True)
        raise xs_errors.XenError("Failed to getVhdParent")
|
||||
def makedirs(path):
    # Create 'path' (including parents) if it does not already exist.
    # On OSError it first calls umount(path): presumably a stale mount
    # left on the mount point can make makedirs fail, and unmounting may
    # clear it — NOTE(review): umount() itself raises XenError on
    # failure, which would mask the original mkdir error; confirm this
    # is intended.
    if not os.path.isdir(path):
        try:
            os.makedirs(path)
        except OSError, (errno, strerror):
            umount(path)
            # Re-check after umount: either the unmount freed the name
            # or the directory now exists; both count as success.
            if os.path.isdir(path):
                return
            errMsg = "OSError while creating " + path + " with errno: " + str(errno) + " and strerr: " + strerror
            logging.debug(errMsg)
            raise xs_errors.XenError(errMsg)
    return
|
||||
|
||||
def umount(localDir):
    # Unmount the given local mount point. A failed umount is converted
    # into a XenError so callers surface it through XAPI.
    try:
        util.pread2(['umount', localDir])
    except CommandException:
        errMsg = "CommandException raised while trying to umount " + localDir
        logging.debug(errMsg)
        raise xs_errors.XenError(errMsg)

    logging.debug("Successfully unmounted " + localDir)
    return
|
||||
|
||||
@echo
def mountNfsSecondaryStorage(session, args):
    # XAPI plugin entry point: NFS-mount secondary storage on this host.
    #
    # args: remoteDir (NFS export, host:/path), localDir (mount point)
    # Returns "true"; idempotent if the mount already exists.
    remoteDir = args['remoteDir']
    localDir = args['localDir']

    # Scan the kernel mount table to see whether this exact mount
    # (same source AND same mount point) is already in place.
    mounted = False
    f = open("/proc/mounts", 'r')
    try:
        for line in f:
            tokens = line.split(" ")
            if len(tokens) > 2 and tokens[0] == remoteDir and tokens[1] == localDir:
                mounted = True
                break
    finally:
        # The original leaked this handle; always close it.
        f.close()

    if mounted:
        return "true"

    makedirs(localDir)
    # Soft mount with short timeouts so an unreachable NFS server
    # cannot hang dom0 indefinitely.
    options = "soft,tcp,timeo=133,retrans=1"
    try:
        cmd = ['mount', '-o', options, remoteDir, localDir]
        util.pread2(cmd)
    except:
        errMsg = "Unexpected error while trying to mount " + remoteDir + " to " + localDir
        # Keep the traceback in the log before raising the generic error.
        logging.debug(errMsg, exc_info=True)
        raise xs_errors.XenError(errMsg)
    logging.debug("Successfully mounted " + remoteDir + " to " + localDir)

    return "true"
|
||||
|
||||
@echo
def umountNfsSecondaryStorage(session, args):
    # XAPI plugin entry point: unmount secondary storage and remove the
    # (now empty) mount point.
    #
    # args: localDir (mount point)
    # Returns "true"; raises XenError if the umount itself fails.
    localDir = args['localDir']
    try:
        cmd = ['umount', localDir]
        util.pread2(cmd)
    except CommandException:
        errMsg = "CommandException raised while trying to umount " + localDir
        logging.debug(errMsg)
        raise xs_errors.XenError(errMsg)
    # Best-effort cleanup of the mount point. os.rmdir avoids spawning a
    # shell with an unquoted path (the original used
    # os.system("rmdir " + localDir)) and, like rmdir(1), only removes
    # an empty directory, so no data can ever be deleted here.
    try:
        os.rmdir(localDir)
    except OSError:
        pass
    logging.debug("Successfully unmounted " + localDir)
    return "true"
|
||||
|
||||
@echo
|
||||
def makeDirectory(session, args):
|
||||
path = args['path']
|
||||
if not os.path.isdir(path):
|
||||
try:
|
||||
os.makedirs(path)
|
||||
except OSError, (errno, strerror):
|
||||
if os.path.isdir(path):
|
||||
return "true"
|
||||
errMsg = "OSError while creating " + path + " with errno: " + str(errno) + " and strerr: " + strerror
|
||||
logging.debug(errMsg)
|
||||
raise xs_errors.XenError(errMsg)
|
||||
return "true"
|
||||
|
||||
if __name__ == "__main__":
    # Register the plugin entry points under the names the management
    # server invokes via host.call_plugin.
    handlers = {
        "getVhdParent": getVhdParent,
        "mountNfsSecondaryStorage": mountNfsSecondaryStorage,
        "umountNfsSecondaryStorage": umountNfsSecondaryStorage,
        "makeDirectory": makeDirectory,
    }
    XenAPIPlugin.dispatch(handlers)
|
||||
|
||||
|
||||
74
scripts/vm/hypervisor/xenserver/xenserver62/patch
Normal file
74
scripts/vm/hypervisor/xenserver/xenserver62/patch
Normal file
@ -0,0 +1,74 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# This file specifies the files that need
|
||||
# to be transferred over to the XenServer.
|
||||
# The format of this file is as follows:
|
||||
# [Name of file]=[source path],[file permission],[destination path]
|
||||
# [destination path] is required.
|
||||
# If [file permission] is missing, 755 is assumed.
|
||||
# If [source path] is missing, it looks in the same
|
||||
# directory as the patch file.
|
||||
# If [source path] starts with '/', then it is absolute path.
|
||||
# If [source path] starts with '~', then it is path relative to management server home directory.
|
||||
# If [source path] does not start with '/' or '~', then it is relative path to the location of the patch file.
|
||||
vmops=..,0755,/etc/xapi.d/plugins
|
||||
vmopspremium=..,0755,/etc/xapi.d/plugins
|
||||
xen-ovs-vif-flows.rules=..,0644,/etc/udev/rules.d
|
||||
ovs-vif-flows.py=..,0755,/etc/xapi.d/plugins
|
||||
cloudstack_plugins.conf=..,0644,/etc/xensource
|
||||
cloudstack_pluginlib.py=..,0755,/etc/xapi.d/plugins
|
||||
ovstunnel=..,0755,/etc/xapi.d/plugins
|
||||
cloud-plugin-storage=,0755,/etc/xapi.d/plugins
|
||||
hostvmstats.py=..,0755,/opt/xensource/sm
|
||||
systemvm.iso=../../../../../vms,0644,/opt/xensource/packages/iso
|
||||
id_rsa.cloud=../../../systemvm,0600,/root/.ssh
|
||||
network_info.sh=..,0755,/opt/cloud/bin
|
||||
setupxenserver.sh=..,0755,/opt/cloud/bin
|
||||
make_migratable.sh=..,0755,/opt/cloud/bin
|
||||
createipAlias.sh=..,0755,/opt/cloud/bin
|
||||
deleteipAlias.sh=..,0755,/opt/cloud/bin
|
||||
setup_iscsi.sh=..,0755,/opt/cloud/bin
|
||||
pingtest.sh=../../..,0755,/opt/cloud/bin
|
||||
dhcp_entry.sh=../../../../network/domr/,0755,/opt/cloud/bin
|
||||
save_password_to_domr.sh=../../../../network/domr/,0755,/opt/cloud/bin
|
||||
call_firewall.sh=../../../../network/domr/,0755,/opt/cloud/bin
|
||||
call_loadbalancer.sh=../../../../network/domr/,0755,/opt/cloud/bin
|
||||
router_proxy.sh=../../../../network/domr/,0755,/opt/cloud/bin
|
||||
cloud-setup-bonding.sh=..,0755,/opt/cloud/bin
|
||||
kill_copy_process.sh=..,0755,/opt/cloud/bin
|
||||
setup_heartbeat_sr.sh=..,0755,/opt/cloud/bin
|
||||
setup_heartbeat_file.sh=..,0755,/opt/cloud/bin
|
||||
check_heartbeat.sh=..,0755,/opt/cloud/bin
|
||||
xenheartbeat.sh=..,0755,/opt/cloud/bin
|
||||
launch_hb.sh=..,0755,/opt/cloud/bin
|
||||
vhd-util=..,0755,/opt/cloud/bin
|
||||
upgrade_snapshot.sh=..,0755,/opt/cloud/bin
|
||||
cloud-clean-vlan.sh=..,0755,/opt/cloud/bin
|
||||
cloud-prepare-upgrade.sh=..,0755,/opt/cloud/bin
|
||||
bumpUpPriority.sh=../../../../network/domr/,0755,/opt/cloud/bin
|
||||
swift=..,0755,/opt/cloud/bin
|
||||
swiftxen=..,0755,/etc/xapi.d/plugins
|
||||
s3xen=..,0755,/etc/xapi.d/plugins
|
||||
add_to_vcpus_params_live.sh=..,0755,/opt/cloud/bin
|
||||
ovs-pvlan=..,0755,/etc/xapi.d/plugins
|
||||
ovs-pvlan-dhcp-host.sh=../../../network,0755,/opt/cloud/bin
|
||||
ovs-pvlan-vm.sh=../../../network,0755,/opt/cloud/bin
|
||||
ovs-pvlan-cleanup.sh=../../../network,0755,/opt/cloud/bin
|
||||
ovs-get-dhcp-iface.sh=..,0755,/opt/cloud/bin
|
||||
ovs-get-bridge.sh=..,0755,/opt/cloud/bin
|
||||
cloudlog=..,0644,/etc/logrotate.d
|
||||
@ -1109,6 +1109,13 @@ public enum Config {
|
||||
"xenserver61",
|
||||
"default Xen PV driver version for registered template, valid value:xenserver56,xenserver61 ",
|
||||
"xenserver56,xenserver61"),
|
||||
XenServerHotFix("Advanced",
|
||||
ManagementServer.class,
|
||||
Boolean.class,
|
||||
"xen.hotfix.enabled",
|
||||
"false",
|
||||
"Enable/Disable xenserver hot fix",
|
||||
null),
|
||||
|
||||
// VMware
|
||||
VmwareUseNexusVSwitch(
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user