Merge branch 'master' into storage-offering-domains-zones

Abhishek Kumar 2019-06-18 12:52:34 +05:30
commit cf347c89ea
123 changed files with 3932 additions and 2448 deletions

View File

@@ -78,7 +78,9 @@ def handleMigrateBegin():
def executeCustomScripts(sysArgs):
createDirectoryIfNotExists(customDir, customDirPermissions)
if not os.path.exists(customDir) or not os.path.isdir(customDir):
return
scripts = getCustomScriptsFromDirectory()
for scriptName in scripts:
@@ -127,12 +129,6 @@ def getCustomScriptsFromDirectory():
os.listdir(customDir)), key=lambda fileName: substringAfter(fileName, '_'))
def createDirectoryIfNotExists(dir, permissions):
if not os.path.exists(dir):
logger.info('Directory %s does not exist; creating it.' % dir)
os.makedirs(dir, permissions)
def substringAfter(s, delimiter):
return s.partition(delimiter)[2]

View File

@@ -39,7 +39,6 @@ import java.util.concurrent.atomic.AtomicInteger;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.agent.directdownload.SetupDirectDownloadCertificate;
import org.apache.cloudstack.agent.lb.SetupMSListAnswer;
import org.apache.cloudstack.agent.lb.SetupMSListCommand;
import org.apache.cloudstack.ca.PostCertificateRenewalCommand;
@@ -630,8 +629,6 @@ public class Agent implements HandlerFactory, IAgentControl {
if (Host.Type.Routing.equals(_resource.getType())) {
scheduleServicesRestartTask();
}
} else if (cmd instanceof SetupDirectDownloadCertificate) {
answer = setupDirectDownloadCertificate((SetupDirectDownloadCertificate) cmd);
} else if (cmd instanceof SetupMSListCommand) {
answer = setupManagementServerList((SetupMSListCommand) cmd);
} else {
@@ -683,31 +680,6 @@ public class Agent implements HandlerFactory, IAgentControl {
}
}
private Answer setupDirectDownloadCertificate(SetupDirectDownloadCertificate cmd) {
String certificate = cmd.getCertificate();
String certificateName = cmd.getCertificateName();
s_logger.info("Importing certificate " + certificateName + " into keystore");
final File agentFile = PropertiesUtil.findConfigFile("agent.properties");
if (agentFile == null) {
return new Answer(cmd, false, "Failed to find agent.properties file");
}
final String keyStoreFile = agentFile.getParent() + "/" + KeyStoreUtils.KS_FILENAME;
String cerFile = agentFile.getParent() + "/" + certificateName + ".cer";
Script.runSimpleBashScript(String.format("echo '%s' > %s", certificate, cerFile));
String privatePasswordFormat = "sed -n '/keystore.passphrase/p' '%s' 2>/dev/null | sed 's/keystore.passphrase=//g' 2>/dev/null";
String privatePasswordCmd = String.format(privatePasswordFormat, agentFile.getAbsolutePath());
String privatePassword = Script.runSimpleBashScript(privatePasswordCmd);
String importCommandFormat = "keytool -importcert -file %s -keystore %s -alias '%s' -storepass '%s' -noprompt";
String importCmd = String.format(importCommandFormat, cerFile, keyStoreFile, certificateName, privatePassword);
Script.runSimpleBashScript(importCmd);
return new Answer(cmd, true, "Certificate " + certificateName + " imported");
}
public Answer setupAgentKeystore(final SetupKeyStoreCommand cmd) {
final String keyStorePassword = cmd.getKeystorePassword();
final long validityDays = cmd.getValidityDays();

View File

@@ -37,4 +37,6 @@ public interface DhcpServiceProvider extends NetworkElement {
boolean removeDhcpSupportForSubnet(Network network) throws ResourceUnavailableException;
boolean setExtraDhcpOptions(Network network, long nicId, Map<Integer, String> dhcpOptions);
boolean removeDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile vmProfile) throws ResourceUnavailableException;
}
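
For orientation: removeDhcpEntry is the hook the network orchestrator invokes when a NIC's DHCP/DNS state must be torn down (see the cleanupNicDhcpDnsEntry implementation further below, which calls it on each DhcpServiceProvider). A rough sketch of an element honoring the hook, assuming hypothetical helpers buildDhcpEntryCommand and sendCommandToRouter:

@Override
public boolean removeDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile vmProfile) throws ResourceUnavailableException {
    // Reuse the same command used to add an entry, but flag it for removal
    // via the setRemove(true) setter that this change set adds to DhcpEntryCommand.
    DhcpEntryCommand cmd = buildDhcpEntryCommand(network, nic, vmProfile); // hypothetical helper
    cmd.setRemove(true);
    return sendCommandToRouter(network, cmd); // hypothetical dispatch helper
}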

View File

@@ -0,0 +1,86 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.cloud.storage;
import java.io.Serializable;
public class MigrationOptions implements Serializable {
private String srcPoolUuid;
private Storage.StoragePoolType srcPoolType;
private Type type;
private String srcBackingFilePath;
private boolean copySrcTemplate;
private String srcVolumeUuid;
private int timeout;
public enum Type {
LinkedClone, FullClone
}
public MigrationOptions() {
}
public MigrationOptions(String srcPoolUuid, Storage.StoragePoolType srcPoolType, String srcBackingFilePath, boolean copySrcTemplate) {
this.srcPoolUuid = srcPoolUuid;
this.srcPoolType = srcPoolType;
this.type = Type.LinkedClone;
this.srcBackingFilePath = srcBackingFilePath;
this.copySrcTemplate = copySrcTemplate;
}
public MigrationOptions(String srcPoolUuid, Storage.StoragePoolType srcPoolType, String srcVolumeUuid) {
this.srcPoolUuid = srcPoolUuid;
this.srcPoolType = srcPoolType;
this.type = Type.FullClone;
this.srcVolumeUuid = srcVolumeUuid;
}
public String getSrcPoolUuid() {
return srcPoolUuid;
}
public Storage.StoragePoolType getSrcPoolType() {
return srcPoolType;
}
public String getSrcBackingFilePath() {
return srcBackingFilePath;
}
public boolean isCopySrcTemplate() {
return copySrcTemplate;
}
public String getSrcVolumeUuid() {
return srcVolumeUuid;
}
public Type getType() {
return type;
}
public int getTimeout() {
return timeout;
}
public void setTimeout(int timeout) {
this.timeout = timeout;
}
}
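
For orientation, a minimal sketch of how the two constructors map onto the clone types; the pool UUID, backing file path, and volume UUID below are placeholders:

// Linked clone: the destination reuses a backing file, optionally copying the source template first.
MigrationOptions linked = new MigrationOptions("src-pool-uuid", Storage.StoragePoolType.NetworkFilesystem, "/mnt/pool/template.qcow2", true);

// Full clone: the whole volume, identified by its UUID, is copied.
MigrationOptions full = new MigrationOptions("src-pool-uuid", Storage.StoragePoolType.NetworkFilesystem, "volume-uuid");
full.setTimeout(StorageManager.KvmStorageOnlineMigrationWait.value()); // seconds; wiring shown for illustration only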

View File

@@ -24,6 +24,7 @@ import org.apache.cloudstack.api.BaseListTemplateOrIsoPermissionsCmd;
import org.apache.cloudstack.api.BaseUpdateTemplateOrIsoPermissionsCmd;
import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd;
import org.apache.cloudstack.api.command.user.iso.ExtractIsoCmd;
import org.apache.cloudstack.api.command.user.iso.GetUploadParamsForIsoCmd;
import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd;
import org.apache.cloudstack.api.command.user.iso.UpdateIsoCmd;
import org.apache.cloudstack.api.command.user.template.CopyTemplateCmd;
@@ -45,10 +46,12 @@ public interface TemplateApiService {
VirtualMachineTemplate registerTemplate(RegisterTemplateCmd cmd) throws URISyntaxException, ResourceAllocationException;
public GetUploadParamsResponse registerTemplateForPostUpload(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException, MalformedURLException;
GetUploadParamsResponse registerTemplateForPostUpload(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException, MalformedURLException;
VirtualMachineTemplate registerIso(RegisterIsoCmd cmd) throws IllegalArgumentException, ResourceAllocationException;
GetUploadParamsResponse registerIsoForPostUpload(GetUploadParamsForIsoCmd cmd) throws ResourceAllocationException, MalformedURLException;
VirtualMachineTemplate copyTemplate(CopyTemplateCmd cmd) throws StorageUnavailableException, ResourceAllocationException;
VirtualMachineTemplate prepareTemplate(long templateId, long zoneId, Long storageId);

View File

@@ -1,90 +1,87 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.direct.download;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.exception.NetworkRuleConflictException;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.response.SuccessResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.direct.download.DirectDownloadManager;
import org.apache.log4j.Logger;
import javax.inject.Inject;
@APICommand(name = UploadTemplateDirectDownloadCertificate.APINAME,
description = "Upload a certificate for HTTPS direct template download on KVM hosts",
responseObject = SuccessResponse.class,
requestHasSensitiveInfo = true,
responseHasSensitiveInfo = true,
since = "4.11.0",
authorized = {RoleType.Admin})
public class UploadTemplateDirectDownloadCertificate extends BaseCmd {
@Inject
DirectDownloadManager directDownloadManager;
private static final Logger LOG = Logger.getLogger(UploadTemplateDirectDownloadCertificate.class);
public static final String APINAME = "uploadTemplateDirectDownloadCertificate";
@Parameter(name = ApiConstants.CERTIFICATE, type = BaseCmd.CommandType.STRING, required = true, length = 65535,
description = "SSL certificate")
private String certificate;
@Parameter(name = ApiConstants.NAME , type = BaseCmd.CommandType.STRING, required = true,
description = "Name for the uploaded certificate")
private String name;
@Parameter(name = ApiConstants.HYPERVISOR, type = BaseCmd.CommandType.STRING, required = true, description = "Hypervisor type")
private String hypervisor;
@Override
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException {
if (!hypervisor.equalsIgnoreCase("kvm")) {
throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Currently supporting KVM hosts only");
}
SuccessResponse response = new SuccessResponse(getCommandName());
try {
LOG.debug("Uploading certificate " + name + " to agents for Direct Download");
boolean result = directDownloadManager.uploadCertificateToHosts(certificate, name, hypervisor);
response.setSuccess(result);
setResponseObject(response);
} catch (Exception e) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
}
}
@Override
public String getCommandName() {
return APINAME.toLowerCase() + BaseCmd.RESPONSE_SUFFIX;
}
@Override
public long getEntityOwnerId() {
return CallContext.current().getCallingAccount().getId();
}
}
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.direct.download;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.response.SuccessResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.direct.download.DirectDownloadManager;
import org.apache.log4j.Logger;
import javax.inject.Inject;
@APICommand(name = UploadTemplateDirectDownloadCertificateCmd.APINAME,
description = "Upload a certificate for HTTPS direct template download on KVM hosts",
responseObject = SuccessResponse.class,
requestHasSensitiveInfo = true,
responseHasSensitiveInfo = true,
since = "4.11.0",
authorized = {RoleType.Admin})
public class UploadTemplateDirectDownloadCertificateCmd extends BaseCmd {
@Inject
DirectDownloadManager directDownloadManager;
private static final Logger LOG = Logger.getLogger(UploadTemplateDirectDownloadCertificateCmd.class);
public static final String APINAME = "uploadTemplateDirectDownloadCertificate";
@Parameter(name = ApiConstants.CERTIFICATE, type = BaseCmd.CommandType.STRING, required = true, length = 65535,
description = "SSL certificate")
private String certificate;
@Parameter(name = ApiConstants.NAME , type = BaseCmd.CommandType.STRING, required = true,
description = "Name for the uploaded certificate")
private String name;
@Parameter(name = ApiConstants.HYPERVISOR, type = BaseCmd.CommandType.STRING, required = true, description = "Hypervisor type")
private String hypervisor;
@Override
public void execute() {
if (!hypervisor.equalsIgnoreCase("kvm")) {
throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Currently supporting KVM hosts only");
}
try {
LOG.debug("Uploading certificate " + name + " to agents for Direct Download");
boolean result = directDownloadManager.uploadCertificateToHosts(certificate, name, hypervisor);
SuccessResponse response = new SuccessResponse(getCommandName());
response.setSuccess(result);
setResponseObject(response);
} catch (Exception e) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
}
}
@Override
public String getCommandName() {
return APINAME.toLowerCase() + BaseCmd.RESPONSE_SUFFIX;
}
@Override
public long getEntityOwnerId() {
return CallContext.current().getCallingAccount().getId();
}
}

View File

@@ -86,6 +86,13 @@ public class ListPublicIpAddressesCmd extends BaseListTaggedResourcesCmd {
description = "lists all public IP addresses associated to the network specified")
private Long associatedNetworkId;
@Parameter(name = ApiConstants.NETWORK_ID,
type = CommandType.UUID,
entityType = NetworkResponse.class,
description = "lists all public IP addresses by source network ID",
since = "4.13.0")
private Long networkId;
@Parameter(name = ApiConstants.IS_SOURCE_NAT, type = CommandType.BOOLEAN, description = "list only source NAT IP addresses")
private Boolean isSourceNat;
@@ -133,6 +140,10 @@ public class ListPublicIpAddressesCmd extends BaseListTaggedResourcesCmd {
return associatedNetworkId;
}
public Long getNetworkId() {
return networkId;
}
public Boolean isSourceNat() {
return isSourceNat;
}

View File

@@ -0,0 +1,158 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.user.iso;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.NetworkRuleConflictException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.AbstractGetUploadParamsCmd;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.GetUploadParamsResponse;
import org.apache.cloudstack.api.response.GuestOSResponse;
import org.apache.cloudstack.api.response.ZoneResponse;
import org.apache.cloudstack.context.CallContext;
import java.net.MalformedURLException;
@APICommand(name = GetUploadParamsForIsoCmd.APINAME,
description = "upload an existing ISO into the CloudStack cloud.",
responseObject = GetUploadParamsResponse.class, since = "4.13",
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class GetUploadParamsForIsoCmd extends AbstractGetUploadParamsCmd {
public static final String APINAME = "getUploadParamsForIso";
private static final String s_name = "postuploadisoresponse";
/////////////////////////////////////////////////////
//////////////// API parameters /////////////////////
/////////////////////////////////////////////////////
@Parameter(name = ApiConstants.BOOTABLE, type = BaseCmd.CommandType.BOOLEAN, description = "true if this ISO is bootable. If not passed explicitly its assumed to be true")
private Boolean bootable;
@Parameter(name = ApiConstants.DISPLAY_TEXT,
type = BaseCmd.CommandType.STRING,
required = true,
description = "the display text of the ISO. This is usually used for display purposes.",
length = 4096)
private String displayText;
@Parameter(name = ApiConstants.IS_FEATURED, type = BaseCmd.CommandType.BOOLEAN, description = "true if you want this ISO to be featured")
private Boolean featured;
@Parameter(name = ApiConstants.IS_PUBLIC,
type = BaseCmd.CommandType.BOOLEAN,
description = "true if you want to register the ISO to be publicly available to all users, false otherwise.")
private Boolean publicIso;
@Parameter(name = ApiConstants.IS_EXTRACTABLE, type = BaseCmd.CommandType.BOOLEAN, description = "true if the ISO or its derivatives are extractable; default is false")
private Boolean extractable;
@Parameter(name = ApiConstants.NAME, type = BaseCmd.CommandType.STRING, required = true, description = "the name of the ISO")
private String isoName;
@Parameter(name = ApiConstants.OS_TYPE_ID,
type = BaseCmd.CommandType.UUID,
entityType = GuestOSResponse.class,
description = "the ID of the OS type that best represents the OS of this ISO. If the ISO is bootable this parameter needs to be passed")
private Long osTypeId;
@Parameter(name=ApiConstants.ZONE_ID, type= BaseCmd.CommandType.UUID, entityType = ZoneResponse.class,
required=true, description="the ID of the zone you wish to register the ISO to.")
protected Long zoneId;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
public Boolean isBootable() {
return bootable;
}
public String getDisplayText() {
return displayText;
}
public Boolean isFeatured() {
return featured;
}
public Boolean isPublic() {
return publicIso;
}
public Boolean isExtractable() {
return extractable;
}
public String getIsoName() {
return isoName;
}
public Long getOsTypeId() {
return osTypeId;
}
public Long getZoneId() {
return zoneId;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@Override
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException {
validateRequest();
try {
GetUploadParamsResponse response = _templateService.registerIsoForPostUpload(this);
response.setResponseName(getCommandName());
setResponseObject(response);
} catch (ResourceAllocationException | MalformedURLException e) {
s_logger.error("Exception while registering template", e);
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Exception while registering ISO: " + e.getMessage());
}
}
private void validateRequest() {
if (getZoneId() <= 0) {
throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Invalid zoneid");
}
}
@Override
public String getCommandName() {
return s_name;
}
@Override
public long getEntityOwnerId() {
Long accountId = _accountService.finalyzeAccountId(getAccountName(), getDomainId(), getProjectId(), true);
if (accountId == null) {
return CallContext.current().getCallingAccount().getId();
}
return accountId;
}
}
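
Based on the parameters declared above, a hypothetical invocation of the new API could look like the following; the endpoint path, UUIDs, and values are placeholders, with name, displaytext, and zoneid being the required parameters:

GET /client/api?command=getUploadParamsForIso&name=debian-10&displaytext=Debian+10+ISO&zoneid=<zone-uuid>&bootable=true&ostypeid=<os-type-uuid>&response=json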

View File

@@ -16,15 +16,6 @@
// under the License.
package org.apache.cloudstack.api.command.user.snapshot;
import com.cloud.event.EventTypes;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.PermissionDeniedException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.projects.Project;
import com.cloud.storage.Snapshot;
import com.cloud.storage.Volume;
import com.cloud.user.Account;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiCommandJobType;
import org.apache.cloudstack.api.ApiConstants;
@@ -39,6 +30,16 @@ import org.apache.cloudstack.api.response.SnapshotResponse;
import org.apache.cloudstack.api.response.VolumeResponse;
import org.apache.log4j.Logger;
import com.cloud.event.EventTypes;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.PermissionDeniedException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.projects.Project;
import com.cloud.storage.Snapshot;
import com.cloud.storage.Volume;
import com.cloud.user.Account;
import com.cloud.utils.exception.CloudRuntimeException;
@APICommand(name = "createSnapshot", description = "Creates an instant snapshot of a volume.", responseObject = SnapshotResponse.class, entityType = {Snapshot.class},
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class CreateSnapshotCmd extends BaseAsyncCreateCmd {
@@ -171,7 +172,7 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd {
@Override
public String getEventDescription() {
return "creating snapshot for volume: " + this._uuidMgr.getUuid(Volume.class, getVolumeId());
return "creating snapshot for volume: " + getVolumeUuid();
}
@Override
@@ -186,7 +187,7 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd {
setEntityId(snapshot.getId());
setEntityUuid(snapshot.getUuid());
} else {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot");
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot for volume" + getVolumeUuid());
}
}
@@ -202,10 +203,10 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd {
response.setResponseName(getCommandName());
setResponseObject(response);
} else {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot due to an internal error creating snapshot for volume " + getVolumeId());
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot due to an internal error creating snapshot for volume " + getVolumeUuid());
}
} catch (Exception e) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot due to an internal error creating snapshot for volume " + getVolumeId());
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot due to an internal error creating snapshot for volume " + getVolumeUuid());
}
}
@@ -249,4 +250,8 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd {
return asyncBackup;
}
}
protected String getVolumeUuid() {
return _uuidMgr.getUuid(Volume.class, getVolumeId());
}
}

View File

@@ -16,11 +16,12 @@
// under the License.
package org.apache.cloudstack.api.command.test;
import com.cloud.storage.Snapshot;
import com.cloud.storage.VolumeApiService;
import com.cloud.user.Account;
import com.cloud.user.AccountService;
import junit.framework.TestCase;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.isNull;
import org.apache.cloudstack.api.ResponseGenerator;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.command.user.snapshot.CreateSnapshotCmd;
@@ -32,11 +33,12 @@ import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.mockito.Mockito;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.isNull;
import com.cloud.storage.Snapshot;
import com.cloud.storage.VolumeApiService;
import com.cloud.user.Account;
import com.cloud.user.AccountService;
import junit.framework.TestCase;
public class CreateSnapshotCmdTest extends TestCase {
@@ -66,6 +68,11 @@ public class CreateSnapshotCmdTest extends TestCase {
public long getEntityOwnerId(){
return 1L;
}
@Override
protected String getVolumeUuid() {
return "123";
}
};
}
@@ -126,7 +133,7 @@ public class CreateSnapshotCmdTest extends TestCase {
try {
createSnapshotCmd.execute();
} catch (ServerApiException exception) {
Assert.assertEquals("Failed to create snapshot due to an internal error creating snapshot for volume 1", exception.getDescription());
Assert.assertEquals("Failed to create snapshot due to an internal error creating snapshot for volume 123", exception.getDescription());
}
}
}

View File

@@ -163,14 +163,6 @@ under the License.
used.
-->
<cacheManagerPeerProviderFactory
class="net.sf.ehcache.distribution.RMICacheManagerPeerProviderFactory"
properties="peerDiscovery=automatic,
multicastGroupAddress=230.0.0.1,
multicastGroupPort=4446, timeToLive=1"
propertySeparator=","
/>
<!--
CacheManagerPeerListener
@@ -209,9 +201,6 @@ under the License.
If not specified it defaults 120000ms.
-->
<cacheManagerPeerListenerFactory
class="net.sf.ehcache.distribution.RMICacheManagerPeerListenerFactory"/>
<!--
Cache configuration

View File

@@ -32,6 +32,10 @@ public class CreateVMSnapshotCommand extends VMSnapshotBaseCommand {
this.vmUuid = vmUuid;
}
public CreateVMSnapshotCommand(String vmName, VMSnapshotTO snapshot) {
super(vmName, snapshot, null, null);
}
public String getVmUuid() {
return vmUuid;
}

View File

@@ -27,4 +27,8 @@ public class DeleteVMSnapshotCommand extends VMSnapshotBaseCommand {
public DeleteVMSnapshotCommand(String vmName, VMSnapshotTO snapshot, List<VolumeObjectTO> volumeTOs, String guestOSType) {
super(vmName, snapshot, volumeTOs, guestOSType);
}
public DeleteVMSnapshotCommand(String vmName, VMSnapshotTO snapshot) {
super(vmName, snapshot, null, null);
}
}
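
Both VM snapshot commands now gain a two-argument convenience constructor that passes null for the volume list and guest OS type. A minimal sketch of using it when only the VM name and snapshot TO are at hand; the factory method is illustrative:

// Hypothetical helper: builds a delete command without volume details.
static DeleteVMSnapshotCommand deleteCommandFor(String vmName, VMSnapshotTO snapshot) {
    return new DeleteVMSnapshotCommand(vmName, snapshot); // volumeTOs and guestOSType default to null
}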

View File

@@ -30,6 +30,7 @@ public class MigrateCommand extends Command {
private String vmName;
private String destIp;
private Map<String, MigrateDiskInfo> migrateStorage;
private boolean migrateStorageManaged;
private boolean autoConvergence;
private String hostGuid;
private boolean isWindows;
@@ -56,6 +57,14 @@
return migrateStorage != null ? new HashMap<>(migrateStorage) : new HashMap<String, MigrateDiskInfo>();
}
public boolean isMigrateStorageManaged() {
return migrateStorageManaged;
}
public void setMigrateStorageManaged(boolean migrateStorageManaged) {
this.migrateStorageManaged = migrateStorageManaged;
}
public void setAutoConvergence(boolean autoConvergence) {
this.autoConvergence = autoConvergence;
}

View File

@@ -35,6 +35,15 @@ public class DhcpEntryCommand extends NetworkElementCommand {
String duid;
private boolean isDefault;
boolean executeInSequence = false;
boolean remove;
public boolean isRemove() {
return remove;
}
public void setRemove(boolean remove) {
this.remove = remove;
}
protected DhcpEntryCommand() {

View File

@@ -0,0 +1,43 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.agent.api.storage;
import com.cloud.agent.api.Command;
import com.cloud.storage.Storage;
import java.util.Map;
public class CheckStorageAvailabilityCommand extends Command {
private Map<String, Storage.StoragePoolType> poolsMap;
public CheckStorageAvailabilityCommand(Map<String, Storage.StoragePoolType> poolsMap) {
this.poolsMap = poolsMap;
}
public Map<String, Storage.StoragePoolType> getPoolsMap() {
return poolsMap;
}
@Override
public boolean executeInSequence() {
return false;
}
}
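
A minimal usage sketch; the pool UUID and pool type are placeholders:

Map<String, Storage.StoragePoolType> pools = new HashMap<>();
pools.put("pool-uuid-1", Storage.StoragePoolType.NetworkFilesystem);
CheckStorageAvailabilityCommand check = new CheckStorageAvailabilityCommand(pools);
// executeInSequence() returns false, so agents may handle this check concurrently with other commands.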

View File

@@ -35,7 +35,7 @@ public class DhcpEntryConfigItem extends AbstractConfigItemFacade {
final DhcpEntryCommand command = (DhcpEntryCommand) cmd;
final VmDhcpConfig vmDhcpConfig = new VmDhcpConfig(command.getVmName(), command.getVmMac(), command.getVmIpAddress(), command.getVmIp6Address(), command.getDuid(), command.getDefaultDns(),
command.getDefaultRouter(), command.getStaticRoutes(), command.isDefault());
command.getDefaultRouter(), command.getStaticRoutes(), command.isDefault(), command.isRemove());
return generateConfigItems(vmDhcpConfig);
}

View File

@@ -30,12 +30,15 @@ public class VmDhcpConfig extends ConfigBase {
private String staticRoutes;
private boolean defaultEntry;
// Indicate if the entry should be removed when set to true
private boolean remove;
public VmDhcpConfig() {
super(VM_DHCP);
}
public VmDhcpConfig(String hostName, String macAddress, String ipv4Address, String ipv6Address, String ipv6Duid, String dnsAddresses, String defaultGateway,
String staticRoutes, boolean defaultEntry) {
String staticRoutes, boolean defaultEntry, boolean remove) {
super(VM_DHCP);
this.hostName = hostName;
this.macAddress = macAddress;
@@ -46,6 +49,7 @@ public class VmDhcpConfig extends ConfigBase {
this.defaultGateway = defaultGateway;
this.staticRoutes = staticRoutes;
this.defaultEntry = defaultEntry;
this.remove = remove;
}
public String getHostName() {
@@ -64,6 +68,14 @@
this.macAddress = macAddress;
}
public boolean isRemove() {
return remove;
}
public void setRemove(boolean remove) {
this.remove = remove;
}
public String getIpv4Address() {
return ipv4Address;
}

View File

@@ -20,12 +20,12 @@ package org.apache.cloudstack.agent.directdownload;
import com.cloud.agent.api.Command;
public class SetupDirectDownloadCertificate extends Command {
public class SetupDirectDownloadCertificateCommand extends Command {
private String certificate;
private String certificateName;
public SetupDirectDownloadCertificate(String certificate, String name) {
public SetupDirectDownloadCertificateCommand(String certificate, String name) {
this.certificate = certificate;
this.certificateName = name;
}

View File

@@ -19,6 +19,7 @@
package org.apache.cloudstack.storage.to;
import com.cloud.storage.MigrationOptions;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import com.cloud.agent.api.to.DataObjectType;
@@ -59,6 +60,7 @@ public class VolumeObjectTO implements DataTO {
private Long iopsWriteRateMaxLength;
private DiskCacheMode cacheMode;
private Hypervisor.HypervisorType hypervisorType;
private MigrationOptions migrationOptions;
public VolumeObjectTO() {
@@ -97,6 +99,7 @@ public class VolumeObjectTO implements DataTO {
cacheMode = volume.getCacheMode();
hypervisorType = volume.getHypervisorType();
setDeviceId(volume.getDeviceId());
this.migrationOptions = volume.getMigrationOptions();
}
public String getUuid() {
@@ -300,4 +303,8 @@
public DiskCacheMode getCacheMode() {
return cacheMode;
}
public MigrationOptions getMigrationOptions() {
return migrationOptions;
}
}

View File

@@ -309,4 +309,8 @@ public interface NetworkOrchestrationService {
*/
boolean areRoutersRunning(final List<? extends VirtualRouter> routers);
/**
* Remove entry from /etc/dhcphosts and /etc/hosts on virtual routers
*/
void cleanupNicDhcpDnsEntry(Network network, VirtualMachineProfile vmProfile, NicProfile nicProfile);
}

View File

@@ -47,6 +47,8 @@ public interface TemplateService {
AsyncCallFuture<TemplateApiResult> createTemplateFromVolumeAsync(VolumeInfo volume, TemplateInfo template, DataStore store);
boolean createOvaDataDiskTemplates(TemplateInfo parentTemplate);
AsyncCallFuture<TemplateApiResult> deleteTemplateAsync(TemplateInfo template);
AsyncCallFuture<TemplateApiResult> copyTemplate(TemplateInfo srcTemplate, DataStore destStore);

View File

@@ -21,6 +21,7 @@ package org.apache.cloudstack.engine.subsystem.api.storage;
import com.cloud.agent.api.Answer;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.offering.DiskOffering.DiskCacheMode;
import com.cloud.storage.MigrationOptions;
import com.cloud.storage.Volume;
import com.cloud.vm.VirtualMachine;
@@ -71,4 +72,11 @@ public interface VolumeInfo extends DataObject, Volume {
Long getIopsWriteRateMaxLength();
DiskCacheMode getCacheMode();
/**
* Currently available for KVM volumes
*/
MigrationOptions getMigrationOptions();
void setMigrationOptions(MigrationOptions migrationOptions);
}

View File

@@ -87,11 +87,19 @@ public interface StorageManager extends StorageService {
ConfigKey<Integer> KvmStorageOnlineMigrationWait = new ConfigKey<>(Integer.class,
"kvm.storage.online.migration.wait",
"Storage",
"10800",
"86400",
"Timeout in seconds for online (live) storage migration to complete on KVM (migrateVirtualMachineWithVolume)",
true,
ConfigKey.Scope.Global,
null);
ConfigKey<Boolean> KvmAutoConvergence = new ConfigKey<>(Boolean.class,
"kvm.auto.convergence",
"Storage",
"false",
"Setting this to 'true' allows KVM to use auto convergence to complete VM migration (libvirt version 1.2.3+ and QEMU version 1.6+)",
true,
ConfigKey.Scope.Global,
null);
ConfigKey<Integer> MaxNumberOfManagedClusteredFileSystems = new ConfigKey<>(Integer.class,
"max.number.managed.clustered.file.systems",
"Storage",

View File

@@ -120,7 +120,6 @@ import com.cloud.agent.manager.Commands;
import com.cloud.agent.manager.allocator.HostAllocator;
import com.cloud.alert.AlertManager;
import com.cloud.capacity.CapacityManager;
import com.cloud.configuration.Config;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterDetailsVO;
import com.cloud.dc.DataCenter;
@@ -2367,11 +2366,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
final boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows");
final MigrateCommand mc = new MigrateCommand(vm.getInstanceName(), dest.getHost().getPrivateIpAddress(), isWindows, to, getExecuteInSequence(vm.getHypervisorType()));
String autoConvergence = _configDao.getValue(Config.KvmAutoConvergence.toString());
boolean kvmAutoConvergence = Boolean.parseBoolean(autoConvergence);
boolean kvmAutoConvergence = StorageManager.KvmAutoConvergence.value();
mc.setAutoConvergence(kvmAutoConvergence);
mc.setHostGuid(dest.getHost().getGuid());
try {
@@ -3897,11 +3893,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
final boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows");
final MigrateCommand mc = new MigrateCommand(vm.getInstanceName(), dest.getHost().getPrivateIpAddress(), isWindows, to, getExecuteInSequence(vm.getHypervisorType()));
String autoConvergence = _configDao.getValue(Config.KvmAutoConvergence.toString());
boolean kvmAutoConvergence = Boolean.parseBoolean(autoConvergence);
boolean kvmAutoConvergence = StorageManager.KvmAutoConvergence.value();
mc.setAutoConvergence(kvmAutoConvergence);
mc.setHostGuid(dest.getHost().getGuid());
try {

View File

@@ -2999,6 +2999,34 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
return true;
}
/**
* Cleanup entry on VR file specified by type
*/
@Override
public void cleanupNicDhcpDnsEntry(Network network, VirtualMachineProfile vmProfile, NicProfile nicProfile) {
final List<Provider> networkProviders = getNetworkProviders(network.getId());
for (final NetworkElement element : networkElements) {
if (networkProviders.contains(element.getProvider())) {
if (!_networkModel.isProviderEnabledInPhysicalNetwork(_networkModel.getPhysicalNetworkId(network), element.getProvider().getName())) {
throw new CloudRuntimeException("Service provider " + element.getProvider().getName() + " either doesn't exist or is not enabled in physical network id: "
+ network.getPhysicalNetworkId());
}
if (vmProfile.getType() == Type.User && element.getProvider() != null) {
if (_networkModel.areServicesSupportedInNetwork(network.getId(), Service.Dhcp)
&& _networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.Dhcp, element.getProvider()) && element instanceof DhcpServiceProvider) {
final DhcpServiceProvider sp = (DhcpServiceProvider) element;
try {
sp.removeDhcpEntry(network, nicProfile, vmProfile);
} catch (ResourceUnavailableException e) {
s_logger.error("Failed to remove dhcp-dns entry due to: ", e);
}
}
}
}
}
}
/**
* rollingRestartRouters performs restart of routers of a network by first
* deploying a new VR and then destroying old VRs in rolling fashion. For

View File

@@ -63,6 +63,7 @@ import com.cloud.upgrade.dao.Upgrade41000to41100;
import com.cloud.upgrade.dao.Upgrade410to420;
import com.cloud.upgrade.dao.Upgrade41100to41110;
import com.cloud.upgrade.dao.Upgrade41110to41120;
import com.cloud.upgrade.dao.Upgrade41120to41130;
import com.cloud.upgrade.dao.Upgrade41120to41200;
import com.cloud.upgrade.dao.Upgrade41200to41300;
import com.cloud.upgrade.dao.Upgrade420to421;
@@ -183,7 +184,8 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
.next("4.10.0.0", new Upgrade41000to41100())
.next("4.11.0.0", new Upgrade41100to41110())
.next("4.11.1.0", new Upgrade41110to41120())
.next("4.11.2.0", new Upgrade41120to41200())
.next("4.11.2.0", new Upgrade41120to41130())
.next("4.11.3.0", new Upgrade41120to41200())
.next("4.12.0.0", new Upgrade41200to41300())
.build();
}

View File

@@ -19,17 +19,9 @@ package com.cloud.upgrade.dao;
import java.io.InputStream;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.log4j.Logger;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.utils.exception.CloudRuntimeException;
public class Upgrade41110to41120 implements DbUpgrade {
@@ -63,175 +55,6 @@ public class Upgrade41110to41120 implements DbUpgrade {
@Override
public void performDataMigration(Connection conn) {
updateSystemVmTemplates(conn);
}
@SuppressWarnings("serial")
private void updateSystemVmTemplates(final Connection conn) {
LOG.debug("Updating System Vm template IDs");
final Set<Hypervisor.HypervisorType> hypervisorsListInUse = new HashSet<Hypervisor.HypervisorType>();
try (PreparedStatement pstmt = conn.prepareStatement("select distinct(hypervisor_type) from `cloud`.`cluster` where removed is null"); ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
switch (Hypervisor.HypervisorType.getType(rs.getString(1))) {
case XenServer:
hypervisorsListInUse.add(Hypervisor.HypervisorType.XenServer);
break;
case KVM:
hypervisorsListInUse.add(Hypervisor.HypervisorType.KVM);
break;
case VMware:
hypervisorsListInUse.add(Hypervisor.HypervisorType.VMware);
break;
case Hyperv:
hypervisorsListInUse.add(Hypervisor.HypervisorType.Hyperv);
break;
case LXC:
hypervisorsListInUse.add(Hypervisor.HypervisorType.LXC);
break;
case Ovm3:
hypervisorsListInUse.add(Hypervisor.HypervisorType.Ovm3);
break;
default:
break;
}
}
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates: Exception caught while getting hypervisor types from clusters: " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while getting hypervisor types from clusters", e);
}
final Map<Hypervisor.HypervisorType, String> NewTemplateNameList = new HashMap<Hypervisor.HypervisorType, String>() {
{
put(Hypervisor.HypervisorType.KVM, "systemvm-kvm-4.11.2");
put(Hypervisor.HypervisorType.VMware, "systemvm-vmware-4.11.2");
put(Hypervisor.HypervisorType.XenServer, "systemvm-xenserver-4.11.2");
put(Hypervisor.HypervisorType.Hyperv, "systemvm-hyperv-4.11.2");
put(Hypervisor.HypervisorType.LXC, "systemvm-lxc-4.11.2");
put(Hypervisor.HypervisorType.Ovm3, "systemvm-ovm3-4.11.2");
}
};
final Map<Hypervisor.HypervisorType, String> routerTemplateConfigurationNames = new HashMap<Hypervisor.HypervisorType, String>() {
{
put(Hypervisor.HypervisorType.KVM, "router.template.kvm");
put(Hypervisor.HypervisorType.VMware, "router.template.vmware");
put(Hypervisor.HypervisorType.XenServer, "router.template.xenserver");
put(Hypervisor.HypervisorType.Hyperv, "router.template.hyperv");
put(Hypervisor.HypervisorType.LXC, "router.template.lxc");
put(Hypervisor.HypervisorType.Ovm3, "router.template.ovm3");
}
};
final Map<Hypervisor.HypervisorType, String> newTemplateUrl = new HashMap<Hypervisor.HypervisorType, String>() {
{
put(Hypervisor.HypervisorType.KVM, "https://download.cloudstack.org/systemvm/4.11/systemvmtemplate-4.11.2-kvm.qcow2.bz2");
put(Hypervisor.HypervisorType.VMware, "https://download.cloudstack.org/systemvm/4.11/systemvmtemplate-4.11.2-vmware.ova");
put(Hypervisor.HypervisorType.XenServer, "https://download.cloudstack.org/systemvm/4.11/systemvmtemplate-4.11.2-xen.vhd.bz2");
put(Hypervisor.HypervisorType.Hyperv, "https://download.cloudstack.org/systemvm/4.11/systemvmtemplate-4.11.2-hyperv.vhd.zip");
put(Hypervisor.HypervisorType.LXC, "https://download.cloudstack.org/systemvm/4.11/systemvmtemplate-4.11.2-kvm.qcow2.bz2");
put(Hypervisor.HypervisorType.Ovm3, "https://download.cloudstack.org/systemvm/4.11/systemvmtemplate-4.11.2-ovm.raw.bz2");
}
};
final Map<Hypervisor.HypervisorType, String> newTemplateChecksum = new HashMap<Hypervisor.HypervisorType, String>() {
{
put(Hypervisor.HypervisorType.KVM, "6d12cc764cd7d64112d8c35d70923eb1");
put(Hypervisor.HypervisorType.XenServer, "6e8b3ae84ca8145736d1d7d3f7546e65");
put(Hypervisor.HypervisorType.VMware, "e981f8cb951688efd93481913198c9cc");
put(Hypervisor.HypervisorType.Hyperv, "e9032635ffba021371780307162551b9");
put(Hypervisor.HypervisorType.LXC, "6d12cc764cd7d64112d8c35d70923eb1");
put(Hypervisor.HypervisorType.Ovm3, "c4a91f8e52e4531a1c2a9a17c530d5fe");
}
};
for (final Map.Entry<Hypervisor.HypervisorType, String> hypervisorAndTemplateName : NewTemplateNameList.entrySet()) {
LOG.debug("Updating " + hypervisorAndTemplateName.getKey() + " System Vms");
try (PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`vm_template` where name = ? and removed is null order by id desc limit 1")) {
// Get 4.11 systemvm template id for corresponding hypervisor
long templateId = -1;
pstmt.setString(1, hypervisorAndTemplateName.getValue());
try (ResultSet rs = pstmt.executeQuery()) {
if (rs.next()) {
templateId = rs.getLong(1);
}
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates: Exception caught while getting ids of templates: " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates: Exception caught while getting ids of templates", e);
}
// change template type to SYSTEM
if (templateId != -1) {
try (PreparedStatement templ_type_pstmt = conn.prepareStatement("update `cloud`.`vm_template` set type='SYSTEM' where id = ?");) {
templ_type_pstmt.setLong(1, templateId);
templ_type_pstmt.executeUpdate();
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system': " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system'", e);
}
// update template ID of system Vms
try (PreparedStatement update_templ_id_pstmt = conn
.prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = ? and removed is NULL");) {
update_templ_id_pstmt.setLong(1, templateId);
update_templ_id_pstmt.setString(2, hypervisorAndTemplateName.getKey().toString());
update_templ_id_pstmt.executeUpdate();
} catch (final Exception e) {
LOG.error("updateSystemVmTemplates:Exception while setting template for " + hypervisorAndTemplateName.getKey().toString() + " to " + templateId
+ ": " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while setting template for " + hypervisorAndTemplateName.getKey().toString() + " to "
+ templateId, e);
}
// Change value of global configuration parameter
// router.template.* for the corresponding hypervisor
try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? WHERE name = ?");) {
update_pstmt.setString(1, hypervisorAndTemplateName.getValue());
update_pstmt.setString(2, routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()));
update_pstmt.executeUpdate();
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates:Exception while setting " + routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()) + " to "
+ hypervisorAndTemplateName.getValue() + ": " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while setting "
+ routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()) + " to " + hypervisorAndTemplateName.getValue(), e);
}
// Change value of global configuration parameter
// minreq.sysvmtemplate.version for the ACS version
try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? WHERE name = ?");) {
update_pstmt.setString(1, "4.11.2");
update_pstmt.setString(2, "minreq.sysvmtemplate.version");
update_pstmt.executeUpdate();
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates:Exception while setting 'minreq.sysvmtemplate.version' to 4.11.2: " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while setting 'minreq.sysvmtemplate.version' to 4.11.2", e);
}
} else {
if (hypervisorsListInUse.contains(hypervisorAndTemplateName.getKey())) {
throw new CloudRuntimeException(getUpgradedVersion() + hypervisorAndTemplateName.getKey() + " SystemVm template not found. Cannot upgrade system Vms");
} else {
LOG.warn(getUpgradedVersion() + hypervisorAndTemplateName.getKey() + " SystemVm template not found. " + hypervisorAndTemplateName.getKey()
+ " hypervisor is not used, so not failing upgrade");
// Update the latest template URLs for corresponding
// hypervisor
try (PreparedStatement update_templ_url_pstmt = conn
.prepareStatement("UPDATE `cloud`.`vm_template` SET url = ? , checksum = ? WHERE hypervisor_type = ? AND type = 'SYSTEM' AND removed is null order by id desc limit 1");) {
update_templ_url_pstmt.setString(1, newTemplateUrl.get(hypervisorAndTemplateName.getKey()));
update_templ_url_pstmt.setString(2, newTemplateChecksum.get(hypervisorAndTemplateName.getKey()));
update_templ_url_pstmt.setString(3, hypervisorAndTemplateName.getKey().toString());
update_templ_url_pstmt.executeUpdate();
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type "
+ hypervisorAndTemplateName.getKey().toString() + ": " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type "
+ hypervisorAndTemplateName.getKey().toString(), e);
}
}
}
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates:Exception while getting ids of templates: " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while getting ids of templates", e);
}
}
LOG.debug("Updating System Vm Template IDs Complete");
}
@Override

View File

@@ -0,0 +1,53 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.upgrade.dao;
import java.io.InputStream;
import java.sql.Connection;
public class Upgrade41120to41130 implements DbUpgrade {
@Override
public String[] getUpgradableVersionRange() {
return new String[]{"4.11.2.0", "4.11.3.0"};
}
@Override
public String getUpgradedVersion() {
return "4.11.3.0";
}
@Override
public boolean supportsRollingUpgrade() {
return false;
}
@Override
public InputStream[] getPrepareScripts() {
return new InputStream[] {};
}
@Override
public void performDataMigration(Connection conn) {
}
@Override
public InputStream[] getCleanupScripts() {
return new InputStream[] {};
}
}
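
For context, a rough sketch of how such a hop is driven; the loop and the runScripts helper are illustrative, not the actual DatabaseUpgradeChecker code, which adds version bookkeeping and transaction handling around these calls:

for (DbUpgrade hop : upgradePath) {
    runScripts(conn, hop.getPrepareScripts());  // schema scripts first
    hop.performDataMigration(conn);             // code-driven data fixes
    runScripts(conn, hop.getCleanupScripts());  // post-migration cleanup
}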

View File

@@ -19,9 +19,17 @@ package com.cloud.upgrade.dao;
import java.io.InputStream;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.log4j.Logger;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.utils.exception.CloudRuntimeException;
public class Upgrade41200to41300 implements DbUpgrade {
@@ -56,7 +64,175 @@ public class Upgrade41200to41300 implements DbUpgrade {
@Override
public void performDataMigration(Connection conn) {
updateSystemVmTemplates(conn);
}
@SuppressWarnings("serial")
private void updateSystemVmTemplates(final Connection conn) {
LOG.debug("Updating System Vm template IDs");
final Set<Hypervisor.HypervisorType> hypervisorsListInUse = new HashSet<Hypervisor.HypervisorType>();
try (PreparedStatement pstmt = conn.prepareStatement("select distinct(hypervisor_type) from `cloud`.`cluster` where removed is null"); ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
switch (Hypervisor.HypervisorType.getType(rs.getString(1))) {
case XenServer:
hypervisorsListInUse.add(Hypervisor.HypervisorType.XenServer);
break;
case KVM:
hypervisorsListInUse.add(Hypervisor.HypervisorType.KVM);
break;
case VMware:
hypervisorsListInUse.add(Hypervisor.HypervisorType.VMware);
break;
case Hyperv:
hypervisorsListInUse.add(Hypervisor.HypervisorType.Hyperv);
break;
case LXC:
hypervisorsListInUse.add(Hypervisor.HypervisorType.LXC);
break;
case Ovm3:
hypervisorsListInUse.add(Hypervisor.HypervisorType.Ovm3);
break;
default:
break;
}
}
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates: Exception caught while getting hypervisor types from clusters: " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while getting hypervisor types from clusters", e);
}
final Map<Hypervisor.HypervisorType, String> NewTemplateNameList = new HashMap<Hypervisor.HypervisorType, String>() {
{
put(Hypervisor.HypervisorType.KVM, "systemvm-kvm-4.11.3");
put(Hypervisor.HypervisorType.VMware, "systemvm-vmware-4.11.3");
put(Hypervisor.HypervisorType.XenServer, "systemvm-xenserver-4.11.3");
put(Hypervisor.HypervisorType.Hyperv, "systemvm-hyperv-4.11.3");
put(Hypervisor.HypervisorType.LXC, "systemvm-lxc-4.11.3");
put(Hypervisor.HypervisorType.Ovm3, "systemvm-ovm3-4.11.3");
}
};
final Map<Hypervisor.HypervisorType, String> routerTemplateConfigurationNames = new HashMap<Hypervisor.HypervisorType, String>() {
{
put(Hypervisor.HypervisorType.KVM, "router.template.kvm");
put(Hypervisor.HypervisorType.VMware, "router.template.vmware");
put(Hypervisor.HypervisorType.XenServer, "router.template.xenserver");
put(Hypervisor.HypervisorType.Hyperv, "router.template.hyperv");
put(Hypervisor.HypervisorType.LXC, "router.template.lxc");
put(Hypervisor.HypervisorType.Ovm3, "router.template.ovm3");
}
};
final Map<Hypervisor.HypervisorType, String> newTemplateUrl = new HashMap<Hypervisor.HypervisorType, String>() {
{
put(Hypervisor.HypervisorType.KVM, "https://download.cloudstack.org/systemvm/4.11/systemvmtemplate-4.11.3-kvm.qcow2.bz2");
put(Hypervisor.HypervisorType.VMware, "https://download.cloudstack.org/systemvm/4.11/systemvmtemplate-4.11.3-vmware.ova");
put(Hypervisor.HypervisorType.XenServer, "https://download.cloudstack.org/systemvm/4.11/systemvmtemplate-4.11.3-xen.vhd.bz2");
put(Hypervisor.HypervisorType.Hyperv, "https://download.cloudstack.org/systemvm/4.11/systemvmtemplate-4.11.3-hyperv.vhd.zip");
put(Hypervisor.HypervisorType.LXC, "https://download.cloudstack.org/systemvm/4.11/systemvmtemplate-4.11.3-kvm.qcow2.bz2");
put(Hypervisor.HypervisorType.Ovm3, "https://download.cloudstack.org/systemvm/4.11/systemvmtemplate-4.11.3-ovm.raw.bz2");
}
};
final Map<Hypervisor.HypervisorType, String> newTemplateChecksum = new HashMap<Hypervisor.HypervisorType, String>() {
{
put(Hypervisor.HypervisorType.KVM, "15ec268d0939a8fa0be1bc79f397a167");
put(Hypervisor.HypervisorType.XenServer, "ae96f35fb746524edc4ebc9856719d71");
put(Hypervisor.HypervisorType.VMware, "f50c82139430afce7e4e46d3a585abbd");
put(Hypervisor.HypervisorType.Hyperv, "abf411f6cdd9139716b5d8172ab903a6");
put(Hypervisor.HypervisorType.LXC, "15ec268d0939a8fa0be1bc79f397a167");
put(Hypervisor.HypervisorType.Ovm3, "c71f143a477f4c7a0d5e8c82ccb00220");
}
};
for (final Map.Entry<Hypervisor.HypervisorType, String> hypervisorAndTemplateName : NewTemplateNameList.entrySet()) {
LOG.debug("Updating " + hypervisorAndTemplateName.getKey() + " System Vms");
try (PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`vm_template` where name = ? and removed is null order by id desc limit 1")) {
// Get 4.11 systemvm template id for corresponding hypervisor
long templateId = -1;
pstmt.setString(1, hypervisorAndTemplateName.getValue());
try (ResultSet rs = pstmt.executeQuery()) {
if (rs.next()) {
templateId = rs.getLong(1);
}
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates: Exception caught while getting ids of templates: " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates: Exception caught while getting ids of templates", e);
}
// change template type to SYSTEM
if (templateId != -1) {
try (PreparedStatement templ_type_pstmt = conn.prepareStatement("update `cloud`.`vm_template` set type='SYSTEM' where id = ?");) {
templ_type_pstmt.setLong(1, templateId);
templ_type_pstmt.executeUpdate();
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system': " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system'", e);
}
// update template ID of system Vms
try (PreparedStatement update_templ_id_pstmt = conn
.prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = ? and removed is NULL");) {
update_templ_id_pstmt.setLong(1, templateId);
update_templ_id_pstmt.setString(2, hypervisorAndTemplateName.getKey().toString());
update_templ_id_pstmt.executeUpdate();
} catch (final Exception e) {
LOG.error("updateSystemVmTemplates:Exception while setting template for " + hypervisorAndTemplateName.getKey().toString() + " to " + templateId
+ ": " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while setting template for " + hypervisorAndTemplateName.getKey().toString() + " to "
+ templateId, e);
}
// Change value of global configuration parameter
// router.template.* for the corresponding hypervisor
try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? WHERE name = ?");) {
update_pstmt.setString(1, hypervisorAndTemplateName.getValue());
update_pstmt.setString(2, routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()));
update_pstmt.executeUpdate();
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates:Exception while setting " + routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()) + " to "
+ hypervisorAndTemplateName.getValue() + ": " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while setting "
+ routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()) + " to " + hypervisorAndTemplateName.getValue(), e);
}
// Change value of global configuration parameter
// minreq.sysvmtemplate.version for the ACS version
try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? WHERE name = ?");) {
update_pstmt.setString(1, "4.11.3");
update_pstmt.setString(2, "minreq.sysvmtemplate.version");
update_pstmt.executeUpdate();
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates:Exception while setting 'minreq.sysvmtemplate.version' to 4.11.3: " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while setting 'minreq.sysvmtemplate.version' to 4.11.3", e);
}
} else {
if (hypervisorsListInUse.contains(hypervisorAndTemplateName.getKey())) {
throw new CloudRuntimeException(getUpgradedVersion() + ": " + hypervisorAndTemplateName.getKey() + " SystemVm template not found. Cannot upgrade system Vms");
} else {
LOG.warn(getUpgradedVersion() + ": " + hypervisorAndTemplateName.getKey() + " SystemVm template not found. " + hypervisorAndTemplateName.getKey()
+ " hypervisor is not used, so not failing the upgrade");
// Update the latest template URLs for corresponding
// hypervisor
try (PreparedStatement update_templ_url_pstmt = conn
.prepareStatement("UPDATE `cloud`.`vm_template` SET url = ? , checksum = ? WHERE hypervisor_type = ? AND type = 'SYSTEM' AND removed is null order by id desc limit 1");) {
update_templ_url_pstmt.setString(1, newTemplateUrl.get(hypervisorAndTemplateName.getKey()));
update_templ_url_pstmt.setString(2, newTemplateChecksum.get(hypervisorAndTemplateName.getKey()));
update_templ_url_pstmt.setString(3, hypervisorAndTemplateName.getKey().toString());
update_templ_url_pstmt.executeUpdate();
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type "
+ hypervisorAndTemplateName.getKey().toString() + ": " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type "
+ hypervisorAndTemplateName.getKey().toString(), e);
}
}
}
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates:Exception while getting ids of templates: " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while getting ids of templates", e);
}
}
LOG.debug("Updating System Vm Template IDs Complete");
}
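For reference, the per-hypervisor loop above boils down to a lookup-then-promote pattern: find the newest matching template row, then flip its type to SYSTEM. The following is a minimal, self-contained sketch of that pattern, assuming only a plain java.sql.Connection; the class and method names are illustrative, not part of the upgrade code:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public class TemplateIdLookupSketch {

    // Finds the newest non-removed template with the given name and marks it
    // as a SYSTEM template; returns -1 when no such template exists.
    static long promoteTemplate(Connection conn, String templateName) throws SQLException {
        long templateId = -1;
        try (PreparedStatement select = conn.prepareStatement(
                "select id from `cloud`.`vm_template` where name = ? and removed is null order by id desc limit 1")) {
            select.setString(1, templateName);
            try (ResultSet rs = select.executeQuery()) {
                if (rs.next()) {
                    templateId = rs.getLong(1);
                }
            }
        }
        if (templateId != -1) {
            try (PreparedStatement update = conn.prepareStatement(
                    "update `cloud`.`vm_template` set type = 'SYSTEM' where id = ?")) {
                update.setLong(1, templateId);
                update.executeUpdate();
            }
        }
        return templateId;
    }
}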
@Override

View File

@ -27,92 +27,18 @@
http://www.springframework.org/schema/context/spring-context.xsd"
>
<!--
DAO with customized configuration
-->
<bean id="serviceOfferingDaoImpl" class="com.cloud.service.dao.ServiceOfferingDaoImpl">
<property name="configParams">
<map>
<entry key="cache.size" value="50" />
<entry key="cache.time.to.live" value="600" />
</map>
</property>
</bean>
<bean id="diskOfferingDaoImpl" class="com.cloud.storage.dao.DiskOfferingDaoImpl">
<property name="configParams">
<map>
<entry key="cache.size" value="50" />
<entry key="cache.time.to.live" value="600" />
</map>
</property>
</bean>
<bean id="dataCenterDaoImpl" class="com.cloud.dc.dao.DataCenterDaoImpl">
<property name="configParams">
<map>
<entry key="cache.size" value="50" />
<entry key="cache.time.to.live" value="600" />
</map>
</property>
</bean>
<bean id="hostPodDaoImpl" class="com.cloud.dc.dao.HostPodDaoImpl">
<property name="configParams">
<map>
<entry key="cache.size" value="50" />
<entry key="cache.time.to.live" value="600" />
</map>
</property>
</bean>
<bean id="vlanDaoImpl" class="com.cloud.dc.dao.VlanDaoImpl">
<property name="configParams">
<map>
<entry key="cache.size" value="30" />
<entry key="cache.time.to.live" value="3600" />
</map>
</property>
</bean>
<bean id="userDaoImpl" class="com.cloud.user.dao.UserDaoImpl">
<property name="configParams">
<map>
<entry key="cache.size" value="5000" />
<entry key="cache.time.to.live" value="300" />
</map>
</property>
</bean>
<bean id="VMTemplateDaoImpl" class="com.cloud.storage.dao.VMTemplateDaoImpl">
<property name="configParams">
<map>
<entry key="cache.size" value="100" />
<entry key="cache.time.to.live" value="600" />
</map>
</property>
</bean>
<bean id="hypervisorCapabilitiesDaoImpl" class="com.cloud.hypervisor.dao.HypervisorCapabilitiesDaoImpl">
<property name="configParams">
<map>
<entry key="cache.size" value="100" />
<entry key="cache.time.to.live" value="600" />
</map>
</property>
</bean>
<bean id="dedicatedResourceDaoImpl" class="com.cloud.dc.dao.DedicatedResourceDaoImpl">
<property name="configParams">
<map>
<entry key="cache.size" value="30" />
<entry key="cache.time.to.live" value="3600" />
</map>
</property>
</bean>
<!--
DAOs with default configuration
-->
<bean id="serviceOfferingDaoImpl" class="com.cloud.service.dao.ServiceOfferingDaoImpl" />
<bean id="diskOfferingDaoImpl" class="com.cloud.storage.dao.DiskOfferingDaoImpl" />
<bean id="dataCenterDaoImpl" class="com.cloud.dc.dao.DataCenterDaoImpl" />
<bean id="hostPodDaoImpl" class="com.cloud.dc.dao.HostPodDaoImpl" />
<bean id="vlanDaoImpl" class="com.cloud.dc.dao.VlanDaoImpl" />
<bean id="userDaoImpl" class="com.cloud.user.dao.UserDaoImpl" />
<bean id="VMTemplateDaoImpl" class="com.cloud.storage.dao.VMTemplateDaoImpl" />
<bean id="hypervisorCapabilitiesDaoImpl" class="com.cloud.hypervisor.dao.HypervisorCapabilitiesDaoImpl" />
<bean id="dedicatedResourceDaoImpl" class="com.cloud.dc.dao.DedicatedResourceDaoImpl" />
<bean id="roleDaoImpl" class="org.apache.cloudstack.acl.dao.RoleDaoImpl" />
<bean id="rolePermissionsDaoImpl" class="org.apache.cloudstack.acl.dao.RolePermissionsDaoImpl" />
<bean id="accountDaoImpl" class="com.cloud.user.dao.AccountDaoImpl" />

View File

@ -30,6 +30,7 @@ import com.cloud.upgrade.dao.DbUpgrade;
import com.cloud.upgrade.dao.Upgrade41000to41100;
import com.cloud.upgrade.dao.Upgrade41100to41110;
import com.cloud.upgrade.dao.Upgrade41110to41120;
import com.cloud.upgrade.dao.Upgrade41120to41130;
import com.cloud.upgrade.dao.Upgrade41120to41200;
import com.cloud.upgrade.dao.Upgrade452to453;
import com.cloud.upgrade.dao.Upgrade453to460;
@ -98,10 +99,11 @@ public class DatabaseUpgradeCheckerTest {
assertTrue(upgrades[0] instanceof Upgrade41000to41100);
assertTrue(upgrades[1] instanceof Upgrade41100to41110);
assertTrue(upgrades[2] instanceof Upgrade41110to41120);
assertTrue(upgrades[3] instanceof Upgrade41120to41200);
assertTrue(upgrades[3] instanceof Upgrade41120to41130);
assertTrue(upgrades[4] instanceof Upgrade41120to41200);
assertTrue(Arrays.equals(new String[] {"4.11.0.0", "4.11.1.0"}, upgrades[1].getUpgradableVersionRange()));
assertEquals(currentVersion.toString(), upgrades[3].getUpgradedVersion());
assertEquals(currentVersion.toString(), upgrades[4].getUpgradedVersion());
}

View File

@ -24,6 +24,8 @@ import java.util.Set;
import javax.inject.Inject;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
@ -54,6 +56,7 @@ import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.VMTemplatePoolDao;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachineManager;
import org.apache.commons.collections.MapUtils;
/**
* Extends {@link StorageSystemDataMotionStrategy}, allowing KVM hosts to migrate VMs with the ROOT volume on a non managed local storage pool.
@ -77,14 +80,16 @@ public class KvmNonManagedStorageDataMotionStrategy extends StorageSystemDataMot
* Note that the super implementation (override) is called by {@link #canHandle(Map, Host, Host)} which ensures that {@link #internalCanHandle(Map, Host, Host)} will be executed only if the source host is KVM.
*/
@Override
protected StrategyPriority internalCanHandle(Map<VolumeInfo, DataStore> volumeMap) {
if (super.internalCanHandle(volumeMap) == StrategyPriority.CANT_HANDLE) {
Set<VolumeInfo> volumeInfoSet = volumeMap.keySet();
protected StrategyPriority internalCanHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost) {
if (super.internalCanHandle(volumeMap, srcHost, destHost) == StrategyPriority.CANT_HANDLE) {
if (canHandleKVMNonManagedLiveNFSStorageMigration(volumeMap, srcHost, destHost) == StrategyPriority.CANT_HANDLE) {
Set<VolumeInfo> volumeInfoSet = volumeMap.keySet();
for (VolumeInfo volumeInfo : volumeInfoSet) {
StoragePoolVO storagePoolVO = _storagePoolDao.findById(volumeInfo.getPoolId());
if (storagePoolVO.getPoolType() != StoragePoolType.Filesystem && storagePoolVO.getPoolType() != StoragePoolType.NetworkFilesystem) {
return StrategyPriority.CANT_HANDLE;
for (VolumeInfo volumeInfo : volumeInfoSet) {
StoragePoolVO storagePoolVO = _storagePoolDao.findById(volumeInfo.getPoolId());
if (storagePoolVO.getPoolType() != StoragePoolType.Filesystem && storagePoolVO.getPoolType() != StoragePoolType.NetworkFilesystem) {
return StrategyPriority.CANT_HANDLE;
}
}
}
return StrategyPriority.HYPERVISOR;
@ -92,6 +97,52 @@ public class KvmNonManagedStorageDataMotionStrategy extends StorageSystemDataMot
return StrategyPriority.CANT_HANDLE;
}
/**
* Allow KVM live storage migration for non-managed storage when:
* - the source and destination hosts are different, but in the same cluster,
* - the source and destination storage are NFS, and
* - the destination storage is cluster-wide.
* A standalone sketch of this decision follows the method below.
*/
protected StrategyPriority canHandleKVMNonManagedLiveNFSStorageMigration(Map<VolumeInfo, DataStore> volumeMap,
Host srcHost, Host destHost) {
if (srcHost.getId() != destHost.getId() &&
srcHost.getClusterId().equals(destHost.getClusterId()) &&
isSourceNfsPrimaryStorage(volumeMap) &&
isDestinationNfsPrimaryStorageClusterWide(volumeMap)) {
return StrategyPriority.HYPERVISOR;
}
return StrategyPriority.CANT_HANDLE;
}
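As a standalone illustration of the checks above, here is a hypothetical, simplified model using plain primitives instead of CloudStack's Host and DataStore types; the class name and parameters are invented for the sketch:

public class NfsLiveMigrationCheckSketch {

    // Mirrors canHandleKVMNonManagedLiveNFSStorageMigration: different hosts in
    // the same cluster, NFS source storage, cluster-wide NFS destination.
    static boolean canHandle(long srcHostId, long destHostId, long srcClusterId, long destClusterId,
            boolean sourceIsNfs, boolean destIsClusterWideNfs) {
        return srcHostId != destHostId
                && srcClusterId == destClusterId
                && sourceIsNfs
                && destIsClusterWideNfs;
    }

    public static void main(String[] args) {
        System.out.println(canHandle(1L, 2L, 1L, 1L, true, true));  // true  -> HYPERVISOR
        System.out.println(canHandle(1L, 1L, 1L, 1L, true, true));  // false -> CANT_HANDLE (same host)
        System.out.println(canHandle(1L, 2L, 1L, 2L, true, true));  // false -> CANT_HANDLE (different clusters)
    }
}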
/**
* True if the source storage of the volumes is NFS. Only the first volume's pool is inspected.
*/
protected boolean isSourceNfsPrimaryStorage(Map<VolumeInfo, DataStore> volumeMap) {
if (MapUtils.isNotEmpty(volumeMap)) {
for (VolumeInfo volumeInfo : volumeMap.keySet()) {
StoragePoolVO storagePoolVO = _storagePoolDao.findById(volumeInfo.getPoolId());
return storagePoolVO != null &&
storagePoolVO.getPoolType() == Storage.StoragePoolType.NetworkFilesystem;
}
}
return false;
}
/**
* True if the destination storage is cluster-wide NFS. Only the first destination pool is inspected.
*/
protected boolean isDestinationNfsPrimaryStorageClusterWide(Map<VolumeInfo, DataStore> volumeMap) {
if (MapUtils.isNotEmpty(volumeMap)) {
for (DataStore dataStore : volumeMap.values()) {
StoragePoolVO storagePoolVO = _storagePoolDao.findById(dataStore.getId());
return storagePoolVO != null &&
storagePoolVO.getPoolType() == Storage.StoragePoolType.NetworkFilesystem &&
storagePoolVO.getScope() == ScopeType.CLUSTER;
}
}
return false;
}
/**
* Configures a {@link MigrateDiskInfo} object for migrating a file system volume and calls rootImageProvisioning.
*/
@ -135,7 +186,7 @@ public class KvmNonManagedStorageDataMotionStrategy extends StorageSystemDataMot
*/
@Override
protected boolean shouldMigrateVolume(StoragePoolVO sourceStoragePool, Host destHost, StoragePoolVO destStoragePool) {
return sourceStoragePool.getPoolType() == StoragePoolType.Filesystem;
return sourceStoragePool.getPoolType() == StoragePoolType.Filesystem || sourceStoragePool.getPoolType() == StoragePoolType.NetworkFilesystem;
}
/**

View File

@ -18,62 +18,18 @@
*/
package org.apache.cloudstack.storage.motion;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.storage.CopyVolumeAnswer;
import com.cloud.agent.api.storage.CopyVolumeCommand;
import com.cloud.agent.api.MigrateAnswer;
import com.cloud.agent.api.MigrateCommand;
import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo;
import com.cloud.agent.api.ModifyTargetsAnswer;
import com.cloud.agent.api.ModifyTargetsCommand;
import com.cloud.agent.api.PrepareForMigrationCommand;
import com.cloud.agent.api.storage.MigrateVolumeAnswer;
import com.cloud.agent.api.storage.MigrateVolumeCommand;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.agent.api.to.NfsTO;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.configuration.Config;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.resource.ResourceState;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.Snapshot;
import com.cloud.storage.SnapshotVO;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.VolumeDetailVO;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.GuestOSCategoryDao;
import com.cloud.storage.dao.GuestOSDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotDetailsDao;
import com.cloud.storage.dao.SnapshotDetailsVO;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.dao.VolumeDetailsDao;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.db.GlobalLock;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineManager;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.dao.VMInstanceDao;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Preconditions;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
@ -86,6 +42,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
@ -111,25 +68,73 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.MigrateAnswer;
import com.cloud.agent.api.MigrateCommand;
import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo;
import com.cloud.agent.api.ModifyTargetsAnswer;
import com.cloud.agent.api.ModifyTargetsCommand;
import com.cloud.agent.api.PrepareForMigrationCommand;
import com.cloud.agent.api.storage.CheckStorageAvailabilityCommand;
import com.cloud.agent.api.storage.CopyVolumeAnswer;
import com.cloud.agent.api.storage.CopyVolumeCommand;
import com.cloud.agent.api.storage.MigrateVolumeAnswer;
import com.cloud.agent.api.storage.MigrateVolumeCommand;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.agent.api.to.NfsTO;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.configuration.Config;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.resource.ResourceState;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.MigrationOptions;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Snapshot;
import com.cloud.storage.SnapshotVO;
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.VMTemplateStoragePoolVO;
import com.cloud.storage.VMTemplateStorageResourceAssoc;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeDetailVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.GuestOSCategoryDao;
import com.cloud.storage.dao.GuestOSDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotDetailsDao;
import com.cloud.storage.dao.SnapshotDetailsVO;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplatePoolDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.dao.VolumeDetailsDao;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.db.GlobalLock;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineManager;
import com.cloud.vm.dao.VMInstanceDao;
import com.google.common.base.Preconditions;
import javax.inject.Inject;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
@Component
public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
private static final Logger LOGGER = Logger.getLogger(StorageSystemDataMotionStrategy.class);
private static final Random RANDOM = new Random(System.nanoTime());
@ -176,6 +181,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
private StorageCacheManager cacheMgr;
@Inject
private EndPointSelector selector;
@Inject
VMTemplatePoolDao templatePoolDao;
@Override
public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
@ -272,7 +279,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
@Override
public final StrategyPriority canHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost) {
if (HypervisorType.KVM.equals(srcHost.getHypervisorType())) {
return internalCanHandle(volumeMap);
return internalCanHandle(volumeMap, srcHost, destHost);
}
return StrategyPriority.CANT_HANDLE;
}
@ -280,7 +287,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
/**
* Handles migrating volumes on managed Storage.
*/
protected StrategyPriority internalCanHandle(Map<VolumeInfo, DataStore> volumeMap) {
protected StrategyPriority internalCanHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost) {
Set<VolumeInfo> volumeInfoSet = volumeMap.keySet();
for (VolumeInfo volumeInfo : volumeInfoSet) {
@ -299,6 +306,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
if (storagePoolVO.isManaged()) {
return StrategyPriority.HIGHEST;
}
}
return StrategyPriority.CANT_HANDLE;
}
@ -1698,6 +1706,50 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
return _snapshotDetailsDao.persist(snapshotDetails);
}
/**
* Returns the expected MigrationOptions for a linked-clone volume live storage migration: reuses the template reference already present on the destination pool if there is one, otherwise falls back to the source backing file and requests a copy. A sketch follows the method below.
*/
protected MigrationOptions createLinkedCloneMigrationOptions(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, String srcVolumeBackingFile, String srcPoolUuid, Storage.StoragePoolType srcPoolType) {
VMTemplateStoragePoolVO ref = templatePoolDao.findByPoolTemplate(destVolumeInfo.getPoolId(), srcVolumeInfo.getTemplateId());
boolean updateBackingFileReference = ref == null;
String backingFile = ref != null ? ref.getInstallPath() : srcVolumeBackingFile;
return new MigrationOptions(srcPoolUuid, srcPoolType, backingFile, updateBackingFileReference);
}
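To make the linked-clone decision concrete, here is a hypothetical sketch of the branch above with plain strings standing in for VMTemplateStoragePoolVO; the names and paths are invented for illustration:

import java.util.Arrays;

public class LinkedCloneOptionsSketch {

    // If the template is already present on the destination pool, reuse its
    // install path and skip the copy; otherwise fall back to the source
    // backing file and flag that the backing file reference must be updated.
    static Object[] decide(String destPoolInstallPath, String srcBackingFile) {
        boolean updateBackingFileReference = destPoolInstallPath == null;
        String backingFile = updateBackingFileReference ? srcBackingFile : destPoolInstallPath;
        return new Object[] { backingFile, updateBackingFileReference };
    }

    public static void main(String[] args) {
        // Template already on the destination: reuse it, no copy needed.
        System.out.println(Arrays.toString(decide("/mnt/dest/tmpl.qcow2", "/mnt/src/tmpl.qcow2")));
        // Template missing on the destination: copy the source backing file first.
        System.out.println(Arrays.toString(decide(null, "/mnt/src/tmpl.qcow2")));
    }
}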
/**
* Returns the expected MigrationOptions for a full-clone volume live storage migration.
*/
protected MigrationOptions createFullCloneMigrationOptions(VolumeInfo srcVolumeInfo, VirtualMachineTO vmTO, Host srcHost, String srcPoolUuid, Storage.StoragePoolType srcPoolType) {
return new MigrationOptions(srcPoolUuid, srcPoolType, srcVolumeInfo.getPath());
}
/**
* Prepares hosts for KVM live storage migration by setting MigrationOptions on the destination volume, depending on the volume type:
* - Linked clones (backing file on disk): decide whether the template (backing file) should be copied to the destination storage prior to disk creation.
* - Full clones (no backing file): take a snapshot of the VM prior to disk creation.
* A simplified sketch of this classification follows the method below.
*/
protected void setVolumeMigrationOptions(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo,
VirtualMachineTO vmTO, Host srcHost, StoragePoolVO destStoragePool) {
if (!destStoragePool.isManaged()) {
String srcVolumeBackingFile = getVolumeBackingFile(srcVolumeInfo);
String srcPoolUuid = srcVolumeInfo.getDataStore().getUuid();
StoragePoolVO srcPool = _storagePoolDao.findById(srcVolumeInfo.getPoolId());
Storage.StoragePoolType srcPoolType = srcPool.getPoolType();
MigrationOptions migrationOptions;
if (StringUtils.isNotBlank(srcVolumeBackingFile)) {
migrationOptions = createLinkedCloneMigrationOptions(srcVolumeInfo, destVolumeInfo,
srcVolumeBackingFile, srcPoolUuid, srcPoolType);
} else {
migrationOptions = createFullCloneMigrationOptions(srcVolumeInfo, vmTO, srcHost, srcPoolUuid, srcPoolType);
}
migrationOptions.setTimeout(StorageManager.KvmStorageOnlineMigrationWait.value());
destVolumeInfo.setMigrationOptions(migrationOptions);
}
}
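A simplified sketch of the classification above, with a plain string standing in for the volume's backing file; the enum and method are illustrative only:

public class MigrationOptionsBranchSketch {

    enum CloneType { LINKED_CLONE, FULL_CLONE }

    // StringUtils.isNotBlank(...) in the real code: a null, empty, or
    // whitespace-only value means the volume has no backing file on disk.
    static CloneType classify(String backingFile) {
        return (backingFile != null && !backingFile.trim().isEmpty())
                ? CloneType.LINKED_CLONE
                : CloneType.FULL_CLONE;
    }

    public static void main(String[] args) {
        System.out.println(classify("/mnt/pool1/template.qcow2")); // LINKED_CLONE
        System.out.println(classify(null));                        // FULL_CLONE
    }
}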
/**
* For each disk to migrate:
* <ul>
@ -1716,7 +1768,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
throw new CloudRuntimeException("Invalid hypervisor type (only KVM supported for this operation at the time being)");
}
verifyLiveMigrationMapForKVM(volumeDataStoreMap);
verifyLiveMigrationForKVM(volumeDataStoreMap, destHost);
VMInstanceVO vmInstance = _vmDao.findById(vmTO.getId());
vmTO.setState(vmInstance.getState());
@ -1725,6 +1777,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
Map<String, MigrateCommand.MigrateDiskInfo> migrateStorage = new HashMap<>();
Map<VolumeInfo, VolumeInfo> srcVolumeInfoToDestVolumeInfo = new HashMap<>();
boolean managedStorageDestination = false;
for (Map.Entry<VolumeInfo, DataStore> entry : volumeDataStoreMap.entrySet()) {
VolumeInfo srcVolumeInfo = entry.getKey();
DataStore destDataStore = entry.getValue();
@ -1749,15 +1802,23 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
// move the volume from Ready to Migrating
destVolumeInfo.processEvent(Event.MigrationRequested);
setVolumeMigrationOptions(srcVolumeInfo, destVolumeInfo, vmTO, srcHost, destStoragePool);
// create a volume on the destination storage
destDataStore.getDriver().createAsync(destDataStore, destVolumeInfo, null);
managedStorageDestination = destStoragePool.isManaged();
String volumeIdentifier = managedStorageDestination ? destVolumeInfo.get_iScsiName() : destVolumeInfo.getUuid();
destVolume = _volumeDao.findById(destVolume.getId());
destVolume.setPath(volumeIdentifier);
setVolumePath(destVolume);
_volumeDao.update(destVolume.getId(), destVolume);
postVolumeCreationActions(srcVolumeInfo, destVolumeInfo, vmTO, srcHost);
destVolumeInfo = _volumeDataFactory.getVolume(destVolume.getId(), destDataStore);
handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION);
@ -1766,9 +1827,18 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
String destPath = generateDestPath(destHost, destStoragePool, destVolumeInfo);
MigrateCommand.MigrateDiskInfo migrateDiskInfo = configureMigrateDiskInfo(srcVolumeInfo, destPath);
migrateDiskInfo.setSourceDiskOnStorageFileSystem(isStoragePoolTypeOfFile(sourceStoragePool));
migrateDiskInfoList.add(migrateDiskInfo);
MigrateCommand.MigrateDiskInfo migrateDiskInfo;
if (managedStorageDestination) {
migrateDiskInfo = configureMigrateDiskInfo(srcVolumeInfo, destPath);
migrateDiskInfo.setSourceDiskOnStorageFileSystem(isStoragePoolTypeOfFile(sourceStoragePool));
migrateDiskInfoList.add(migrateDiskInfo);
} else {
migrateDiskInfo = new MigrateCommand.MigrateDiskInfo(srcVolumeInfo.getPath(),
MigrateCommand.MigrateDiskInfo.DiskType.FILE,
MigrateCommand.MigrateDiskInfo.DriverType.QCOW2,
MigrateCommand.MigrateDiskInfo.Source.FILE,
connectHostToVolume(destHost, destVolumeInfo.getPoolId(), volumeIdentifier));
}
migrateStorage.put(srcVolumeInfo.getPath(), migrateDiskInfo);
@ -1795,15 +1865,12 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows");
MigrateCommand migrateCommand = new MigrateCommand(vmTO.getName(), destHost.getPrivateIpAddress(), isWindows, vmTO, true);
migrateCommand.setWait(StorageManager.KvmStorageOnlineMigrationWait.value());
migrateCommand.setMigrateStorage(migrateStorage);
migrateCommand.setMigrateDiskInfoList(migrateDiskInfoList);
migrateCommand.setMigrateStorageManaged(managedStorageDestination);
String autoConvergence = _configDao.getValue(Config.KvmAutoConvergence.toString());
boolean kvmAutoConvergence = Boolean.parseBoolean(autoConvergence);
boolean kvmAutoConvergence = StorageManager.KvmAutoConvergence.value();
migrateCommand.setAutoConvergence(kvmAutoConvergence);
MigrateAnswer migrateAnswer = (MigrateAnswer)agentManager.send(srcHost.getId(), migrateCommand);
@ -1863,7 +1930,9 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
* Configures a {@link MigrateDiskInfo} object with disk type of BLOCK, Driver type RAW and Source DEV
*/
protected MigrateCommand.MigrateDiskInfo configureMigrateDiskInfo(VolumeInfo srcVolumeInfo, String destPath) {
return new MigrateCommand.MigrateDiskInfo(srcVolumeInfo.getPath(), MigrateCommand.MigrateDiskInfo.DiskType.BLOCK, MigrateCommand.MigrateDiskInfo.DriverType.RAW,
return new MigrateCommand.MigrateDiskInfo(srcVolumeInfo.getPath(),
MigrateCommand.MigrateDiskInfo.DiskType.BLOCK,
MigrateCommand.MigrateDiskInfo.DriverType.RAW,
MigrateCommand.MigrateDiskInfo.Source.DEV, destPath);
}
@ -1883,6 +1952,21 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
// This method is used by classes that extend this one
}
/*
* Returns the backing file for the volume (if any); applies to KVM volumes only.
*/
private String getVolumeBackingFile(VolumeInfo srcVolumeInfo) {
if (srcVolumeInfo.getHypervisorType() == HypervisorType.KVM &&
srcVolumeInfo.getTemplateId() != null && srcVolumeInfo.getPoolId() != null) {
VMTemplateVO template = _vmTemplateDao.findById(srcVolumeInfo.getTemplateId());
if (template.getFormat() != null && template.getFormat() != Storage.ImageFormat.ISO) {
VMTemplateStoragePoolVO ref = templatePoolDao.findByPoolTemplate(srcVolumeInfo.getPoolId(), srcVolumeInfo.getTemplateId());
return ref != null ? ref.getInstallPath() : null;
}
}
return null;
}
private void handlePostMigration(boolean success, Map<VolumeInfo, VolumeInfo> srcVolumeInfoToDestVolumeInfo, VirtualMachineTO vmTO, Host destHost) {
if (!success) {
try {
@ -2046,10 +2130,40 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
return modifyTargetsAnswer.getConnectedPaths();
}
/**
* Updates the template_spool_ref table with a reference to the template copied to the destination storage.
*/
protected void updateCopiedTemplateReference(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo) {
VMTemplateStoragePoolVO ref = templatePoolDao.findByPoolTemplate(srcVolumeInfo.getPoolId(), srcVolumeInfo.getTemplateId());
VMTemplateStoragePoolVO newRef = new VMTemplateStoragePoolVO(destVolumeInfo.getPoolId(), ref.getTemplateId());
newRef.setDownloadPercent(100);
newRef.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOADED);
newRef.setState(ObjectInDataStoreStateMachine.State.Ready);
newRef.setTemplateSize(ref.getTemplateSize());
newRef.setLocalDownloadPath(ref.getLocalDownloadPath());
newRef.setInstallPath(ref.getInstallPath());
templatePoolDao.persist(newRef);
}
/**
* Handles post-creation actions for the destination volume, depending on the migrating volume type: full clone or linked clone.
*/
protected void postVolumeCreationActions(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, VirtualMachineTO vmTO, Host srcHost) {
MigrationOptions migrationOptions = destVolumeInfo.getMigrationOptions();
if (migrationOptions != null) {
if (migrationOptions.getType() == MigrationOptions.Type.LinkedClone && migrationOptions.isCopySrcTemplate()) {
updateCopiedTemplateReference(srcVolumeInfo, destVolumeInfo);
}
}
}
/*
* At a high level: The source storage cannot be managed and the destination storage must be managed.
* At a high level: The source storage cannot be managed and
* the destination storage pools must be either all managed or all unmanaged, not mixed.
*/
private void verifyLiveMigrationMapForKVM(Map<VolumeInfo, DataStore> volumeDataStoreMap) {
protected void verifyLiveMigrationForKVM(Map<VolumeInfo, DataStore> volumeDataStoreMap, Host destHost) {
Boolean storageTypeConsistency = null;
Map<String, Storage.StoragePoolType> sourcePools = new HashMap<>();
for (Map.Entry<VolumeInfo, DataStore> entry : volumeDataStoreMap.entrySet()) {
VolumeInfo volumeInfo = entry.getKey();
@ -2070,6 +2184,47 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
if (destStoragePoolVO == null) {
throw new CloudRuntimeException("Destination storage pool with ID " + dataStore.getId() + " was not located.");
}
if (storageTypeConsistency == null) {
storageTypeConsistency = destStoragePoolVO.isManaged();
} else if (storageTypeConsistency != destStoragePoolVO.isManaged()) {
throw new CloudRuntimeException("Destination storage pools must be either all managed or all not managed");
}
if (!destStoragePoolVO.isManaged()) {
if (destStoragePoolVO.getPoolType() == StoragePoolType.NetworkFilesystem &&
destStoragePoolVO.getScope() != ScopeType.CLUSTER) {
throw new CloudRuntimeException("KVM live storage migrations currently support cluster-wide " +
"not managed NFS destination storage");
}
if (!sourcePools.containsKey(srcStoragePoolVO.getUuid())) {
sourcePools.put(srcStoragePoolVO.getUuid(), srcStoragePoolVO.getPoolType());
}
}
}
verifyDestinationStorage(sourcePools, destHost);
}
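The consistency rule enforced above can be shown in isolation; the sketch below is hypothetical and uses a list of booleans in place of the destination StoragePoolVO managed flags:

import java.util.Arrays;
import java.util.List;

public class DestinationConsistencySketch {

    // Destination pools must be uniformly managed or uniformly unmanaged.
    static void verify(List<Boolean> destPoolManagedFlags) {
        Boolean consistency = null;
        for (boolean managed : destPoolManagedFlags) {
            if (consistency == null) {
                consistency = managed;
            } else if (consistency != managed) {
                throw new IllegalStateException("Destination storage pools must be either all managed or all unmanaged");
            }
        }
    }

    public static void main(String[] args) {
        verify(Arrays.asList(true, true));   // passes: all managed
        verify(Arrays.asList(false, false)); // passes: all unmanaged
        verify(Arrays.asList(true, false));  // throws: mixed
    }
}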
/**
* Performs storage validation on the destination host for KVM live storage migrations:
* validates that the volumes' source storage pools are mounted on the destination host prior to the migration.
* @throws CloudRuntimeException if any source storage pool is not mounted on the destination host
*/
private void verifyDestinationStorage(Map<String, Storage.StoragePoolType> sourcePools, Host destHost) {
if (MapUtils.isNotEmpty(sourcePools)) {
LOGGER.debug("Verifying source pools are already available on destination host " + destHost.getUuid());
CheckStorageAvailabilityCommand cmd = new CheckStorageAvailabilityCommand(sourcePools);
try {
Answer answer = agentManager.send(destHost.getId(), cmd);
if (answer == null) {
throw new CloudRuntimeException("Storage verification failed on host " + destHost.getUuid() + ": no answer received");
} else if (!answer.getResult()) {
throw new CloudRuntimeException("Storage verification failed on host " + destHost.getUuid() + ": " + answer.getDetails());
}
} catch (AgentUnavailableException | OperationTimedoutException e) {
LOGGER.error("Cannot perform storage verification on host " + destHost.getUuid(), e);
throw new CloudRuntimeException("Cannot perform storage verification on host " + destHost.getUuid() +
" due to: " + e.getMessage());
}
}
}

View File

@ -21,6 +21,10 @@ package org.apache.cloudstack.storage.motion;
import java.util.HashMap;
import java.util.Map;
import com.cloud.host.Host;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
@ -38,6 +42,7 @@ import org.apache.cloudstack.storage.image.store.ImageStoreImpl;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.volume.VolumeObject;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InOrder;
@ -53,7 +58,6 @@ import com.cloud.agent.api.MigrateCommand;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.CloudException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.DataStoreRole;
@ -66,6 +70,9 @@ import com.cloud.storage.dao.VMTemplatePoolDao;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachineManager;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.when;
@RunWith(MockitoJUnitRunner.class)
public class KvmNonManagedStorageSystemDataMotionTest {
@ -90,6 +97,36 @@ public class KvmNonManagedStorageSystemDataMotionTest {
@InjectMocks
private KvmNonManagedStorageDataMotionStrategy kvmNonManagedStorageDataMotionStrategy;
@Mock
VolumeInfo volumeInfo1;
@Mock
VolumeInfo volumeInfo2;
@Mock
DataStore dataStore1;
@Mock
DataStore dataStore2;
@Mock
DataStore dataStore3;
@Mock
StoragePoolVO pool1;
@Mock
StoragePoolVO pool2;
@Mock
StoragePoolVO pool3;
@Mock
Host host1;
@Mock
Host host2;
Map<VolumeInfo, DataStore> migrationMap;
private static final Long POOL_1_ID = 1L;
private static final Long POOL_2_ID = 2L;
private static final Long POOL_3_ID = 3L;
private static final Long HOST_1_ID = 1L;
private static final Long HOST_2_ID = 2L;
private static final Long CLUSTER_ID = 1L;
@Test
public void canHandleTestExpectHypervisorStrategyForKvm() {
canHandleExpectCannotHandle(HypervisorType.KVM, 1, StrategyPriority.HYPERVISOR);
@ -109,12 +146,13 @@ public class KvmNonManagedStorageSystemDataMotionTest {
private void canHandleExpectCannotHandle(HypervisorType hypervisorType, int times, StrategyPriority expectedStrategyPriority) {
HostVO srcHost = new HostVO("sourceHostUuid");
HostVO destHost = new HostVO("destHostUuid");
srcHost.setHypervisorType(hypervisorType);
Mockito.doReturn(StrategyPriority.HYPERVISOR).when(kvmNonManagedStorageDataMotionStrategy).internalCanHandle(new HashMap<>());
Mockito.doReturn(StrategyPriority.HYPERVISOR).when(kvmNonManagedStorageDataMotionStrategy).internalCanHandle(new HashMap<>(), srcHost, destHost);
StrategyPriority strategyPriority = kvmNonManagedStorageDataMotionStrategy.canHandle(new HashMap<>(), srcHost, new HostVO("destHostUuid"));
StrategyPriority strategyPriority = kvmNonManagedStorageDataMotionStrategy.canHandle(new HashMap<>(), srcHost, destHost);
Mockito.verify(kvmNonManagedStorageDataMotionStrategy, Mockito.times(times)).internalCanHandle(new HashMap<>());
Mockito.verify(kvmNonManagedStorageDataMotionStrategy, Mockito.times(times)).internalCanHandle(new HashMap<>(), srcHost, destHost);
Assert.assertEquals(expectedStrategyPriority, strategyPriority);
}
@ -123,7 +161,7 @@ public class KvmNonManagedStorageSystemDataMotionTest {
StoragePoolType[] storagePoolTypeArray = StoragePoolType.values();
for (int i = 0; i < storagePoolTypeArray.length; i++) {
Map<VolumeInfo, DataStore> volumeMap = configureTestInternalCanHandle(false, storagePoolTypeArray[i]);
StrategyPriority strategyPriority = kvmNonManagedStorageDataMotionStrategy.internalCanHandle(volumeMap);
StrategyPriority strategyPriority = kvmNonManagedStorageDataMotionStrategy.internalCanHandle(volumeMap, new HostVO("sourceHostUuid"), new HostVO("destHostUuid"));
if (storagePoolTypeArray[i] == StoragePoolType.Filesystem || storagePoolTypeArray[i] == StoragePoolType.NetworkFilesystem) {
Assert.assertEquals(StrategyPriority.HYPERVISOR, strategyPriority);
} else {
@ -137,7 +175,7 @@ public class KvmNonManagedStorageSystemDataMotionTest {
StoragePoolType[] storagePoolTypeArray = StoragePoolType.values();
for (int i = 0; i < storagePoolTypeArray.length; i++) {
Map<VolumeInfo, DataStore> volumeMap = configureTestInternalCanHandle(true, storagePoolTypeArray[i]);
StrategyPriority strategyPriority = kvmNonManagedStorageDataMotionStrategy.internalCanHandle(volumeMap);
StrategyPriority strategyPriority = kvmNonManagedStorageDataMotionStrategy.internalCanHandle(volumeMap, null, null);
Assert.assertEquals(StrategyPriority.CANT_HANDLE, strategyPriority);
}
}
@ -202,7 +240,7 @@ public class KvmNonManagedStorageSystemDataMotionTest {
for (int i = 0; i < storagePoolTypes.length; i++) {
Mockito.doReturn(storagePoolTypes[i]).when(sourceStoragePool).getPoolType();
boolean result = kvmNonManagedStorageDataMotionStrategy.shouldMigrateVolume(sourceStoragePool, destHost, destStoragePool);
if (storagePoolTypes[i] == StoragePoolType.Filesystem) {
if (storagePoolTypes[i] == StoragePoolType.Filesystem || storagePoolTypes[i] == StoragePoolType.NetworkFilesystem) {
Assert.assertTrue(result);
} else {
Assert.assertFalse(result);
@ -330,4 +368,102 @@ public class KvmNonManagedStorageSystemDataMotionTest {
verifyInOrder.verify(kvmNonManagedStorageDataMotionStrategy, Mockito.times(times)).sendCopyCommand(Mockito.eq(destHost), Mockito.any(TemplateObjectTO.class),
Mockito.any(TemplateObjectTO.class), Mockito.eq(destDataStore));
}
@Before
public void setUp() {
migrationMap = new HashMap<>();
migrationMap.put(volumeInfo1, dataStore2);
migrationMap.put(volumeInfo2, dataStore2);
when(volumeInfo1.getPoolId()).thenReturn(POOL_1_ID);
when(primaryDataStoreDao.findById(POOL_1_ID)).thenReturn(pool1);
when(pool1.isManaged()).thenReturn(false);
when(dataStore2.getId()).thenReturn(POOL_2_ID);
when(primaryDataStoreDao.findById(POOL_2_ID)).thenReturn(pool2);
when(pool2.isManaged()).thenReturn(true);
when(volumeInfo1.getDataStore()).thenReturn(dataStore1);
when(volumeInfo2.getPoolId()).thenReturn(POOL_1_ID);
when(volumeInfo2.getDataStore()).thenReturn(dataStore1);
when(dataStore1.getId()).thenReturn(POOL_1_ID);
when(pool1.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
when(pool2.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
when(pool2.getScope()).thenReturn(ScopeType.CLUSTER);
when(dataStore3.getId()).thenReturn(POOL_3_ID);
when(primaryDataStoreDao.findById(POOL_3_ID)).thenReturn(pool3);
when(pool3.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
when(pool3.getScope()).thenReturn(ScopeType.CLUSTER);
when(host1.getId()).thenReturn(HOST_1_ID);
when(host1.getClusterId()).thenReturn(CLUSTER_ID);
when(host1.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM);
when(host2.getId()).thenReturn(HOST_2_ID);
when(host2.getClusterId()).thenReturn(CLUSTER_ID);
when(host2.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM);
}
@Test
public void canHandleKVMLiveStorageMigrationSameHost() {
StrategyPriority priority = kvmNonManagedStorageDataMotionStrategy.canHandleKVMNonManagedLiveNFSStorageMigration(migrationMap, host1, host1);
assertEquals(StrategyPriority.CANT_HANDLE, priority);
}
@Test
public void canHandleKVMLiveStorageMigrationInterCluster() {
when(host2.getClusterId()).thenReturn(5L);
StrategyPriority priority = kvmNonManagedStorageDataMotionStrategy.canHandleKVMNonManagedLiveNFSStorageMigration(migrationMap, host1, host2);
assertEquals(StrategyPriority.CANT_HANDLE, priority);
}
@Test
public void canHandleKVMLiveStorageMigration() {
StrategyPriority priority = kvmNonManagedStorageDataMotionStrategy.canHandleKVMNonManagedLiveNFSStorageMigration(migrationMap, host1, host2);
assertEquals(StrategyPriority.HYPERVISOR, priority);
}
@Test
public void canHandleKVMLiveStorageMigrationMultipleSources() {
when(volumeInfo1.getDataStore()).thenReturn(dataStore2);
StrategyPriority priority = kvmNonManagedStorageDataMotionStrategy.canHandleKVMNonManagedLiveNFSStorageMigration(migrationMap, host1, host2);
assertEquals(StrategyPriority.HYPERVISOR, priority);
}
@Test
public void canHandleKVMLiveStorageMigrationMultipleDestination() {
migrationMap.put(volumeInfo2, dataStore3);
StrategyPriority priority = kvmNonManagedStorageDataMotionStrategy.canHandleKVMNonManagedLiveNFSStorageMigration(migrationMap, host1, host2);
assertEquals(StrategyPriority.HYPERVISOR, priority);
}
@Test
public void testCanHandleLiveMigrationUnmanagedStorage() {
when(pool2.isManaged()).thenReturn(false);
StrategyPriority priority = kvmNonManagedStorageDataMotionStrategy.canHandleKVMNonManagedLiveNFSStorageMigration(migrationMap, host1, host2);
assertEquals(StrategyPriority.HYPERVISOR, priority);
}
@Test
public void testVerifyLiveMigrationMapForKVM() {
kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap, host2);
}
@Test(expected = CloudRuntimeException.class)
public void testVerifyLiveMigrationMapForKVMNotExistingSource() {
when(primaryDataStoreDao.findById(POOL_1_ID)).thenReturn(null);
kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap, host2);
}
@Test(expected = CloudRuntimeException.class)
public void testVerifyLiveMigrationMapForKVMNotExistingDest() {
when(primaryDataStoreDao.findById(POOL_2_ID)).thenReturn(null);
kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap, host2);
}
@Test(expected = CloudRuntimeException.class)
public void testVerifyLiveMigrationMapForKVMMixedManagedUnmanagedStorage() {
when(pool1.isManaged()).thenReturn(true);
when(pool2.isManaged()).thenReturn(false);
kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap, host2);
}
}

View File

@ -51,8 +51,8 @@ import com.cloud.host.HostVO;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.ImageStore;
import com.cloud.storage.Storage;
import com.cloud.storage.Volume;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
@RunWith(MockitoJUnitRunner.class)
@ -60,14 +60,14 @@ public class StorageSystemDataMotionStrategyTest {
@Spy
@InjectMocks
private StorageSystemDataMotionStrategy storageSystemDataMotionStrategy;
private StorageSystemDataMotionStrategy strategy;
@Mock
private VolumeObject volumeObjectSource;
@Mock
private DataObject dataObjectDestination;
@Mock
private PrimaryDataStore primaryDataStoreSourceStore;
private PrimaryDataStore sourceStore;
@Mock
private ImageStore destinationStore;
@Mock
@ -75,26 +75,26 @@ public class StorageSystemDataMotionStrategyTest {
@Before
public void setUp() throws Exception {
primaryDataStoreSourceStore = mock(PrimaryDataStoreImpl.class);
sourceStore = mock(PrimaryDataStoreImpl.class);
destinationStore = mock(ImageStoreImpl.class);
volumeObjectSource = mock(VolumeObject.class);
dataObjectDestination = mock(VolumeObject.class);
initMocks(storageSystemDataMotionStrategy);
initMocks(strategy);
}
@Test
public void cantHandleSecondary() {
doReturn(primaryDataStoreSourceStore).when(volumeObjectSource).getDataStore();
doReturn(DataStoreRole.Primary).when(primaryDataStoreSourceStore).getRole();
doReturn(sourceStore).when(volumeObjectSource).getDataStore();
doReturn(DataStoreRole.Primary).when(sourceStore).getRole();
doReturn(destinationStore).when(dataObjectDestination).getDataStore();
doReturn(DataStoreRole.Image).when((DataStore)destinationStore).getRole();
doReturn(primaryDataStoreSourceStore).when(volumeObjectSource).getDataStore();
doReturn(sourceStore).when(volumeObjectSource).getDataStore();
doReturn(destinationStore).when(dataObjectDestination).getDataStore();
StoragePoolVO storeVO = new StoragePoolVO();
doReturn(storeVO).when(primaryDataStoreDao).findById(0l);
assertTrue(storageSystemDataMotionStrategy.canHandle(volumeObjectSource, dataObjectDestination) == StrategyPriority.CANT_HANDLE);
assertTrue(strategy.canHandle(volumeObjectSource, dataObjectDestination) == StrategyPriority.CANT_HANDLE);
}
@Test
@ -135,7 +135,7 @@ public class StorageSystemDataMotionStrategyTest {
Mockito.doReturn(storagePool0).when(primaryDataStoreDao).findById(0l);
Mockito.doReturn(storagePool1).when(primaryDataStoreDao).findById(1l);
StrategyPriority strategyPriority = storageSystemDataMotionStrategy.internalCanHandle(volumeMap);
StrategyPriority strategyPriority = strategy.internalCanHandle(volumeMap, new HostVO("srcHostUuid"), new HostVO("destHostUuid"));
Assert.assertEquals(expectedStrategyPriority, strategyPriority);
}
@ -146,7 +146,7 @@ public class StorageSystemDataMotionStrategyTest {
StoragePoolType[] storagePoolTypeArray = StoragePoolType.values();
for (int i = 0; i < storagePoolTypeArray.length; i++) {
Mockito.doReturn(storagePoolTypeArray[i]).when(sourceStoragePool).getPoolType();
boolean result = storageSystemDataMotionStrategy.isStoragePoolTypeOfFile(sourceStoragePool);
boolean result = strategy.isStoragePoolTypeOfFile(sourceStoragePool);
if (sourceStoragePool.getPoolType() == StoragePoolType.Filesystem) {
Assert.assertTrue(result);
} else {
@ -161,19 +161,19 @@ public class StorageSystemDataMotionStrategyTest {
HostVO destHost = new HostVO("guid");
Mockito.doReturn("iScsiName").when(destVolumeInfo).get_iScsiName();
Mockito.doReturn(0l).when(destVolumeInfo).getPoolId();
Mockito.doReturn("expected").when(storageSystemDataMotionStrategy).connectHostToVolume(destHost, 0l, "iScsiName");
Mockito.doReturn("expected").when(strategy).connectHostToVolume(destHost, 0l, "iScsiName");
String expected = storageSystemDataMotionStrategy.generateDestPath(destHost, Mockito.mock(StoragePoolVO.class), destVolumeInfo);
String expected = strategy.generateDestPath(destHost, Mockito.mock(StoragePoolVO.class), destVolumeInfo);
Assert.assertEquals(expected, "expected");
Mockito.verify(storageSystemDataMotionStrategy).connectHostToVolume(destHost, 0l, "iScsiName");
Mockito.verify(strategy).connectHostToVolume(destHost, 0l, "iScsiName");
}
@Test
public void configureMigrateDiskInfoTest() {
VolumeObject srcVolumeInfo = Mockito.spy(new VolumeObject());
Mockito.doReturn("volume path").when(srcVolumeInfo).getPath();
MigrateCommand.MigrateDiskInfo migrateDiskInfo = storageSystemDataMotionStrategy.configureMigrateDiskInfo(srcVolumeInfo, "destPath");
MigrateCommand.MigrateDiskInfo migrateDiskInfo = strategy.configureMigrateDiskInfo(srcVolumeInfo, "destPath");
Assert.assertEquals(MigrateCommand.MigrateDiskInfo.DiskType.BLOCK, migrateDiskInfo.getDiskType());
Assert.assertEquals(MigrateCommand.MigrateDiskInfo.DriverType.RAW, migrateDiskInfo.getDriverType());
Assert.assertEquals(MigrateCommand.MigrateDiskInfo.Source.DEV, migrateDiskInfo.getSource());
@ -187,7 +187,7 @@ public class StorageSystemDataMotionStrategyTest {
String volumePath = "iScsiName";
volume.set_iScsiName(volumePath);
storageSystemDataMotionStrategy.setVolumePath(volume);
strategy.setVolumePath(volume);
Assert.assertEquals(volumePath, volume.getPath());
}
@ -200,8 +200,9 @@ public class StorageSystemDataMotionStrategyTest {
StoragePoolType[] storagePoolTypes = StoragePoolType.values();
for (int i = 0; i < storagePoolTypes.length; i++) {
Mockito.doReturn(storagePoolTypes[i]).when(sourceStoragePool).getPoolType();
boolean result = storageSystemDataMotionStrategy.shouldMigrateVolume(sourceStoragePool, destHost, destStoragePool);
boolean result = strategy.shouldMigrateVolume(sourceStoragePool, destHost, destStoragePool);
Assert.assertTrue(result);
}
}
}
}

View File

@ -409,8 +409,15 @@ public class TemplateServiceImpl implements TemplateService {
_templateDao.update(tmplt.getId(), tmlpt);
if (tmplt.getState() == VirtualMachineTemplate.State.NotUploaded || tmplt.getState() == VirtualMachineTemplate.State.UploadInProgress) {
VirtualMachineTemplate.Event event = VirtualMachineTemplate.Event.OperationSucceeded;
// For multi-disk OVA, check and create data disk templates
if (tmplt.getFormat().equals(ImageFormat.OVA)) {
if (!createOvaDataDiskTemplates(_templateFactory.getTemplate(tmlpt.getId(), store))) {
event = VirtualMachineTemplate.Event.OperationFailed;
}
}
try {
stateMachine.transitTo(tmplt, VirtualMachineTemplate.Event.OperationSucceeded, null, _templateDao);
stateMachine.transitTo(tmplt, event, null, _templateDao);
} catch (NoTransitionException e) {
s_logger.error("Unexpected state transition exception for template " + tmplt.getName() + ". Details: " + e.getMessage());
}
@ -701,7 +708,7 @@ public class TemplateServiceImpl implements TemplateService {
return null;
}
// Check if OVA contains additional data disks. If yes, create Datadisk templates for each of the additional datadisk present in the OVA
// For multi-disk OVA, check and create data disk templates
if (template.getFormat().equals(ImageFormat.OVA)) {
if (!createOvaDataDiskTemplates(template)) {
template.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed);
@ -729,8 +736,8 @@ public class TemplateServiceImpl implements TemplateService {
return null;
}
protected boolean createOvaDataDiskTemplates(TemplateInfo parentTemplate) {
@Override
public boolean createOvaDataDiskTemplates(TemplateInfo parentTemplate) {
try {
// Get Datadisk template (if any) for OVA
List<DatadiskTO> dataDiskTemplates = new ArrayList<DatadiskTO>();

View File

@ -90,6 +90,7 @@ public class ObjectInDataStoreManagerImpl implements ObjectInDataStoreManager {
stateMachines.addTransition(State.Allocated, Event.CreateOnlyRequested, State.Creating);
stateMachines.addTransition(State.Allocated, Event.DestroyRequested, State.Destroying);
stateMachines.addTransition(State.Allocated, Event.OperationFailed, State.Failed);
stateMachines.addTransition(State.Allocated, Event.OperationSuccessed, State.Ready);
stateMachines.addTransition(State.Creating, Event.OperationFailed, State.Allocated);
stateMachines.addTransition(State.Creating, Event.OperationSuccessed, State.Ready);
stateMachines.addTransition(State.Ready, Event.CopyingRequested, State.Copying);

View File

@ -20,6 +20,7 @@ import java.util.Date;
import javax.inject.Inject;
import com.cloud.storage.MigrationOptions;
import org.apache.log4j.Logger;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore;
@ -72,6 +73,7 @@ public class VolumeObject implements VolumeInfo {
@Inject
DiskOfferingDao diskOfferingDao;
private Object payload;
private MigrationOptions migrationOptions;
public VolumeObject() {
_volStateMachine = Volume.State.getStateMachine();
@ -315,6 +317,16 @@ public class VolumeObject implements VolumeInfo {
return null;
}
@Override
public MigrationOptions getMigrationOptions() {
return migrationOptions;
}
@Override
public void setMigrationOptions(MigrationOptions migrationOptions) {
this.migrationOptions = migrationOptions;
}
public void update() {
volumeDao.update(volumeVO.getId(), volumeVO);
volumeVO = volumeDao.findById(volumeVO.getId());

View File

@ -40,6 +40,7 @@ import com.cloud.user.Account;
import com.cloud.utils.db.DB;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd;
import org.apache.cloudstack.api.command.user.iso.GetUploadParamsForIsoCmd;
import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd;
import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd;
import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand;
@ -72,6 +73,11 @@ public class BareMetalTemplateAdapter extends TemplateAdapterBase implements Tem
throw new CloudRuntimeException("Baremetal doesn't support ISO template");
}
@Override
public TemplateProfile prepare(GetUploadParamsForIsoCmd cmd) throws ResourceAllocationException {
throw new CloudRuntimeException("Baremetal doesn't support ISO template");
}
private void templateCreateUsage(VMTemplateVO template, long dcId) {
if (template.getAccountId() != Account.ACCOUNT_ID_SYSTEM) {
UsageEventVO usageEvent =

View File

@ -185,4 +185,9 @@ public class BaremetalDhcpElement extends AdapterBase implements DhcpServiceProv
return false;
}
@Override
public boolean removeDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile vmProfile) throws ResourceUnavailableException {
return false;
}
}

View File

@ -34,46 +34,67 @@ public class MigrateKVMAsync implements Callable<Domain> {
private String vmName = "";
private String destIp = "";
private boolean migrateStorage;
private boolean migrateStorageManaged;
private boolean autoConvergence;
/**
* Do not pause the domain during migration. The domain's memory will be transferred to the destination host while the domain is running. The migration may never converge if the domain is changing its memory faster than it can be transferred. The domain can be manually paused anytime during migration using virDomainSuspend.
* @value 1
* @see Libvirt <a href="https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainMigrateFlags">virDomainMigrateFlags</a> documentation
*/
// Libvirt Migrate Flags reference:
// https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainMigrateFlags
// Do not pause the domain during migration. The domain's memory will be
// transferred to the destination host while the domain is running. The migration
// may never converge if the domain is changing its memory faster than it can be
// transferred. The domain can be manually paused anytime during migration using
// virDomainSuspend.
private static final long VIR_MIGRATE_LIVE = 1L;
/**
* Migrate full disk images in addition to domain's memory. By default only non-shared non-readonly disk images are transferred. The VIR_MIGRATE_PARAM_MIGRATE_DISKS parameter can be used to specify which disks should be migrated. This flag and VIR_MIGRATE_NON_SHARED_INC are mutually exclusive.
* @value 64
* @see Libvirt <a href="https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainMigrateFlags">virDomainMigrateFlags</a> documentation
*/
// Define the domain as persistent on the destination host after successful
// migration. If the domain was persistent on the source host and
// VIR_MIGRATE_UNDEFINE_SOURCE is not used, it will end up persistent on both
// hosts.
private static final long VIR_MIGRATE_PERSIST_DEST = 8L;
// Migrate full disk images in addition to domain's memory. By default only
// non-shared non-readonly disk images are transferred. The
// VIR_MIGRATE_PARAM_MIGRATE_DISKS parameter can be used to specify which disks
// should be migrated. This flag and VIR_MIGRATE_NON_SHARED_INC are mutually
// exclusive.
private static final long VIR_MIGRATE_NON_SHARED_DISK = 64L;
/**
* Compress migration data. The compression methods can be specified using VIR_MIGRATE_PARAM_COMPRESSION. A hypervisor default method will be used if this parameter is omitted. Individual compression methods can be tuned via their specific VIR_MIGRATE_PARAM_COMPRESSION_* parameters.
* @value 2048
* @see Libvirt <a href="https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainMigrateFlags">virDomainMigrateFlags</a> documentation
*/
// Migrate disk images in addition to domain's memory. This is similar to
// VIR_MIGRATE_NON_SHARED_DISK, but only the top level of each disk's backing chain
// is copied. That is, the rest of the backing chain is expected to be present on
// the destination and to be exactly the same as on the source host. This flag and
// VIR_MIGRATE_NON_SHARED_DISK are mutually exclusive.
private static final long VIR_MIGRATE_NON_SHARED_INC = 128L;
// Compress migration data. The compression methods can be specified using
// VIR_MIGRATE_PARAM_COMPRESSION. A hypervisor default method will be used if this
// parameter is omitted. Individual compression methods can be tuned via their
// specific VIR_MIGRATE_PARAM_COMPRESSION_* parameters.
private static final long VIR_MIGRATE_COMPRESSED = 2048L;
/**
* Enable algorithms that ensure a live migration will eventually converge. This usually means the domain will be slowed down to make sure it does not change its memory faster than a hypervisor can transfer the changed memory to the destination host. VIR_MIGRATE_PARAM_AUTO_CONVERGE_* parameters can be used to tune the algorithm.
* @value 8192
* @see Libvirt <a href="https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainMigrateFlags">virDomainMigrateFlags</a> documentation
*/
// Enable algorithms that ensure a live migration will eventually converge.
// This usually means the domain will be slowed down to make sure it does not
// change its memory faster than a hypervisor can transfer the changed memory to
// the destination host. VIR_MIGRATE_PARAM_AUTO_CONVERGE_* parameters can be used
// to tune the algorithm.
private static final long VIR_MIGRATE_AUTO_CONVERGE = 8192L;
/**
* Libvirt 1.0.3 supports compression flag for migration.
*/
// Libvirt 1.0.3 supports compression flag for migration.
private static final int LIBVIRT_VERSION_SUPPORTS_MIGRATE_COMPRESSED = 1000003;
// Libvirt 1.2.3 supports auto converge.
private static final int LIBVIRT_VERSION_SUPPORTS_AUTO_CONVERGE = 1002003;
public MigrateKVMAsync(final LibvirtComputingResource libvirtComputingResource, final Domain dm, final Connect dconn, final String dxml,
final boolean migrateStorage, final boolean autoConvergence, final String vmName, final String destIp) {
final boolean migrateStorage, final boolean migrateStorageManaged, final boolean autoConvergence, final String vmName, final String destIp) {
this.libvirtComputingResource = libvirtComputingResource;
this.dm = dm;
this.dconn = dconn;
this.dxml = dxml;
this.migrateStorage = migrateStorage;
this.migrateStorageManaged = migrateStorageManaged;
this.autoConvergence = autoConvergence;
this.vmName = vmName;
this.destIp = destIp;
@ -84,15 +105,20 @@ public class MigrateKVMAsync implements Callable<Domain> {
long flags = VIR_MIGRATE_LIVE;
if (dconn.getLibVirVersion() >= LIBVIRT_VERSION_SUPPORTS_MIGRATE_COMPRESSED) {
flags += VIR_MIGRATE_COMPRESSED;
flags |= VIR_MIGRATE_COMPRESSED;
}
if (migrateStorage) {
flags += VIR_MIGRATE_NON_SHARED_DISK;
if (migrateStorageManaged) {
flags |= VIR_MIGRATE_NON_SHARED_DISK;
} else {
flags |= VIR_MIGRATE_PERSIST_DEST;
flags |= VIR_MIGRATE_NON_SHARED_INC;
}
}
if (autoConvergence && dconn.getLibVirVersion() >= 1002003) {
flags += VIR_MIGRATE_AUTO_CONVERGE;
if (autoConvergence && dconn.getLibVirVersion() >= LIBVIRT_VERSION_SUPPORTS_AUTO_CONVERGE) {
flags |= VIR_MIGRATE_AUTO_CONVERGE;
}
return dm.migrate(dconn, flags, dxml, vmName, "tcp:" + destIp, libvirtComputingResource.getMigrateSpeed());
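
Aside: the switch from '+=' to '|=' above matters because the migrate constants are single-bit flags; bitwise OR is idempotent, while addition silently sets the wrong bits if a flag is ever applied twice. A minimal standalone sketch (not part of the patch) of the difference:

public class FlagCompositionSketch {
    private static final long VIR_MIGRATE_LIVE = 1L;
    private static final long VIR_MIGRATE_COMPRESSED = 2048L;

    public static void main(String[] args) {
        long plus = VIR_MIGRATE_LIVE;
        plus += VIR_MIGRATE_COMPRESSED;
        plus += VIR_MIGRATE_COMPRESSED;  // accidental double add: loses COMPRESSED, sets bit 4096 instead
        long or = VIR_MIGRATE_LIVE;
        or |= VIR_MIGRATE_COMPRESSED;
        or |= VIR_MIGRATE_COMPRESSED;    // idempotent: still LIVE | COMPRESSED
        System.out.println(plus + " vs " + or); // prints "4097 vs 2049"
    }
}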

View File

@ -0,0 +1,61 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.hypervisor.kvm.resource.wrapper;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.storage.CheckStorageAvailabilityCommand;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.storage.Storage;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.log4j.Logger;
import java.util.Map;
@ResourceWrapper(handles = CheckStorageAvailabilityCommand.class)
public class LibvirtCheckStorageAvailabilityWrapper extends CommandWrapper<CheckStorageAvailabilityCommand, Answer, LibvirtComputingResource> {
private static final Logger s_logger = Logger.getLogger(LibvirtCheckStorageAvailabilityWrapper.class);
@Override
public Answer execute(CheckStorageAvailabilityCommand command, LibvirtComputingResource resource) {
KVMStoragePoolManager storagePoolMgr = resource.getStoragePoolMgr();
Map<String, Storage.StoragePoolType> poolsMap = command.getPoolsMap();
for (String poolUuid : poolsMap.keySet()) {
Storage.StoragePoolType type = poolsMap.get(poolUuid);
s_logger.debug("Checking if storage pool " + poolUuid + " (" + type + ") is mounted on this host");
try {
KVMStoragePool storagePool = storagePoolMgr.getStoragePool(type, poolUuid);
if (storagePool == null) {
s_logger.info("Storage pool " + poolUuid + " is not available");
return new Answer(command, false, "Storage pool " + poolUuid + " not available");
}
} catch (CloudRuntimeException e) {
s_logger.info("Storage pool " + poolUuid + " is not available");
return new Answer(command, e);
}
}
return new Answer(command);
}
}
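
As a usage note on the loop above, the same fail-fast pattern can iterate over entrySet() to avoid the extra get() per pool. A self-contained toy using plain java.util types; the isMounted() helper is a hypothetical stand-in for the storagePoolMgr.getStoragePool() lookup:

import java.util.LinkedHashMap;
import java.util.Map;

public class PoolCheckSketch {
    public static void main(String[] args) {
        Map<String, String> poolsMap = new LinkedHashMap<>();
        poolsMap.put("07eb495b-5590-3877-9fb7-23c6e9a40d40", "NetworkFilesystem"); // made-up entries
        poolsMap.put("812ea6a3-7ad0-30f4-9cab-01e3f2985b98", "RBD");
        for (Map.Entry<String, String> entry : poolsMap.entrySet()) {
            String poolUuid = entry.getKey();
            String type = entry.getValue();
            if (!isMounted(poolUuid, type)) {
                System.out.println("Storage pool " + poolUuid + " (" + type + ") not available");
                return; // mirrors returning a failure Answer on the first unavailable pool
            }
        }
        System.out.println("All pools available"); // mirrors new Answer(command)
    }

    private static boolean isMounted(String uuid, String type) {
        return true; // placeholder for the real storage pool manager lookup
    }
}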

View File

@ -147,9 +147,10 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
// migrateStorage is declared as final because the replaceStorage method may mutate mapMigrateStorage, but
// migrateStorage's value should always only be associated with the initial state of mapMigrateStorage.
final boolean migrateStorage = MapUtils.isNotEmpty(mapMigrateStorage);
final boolean migrateStorageManaged = command.isMigrateStorageManaged();
if (migrateStorage) {
xmlDesc = replaceStorage(xmlDesc, mapMigrateStorage);
xmlDesc = replaceStorage(xmlDesc, mapMigrateStorage, migrateStorageManaged);
}
dconn = libvirtUtilitiesHelper.retrieveQemuConnection(destinationUri);
@ -157,7 +158,8 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
//run migration in thread so we can monitor it
s_logger.info("Live migration of instance " + vmName + " initiated to destination host: " + dconn.getURI());
final ExecutorService executor = Executors.newFixedThreadPool(1);
final Callable<Domain> worker = new MigrateKVMAsync(libvirtComputingResource, dm, dconn, xmlDesc, migrateStorage,
final Callable<Domain> worker = new MigrateKVMAsync(libvirtComputingResource, dm, dconn, xmlDesc,
migrateStorage, migrateStorageManaged,
command.isAutoConvergence(), vmName, command.getDestinationIp());
final Future<Domain> migrateThread = executor.submit(worker);
executor.shutdown();
@ -356,7 +358,8 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
* <li>The source of the disk needs an attribute that is either 'file' or 'dev' as well as its corresponding value.
* </ul>
*/
protected String replaceStorage(String xmlDesc, Map<String, MigrateCommand.MigrateDiskInfo> migrateStorage)
protected String replaceStorage(String xmlDesc, Map<String, MigrateCommand.MigrateDiskInfo> migrateStorage,
boolean migrateStorageManaged)
throws IOException, ParserConfigurationException, SAXException, TransformerException {
InputStream in = IOUtils.toInputStream(xmlDesc);
@ -398,7 +401,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
for (int z = 0; z < diskChildNodes.getLength(); z++) {
Node diskChildNode = diskChildNodes.item(z);
if ("driver".equals(diskChildNode.getNodeName())) {
if (migrateStorageManaged && "driver".equals(diskChildNode.getNodeName())) {
Node driverNode = diskChildNode;
NamedNodeMap driverNodeAttributes = driverNode.getAttributes();
@ -413,7 +416,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
newChildSourceNode.setAttribute(migrateDiskInfo.getSource().toString(), migrateDiskInfo.getSourceText());
diskNode.appendChild(newChildSourceNode);
} else if ("auth".equals(diskChildNode.getNodeName())) {
} else if (migrateStorageManaged && "auth".equals(diskChildNode.getNodeName())) {
diskNode.removeChild(diskChildNode);
}
}
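
To make the gating above concrete: when migrateStorageManaged is false, the <driver> and <auth> children are left untouched and only the disk <source> path is swapped, which is what the new test further down verifies. A toy DOM sketch under that assumption, with made-up XML:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;

public class ReplaceStorageSketch {
    public static void main(String[] args) throws Exception {
        String xml = "<disk><driver name='qemu' type='qcow2'/><auth username='u'/>"
                + "<source file='/mnt/src/vol'/></disk>";
        boolean migrateStorageManaged = false;
        Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder()
                .parse(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));
        Element disk = doc.getDocumentElement();
        if (migrateStorageManaged) {
            // managed path rewrites the driver and strips <auth>
            Element driver = (Element) disk.getElementsByTagName("driver").item(0);
            driver.setAttribute("type", "raw");
            disk.removeChild(disk.getElementsByTagName("auth").item(0));
        }
        // the source path swap happens in both the managed and non-managed cases
        Element source = (Element) disk.getElementsByTagName("source").item(0);
        source.setAttribute("file", "/mnt/dest/vol");
        System.out.println("source now points at " + source.getAttribute("file"));
    }
}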

View File

@ -0,0 +1,136 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.hypervisor.kvm.resource.wrapper;
import com.cloud.agent.api.Answer;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.utils.PropertiesUtil;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
import org.apache.cloudstack.agent.directdownload.SetupDirectDownloadCertificateCommand;
import org.apache.cloudstack.utils.security.KeyStoreUtils;
import org.apache.log4j.Logger;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import static org.apache.commons.lang.StringUtils.isBlank;
@ResourceWrapper(handles = SetupDirectDownloadCertificateCommand.class)
public class LibvirtSetupDirectDownloadCertificateCommandWrapper extends CommandWrapper<SetupDirectDownloadCertificateCommand, Answer, LibvirtComputingResource> {
private static final String temporaryCertFilePrefix = "CSCERTIFICATE";
private static final Logger s_logger = Logger.getLogger(LibvirtSetupDirectDownloadCertificateCommandWrapper.class);
/**
* Retrieve agent.properties file
*/
private File getAgentPropertiesFile() throws FileNotFoundException {
final File agentFile = PropertiesUtil.findConfigFile("agent.properties");
if (agentFile == null) {
throw new FileNotFoundException("Failed to find agent.properties file");
}
return agentFile;
}
/**
* Get the property 'keystore.passphrase' value from agent.properties file
*/
private String getKeystorePassword(File agentFile) {
String pass = null;
if (agentFile != null) {
try {
pass = PropertiesUtil.loadFromFile(agentFile).getProperty(KeyStoreUtils.KS_PASSPHRASE_PROPERTY);
} catch (IOException e) {
s_logger.error("Could not get 'keystore.passphrase' property value due to: " + e.getMessage());
}
}
return pass;
}
/**
* Get keystore path
*/
private String getKeyStoreFilePath(File agentFile) {
return agentFile.getParent() + "/" + KeyStoreUtils.KS_FILENAME;
}
/**
* Import certificate from temporary file into keystore
*/
private void importCertificate(String tempCerFilePath, String keyStoreFile, String certificateName, String privatePassword) {
s_logger.debug("Importing certificate from temporary file to keystore");
String importCommandFormat = "keytool -importcert -file %s -keystore %s -alias '%s' -storepass '%s' -noprompt";
String importCmd = String.format(importCommandFormat, tempCerFilePath, keyStoreFile, certificateName, privatePassword);
int result = Script.runSimpleBashScriptForExitValue(importCmd);
if (result != 0) {
s_logger.debug("Certificate " + certificateName + " not imported as it already exist on keystore");
}
}
/**
* Create temporary file and return its path
*/
private String createTemporaryFile(File agentFile, String certificateName, String certificate) {
String tempCerFilePath = String.format("%s/%s-%s",
agentFile.getParent(), temporaryCertFilePrefix, certificateName);
s_logger.debug("Creating temporary certificate file into: " + tempCerFilePath);
int result = Script.runSimpleBashScriptForExitValue(String.format("echo '%s' > %s", certificate, tempCerFilePath));
if (result != 0) {
throw new CloudRuntimeException("Could not create the certificate file on path: " + tempCerFilePath);
}
return tempCerFilePath;
}
/**
* Remove temporary file
*/
private void cleanupTemporaryFile(String temporaryFile) {
s_logger.debug("Cleaning up temporary certificate file");
Script.runSimpleBashScript("rm -f " + temporaryFile);
}
@Override
public Answer execute(SetupDirectDownloadCertificateCommand cmd, LibvirtComputingResource serverResource) {
String certificate = cmd.getCertificate();
String certificateName = cmd.getCertificateName();
try {
File agentFile = getAgentPropertiesFile();
String privatePassword = getKeystorePassword(agentFile);
if (isBlank(privatePassword)) {
return new Answer(cmd, false, "No password found for keystore: " + KeyStoreUtils.KS_FILENAME);
}
final String keyStoreFile = getKeyStoreFilePath(agentFile);
String temporaryFile = createTemporaryFile(agentFile, certificateName, certificate);
importCertificate(temporaryFile, keyStoreFile, certificateName, privatePassword);
cleanupTemporaryFile(temporaryFile);
} catch (FileNotFoundException | CloudRuntimeException e) {
s_logger.error("Error while setting up certificate " + certificateName, e);
return new Answer(cmd, false, e.getMessage());
}
return new Answer(cmd, true, "Certificate " + certificateName + " imported");
}
}

View File

@ -427,7 +427,7 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
}
@Override
public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool) {
public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool, int timeout) {
throw new UnsupportedOperationException("Creating a disk from a snapshot is not supported in this configuration.");
}
@ -440,4 +440,9 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
public boolean createFolder(String uuid, String path) {
throw new UnsupportedOperationException("A folder cannot be created in this configuration.");
}
@Override
public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size, KVMStoragePool destPool, int timeout) {
return null;
}
}

View File

@ -394,9 +394,15 @@ public class KVMStoragePoolManager {
return adaptor.copyPhysicalDisk(disk, name, destPool, timeout);
}
public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool) {
public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool, int timeout) {
StorageAdaptor adaptor = getStorageAdaptor(destPool.getType());
return adaptor.createDiskFromSnapshot(snapshot, snapshotName, name, destPool);
return adaptor.createDiskFromSnapshot(snapshot, snapshotName, name, destPool, timeout);
}
public KVMPhysicalDisk createDiskWithTemplateBacking(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size,
KVMStoragePool destPool, int timeout) {
StorageAdaptor adaptor = getStorageAdaptor(destPool.getType());
return adaptor.createDiskFromTemplateBacking(template, name, format, size, destPool, timeout);
}
}

View File

@ -36,19 +36,12 @@ import java.util.UUID;
import javax.naming.ConfigurationException;
import com.cloud.agent.direct.download.DirectTemplateDownloader;
import com.cloud.agent.direct.download.DirectTemplateDownloader.DirectTemplateInformation;
import com.cloud.agent.direct.download.HttpDirectTemplateDownloader;
import com.cloud.agent.direct.download.MetalinkDirectTemplateDownloader;
import com.cloud.agent.direct.download.NfsDirectTemplateDownloader;
import com.cloud.agent.direct.download.HttpsDirectTemplateDownloader;
import com.cloud.exception.InvalidParameterValueException;
import org.apache.cloudstack.agent.directdownload.HttpsDirectDownloadCommand;
import org.apache.cloudstack.agent.directdownload.DirectDownloadAnswer;
import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand;
import org.apache.cloudstack.agent.directdownload.HttpDirectDownloadCommand;
import org.apache.cloudstack.agent.directdownload.HttpsDirectDownloadCommand;
import org.apache.cloudstack.agent.directdownload.MetalinkDirectDownloadCommand;
import org.apache.cloudstack.agent.directdownload.NfsDirectDownloadCommand;
import org.apache.cloudstack.agent.directdownload.DirectDownloadAnswer;
import org.apache.cloudstack.storage.command.AttachAnswer;
import org.apache.cloudstack.storage.command.AttachCommand;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
@ -95,7 +88,14 @@ import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.agent.api.to.NfsTO;
import com.cloud.agent.api.to.S3TO;
import com.cloud.agent.direct.download.DirectTemplateDownloader;
import com.cloud.agent.direct.download.DirectTemplateDownloader.DirectTemplateInformation;
import com.cloud.agent.direct.download.HttpDirectTemplateDownloader;
import com.cloud.agent.direct.download.HttpsDirectTemplateDownloader;
import com.cloud.agent.direct.download.MetalinkDirectTemplateDownloader;
import com.cloud.agent.direct.download.NfsDirectTemplateDownloader;
import com.cloud.exception.InternalErrorException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.resource.LibvirtConnection;
@ -105,6 +105,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef.DeviceType;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef.DiscardType;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef.DiskProtocol;
import com.cloud.storage.JavaStorageLayer;
import com.cloud.storage.MigrationOptions;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageLayer;
@ -114,9 +115,9 @@ import com.cloud.storage.template.Processor.FormatInfo;
import com.cloud.storage.template.QCOW2Processor;
import com.cloud.storage.template.TemplateLocation;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.storage.S3.S3Utils;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
import com.cloud.utils.storage.S3.S3Utils;
public class KVMStorageProcessor implements StorageProcessor {
private static final Logger s_logger = Logger.getLogger(KVMStorageProcessor.class);
@ -1352,6 +1353,32 @@ public class KVMStorageProcessor implements StorageProcessor {
}
}
/**
* Create volume with backing file (linked clone)
*/
protected KVMPhysicalDisk createLinkedCloneVolume(MigrationOptions migrationOptions, KVMStoragePool srcPool, KVMStoragePool primaryPool, VolumeObjectTO volume, PhysicalDiskFormat format, int timeout) {
String srcBackingFilePath = migrationOptions.getSrcBackingFilePath();
boolean copySrcTemplate = migrationOptions.isCopySrcTemplate();
KVMPhysicalDisk srcTemplate = srcPool.getPhysicalDisk(srcBackingFilePath);
KVMPhysicalDisk destTemplate;
if (copySrcTemplate) {
KVMPhysicalDisk copiedTemplate = storagePoolMgr.copyPhysicalDisk(srcTemplate, srcTemplate.getName(), primaryPool, 10000 * 1000);
destTemplate = primaryPool.getPhysicalDisk(copiedTemplate.getPath());
} else {
destTemplate = primaryPool.getPhysicalDisk(srcBackingFilePath);
}
return storagePoolMgr.createDiskWithTemplateBacking(destTemplate, volume.getUuid(), format, volume.getSize(),
primaryPool, timeout);
}
/**
* Create full clone volume from VM snapshot
*/
protected KVMPhysicalDisk createFullCloneVolume(MigrationOptions migrationOptions, VolumeObjectTO volume, KVMStoragePool primaryPool, PhysicalDiskFormat format) {
s_logger.debug("For VM migration with full-clone volume: Creating empty stub disk for source disk " + migrationOptions.getSrcVolumeUuid() + " and size: " + volume.getSize() + " and format: " + format);
return primaryPool.createPhysicalDisk(volume.getUuid(), format, volume.getProvisioningType(), volume.getSize());
}
@Override
public Answer createVolume(final CreateObjectCommand cmd) {
final VolumeObjectTO volume = (VolumeObjectTO)cmd.getData();
@ -1369,8 +1396,23 @@ public class KVMStorageProcessor implements StorageProcessor {
} else {
format = PhysicalDiskFormat.valueOf(volume.getFormat().toString().toUpperCase());
}
vol = primaryPool.createPhysicalDisk(volume.getUuid(), format,
volume.getProvisioningType(), disksize);
MigrationOptions migrationOptions = volume.getMigrationOptions();
if (migrationOptions != null) {
String srcStoreUuid = migrationOptions.getSrcPoolUuid();
StoragePoolType srcPoolType = migrationOptions.getSrcPoolType();
KVMStoragePool srcPool = storagePoolMgr.getStoragePool(srcPoolType, srcStoreUuid);
int timeout = migrationOptions.getTimeout();
if (migrationOptions.getType() == MigrationOptions.Type.LinkedClone) {
vol = createLinkedCloneVolume(migrationOptions, srcPool, primaryPool, volume, format, timeout);
} else if (migrationOptions.getType() == MigrationOptions.Type.FullClone) {
vol = createFullCloneVolume(migrationOptions, volume, primaryPool, format);
}
} else {
vol = primaryPool.createPhysicalDisk(volume.getUuid(), format,
volume.getProvisioningType(), disksize);
}
final VolumeObjectTO newVol = new VolumeObjectTO();
if(vol != null) {

View File

@ -95,6 +95,33 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
return true;
}
@Override
public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size,
KVMStoragePool destPool, int timeout) {
s_logger.info("Creating volume " + name + " with template backing " + template.getName() + " in pool " + destPool.getUuid() +
" (" + destPool.getType().toString() + ") with size " + size);
KVMPhysicalDisk disk = null;
String destPath = destPool.getLocalPath().endsWith("/") ?
destPool.getLocalPath() + name :
destPool.getLocalPath() + "/" + name;
if (destPool.getType() == StoragePoolType.NetworkFilesystem) {
try {
if (format == PhysicalDiskFormat.QCOW2) {
QemuImg qemu = new QemuImg(timeout);
QemuImgFile destFile = new QemuImgFile(destPath, format);
destFile.setSize(size);
QemuImgFile backingFile = new QemuImgFile(template.getPath(), template.getFormat());
qemu.create(destFile, backingFile);
}
} catch (QemuImgException e) {
s_logger.error("Failed to create " + destPath + " due to a failed executing of qemu-img: " + e.getMessage());
}
}
return disk;
}
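
For reference, the qemu.create(destFile, backingFile) call above amounts to creating a qcow2 volume backed by the template, i.e. a linked clone. A standalone sketch of the equivalent qemu-img invocation, with hypothetical paths and size; note that recent qemu-img releases also want '-F qcow2' to name the backing file's format explicitly:

import java.io.IOException;

public class LinkedCloneSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        ProcessBuilder pb = new ProcessBuilder(
                "qemu-img", "create",
                "-f", "qcow2",
                "-b", "/mnt/pool/template.qcow2", // backing (template) image
                "/mnt/pool/volume.qcow2",         // new thin volume
                "21474836480");                   // virtual size in bytes (20 GiB)
        pb.inheritIO();
        int exit = pb.start().waitFor();
        System.out.println("qemu-img exited with " + exit);
    }
}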
public StorageVol getVolume(StoragePool pool, String volName) {
StorageVol vol = null;
@ -914,7 +941,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
case SPARSE:
case FAT:
QemuImgFile srcFile = new QemuImgFile(template.getPath(), template.getFormat());
qemu.convert(srcFile, destFile, options);
qemu.convert(srcFile, destFile, options, null);
break;
}
} else if (format == PhysicalDiskFormat.RAW) {
@ -927,7 +954,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
}
QemuImg qemu = new QemuImg(timeout);
Map<String, String> options = new HashMap<String, String>();
qemu.convert(sourceFile, destFile, options);
qemu.convert(sourceFile, destFile, options, null);
}
} catch (QemuImgException e) {
s_logger.error("Failed to create " + disk.getPath() +
@ -1302,8 +1329,35 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
}
@Override
public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool) {
return null;
public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool, int timeout) {
s_logger.info("Creating volume " + name + " from snapshot " + snapshotName + " in pool " + destPool.getUuid() +
" (" + destPool.getType().toString() + ")");
PhysicalDiskFormat format = snapshot.getFormat();
long size = snapshot.getSize();
String destPath = destPool.getLocalPath().endsWith("/") ?
destPool.getLocalPath() + name :
destPool.getLocalPath() + "/" + name;
if (destPool.getType() == StoragePoolType.NetworkFilesystem) {
try {
if (format == PhysicalDiskFormat.QCOW2) {
QemuImg qemu = new QemuImg(timeout);
QemuImgFile destFile = new QemuImgFile(destPath, format);
if (size > snapshot.getVirtualSize()) {
destFile.setSize(size);
} else {
destFile.setSize(snapshot.getVirtualSize());
}
QemuImgFile srcFile = new QemuImgFile(snapshot.getPath(), snapshot.getFormat());
qemu.convert(srcFile, destFile, snapshotName);
}
} catch (QemuImgException e) {
s_logger.error("Failed to create " + destPath +
" due to a failed executing of qemu-img: " + e.getMessage());
}
}
return destPool.getPhysicalDisk(name);
}
@Override

View File

@ -294,7 +294,7 @@ public class ManagedNfsStorageAdaptor implements StorageAdaptor {
}
@Override
public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool) {
public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool, int timeout) {
throw new UnsupportedOperationException("Creating a disk from a snapshot is not supported in this configuration.");
}
@ -313,6 +313,11 @@ public class ManagedNfsStorageAdaptor implements StorageAdaptor {
return true;
}
@Override
public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size, KVMStoragePool destPool, int timeout) {
return null;
}
@Override
public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, PhysicalDiskFormat format, ProvisioningType provisioningType, long size) {
return null;

View File

@ -66,11 +66,19 @@ public interface StorageAdaptor {
public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPools, int timeout);
public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool);
public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool, int timeout);
public boolean refresh(KVMStoragePool pool);
public boolean deleteStoragePool(KVMStoragePool pool);
public boolean createFolder(String uuid, String path);
/**
* Creates a disk using template backing.
* Precondition: the template is on destPool
*/
KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template,
String name, PhysicalDiskFormat format, long size,
KVMStoragePool destPool, int timeout);
}

View File

@ -20,17 +20,24 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.NotImplementedException;
import com.cloud.storage.Storage;
import com.cloud.utils.script.OutputInterpreter;
import com.cloud.utils.script.Script;
import org.apache.commons.lang.NotImplementedException;
public class QemuImg {
/* The qemu-img binary. We expect this to be in $PATH */
public String _qemuImgPath = "qemu-img";
private String cloudQemuImgPath = "cloud-qemu-img";
private int timeout;
private String getQemuImgPathScript = String.format("which %s >& /dev/null; " +
"if [ $? -gt 0 ]; then echo \"%s\"; else echo \"%s\"; fi",
cloudQemuImgPath, _qemuImgPath, cloudQemuImgPath);
/* Shouldn't we have KVMPhysicalDisk and LibvirtVMDef read this? */
public static enum PhysicalDiskFormat {
RAW("raw"), QCOW2("qcow2"), VMDK("vmdk"), FILE("file"), RBD("rbd"), SHEEPDOG("sheepdog"), HTTP("http"), HTTPS("https"), TAR("tar"), DIR("dir");
@ -220,10 +227,18 @@ public class QemuImg {
* @param options
* Options for the convert. Takes a Map<String, String> with key value
* pairs which are passed on to qemu-img without validation.
* @param snapshotName
* If provided, the conversion uses it as a parameter
* @return void
*/
public void convert(final QemuImgFile srcFile, final QemuImgFile destFile, final Map<String, String> options) throws QemuImgException {
final Script script = new Script(_qemuImgPath, timeout);
public void convert(final QemuImgFile srcFile, final QemuImgFile destFile,
final Map<String, String> options, final String snapshotName) throws QemuImgException {
Script script = new Script(_qemuImgPath, timeout);
if (StringUtils.isNotBlank(snapshotName)) {
String qemuPath = Script.runSimpleBashScript(getQemuImgPathScript);
script = new Script(qemuPath, timeout);
}
script.add("convert");
// autodetect source format. Sometime in the future we may teach KVMPhysicalDisk about more formats; then we can explicitly pass them if necessary
//s.add("-f");
@ -242,6 +257,13 @@ public class QemuImg {
script.add(optionsStr);
}
if (StringUtils.isNotBlank(snapshotName)) {
script.add("-f");
script.add(srcFile.getFormat().toString());
script.add("-s");
script.add(snapshotName);
}
script.add(srcFile.getFileName());
script.add(destFile.getFileName());
@ -269,7 +291,26 @@ public class QemuImg {
* @return void
*/
public void convert(final QemuImgFile srcFile, final QemuImgFile destFile) throws QemuImgException {
this.convert(srcFile, destFile, null);
this.convert(srcFile, destFile, null, null);
}
/**
* Convert an image from source to destination
*
* This method calls 'qemu-img convert' and takes three objects
* as arguments.
*
* @param srcFile
* The source file
* @param destFile
* The destination file
* @param snapshotName
* The snapshot name
* @return void
*/
public void convert(final QemuImgFile srcFile, final QemuImgFile destFile, String snapshotName) throws QemuImgException {
this.convert(srcFile, destFile, null, snapshotName);
}
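
A hypothetical caller sketch for the overloads above (package names assume the usual org.apache.cloudstack.utils.qemu location of these utility classes; paths are made up). With a non-blank snapshot name the wrapper adds '-f <srcFormat> -s <name>' to the command line and prefers cloud-qemu-img when it is installed; with null it behaves exactly as before:

import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;

public class ConvertSketch {
    public static void main(String[] args) throws QemuImgException {
        QemuImg qemu = new QemuImg(0); // 0 = no script timeout
        QemuImgFile src = new QemuImgFile("/mnt/pool/volume.qcow2", PhysicalDiskFormat.QCOW2);
        QemuImgFile dst = new QemuImgFile("/mnt/pool/restored.qcow2", PhysicalDiskFormat.QCOW2);
        // extracts the internal snapshot 'snap-1' from the source image
        qemu.convert(src, dst, "snap-1");
    }
}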
/**

View File

@ -19,16 +19,22 @@
package com.cloud.hypervisor.kvm.resource.wrapper;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.InputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Scanner;
import org.apache.cloudstack.utils.linux.MemStat;
import java.util.Map;
import java.util.HashMap;
import org.apache.commons.io.IOUtils;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.TransformerException;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@ -40,7 +46,9 @@ import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.xml.sax.SAXException;
import com.cloud.agent.api.MigrateCommand;
import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo;
import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo.DiskType;
import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo.DriverType;
@ -310,6 +318,114 @@ public class LibvirtMigrateCommandWrapperTest {
PowerMockito.whenNew(Scanner.class).withAnyArguments().thenReturn(scanner);
}
private static final String sourcePoolUuid = "07eb495b-5590-3877-9fb7-23c6e9a40d40";
private static final String destPoolUuid = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
private static final String disk1SourceFilename = "981ab1dc-40f4-41b5-b387-6539aeddbf47";
private static final String disk2SourceFilename = "bf8621b3-027c-497d-963b-06319650f048";
private static final String sourceMultidiskDomainXml =
"<domain type='kvm' id='6'>\n" +
" <name>i-2-3-VM</name>\n" +
" <uuid>91860126-7dda-4876-ac1e-48d06cd4b2eb</uuid>\n" +
" <description>Apple Mac OS X 10.6 (32-bit)</description>\n" +
" <memory unit='KiB'>524288</memory>\n" +
" <currentMemory unit='KiB'>524288</currentMemory>\n" +
" <vcpu placement='static'>1</vcpu>\n" +
" <cputune>\n" +
" <shares>250</shares>\n" +
" </cputune>\n" +
" <sysinfo type='smbios'>\n" +
" <system>\n" +
" <entry name='manufacturer'>Apache Software Foundation</entry>\n" +
" <entry name='product'>CloudStack KVM Hypervisor</entry>\n" +
" <entry name='uuid'>91860126-7dda-4876-ac1e-48d06cd4b2eb</entry>\n" +
" </system>\n" +
" </sysinfo>\n" +
" <os>\n" +
" <type arch='x86_64' machine='rhel6.6.0'>hvm</type>\n" +
" <boot dev='cdrom'/>\n" +
" <boot dev='hd'/>\n" +
" <smbios mode='sysinfo'/>\n" +
" </os>\n" +
" <features>\n" +
" <acpi/>\n" +
" <apic/>\n" +
" <pae/>\n" +
" </features>\n" +
" <cpu>\n" +
" </cpu>\n" +
" <clock offset='utc'/>\n" +
" <on_poweroff>destroy</on_poweroff>\n" +
" <on_reboot>restart</on_reboot>\n" +
" <on_crash>destroy</on_crash>\n" +
" <devices>\n" +
" <emulator>/usr/libexec/qemu-kvm</emulator>\n" +
" <disk type='file' device='disk'>\n" +
" <driver name='qemu' type='qcow2' cache='none'/>\n" +
" <source file='/mnt/07eb495b-5590-3877-9fb7-23c6e9a40d40/981ab1dc-40f4-41b5-b387-6539aeddbf47'/>\n" +
" <target dev='hda' bus='ide'/>\n" +
" <serial>e8141f63b5364a7f8cbb</serial>\n" +
" <alias name='ide0-0-0'/>\n" +
" <address type='drive' controller='0' bus='0' target='0' unit='0'/>\n" +
" </disk>\n" +
" <disk type='file' device='cdrom'>\n" +
" <driver name='qemu' type='raw' cache='none'/>\n" +
" <target dev='hdc' bus='ide'/>\n" +
" <readonly/>\n" +
" <alias name='ide0-1-0'/>\n" +
" <address type='drive' controller='0' bus='1' target='0' unit='0'/>\n" +
" </disk>\n" +
" <disk type='file' device='disk'>\n" +
" <driver name='qemu' type='qcow2' cache='none'/>\n" +
" <source file='/mnt/07eb495b-5590-3877-9fb7-23c6e9a40d40/bf8621b3-027c-497d-963b-06319650f048'/>\n" +
" <target dev='vdb' bus='virtio'/>\n" +
" <serial>bf8621b3027c497d963b</serial>\n" +
" <alias name='virtio-disk1'/>\n" +
" <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>\n" +
" </disk>\n" +
" <controller type='usb' index='0'>\n" +
" <alias name='usb0'/>\n" +
" <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>\n" +
" </controller>\n" +
" <controller type='ide' index='0'>\n" +
" <alias name='ide0'/>\n" +
" <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>\n" +
" </controller>\n" +
" <interface type='bridge'>\n" +
" <mac address='02:00:4c:5f:00:01'/>\n" +
" <source bridge='breth1-511'/>\n" +
" <target dev='vnet6'/>\n" +
" <model type='e1000'/>\n" +
" <alias name='net0'/>\n" +
" <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>\n" +
" </interface>\n" +
" <serial type='pty'>\n" +
" <source path='/dev/pts/2'/>\n" +
" <target port='0'/>\n" +
" <alias name='serial0'/>\n" +
" </serial>\n" +
" <console type='pty' tty='/dev/pts/2'>\n" +
" <source path='/dev/pts/2'/>\n" +
" <target type='serial' port='0'/>\n" +
" <alias name='serial0'/>\n" +
" </console>\n" +
" <input type='tablet' bus='usb'>\n" +
" <alias name='input0'/>\n" +
" </input>\n" +
" <input type='mouse' bus='ps2'/>\n" +
" <graphics type='vnc' port='5902' autoport='yes' listen='10.2.2.31' passwd='LEm_y8SIs-8hXimtxnyEnA'>\n" +
" <listen type='address' address='10.2.2.31'/>\n" +
" </graphics>\n" +
" <video>\n" +
" <model type='cirrus' vram='9216' heads='1'/>\n" +
" <alias name='video0'/>\n" +
" <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>\n" +
" </video>\n" +
" <memballoon model='none'>\n" +
" <alias name='balloon0'/>\n" +
" </memballoon>\n" +
" </devices>\n" +
"</domain>\n";
@Test
public void testReplaceIpForVNCInDescFile() {
final String targetIp = "192.168.22.21";
@ -488,7 +604,7 @@ public class LibvirtMigrateCommandWrapperTest {
MigrateDiskInfo diskInfo = new MigrateDiskInfo("123456", DiskType.BLOCK, DriverType.RAW, Source.FILE, "sourctest");
mapMigrateStorage.put("/mnt/812ea6a3-7ad0-30f4-9cab-01e3f2985b98/4650a2f7-fce5-48e2-beaa-bcdf063194e6", diskInfo);
final String result = libvirtMigrateCmdWrapper.replaceStorage(fullfile, mapMigrateStorage);
final String result = libvirtMigrateCmdWrapper.replaceStorage(fullfile, mapMigrateStorage, true);
InputStream in = IOUtils.toInputStream(result);
DocumentBuilderFactory docFactory = DocumentBuilderFactory.newInstance();
@ -499,4 +615,25 @@ public class LibvirtMigrateCommandWrapperTest {
assertXpath(doc, "/domain/devices/disk/driver/@type", "raw");
}
@Test
public void testReplaceStorageXmlDiskNotManagedStorage() throws ParserConfigurationException, TransformerException, SAXException, IOException {
final LibvirtMigrateCommandWrapper lw = new LibvirtMigrateCommandWrapper();
String destDisk1FileName = "XXXXXXXXXXXXXX";
String destDisk2FileName = "YYYYYYYYYYYYYY";
String destDisk1Path = String.format("/mnt/%s/%s", destPoolUuid, destDisk1FileName);
MigrateCommand.MigrateDiskInfo migrateDisk1Info = new MigrateCommand.MigrateDiskInfo(disk1SourceFilename,
MigrateCommand.MigrateDiskInfo.DiskType.FILE, MigrateCommand.MigrateDiskInfo.DriverType.QCOW2,
MigrateCommand.MigrateDiskInfo.Source.FILE, destDisk1Path);
String destDisk2Path = String.format("/mnt/%s/%s", destPoolUuid, destDisk2FileName);
MigrateCommand.MigrateDiskInfo migrateDisk2Info = new MigrateCommand.MigrateDiskInfo(disk2SourceFilename,
MigrateCommand.MigrateDiskInfo.DiskType.FILE, MigrateCommand.MigrateDiskInfo.DriverType.QCOW2,
MigrateCommand.MigrateDiskInfo.Source.FILE, destDisk2Path);
Map<String, MigrateCommand.MigrateDiskInfo> migrateStorage = new HashMap<>();
migrateStorage.put(disk1SourceFilename, migrateDisk1Info);
migrateStorage.put(disk2SourceFilename, migrateDisk2Info);
String newXml = lw.replaceStorage(sourceMultidiskDomainXml, migrateStorage, false);
assertTrue(newXml.contains(destDisk1Path));
assertTrue(newXml.contains(destDisk2Path));
assertFalse(newXml.contains("/mnt/" + sourcePoolUuid + "/" + disk1SourceFilename));
assertFalse(newXml.contains("/mnt/" + sourcePoolUuid + "/" + disk2SourceFilename));
}
}

View File

@ -157,11 +157,11 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
}
public static final ConfigKey<Boolean> VmwareReserveCpu = new ConfigKey<Boolean>(Boolean.class, "vmware.reserve.cpu", "Advanced", "false",
"Specify whether or not to reserve CPU when not overprovisioning, In case of cpu overprovisioning we will always reserve cpu.", true, ConfigKey.Scope.Cluster,
"Specify whether or not to reserve CPU when deploying an instance.", true, ConfigKey.Scope.Cluster,
null);
public static final ConfigKey<Boolean> VmwareReserveMemory = new ConfigKey<Boolean>(Boolean.class, "vmware.reserve.mem", "Advanced", "false",
"Specify whether or not to reserve memory when not overprovisioning, In case of memory overprovisioning we will always reserve memory.", true,
"Specify whether or not to reserve memory when deploying an instance.", true,
ConfigKey.Scope.Cluster, null);
protected ConfigKey<Boolean> VmwareEnableNestedVirtualization = new ConfigKey<Boolean>(Boolean.class, "vmware.nested.virtualization", "Advanced", "false",

View File

@ -379,4 +379,9 @@ public class ContrailElementImpl extends AdapterBase
public boolean setExtraDhcpOptions(Network network, long nicId, Map<Integer, String> dhcpOptions) {
return false;
}
@Override
public boolean removeDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile vmProfile) {
return false;
}
}

View File

@ -2078,11 +2078,11 @@ public class JuniperSrxResource implements ServerResource {
xml = replaceXmlValue(xml, "rule-set", _privateZone);
xml = replaceXmlValue(xml, "from-zone", _privateZone);
xml = replaceXmlValue(xml, "rule-name", ruleName_private);
}
if (!sendRequestAndCheckResponse(command, xml, "name", ruleName_private))
{
throw new ExecutionException("Failed to delete trust static NAT rule from public IP " + publicIp + " to private IP " + privateIp);
if (!sendRequestAndCheckResponse(command, xml, "name", ruleName_private))
{
throw new ExecutionException("Failed to delete trust static NAT rule from public IP " + publicIp + " to private IP " + privateIp);
}
}
return true;
}
@ -3568,6 +3568,7 @@ public class JuniperSrxResource implements ServerResource {
case CHECK_IF_EXISTS:
case CHECK_IF_IN_USE:
case CHECK_PRIVATE_IF_EXISTS:
assert (keyAndValue != null && keyAndValue.length == 2) : "If the SrxCommand is " + command + ", both a key and value must be specified.";
key = keyAndValue[0];

View File

@ -38,9 +38,6 @@ if [ -z "${KS_PASS// }" ]; then
exit 1
fi
# Use a new keystore file
NEW_KS_FILE="$KS_FILE.new"
# Import certificate
if [ ! -z "${CERT// }" ]; then
echo "$CERT" > "$CERT_FILE"
@ -54,8 +51,8 @@ fi
# Import cacerts into the keystore
awk '/-----BEGIN CERTIFICATE-----?/{n++}{print > "cloudca." n }' "$CACERT_FILE"
for caChain in $(ls cloudca.*); do
keytool -delete -noprompt -alias "$caChain" -keystore "$NEW_KS_FILE" -storepass "$KS_PASS" > /dev/null 2>&1 || true
keytool -import -noprompt -storepass "$KS_PASS" -trustcacerts -alias "$caChain" -file "$caChain" -keystore "$NEW_KS_FILE" > /dev/null 2>&1
keytool -delete -noprompt -alias "$caChain" -keystore "$KS_FILE" -storepass "$KS_PASS" > /dev/null 2>&1 || true
keytool -import -noprompt -storepass "$KS_PASS" -trustcacerts -alias "$caChain" -file "$caChain" -keystore "$KS_FILE" > /dev/null 2>&1
done
rm -f cloudca.*
@ -63,21 +60,19 @@ rm -f cloudca.*
if [ ! -z "${PRIVKEY// }" ]; then
echo "$PRIVKEY" > "$PRIVKEY_FILE"
# Re-initialize keystore when private key is provided
keytool -delete -noprompt -alias "$ALIAS" -keystore "$NEW_KS_FILE" -storepass "$KS_PASS" 2>/dev/null || true
openssl pkcs12 -export -name "$ALIAS" -in "$CERT_FILE" -inkey "$PRIVKEY_FILE" -out "$NEW_KS_FILE.p12" -password pass:"$KS_PASS" > /dev/null 2>&1
keytool -importkeystore -srckeystore "$NEW_KS_FILE.p12" -destkeystore "$NEW_KS_FILE" -srcstoretype PKCS12 -alias "$ALIAS" -deststorepass "$KS_PASS" -destkeypass "$KS_PASS" -srcstorepass "$KS_PASS" -srckeypass "$KS_PASS" > /dev/null 2>&1
keytool -delete -noprompt -alias "$ALIAS" -keystore "$KS_FILE" -storepass "$KS_PASS" 2>/dev/null || true
openssl pkcs12 -export -name "$ALIAS" -in "$CERT_FILE" -inkey "$PRIVKEY_FILE" -out "$KS_FILE.p12" -password pass:"$KS_PASS" > /dev/null 2>&1
keytool -importkeystore -srckeystore "$KS_FILE.p12" -destkeystore "$KS_FILE" -srcstoretype PKCS12 -alias "$ALIAS" -deststorepass "$KS_PASS" -destkeypass "$KS_PASS" -srcstorepass "$KS_PASS" -srckeypass "$KS_PASS" > /dev/null 2>&1
else
# Import certificate into the keystore
keytool -import -storepass "$KS_PASS" -alias "$ALIAS" -file "$CERT_FILE" -keystore "$NEW_KS_FILE" > /dev/null 2>&1 || true
keytool -import -storepass "$KS_PASS" -alias "$ALIAS" -file "$CERT_FILE" -keystore "$KS_FILE" > /dev/null 2>&1 || true
# Export private key from keystore
rm -f "$PRIVKEY_FILE"
keytool -importkeystore -srckeystore "$NEW_KS_FILE" -destkeystore "$NEW_KS_FILE.p12" -deststoretype PKCS12 -srcalias "$ALIAS" -deststorepass "$KS_PASS" -destkeypass "$KS_PASS" -srcstorepass "$KS_PASS" -srckeypass "$KS_PASS" > /dev/null 2>&1
openssl pkcs12 -in "$NEW_KS_FILE.p12" -nodes -nocerts -nomac -password pass:"$KS_PASS" 2>/dev/null | openssl rsa -out "$PRIVKEY_FILE" > /dev/null 2>&1
keytool -importkeystore -srckeystore "$KS_FILE" -destkeystore "$KS_FILE.p12" -deststoretype PKCS12 -srcalias "$ALIAS" -deststorepass "$KS_PASS" -destkeypass "$KS_PASS" -srcstorepass "$KS_PASS" -srckeypass "$KS_PASS" > /dev/null 2>&1
openssl pkcs12 -in "$KS_FILE.p12" -nodes -nocerts -nomac -password pass:"$KS_PASS" 2>/dev/null | openssl rsa -out "$PRIVKEY_FILE" > /dev/null 2>&1
fi
# Commit the new keystore
rm -f "$NEW_KS_FILE.p12"
mv -f "$NEW_KS_FILE" "$KS_FILE"
rm -f "$KS_FILE.p12"
# Secure libvirtd on cert import
if [ -f "$LIBVIRTD_FILE" ]; then

View File

@ -17,7 +17,7 @@
# under the License.
PROPS_FILE="$1"
KS_FILE="$2.new"
KS_FILE="$2"
KS_PASS="$3"
KS_VALIDITY="$4"
CSR_FILE="$5"
@ -35,8 +35,10 @@ if [ -f "$PROPS_FILE" ]; then
fi
fi
# Generate keystore
rm -f "$KS_FILE"
if [ -f "$KS_FILE" ]; then
keytool -delete -noprompt -alias "$ALIAS" -keystore "$KS_FILE" -storepass "$KS_PASS" > /dev/null 2>&1 || true
fi
CN=$(hostname --fqdn)
keytool -genkey -storepass "$KS_PASS" -keypass "$KS_PASS" -alias "$ALIAS" -keyalg RSA -validity "$KS_VALIDITY" -dname cn="$CN",ou="cloudstack",o="cloudstack",c="cloudstack" -keystore "$KS_FILE" > /dev/null 2>&1

File diff suppressed because it is too large

View File

@ -77,6 +77,7 @@ public class DomainRouterJoinDaoImpl extends GenericDaoBase<DomainRouterJoinVO,
routerResponse.setCreated(router.getCreated());
routerResponse.setState(router.getState());
routerResponse.setIsRedundantRouter(router.isRedundantRouter());
routerResponse.setScriptsVersion(router.getScriptsVersion());
if (router.getRedundantState() != null) {
routerResponse.setRedundantState(router.getRedundantState().toString());
}

View File

@ -890,14 +890,6 @@ public enum Config {
"0",
"Default disk I/O writerate in bytes per second allowed in User vm's disk.",
null),
KvmAutoConvergence(
"Advanced",
ManagementServer.class,
Boolean.class,
"kvm.auto.convergence",
"false",
"Setting this to 'true' allows KVM to use auto convergence to complete VM migration (libvirt version 1.2.3+ and QEMU version 1.6+)",
null),
ControlCidr(
"Advanced",
ManagementServer.class,

View File

@ -3893,9 +3893,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
}
boolean isDomainSpecific = false;
List<DomainVlanMapVO> domainVln = _domainVlanMapDao.listDomainVlanMapsByVlan(vlanRange.getId());
List<DomainVlanMapVO> domainVlan = _domainVlanMapDao.listDomainVlanMapsByVlan(vlanRange.getId());
// Check for domain wide pool. It will have an entry for domain_vlan_map.
if (domainVln != null && !domainVln.isEmpty()) {
if (domainVlan != null && !domainVlan.isEmpty()) {
isDomainSpecific = true;
}
@ -4052,10 +4052,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
forSystemVms = ip.isForSystemVms();
final Long allocatedToAccountId = ip.getAllocatedToAccountId();
if (allocatedToAccountId != null) {
final Account accountAllocatedTo = _accountMgr.getActiveAccountById(allocatedToAccountId);
if (!accountAllocatedTo.getAccountName().equalsIgnoreCase(accountName)) {
if (vlanOwner != null && allocatedToAccountId != vlanOwner.getId()) {
throw new InvalidParameterValueException(ip.getAddress() + " Public IP address in range is allocated to another account ");
}
final Account accountAllocatedTo = _accountMgr.getActiveAccountById(allocatedToAccountId);
if (vlanOwner == null && domain != null && domain.getId() != accountAllocatedTo.getDomainId()){
throw new InvalidParameterValueException(ip.getAddress()
+ " Public IP address in range is allocated to another domain/account ");
@ -4116,9 +4116,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
}
boolean isDomainSpecific = false;
final List<DomainVlanMapVO> domainVln = _domainVlanMapDao.listDomainVlanMapsByVlan(vlanDbId);
final List<DomainVlanMapVO> domainVlan = _domainVlanMapDao.listDomainVlanMapsByVlan(vlanDbId);
// Check for domain wide pool. It will have an entry for domain_vlan_map.
if (domainVln != null && !domainVln.isEmpty()) {
if (domainVlan != null && !domainVlan.isEmpty()) {
isDomainSpecific = true;
}
@ -4171,7 +4171,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
// decrement resource count for dedicated public ip's
_resourceLimitMgr.decrementResourceCount(acctVln.get(0).getAccountId(), ResourceType.public_ip, new Long(ips.size()));
return true;
} else if (isDomainSpecific && _domainVlanMapDao.remove(domainVln.get(0).getId())) {
} else if (isDomainSpecific && _domainVlanMapDao.remove(domainVlan.get(0).getId())) {
s_logger.debug("Remove the vlan from domain_vlan_map successfully.");
return true;
} else {

View File

@ -210,6 +210,12 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements
return null;
}
// Set cluster GUID based on cluster ID if null
if (cluster.getGuid() == null) {
cluster.setGuid(UUID.nameUUIDFromBytes(String.valueOf(clusterId).getBytes()).toString());
_clusterDao.update(clusterId, cluster);
}
Map<KvmDummyResourceBase, Map<String, String>> resources = new HashMap<KvmDummyResourceBase, Map<String, String>>();
Map<String, String> details = new HashMap<String, String>();
if (!uri.getScheme().equals("http")) {
@ -230,8 +236,9 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements
if (existingHosts != null) {
for (HostVO existingHost : existingHosts) {
if (existingHost.getGuid().toLowerCase().startsWith(guid.toLowerCase())) {
s_logger.debug("Skipping " + agentIp + " because " + guid + " is already in the database for resource " + existingHost.getGuid());
return null;
final String msg = "Skipping host " + agentIp + " because " + guid + " is already in the database for resource " + existingHost.getGuid() + " with ID " + existingHost.getUuid();
s_logger.debug(msg);
throw new CloudRuntimeException(msg);
}
}
}
@ -326,12 +333,6 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements
details.put("guid", connectedHost.getGuid());
// place a place holder guid derived from cluster ID
if (cluster.getGuid() == null) {
cluster.setGuid(UUID.nameUUIDFromBytes(String.valueOf(clusterId).getBytes()).toString());
_clusterDao.update(clusterId, cluster);
}
// save user name and password
_hostDao.loadDetails(connectedHost);
Map<String, String> hostDetails = connectedHost.getDetails();

View File

@ -895,6 +895,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ
@Override
public boolean release(final Network network, final NicProfile nic, final VirtualMachineProfile vm, final ReservationContext context) throws ConcurrentOperationException,
ResourceUnavailableException {
removeDhcpEntry(network, nic, vm);
return true;
}
@ -946,6 +947,34 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ
return false;
}
@Override
public boolean removeDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile vmProfile) throws ResourceUnavailableException {
boolean result = true;
if (canHandle(network, Service.Dhcp)) {
if (vmProfile.getType() != VirtualMachine.Type.User) {
return false;
}
final List<DomainRouterVO> routers = _routerDao.listByNetworkAndRole(network.getId(), VirtualRouter.Role.VIRTUAL_ROUTER);
if (CollectionUtils.isEmpty(routers)) {
throw new ResourceUnavailableException("Can't find at least one router!", DataCenter.class, network.getDataCenterId());
}
final DataCenterVO dcVO = _dcDao.findById(network.getDataCenterId());
final NetworkTopology networkTopology = networkTopologyContext.retrieveNetworkTopology(dcVO);
for (final DomainRouterVO domainRouterVO : routers) {
if (domainRouterVO.getState() != VirtualMachine.State.Running) {
continue;
}
result = result && networkTopology.removeDhcpEntry(network, nic, vmProfile, domainRouterVO);
}
}
return result;
}
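
One subtlety in the aggregation above: 'result = result && networkTopology.removeDhcpEntry(...)' short-circuits, so after the first router that fails, the entry is no longer removed from the remaining running routers. If every router should always be attempted, a non-short-circuiting accumulator does that; a minimal sketch with an illustrative Router interface:

import java.util.List;

public class AggregationSketch {
    interface Router { boolean removeDhcpEntry(); }

    static boolean removeFromAll(List<Router> routers) {
        boolean result = true;
        for (Router r : routers) {
            result &= r.removeDhcpEntry(); // '&=' still evaluates the call when result is already false
        }
        return result;
    }

    public static void main(String[] args) {
        List<Router> routers = List.<Router>of(() -> false, () -> true);
        System.out.println(removeFromAll(routers)); // false, but both routers were attempted
    }
}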
@Override
public boolean removeDnsSupportForSubnet(Network network) throws ResourceUnavailableException {
// Ignore if virtual router is already dhcp provider

View File

@ -211,7 +211,7 @@ public class CommandSetupHelper {
cmds.addCommand("users", cmd);
}
public void createDhcpEntryCommand(final VirtualRouter router, final UserVm vm, final NicVO nic, final Commands cmds) {
public void createDhcpEntryCommand(final VirtualRouter router, final UserVm vm, final NicVO nic, boolean remove, final Commands cmds) {
final DhcpEntryCommand dhcpCommand = new DhcpEntryCommand(nic.getMacAddress(), nic.getIPv4Address(), vm.getHostName(), nic.getIPv6Address(),
_networkModel.getExecuteInSeqNtwkElmtCmd());
@ -229,6 +229,7 @@ public class CommandSetupHelper {
dhcpCommand.setDefaultDns(ipaddress);
dhcpCommand.setDuid(NetUtils.getDuidLL(nic.getMacAddress()));
dhcpCommand.setDefault(nic.isDefaultNic());
dhcpCommand.setRemove(remove);
dhcpCommand.setAccessDetail(NetworkElementCommand.ROUTER_IP, _routerControlHelper.getRouterControlIp(router.getId()));
dhcpCommand.setAccessDetail(NetworkElementCommand.ROUTER_NAME, router.getInstanceName());
@ -622,7 +623,7 @@ public class CommandSetupHelper {
final NicVO nic = _nicDao.findByNtwkIdAndInstanceId(guestNetworkId, vm.getId());
if (nic != null) {
s_logger.debug("Creating dhcp entry for vm " + vm + " on domR " + router + ".");
createDhcpEntryCommand(router, vm, nic, cmds);
createDhcpEntryCommand(router, vm, nic, false, cmds);
}
}
}

View File

@ -36,6 +36,8 @@ public class DhcpEntryRules extends RuleApplier {
private final VirtualMachineProfile _profile;
private final DeployDestination _destination;
private boolean remove;
private NicVO _nicVo;
private UserVmVO _userVM;
@ -77,4 +79,12 @@ public class DhcpEntryRules extends RuleApplier {
public UserVmVO getUserVM() {
return _userVM;
}
public boolean isRemove() {
return remove;
}
public void setRemove(boolean remove) {
this.remove = remove;
}
}

View File

@ -37,7 +37,6 @@ import javax.crypto.spec.SecretKeySpec;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.storage.ScopeType;
import org.apache.cloudstack.acl.ControlledEntity;
import org.apache.cloudstack.affinity.AffinityGroupProcessor;
import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
@ -66,7 +65,7 @@ import org.apache.cloudstack.api.command.admin.config.ListDeploymentPlannersCmd;
import org.apache.cloudstack.api.command.admin.config.ListHypervisorCapabilitiesCmd;
import org.apache.cloudstack.api.command.admin.config.UpdateCfgCmd;
import org.apache.cloudstack.api.command.admin.config.UpdateHypervisorCapabilitiesCmd;
import org.apache.cloudstack.api.command.admin.direct.download.UploadTemplateDirectDownloadCertificate;
import org.apache.cloudstack.api.command.admin.direct.download.UploadTemplateDirectDownloadCertificateCmd;
import org.apache.cloudstack.api.command.admin.domain.CreateDomainCmd;
import org.apache.cloudstack.api.command.admin.domain.DeleteDomainCmd;
import org.apache.cloudstack.api.command.admin.domain.ListDomainChildrenCmd;
@ -217,10 +216,10 @@ import org.apache.cloudstack.api.command.admin.usage.AddTrafficTypeCmd;
import org.apache.cloudstack.api.command.admin.usage.DeleteTrafficMonitorCmd;
import org.apache.cloudstack.api.command.admin.usage.DeleteTrafficTypeCmd;
import org.apache.cloudstack.api.command.admin.usage.GenerateUsageRecordsCmd;
import org.apache.cloudstack.api.command.admin.usage.ListUsageRecordsCmd;
import org.apache.cloudstack.api.command.admin.usage.ListTrafficMonitorsCmd;
import org.apache.cloudstack.api.command.admin.usage.ListTrafficTypeImplementorsCmd;
import org.apache.cloudstack.api.command.admin.usage.ListTrafficTypesCmd;
import org.apache.cloudstack.api.command.admin.usage.ListUsageRecordsCmd;
import org.apache.cloudstack.api.command.admin.usage.ListUsageTypesCmd;
import org.apache.cloudstack.api.command.admin.usage.RemoveRawUsageRecordsCmd;
import org.apache.cloudstack.api.command.admin.usage.UpdateTrafficTypeCmd;
@ -338,6 +337,7 @@ import org.apache.cloudstack.api.command.user.iso.CopyIsoCmd;
import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd;
import org.apache.cloudstack.api.command.user.iso.DetachIsoCmd;
import org.apache.cloudstack.api.command.user.iso.ExtractIsoCmd;
import org.apache.cloudstack.api.command.user.iso.GetUploadParamsForIsoCmd;
import org.apache.cloudstack.api.command.user.iso.ListIsoPermissionsCmd;
import org.apache.cloudstack.api.command.user.iso.ListIsosCmd;
import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd;
@ -634,6 +634,7 @@ import com.cloud.storage.GuestOSHypervisor;
import com.cloud.storage.GuestOSHypervisorVO;
import com.cloud.storage.GuestOSVO;
import com.cloud.storage.GuestOsCategory;
import com.cloud.storage.ScopeType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume;
@ -1848,6 +1849,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
final Object keyword = cmd.getKeyword();
final Long physicalNetworkId = cmd.getPhysicalNetworkId();
final Long associatedNetworkId = cmd.getAssociatedNetworkId();
final Long sourceNetworkId = cmd.getNetworkId();
final Long zone = cmd.getZoneId();
final String address = cmd.getIpAddress();
final Long vlan = cmd.getVlanId();
@ -1893,7 +1895,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
sb.and("vlanDbId", sb.entity().getVlanId(), SearchCriteria.Op.EQ);
sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ);
sb.and("physicalNetworkId", sb.entity().getPhysicalNetworkId(), SearchCriteria.Op.EQ);
sb.and("associatedNetworkIdEq", sb.entity().getAssociatedWithNetworkId(), SearchCriteria.Op.EQ);
sb.and("associatedNetworkId", sb.entity().getAssociatedWithNetworkId(), SearchCriteria.Op.EQ);
sb.and("sourceNetworkId", sb.entity().getSourceNetworkId(), SearchCriteria.Op.EQ);
sb.and("isSourceNat", sb.entity().isSourceNat(), SearchCriteria.Op.EQ);
sb.and("isStaticNat", sb.entity().isOneToOneNat(), SearchCriteria.Op.EQ);
sb.and("vpcId", sb.entity().getVpcId(), SearchCriteria.Op.EQ);
@ -1991,7 +1994,11 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
}
if (associatedNetworkId != null) {
sc.setParameters("associatedNetworkIdEq", associatedNetworkId);
sc.setParameters("associatedNetworkId", associatedNetworkId);
}
if (sourceNetworkId != null) {
sc.setParameters("sourceNetworkId", sourceNetworkId);
}
if (forDisplay != null) {
@ -3068,8 +3075,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
cmdList.add(ReleasePodIpCmdByAdmin.class);
cmdList.add(CreateManagementNetworkIpRangeCmd.class);
cmdList.add(DeleteManagementNetworkIpRangeCmd.class);
cmdList.add(UploadTemplateDirectDownloadCertificate.class);
cmdList.add(UploadTemplateDirectDownloadCertificateCmd.class);
cmdList.add(ListMgmtsCmd.class);
cmdList.add(GetUploadParamsForIsoCmd.class);
// Out-of-band management APIs for admins
cmdList.add(EnableOutOfBandManagementForHostCmd.class);

View File

@ -25,17 +25,14 @@ import java.util.concurrent.TimeUnit;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.configuration.Resource;
import com.cloud.event.EventTypes;
import com.cloud.event.UsageEventUtils;
import com.cloud.user.ResourceLimitService;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import org.apache.cloudstack.managed.context.ManagedContextRunnable;
@ -48,6 +45,8 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
import org.apache.cloudstack.utils.identity.ManagementServerNode;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.agent.Listener;
import com.cloud.agent.api.AgentControlAnswer;
@ -56,6 +55,9 @@ import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;
import com.cloud.agent.api.StartupCommand;
import com.cloud.alert.AlertManager;
import com.cloud.configuration.Resource;
import com.cloud.event.EventTypes;
import com.cloud.event.UsageEventUtils;
import com.cloud.exception.ConnectionException;
import com.cloud.host.Host;
import com.cloud.host.Status;
@ -65,6 +67,7 @@ import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplateZoneDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.user.ResourceLimitService;
import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.concurrency.NamedThreadFactory;
import com.cloud.utils.db.Transaction;
@ -102,6 +105,12 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto
private AlertManager _alertMgr;
@Inject
private VMTemplateZoneDao _vmTemplateZoneDao;
@Inject
private DataStoreManager dataStoreManager;
@Inject
private TemplateDataFactory templateFactory;
@Inject
private TemplateService templateService;
private long _nodeId;
private ScheduledExecutorService _executor = null;
@ -110,7 +119,7 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto
static final ConfigKey<Integer> UploadMonitoringInterval = new ConfigKey<Integer>("Advanced", Integer.class, "upload.monitoring.interval", "60",
"Interval (in seconds) to check the status of volumes that are uploaded using HTTP POST request", true);
static final ConfigKey<Integer> UploadOperationTimeout = new ConfigKey<Integer>("Advanced", Integer.class, "upload.operation.timeout", "10",
static final ConfigKey<Integer> UploadOperationTimeout = new ConfigKey<Integer>("Advanced", Integer.class, "upload.operation.timeout", "60",
"Time (in minutes) to wait before abandoning volume upload using HTTP POST request", true);
@Override
@ -395,6 +404,20 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto
VMTemplateVO templateUpdate = _templateDao.createForUpdate();
templateUpdate.setSize(answer.getVirtualSize());
_templateDao.update(tmpTemplate.getId(), templateUpdate);
// For multi-disk OVA, check and create data disk templates
if (tmpTemplate.getFormat().equals(Storage.ImageFormat.OVA)) {
final DataStore store = dataStoreManager.getDataStore(templateDataStore.getDataStoreId(), templateDataStore.getDataStoreRole());
final TemplateInfo templateInfo = templateFactory.getTemplate(tmpTemplate.getId(), store);
if (!templateService.createOvaDataDiskTemplates(templateInfo)) {
tmpTemplateDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.ABANDONED);
tmpTemplateDataStore.setState(State.Failed);
stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao);
msg = "Multi-disk OVA template " + tmpTemplate.getUuid() + " failed to process data disks";
s_logger.error(msg);
sendAlert = true;
break;
}
}
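// Either the template is a single-disk image or all OVA data disks were registered; proceed to mark the upload complete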
stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationSucceeded, null, _templateDao);
_resourceLimitMgr.incrementResourceCount(template.getAccountId(), Resource.ResourceType.secondary_storage, answer.getVirtualSize());
//publish usage event

View File

@ -2489,8 +2489,17 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@Override
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey<?>[] { StorageCleanupInterval, StorageCleanupDelay, StorageCleanupEnabled, TemplateCleanupEnabled,
KvmStorageOfflineMigrationWait, KvmStorageOnlineMigrationWait, MaxNumberOfManagedClusteredFileSystems, PRIMARY_STORAGE_DOWNLOAD_WAIT};
return new ConfigKey<?>[]{
StorageCleanupInterval,
StorageCleanupDelay,
StorageCleanupEnabled,
TemplateCleanupEnabled,
KvmStorageOfflineMigrationWait,
KvmStorageOnlineMigrationWait,
KvmAutoConvergence,
MaxNumberOfManagedClusteredFileSystems,
PRIMARY_STORAGE_DOWNLOAD_WAIT
};
}
@Override

View File

@ -72,8 +72,6 @@ import org.apache.cloudstack.storage.command.AttachCommand;
import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
@ -120,6 +118,7 @@ import com.cloud.service.dao.ServiceOfferingDetailsDao;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.StoragePoolTagsDao;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.snapshot.SnapshotApiService;
@ -254,7 +253,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
@Inject
private StorageManager storageMgr;
@Inject
private StoragePoolDetailsDao storagePoolDetailsDao;
private StoragePoolTagsDao storagePoolTagsDao;
@Inject
private StorageUtil storageUtil;
@ -2076,11 +2075,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
// OfflineVmwareMigration: check storage tags on disk(offering)s in comparison to destination storage pool
// OfflineVmwareMigration: if no match return a proper error now
DiskOfferingVO diskOffering = _diskOfferingDao.findById(vol.getDiskOfferingId());
if(diskOffering.equals(null)) {
throw new CloudRuntimeException("volume '" + vol.getUuid() +"', has no diskoffering. Migration target cannot be checked.");
if (diskOffering == null) {
throw new CloudRuntimeException("volume '" + vol.getUuid() + "', has no diskoffering. Migration target cannot be checked.");
}
if(! doesTargetStorageSupportDiskOffering(destPool, diskOffering)) {
throw new CloudRuntimeException("Migration target has no matching tags for volume '" +vol.getName() + "(" + vol.getUuid() + ")'");
if (!doesTargetStorageSupportDiskOffering(destPool, diskOffering)) {
throw new CloudRuntimeException(String.format("Migration target pool [%s, tags:%s] has no matching tags for volume [%s, uuid:%s, tags:%s]", destPool.getName(),
getStoragePoolTags(destPool), vol.getName(), vol.getUuid(), diskOffering.getTags()));
}
if (liveMigrateVolume && destPool.getClusterId() != null && srcClusterId != null) {
@ -2275,15 +2275,11 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
* Retrieves the storage pool tags as a {@link String}. If the storage pool does not have tags we return a null value.
*/
protected String getStoragePoolTags(StoragePool destPool) {
List<StoragePoolDetailVO> storagePoolDetails = storagePoolDetailsDao.listDetails(destPool.getId());
if (CollectionUtils.isEmpty(storagePoolDetails)) {
List<String> destPoolTags = storagePoolTagsDao.getStoragePoolTags(destPool.getId());
if (CollectionUtils.isEmpty(destPoolTags)) {
return null;
}
String storageTags = "";
for (StoragePoolDetailVO storagePoolDetailVO : storagePoolDetails) {
storageTags = storageTags + storagePoolDetailVO.getName() + ",";
}
return storageTags.substring(0, storageTags.length() - 1);
return StringUtils.join(destPoolTags, ",");
}
private Volume orchestrateMigrateVolume(VolumeVO volume, StoragePool destPool, boolean liveMigrateVolume, DiskOfferingVO newDiskOffering) {

View File

@ -297,7 +297,7 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu
tmpSnapshotScheduleVO = _snapshotScheduleDao.acquireInLockTable(snapshotScheId);
final Long eventId =
ActionEventUtils.onScheduledActionEvent(User.UID_SYSTEM, volume.getAccountId(), EventTypes.EVENT_SNAPSHOT_CREATE, "creating snapshot for volume Id:" +
volumeId, true, 0);
volume.getUuid(), true, 0);
final Map<String, String> params = new HashMap<String, String>();
params.put(ApiConstants.VOLUME_ID, "" + volumeId);

View File

@ -0,0 +1,33 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage.upload.params;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.Storage;
public class IsoUploadParams extends UploadParamsBase {
public IsoUploadParams(long userId, String name, String displayText, Boolean isPublic, Boolean isFeatured,
Boolean isExtractable, Long osTypeId, Long zoneId, Boolean bootable, long ownerId) {
super(userId, name, displayText, isPublic, isFeatured, isExtractable, osTypeId, zoneId, bootable, ownerId);
setIso(true);
setBits(64);
setFormat(Storage.ImageFormat.ISO.toString());
setHypervisorType(Hypervisor.HypervisorType.None);
setRequiresHVM(true);
}
}

View File

@ -0,0 +1,38 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage.upload.params;
import com.cloud.hypervisor.Hypervisor;
import java.util.Map;
public class TemplateUploadParams extends UploadParamsBase {
public TemplateUploadParams(long userId, String name, String displayText,
Integer bits, Boolean passwordEnabled, Boolean requiresHVM,
Boolean isPublic, Boolean featured,
Boolean isExtractable, String format, Long guestOSId,
Long zoneId, Hypervisor.HypervisorType hypervisorType, String chksum,
String templateTag, long templateOwnerId,
Map details, Boolean sshkeyEnabled,
Boolean isDynamicallyScalable, Boolean isRoutingType) {
super(userId, name, displayText, bits, passwordEnabled, requiresHVM, isPublic, featured, isExtractable,
format, guestOSId, zoneId, hypervisorType, chksum, templateTag, templateOwnerId, details,
sshkeyEnabled, isDynamicallyScalable, isRoutingType);
setBootable(true);
}
}

View File

@ -0,0 +1,49 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage.upload.params;
import com.cloud.hypervisor.Hypervisor;
import java.util.Map;
public interface UploadParams {
boolean isIso();
long getUserId();
String getName();
String getDisplayText();
Integer getBits();
boolean isPasswordEnabled();
boolean requiresHVM();
String getUrl();
boolean isPublic();
boolean isFeatured();
boolean isExtractable();
String getFormat();
Long getGuestOSId();
Long getZoneId();
Hypervisor.HypervisorType getHypervisorType();
String getChecksum();
boolean isBootable();
String getTemplateTag();
long getTemplateOwnerId();
Map getDetails();
boolean isSshKeyEnabled();
String getImageStoreUuid();
boolean isDynamicallyScalable();
boolean isRoutingType();
boolean isDirectDownload();
}

View File

@ -0,0 +1,240 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage.upload.params;
import com.cloud.hypervisor.Hypervisor;
import java.util.Map;
public abstract class UploadParamsBase implements UploadParams {
private boolean isIso;
private long userId;
private String name;
private String displayText;
private Integer bits;
private boolean passwordEnabled;
private boolean requiresHVM;
private boolean isPublic;
private boolean featured;
private boolean isExtractable;
private String format;
private Long guestOSId;
private Long zoneId;
private Hypervisor.HypervisorType hypervisorType;
private String checksum;
private boolean bootable;
private String templateTag;
private long templateOwnerId;
private Map details;
private boolean sshkeyEnabled;
private boolean isDynamicallyScalable;
private boolean isRoutingType;
UploadParamsBase(long userId, String name, String displayText,
Integer bits, boolean passwordEnabled, boolean requiresHVM,
boolean isPublic, boolean featured,
boolean isExtractable, String format, Long guestOSId,
Long zoneId, Hypervisor.HypervisorType hypervisorType, String checksum,
String templateTag, long templateOwnerId,
Map details, boolean sshkeyEnabled,
boolean isDynamicallyScalable, boolean isRoutingType) {
this.userId = userId;
this.name = name;
this.displayText = displayText;
this.bits = bits;
this.passwordEnabled = passwordEnabled;
this.requiresHVM = requiresHVM;
this.isPublic = isPublic;
this.featured = featured;
this.isExtractable = isExtractable;
this.format = format;
this.guestOSId = guestOSId;
this.zoneId = zoneId;
this.hypervisorType = hypervisorType;
this.checksum = checksum;
this.templateTag = templateTag;
this.templateOwnerId = templateOwnerId;
this.details = details;
this.sshkeyEnabled = sshkeyEnabled;
this.isDynamicallyScalable = isDynamicallyScalable;
this.isRoutingType = isRoutingType;
}
UploadParamsBase(long userId, String name, String displayText, boolean isPublic, boolean isFeatured,
boolean isExtractable, Long osTypeId, Long zoneId, boolean bootable, long ownerId) {
this.userId = userId;
this.name = name;
this.displayText = displayText;
this.isPublic = isPublic;
this.featured = isFeatured;
this.isExtractable = isExtractable;
this.guestOSId = osTypeId;
this.zoneId = zoneId;
this.bootable = bootable;
this.templateOwnerId = ownerId;
}
@Override
public boolean isIso() {
return isIso;
}
@Override
public long getUserId() {
return userId;
}
@Override
public String getName() {
return name;
}
@Override
public String getDisplayText() {
return displayText;
}
@Override
public Integer getBits() {
return bits;
}
@Override
public boolean isPasswordEnabled() {
return passwordEnabled;
}
@Override
public boolean requiresHVM() {
return requiresHVM;
}
@Override
public String getUrl() {
return null;
}
@Override
public boolean isPublic() {
return isPublic;
}
@Override
public boolean isFeatured() {
return featured;
}
@Override
public boolean isExtractable() {
return isExtractable;
}
@Override
public String getFormat() {
return format;
}
@Override
public Long getGuestOSId() {
return guestOSId;
}
@Override
public Long getZoneId() {
return zoneId;
}
@Override
public Hypervisor.HypervisorType getHypervisorType() {
return hypervisorType;
}
@Override
public String getChecksum() {
return checksum;
}
@Override
public boolean isBootable() {
return bootable;
}
@Override
public String getTemplateTag() {
return templateTag;
}
@Override
public long getTemplateOwnerId() {
return templateOwnerId;
}
@Override
public Map getDetails() {
return details;
}
@Override
public boolean isSshKeyEnabled() {
return sshkeyEnabled;
}
@Override
public String getImageStoreUuid() {
return null;
}
@Override
public boolean isDynamicallyScalable() {
return isDynamicallyScalable;
}
@Override
public boolean isRoutingType() {
return isRoutingType;
}
@Override
public boolean isDirectDownload() {
return false;
}
void setIso(boolean iso) {
isIso = iso;
}
void setBootable(boolean bootable) {
this.bootable = bootable;
}
void setBits(Integer bits) {
this.bits = bits;
}
void setFormat(String format) {
this.format = format;
}
void setRequiresHVM(boolean requiresHVM) {
this.requiresHVM = requiresHVM;
}
void setHypervisorType(Hypervisor.HypervisorType hypervisorType) {
this.hypervisorType = hypervisorType;
}
}

View File

@ -35,6 +35,7 @@ import com.cloud.utils.db.TransactionCallback;
import com.cloud.utils.db.TransactionStatus;
import org.apache.cloudstack.agent.directdownload.CheckUrlAnswer;
import org.apache.cloudstack.agent.directdownload.CheckUrlCommand;
import org.apache.cloudstack.api.command.user.iso.GetUploadParamsForIsoCmd;
import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
@ -166,6 +167,15 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
return profile;
}
@Override
public TemplateProfile prepare(GetUploadParamsForIsoCmd cmd) throws ResourceAllocationException {
TemplateProfile profile = super.prepare(cmd);
// Check that the resource limit for secondary storage won't be exceeded
_resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(cmd.getEntityOwnerId()), ResourceType.secondary_storage);
return profile;
}
@Override
public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocationException {
TemplateProfile profile = super.prepare(cmd);

View File

@ -20,6 +20,7 @@ import java.util.List;
import java.util.Map;
import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd;
import org.apache.cloudstack.api.command.user.iso.GetUploadParamsForIsoCmd;
import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd;
import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd;
import org.apache.cloudstack.api.command.user.template.ExtractTemplateCmd;
@ -51,29 +52,31 @@ public interface TemplateAdapter extends Adapter {
}
}
public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocationException;
TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocationException;
public TemplateProfile prepare(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException;
TemplateProfile prepare(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException;
public TemplateProfile prepare(RegisterIsoCmd cmd) throws ResourceAllocationException;
TemplateProfile prepare(RegisterIsoCmd cmd) throws ResourceAllocationException;
public VMTemplateVO create(TemplateProfile profile);
TemplateProfile prepare(GetUploadParamsForIsoCmd cmd) throws ResourceAllocationException;
public List<TemplateOrVolumePostUploadCommand> createTemplateForPostUpload(TemplateProfile profile);
VMTemplateVO create(TemplateProfile profile);
public TemplateProfile prepareDelete(DeleteTemplateCmd cmd);
List<TemplateOrVolumePostUploadCommand> createTemplateForPostUpload(TemplateProfile profile);
public TemplateProfile prepareDelete(DeleteIsoCmd cmd);
TemplateProfile prepareDelete(DeleteTemplateCmd cmd);
public TemplateProfile prepareExtractTemplate(ExtractTemplateCmd cmd);
TemplateProfile prepareDelete(DeleteIsoCmd cmd);
public boolean delete(TemplateProfile profile);
TemplateProfile prepareExtractTemplate(ExtractTemplateCmd cmd);
public TemplateProfile prepare(boolean isIso, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url,
boolean delete(TemplateProfile profile);
TemplateProfile prepare(boolean isIso, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url,
Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List<Long> zoneId, HypervisorType hypervisorType, String accountName,
Long domainId, String chksum, Boolean bootable, Map details, boolean directDownload) throws ResourceAllocationException;
public TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url,
TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url,
Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List<Long> zoneId, HypervisorType hypervisorType, String chksum,
Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshKeyEnabled, String imageStoreUuid, Boolean isDynamicallyScalable,
TemplateType templateType, boolean directDownload) throws ResourceAllocationException;

View File

@ -23,8 +23,13 @@ import java.util.Map;
import javax.inject.Inject;
import com.cloud.storage.upload.params.IsoUploadParams;
import com.cloud.storage.upload.params.TemplateUploadParams;
import com.cloud.storage.upload.params.UploadParams;
import org.apache.cloudstack.api.command.user.iso.GetUploadParamsForIsoCmd;
import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.BooleanUtils;
import org.apache.log4j.Logger;
import org.apache.cloudstack.api.ApiConstants;
@ -284,35 +289,55 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat
}
@Override
public TemplateProfile prepare(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException {
/**
* Internal helper that prepares upload parameters for local upload of templates and ISOs
*/
private TemplateProfile prepareUploadParamsInternal(UploadParams params) throws ResourceAllocationException {
//check if the caller can operate with the template owner
Account caller = CallContext.current().getCallingAccount();
Account owner = _accountMgr.getAccount(cmd.getEntityOwnerId());
Account owner = _accountMgr.getAccount(params.getTemplateOwnerId());
_accountMgr.checkAccess(caller, null, true, owner);
boolean isRouting = (cmd.isRoutingType() == null) ? false : cmd.isRoutingType();
List<Long> zoneList = null;
Long zoneId = cmd.getZoneId();
// ignore passed zoneId if we are using region wide image store
List<ImageStoreVO> stores = _imgStoreDao.findRegionImageStores();
if (!(stores != null && stores.size() > 0)) {
zoneList = new ArrayList<>();
zoneList.add(zoneId);
zoneList.add(params.getZoneId());
}
HypervisorType hypervisorType = HypervisorType.getType(cmd.getHypervisor());
if(hypervisorType == HypervisorType.None) {
throw new InvalidParameterValueException("Hypervisor Type: " + cmd.getHypervisor() + " is invalid. Supported Hypervisor types are "
+ EnumUtils.listValues(HypervisorType.values()).replace("None, ", ""));
if(!params.isIso() && params.getHypervisorType() == HypervisorType.None) {
throw new InvalidParameterValueException("Hypervisor Type: " + params.getHypervisorType() + " is invalid. Supported Hypervisor types are "
+ EnumUtils.listValues(HypervisorType.values()).replace("None, ", ""));
}
return prepare(false, CallContext.current().getCallingUserId(), cmd.getName(), cmd.getDisplayText(), cmd.getBits(), cmd.isPasswordEnabled(),
cmd.getRequiresHvm(), null, cmd.isPublic(), cmd.isFeatured(), cmd.isExtractable(), cmd.getFormat(), cmd.getOsTypeId(), zoneList,
hypervisorType, cmd.getChecksum(), true, cmd.getTemplateTag(), owner, cmd.getDetails(), cmd.isSshKeyEnabled(), null,
cmd.isDynamicallyScalable(), isRouting ? TemplateType.ROUTING : TemplateType.USER, false);
return prepare(params.isIso(), params.getUserId(), params.getName(), params.getDisplayText(), params.getBits(),
params.isPasswordEnabled(), params.requiresHVM(), params.getUrl(), params.isPublic(), params.isFeatured(),
params.isExtractable(), params.getFormat(), params.getGuestOSId(), zoneList,
params.getHypervisorType(), params.getChecksum(), params.isBootable(), params.getTemplateTag(), owner,
params.getDetails(), params.isSshKeyEnabled(), params.getImageStoreUuid(),
params.isDynamicallyScalable(), params.isRoutingType() ? TemplateType.ROUTING : TemplateType.USER, params.isDirectDownload());
}
@Override
public TemplateProfile prepare(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException {
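// BooleanUtils.toBoolean(Boolean) maps null to false, so the optional API flags default to false here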
UploadParams params = new TemplateUploadParams(CallContext.current().getCallingUserId(), cmd.getName(),
cmd.getDisplayText(), cmd.getBits(), BooleanUtils.toBoolean(cmd.isPasswordEnabled()),
BooleanUtils.toBoolean(cmd.getRequiresHvm()), BooleanUtils.toBoolean(cmd.isPublic()),
BooleanUtils.toBoolean(cmd.isFeatured()), BooleanUtils.toBoolean(cmd.isExtractable()), cmd.getFormat(), cmd.getOsTypeId(),
cmd.getZoneId(), HypervisorType.getType(cmd.getHypervisor()), cmd.getChecksum(),
cmd.getTemplateTag(), cmd.getEntityOwnerId(), cmd.getDetails(), BooleanUtils.toBoolean(cmd.isSshKeyEnabled()),
BooleanUtils.toBoolean(cmd.isDynamicallyScalable()), BooleanUtils.toBoolean(cmd.isRoutingType()));
return prepareUploadParamsInternal(params);
}
@Override
public TemplateProfile prepare(GetUploadParamsForIsoCmd cmd) throws ResourceAllocationException {
UploadParams params = new IsoUploadParams(CallContext.current().getCallingUserId(), cmd.getName(),
cmd.getDisplayText(), BooleanUtils.toBoolean(cmd.isPublic()), BooleanUtils.toBoolean(cmd.isFeatured()),
BooleanUtils.toBoolean(cmd.isExtractable()), cmd.getOsTypeId(),
cmd.getZoneId(), BooleanUtils.toBoolean(cmd.isBootable()), cmd.getEntityOwnerId());
return prepareUploadParamsInternal(params);
}
@Override

View File

@ -43,6 +43,7 @@ import com.google.common.base.Joiner;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import org.apache.cloudstack.api.command.user.iso.GetUploadParamsForIsoCmd;
import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd;
import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand;
@ -349,11 +350,14 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
}
}
@Override
@ActionEvent(eventType = EventTypes.EVENT_TEMPLATE_CREATE, eventDescription = "creating post upload template")
public GetUploadParamsResponse registerTemplateForPostUpload(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException, MalformedURLException {
TemplateAdapter adapter = getAdapter(HypervisorType.getType(cmd.getHypervisor()));
TemplateProfile profile = adapter.prepare(cmd);
/**
* Internal helper that registers a template or ISO after local upload
* @param adapter
* @param profile
*/
private GetUploadParamsResponse registerPostUploadInternal(TemplateAdapter adapter,
TemplateProfile profile) throws MalformedURLException {
List<TemplateOrVolumePostUploadCommand> payload = adapter.createTemplateForPostUpload(profile);
if(CollectionUtils.isNotEmpty(payload)) {
@ -403,6 +407,21 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
}
}
@Override
@ActionEvent(eventType = EventTypes.EVENT_ISO_CREATE, eventDescription = "creating post upload iso")
public GetUploadParamsResponse registerIsoForPostUpload(GetUploadParamsForIsoCmd cmd) throws ResourceAllocationException, MalformedURLException {
TemplateAdapter adapter = getAdapter(HypervisorType.None);
TemplateProfile profile = adapter.prepare(cmd);
return registerPostUploadInternal(adapter, profile);
}
@Override
@ActionEvent(eventType = EventTypes.EVENT_TEMPLATE_CREATE, eventDescription = "creating post upload template")
public GetUploadParamsResponse registerTemplateForPostUpload(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException, MalformedURLException {
TemplateAdapter adapter = getAdapter(HypervisorType.getType(cmd.getHypervisor()));
TemplateProfile profile = adapter.prepare(cmd);
return registerPostUploadInternal(adapter, profile);
}
@Override
public DataStore getImageStore(String storeUuid, Long zoneId) {
@ -558,6 +577,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
if (vm.getIsoId() != null) {
Map<Volume, StoragePool> storageForDisks = dest.getStorageForDisks();
Long poolId = null;
TemplateInfo template;
if (MapUtils.isNotEmpty(storageForDisks)) {
for (StoragePool storagePool : storageForDisks.values()) {
if (poolId != null && storagePool.getId() != poolId) {
@ -565,8 +585,11 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
}
poolId = storagePool.getId();
}
template = prepareIso(vm.getIsoId(), vm.getDataCenterId(), dest.getHost().getId(), poolId);
} else {
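// No per-disk storage placement was computed, so resolve the ISO directly from primary storage in this zone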
template = _tmplFactory.getTemplate(vm.getIsoId(), DataStoreRole.Primary, dest.getDataCenter().getId());
}
TemplateInfo template = prepareIso(vm.getIsoId(), vm.getDataCenterId(), dest.getHost().getId(), poolId);
if (template == null){
s_logger.error("Failed to prepare ISO on secondary or cache storage");
throw new CloudRuntimeException("Failed to prepare ISO on secondary or cache storage");

View File

@ -302,7 +302,7 @@ import com.cloud.vm.dao.VMInstanceDao;
import com.cloud.vm.snapshot.VMSnapshotManager;
import com.cloud.vm.snapshot.VMSnapshotVO;
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
import com.google.common.base.Strings;
public class UserVmManagerImpl extends ManagerBase implements UserVmManager, VirtualMachineGuru, UserVmService, Configurable {
private static final Logger s_logger = Logger.getLogger(UserVmManagerImpl.class);
@ -1460,6 +1460,19 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
newNetworkOfferingId, null, 0L, VirtualMachine.class.getName(), vmInstance.getUuid(), vmInstance.isDisplay());
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_ASSIGN, vmInstance.getAccountId(), vmInstance.getDataCenterId(), vmInstance.getId(),
oldNicIdString, oldNetworkOfferingId, null, 0L, VirtualMachine.class.getName(), vmInstance.getUuid(), vmInstance.isDisplay());
if (vmInstance.getState() != State.Stopped) {
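// For a VM that is not stopped, re-run network preparation so the new offering's services (e.g. DHCP entries on the VR) take effect without a restart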
try {
VirtualMachineProfile vmProfile = new VirtualMachineProfileImpl(vmInstance);
User callerUser = _accountMgr.getActiveUser(CallContext.current().getCallingUserId());
ReservationContext context = new ReservationContextImpl(null, null, callerUser, caller);
DeployDestination dest = new DeployDestination(dc, null, null, null);
_networkMgr.prepare(vmProfile, dest, context);
} catch (final Exception e) {
s_logger.info("Got exception: ", e);
}
}
return _vmDao.findById(vmInstance.getId());
}
@ -4419,10 +4432,16 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
}
}
List<NicVO> nics = _nicDao.listByVmId(vm.getId());
for (NicVO nic : nics) {
NetworkVO network = _networkDao.findById(nic.getNetworkId());
if (network.getTrafficType() == TrafficType.Guest) {
final List<NicVO> nics = _nicDao.listByVmId(vm.getId());
for (final NicVO nic : nics) {
final NetworkVO network = _networkDao.findById(nic.getNetworkId());
if (network != null && network.getTrafficType() == TrafficType.Guest) {
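// Release the guest NIC's DHCP/DNS entry on the virtual router during expunge; prefer the IPv4 address and fall back to IPv6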
final String nicIp = Strings.isNullOrEmpty(nic.getIPv4Address()) ? nic.getIPv6Address() : nic.getIPv4Address();
if (!Strings.isNullOrEmpty(nicIp)) {
NicProfile nicProfile = new NicProfile(nic.getIPv4Address(), nic.getIPv6Address(), nic.getMacAddress());
nicProfile.setId(nic.getId());
_networkMgr.cleanupNicDhcpDnsEntry(network, profile, nicProfile);
}
if (nic.getBroadcastUri() != null && nic.getBroadcastUri().getScheme().equals("pvlan")) {
NicProfile nicProfile = new NicProfile(nic, network, nic.getBroadcastUri(), nic.getIsolationUri(), 0, false, "pvlan-nic");
setupVmForPvlan(false, vm.getHostId(), nicProfile);

View File

@ -41,6 +41,10 @@ import com.cloud.utils.exception.CloudRuntimeException;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.cert.Certificate;
import java.security.cert.CertificateException;
import java.security.cert.CertificateExpiredException;
import java.security.cert.CertificateNotYetValidException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@ -50,6 +54,7 @@ import java.util.Collections;
import java.util.stream.Collectors;
import javax.inject.Inject;
import com.cloud.utils.security.CertificateHelper;
import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand;
import org.apache.cloudstack.agent.directdownload.DirectDownloadAnswer;
import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand.DownloadProtocol;
@ -57,7 +62,7 @@ import org.apache.cloudstack.agent.directdownload.HttpDirectDownloadCommand;
import org.apache.cloudstack.agent.directdownload.MetalinkDirectDownloadCommand;
import org.apache.cloudstack.agent.directdownload.NfsDirectDownloadCommand;
import org.apache.cloudstack.agent.directdownload.HttpsDirectDownloadCommand;
import org.apache.cloudstack.agent.directdownload.SetupDirectDownloadCertificate;
import org.apache.cloudstack.agent.directdownload.SetupDirectDownloadCertificateCommand;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
@ -69,6 +74,7 @@ import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.log4j.Logger;
import sun.security.x509.X509CertImpl;
public class DirectDownloadManagerImpl extends ManagerBase implements DirectDownloadManager {
@ -79,17 +85,17 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown
protected final static String LINE_SEPARATOR = "\n";
@Inject
VMTemplateDao vmTemplateDao;
private VMTemplateDao vmTemplateDao;
@Inject
PrimaryDataStoreDao primaryDataStoreDao;
private PrimaryDataStoreDao primaryDataStoreDao;
@Inject
HostDao hostDao;
private HostDao hostDao;
@Inject
AgentManager agentManager;
private AgentManager agentManager;
@Inject
VMTemplatePoolDao vmTemplatePoolDao;
private VMTemplatePoolDao vmTemplatePoolDao;
@Inject
DataStoreManager dataStoreManager;
private DataStoreManager dataStoreManager;
@Override
public List<Class<?>> getCommands() {
@ -313,17 +319,76 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown
.collect(Collectors.toList());
}
@Override
public boolean uploadCertificateToHosts(String certificateCer, String certificateName, String hypervisor) {
HypervisorType hypervisorType = HypervisorType.getType(hypervisor);
List<HostVO> hosts = getRunningHostsToUploadCertificate(hypervisorType);
if (CollectionUtils.isNotEmpty(hosts)) {
for (HostVO host : hosts) {
if (!uploadCertificate(certificateCer, certificateName, host.getId())) {
throw new CloudRuntimeException("Uploading certificate " + certificateName + " failed on host: " + host.getId());
}
/**
* Return the certificate prettified into PEM format
*/
protected String getPretifiedCertificate(String certificateCer) {
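// Wrap the raw base64 payload at 64 characters per line, as PEM encoding requires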
String cert = certificateCer.replaceAll("(.{64})", "$1\n");
if (!cert.startsWith(BEGIN_CERT) && !cert.endsWith(END_CERT)) {
cert = BEGIN_CERT + LINE_SEPARATOR + cert + LINE_SEPARATOR + END_CERT;
}
return cert;
}
/**
* Generate and return a certificate from the given string
* @throws CloudRuntimeException if the certificate is not well formed
*/
private Certificate getCertificateFromString(String certificatePem) {
try {
return CertificateHelper.buildCertificate(certificatePem);
} catch (CertificateException e) {
s_logger.error("Cannot parse the certificate provided", e);
throw new CloudRuntimeException("Cannot parse the certificate provided, please provide a PEM certificate. Error: " + e.getMessage());
}
}
/**
* Perform a sanity check on the parsed certificate
*/
protected void certificateSanity(String certificatePem) {
Certificate certificate = getCertificateFromString(certificatePem);
if (certificate instanceof X509CertImpl) {
X509CertImpl x509Cert = (X509CertImpl) certificate;
try {
x509Cert.checkValidity();
} catch (CertificateExpiredException | CertificateNotYetValidException e) {
String msg = "Certificate is invalid. Please provide a valid certificate. Error: " + e.getMessage();
s_logger.error(msg);
throw new CloudRuntimeException(msg);
}
if (x509Cert.getSubjectDN() != null) {
s_logger.debug("Valid certificate for domain name: " + x509Cert.getSubjectDN().getName());
}
}
}
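// NOTE: sun.security.x509.X509CertImpl is a JDK-internal class and may be inaccessible on newer
// JDKs. A minimal sketch of an equivalent validity check using only the public java.security.cert
// API (illustrative, not part of this change) could look like:
//
//     CertificateFactory factory = CertificateFactory.getInstance("X.509");
//     X509Certificate x509 = (X509Certificate) factory.generateCertificate(
//             new ByteArrayInputStream(certificatePem.getBytes(StandardCharsets.US_ASCII)));
//     x509.checkValidity(); // throws CertificateExpiredException / CertificateNotYetValidException
//     s_logger.debug("Valid certificate for subject: " + x509.getSubjectX500Principal().getName());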
@Override
public boolean uploadCertificateToHosts(String certificateCer, String alias, String hypervisor) {
if (alias != null && (alias.equalsIgnoreCase("cloud") || alias.startsWith("cloudca"))) {
throw new CloudRuntimeException("Please provide a different alias name for the certificate");
}
HypervisorType hypervisorType = HypervisorType.getType(hypervisor);
List<HostVO> hosts = getRunningHostsToUploadCertificate(hypervisorType);
String certificatePem = getPretifiedCertificate(certificateCer);
certificateSanity(certificatePem);
s_logger.info("Attempting to upload certificate: " + alias + " to " + hosts.size() + " hosts");
int hostCount = 0;
if (CollectionUtils.isNotEmpty(hosts)) {
for (HostVO host : hosts) {
if (!uploadCertificate(certificatePem, alias, host.getId())) {
String msg = "Could not upload certificate " + alias + " on host: " + host.getName() + " (" + host.getUuid() + ")";
s_logger.error(msg);
throw new CloudRuntimeException(msg);
}
hostCount++;
}
}
s_logger.info("Certificate was successfully uploaded to " + hostCount + " hosts");
return true;
}
@ -331,14 +396,19 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown
* Upload and import certificate to hostId on keystore
*/
protected boolean uploadCertificate(String certificate, String certificateName, long hostId) {
String cert = certificate.replaceAll("(.{64})", "$1\n");
final String prettified_cert = BEGIN_CERT + LINE_SEPARATOR + cert + LINE_SEPARATOR + END_CERT;
SetupDirectDownloadCertificate cmd = new SetupDirectDownloadCertificate(prettified_cert, certificateName);
s_logger.debug("Uploading certificate: " + certificateName + " to host " + hostId);
SetupDirectDownloadCertificateCommand cmd = new SetupDirectDownloadCertificateCommand(certificate, certificateName);
Answer answer = agentManager.easySend(hostId, cmd);
if (answer == null || !answer.getResult()) {
String msg = "Certificate " + certificateName + " could not be added to host " + hostId;
if (answer != null) {
msg += " due to: " + answer.getDetails();
}
s_logger.error(msg);
return false;
}
s_logger.info("Certificate " + certificateName + " successfully uploaded to host: " + hostId);
return true;
}
}

View File

@ -172,6 +172,21 @@ public class AdvancedNetworkTopology extends BasicNetworkTopology {
return applyRules(network, router, typeString, isPodLevelException, podId, failWhenDisconnect, new RuleApplierWrapper<RuleApplier>(dhcpRules));
}
@Override
public boolean removeDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile profile, VirtualRouter virtualRouter) throws ResourceUnavailableException {
s_logger.debug("REMOVE VPC DHCP ENTRY RULES");
final String typeString = "dhcp entry";
final Long podId = null;
final boolean isPodLevelException = false;
final boolean failWhenDisconnect = false;
final DhcpEntryRules dhcpRules = new DhcpEntryRules(network, nic, profile, null);
dhcpRules.setRemove(true);
return applyRules(network, virtualRouter, typeString, isPodLevelException, podId, failWhenDisconnect, new RuleApplierWrapper<RuleApplier>(dhcpRules));
}
@Override
public boolean associatePublicIP(final Network network, final List<? extends PublicIpAddress> ipAddresses, final VirtualRouter router)
throws ResourceUnavailableException {

View File

@ -80,8 +80,9 @@ public class AdvancedNetworkVisitor extends BasicNetworkVisitor {
final Commands commands = new Commands(Command.OnError.Stop);
final NicVO nicVo = dhcp.getNicVo();
final UserVmVO userVM = dhcp.getUserVM();
final boolean remove = dhcp.isRemove();
_commandSetupHelper.createDhcpEntryCommand(router, userVM, nicVo, commands);
_commandSetupHelper.createDhcpEntryCommand(router, userVM, nicVo, remove, commands);
return _networkGeneralHelper.sendCommandsToRouter(router, commands);
}

View File

@ -442,4 +442,25 @@ public class BasicNetworkTopology implements NetworkTopology {
}
return result;
}
@Override
public boolean removeDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile profile, VirtualRouter virtualRouter) throws ResourceUnavailableException {
s_logger.debug("REMOVING DHCP ENTRY RULE");
final String typeString = "dhcp entry";
final Long podId = profile.getVirtualMachine().getPodIdToDeployIn();
boolean isPodLevelException = false;
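// For user VMs on shared guest networks, DHCP is served per pod, so a failure can be scoped to the pod rather than the whole zone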
if (podId != null && profile.getVirtualMachine().getType() == VirtualMachine.Type.User && network.getTrafficType() == TrafficType.Guest
&& network.getGuestType() == Network.GuestType.Shared) {
isPodLevelException = true;
}
final boolean failWhenDisconnect = false;
final DhcpEntryRules dhcpRules = new DhcpEntryRules(network, nic, profile, null);
dhcpRules.setRemove(true);
return applyRules(network, virtualRouter, typeString, isPodLevelException, podId, failWhenDisconnect, new RuleApplierWrapper<RuleApplier>(dhcpRules));
}
}

View File

@ -196,9 +196,10 @@ public class BasicNetworkVisitor extends NetworkTopologyVisitor {
final NicVO nicVo = dhcp.getNicVo();
final UserVmVO userVM = dhcp.getUserVM();
final DeployDestination destination = dhcp.getDestination();
final boolean remove = dhcp.isRemove();
if (router.getPodIdToDeployIn().longValue() == destination.getPod().getId()) {
_commandSetupHelper.createDhcpEntryCommand(router, userVM, nicVo, commands);
_commandSetupHelper.createDhcpEntryCommand(router, userVM, nicVo, remove, commands);
return _networkGeneralHelper.sendCommandsToRouter(router, commands);
}

View File

@ -87,4 +87,6 @@ public interface NetworkTopology {
boolean applyRules(final Network network, final VirtualRouter router, final String typeString, final boolean isPodLevelException, final Long podId,
final boolean failWhenDisconnect, RuleApplierWrapper<RuleApplier> ruleApplier) throws ResourceUnavailableException;
boolean removeDhcpEntry(final Network network, final NicProfile nic, final VirtualMachineProfile profile, final VirtualRouter virtualRouter) throws ResourceUnavailableException;
}

View File

@ -49,8 +49,6 @@ import org.apache.cloudstack.framework.jobs.AsyncJobManager;
import org.apache.cloudstack.framework.jobs.dao.AsyncJobJoinMapDao;
import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
@ -79,6 +77,7 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.org.Grouping;
import com.cloud.serializer.GsonHelper;
import com.cloud.storage.Volume.Type;
import com.cloud.storage.dao.StoragePoolTagsDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.user.Account;
@ -146,7 +145,7 @@ public class VolumeApiServiceImplTest {
@Mock
private HostDao _hostDao;
@Mock
private StoragePoolDetailsDao storagePoolDetailsDao;
private StoragePoolTagsDao storagePoolTagsDao;
private DetachVolumeCmd detachCmd = new DetachVolumeCmd();
private Class<?> _detachCmdClass = detachCmd.getClass();
@ -516,26 +515,25 @@ public class VolumeApiServiceImplTest {
@Test
public void getStoragePoolTagsTestStorageWithoutTags() {
Mockito.when(storagePoolDetailsDao.listDetails(storagePoolMockId)).thenReturn(new ArrayList<>());
Mockito.when(storagePoolTagsDao.getStoragePoolTags(storagePoolMockId)).thenReturn(new ArrayList<>());
String returnedStoragePoolTags = volumeApiServiceImpl.getStoragePoolTags(storagePoolMock);
Assert.assertNull(returnedStoragePoolTags);
}
@Test
public void getStoragePoolTagsTestStorageWithTags() {
ArrayList<StoragePoolDetailVO> tags = new ArrayList<>();
StoragePoolDetailVO tag1 = new StoragePoolDetailVO(1l, "tag1", "value", true);
StoragePoolDetailVO tag2 = new StoragePoolDetailVO(1l, "tag2", "value", true);
StoragePoolDetailVO tag3 = new StoragePoolDetailVO(1l, "tag3", "value", true);
ArrayList<String> tags = new ArrayList<>();
String tag1 = "tag1";
String tag2 = "tag2";
String tag3 = "tag3";
tags.add(tag1);
tags.add(tag2);
tags.add(tag3);
Mockito.when(storagePoolDetailsDao.listDetails(storagePoolMockId)).thenReturn(tags);
Mockito.when(storagePoolTagsDao.getStoragePoolTags(storagePoolMockId)).thenReturn(tags);
String returnedStoragePoolTags = volumeApiServiceImpl.getStoragePoolTags(storagePoolMock);

View File

@ -925,6 +925,10 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkOrches
return false;
}
@Override
public void cleanupNicDhcpDnsEntry(Network network, VirtualMachineProfile vmProfile, NicProfile nicProfile) {
}
@Override
public void finalizeUpdateInSequence(Network network, boolean success) {
return;

View File

@ -20,6 +20,7 @@ package org.apache.cloudstack.direct.download;
import com.cloud.agent.AgentManager;
import com.cloud.host.dao.HostDao;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand.DownloadProtocol;
import org.junit.Assert;
import org.junit.Before;
@ -50,6 +51,26 @@ public class DirectDownloadManagerImplTest {
private static final String HTTP_HEADER_2 = "Accept-Encoding";
private static final String HTTP_VALUE_2 = "gzip";
private static final String VALID_CERTIFICATE =
"MIIDSzCCAjMCFDa0LoW+1O8/cEwCI0nIqfl8c1TLMA0GCSqGSIb3DQEBCwUAMGEx\n" +
"CzAJBgNVBAYTAkNTMQswCQYDVQQIDAJDUzELMAkGA1UEBwwCQ1MxCzAJBgNVBAoM\n" +
"AkNTMQswCQYDVQQLDAJDUzELMAkGA1UEAwwCQ1MxETAPBgkqhkiG9w0BCQEWAkNT\n" +
"MCAXDTE5MDQyNDE1NTIzNVoYDzIwOTgwOTE1MTU1MjM1WjBhMQswCQYDVQQGEwJD\n" +
"UzELMAkGA1UECAwCQ1MxCzAJBgNVBAcMAkNTMQswCQYDVQQKDAJDUzELMAkGA1UE\n" +
"CwwCQ1MxCzAJBgNVBAMMAkNTMREwDwYJKoZIhvcNAQkBFgJDUzCCASIwDQYJKoZI\n" +
"hvcNAQEBBQADggEPADCCAQoCggEBAKstLRcMGCo6+2hojRMjEuuimnWp27yfYhDU\n" +
"w/Cj03MJe/KCOhwsDqX82QNIr/bNtLdFf2ZJEUQd08sLLlHeUy9y5aOcxt9SGx2j\n" +
"xolqO4MBL7BW3dklO0IvjaEfBeFP6udz8ajeVur/iPPZb2Edd0zlXuHvDozfQisv\n" +
"bpuJImnTUVx0ReCXP075PBGvlqQXW2uEht+E/w3H8/2rra3JFV6J5xc77KyQSq2t\n" +
"1+2ZU7PJiy/rppXf5rjTvNm6ydfag8/av7lcgs2ntdkK4koAmkmROhAwNonlL7cD\n" +
"xIC83cKOqOFiQXSwr1IgoLf7zBNafKoTlSb/ev6Zt18BXEMLGpkCAwEAATANBgkq\n" +
"hkiG9w0BAQsFAAOCAQEAVS5uWZRz2m3yx7EUQm47RTMW5WMXU4pI8D+N5WZ9xubY\n" +
"OqtU3r2OAYpfL/QO8iT7jcqNYGoDqe8ZjEaNvfxiTG8cOI6TSXhKBG6hjSaSFQSH\n" +
"OZ5mfstM36y/3ENFh6JCJ2ao1rgWSbfDRyAaHuvt6aCkaV6zRq2OMEgoJqZSgwxL\n" +
"QO230xa2hYgKXOePMVZyHFA2oKJtSOc3jCke9Y8zDUwm0McGdMRBD8tVB0rcaOqQ\n" +
"0PlDLjB9sQuhhLu8vjdgbznmPbUmMG7JN0yhT1eJbIX5ImXyh0DoTwiaGcYwW6Sq\n" +
"YodjXACsC37xaQXAPYBiaAs4iI80TJSx1DVFO1LV0g==";
@Before
public void setUp() {
}
@ -103,4 +124,16 @@ public class DirectDownloadManagerImplTest {
Map<String, String> headers = manager.getHeadersFromDetails(details);
Assert.assertTrue(headers.isEmpty());
}
@Test
public void testCertificateSanityValidCertificate() {
String pretifiedCertificate = manager.getPretifiedCertificate(VALID_CERTIFICATE);
manager.certificateSanity(pretifiedCertificate);
}
@Test(expected = CloudRuntimeException.class)
public void testCertificateSanityInvalidCertificate() {
String pretifiedCertificate = manager.getPretifiedCertificate(VALID_CERTIFICATE + "xxx");
manager.certificateSanity(pretifiedCertificate);
}
}

View File

@ -1431,7 +1431,9 @@ AjaxViewer.prototype = {
if(e.shiftLeft)
modifiers |= AjaxViewer.LEFT_SHIFT_MASK;
if(e.metaKey)
// Don't set the meta key modifier if the control key is also pressed.
// For more details see https://github.com/apache/cloudstack/issues/3229
if(e.metaKey && !e.ctrlKey)
modifiers |= AjaxViewer.META_KEY_MASK;
return modifiers;

View File

@ -858,7 +858,7 @@ class CsForwardingRules(CsDataBag):
rule['protocol'],
rule['protocol'],
public_fwports,
hex(int(public_fwinterface[3:]))
hex(100 + int(public_fwinterface[3:]))
)
fw6 = "-A PREROUTING -d %s/32 -i %s -p %s -m %s --dport %s -m state --state NEW -j CONNMARK --save-mark --nfmask 0xffffffff --ctmask 0xffffffff" % \
(
@ -922,12 +922,12 @@ class CsForwardingRules(CsDataBag):
if device is None:
raise Exception("Ip address %s has no device in the ips databag" % rule["public_ip"])
self.fw.append(["mangle", "",
"-I PREROUTING -s %s/32 -m state --state NEW -j CONNMARK --save-mark --nfmask 0xffffffff --ctmask 0xffffffff" %
self.fw.append(["mangle", "front",
"-A PREROUTING -s %s/32 -m state --state NEW -j CONNMARK --save-mark --nfmask 0xffffffff --ctmask 0xffffffff" %
rule["internal_ip"]])
self.fw.append(["mangle", "",
"-I PREROUTING -s %s/32 -m state --state NEW -j MARK --set-xmark %s/0xffffffff" %
(rule["internal_ip"], hex(int(device[len("eth"):])))])
self.fw.append(["mangle", "front",
"-A PREROUTING -s %s/32 -m state --state NEW -j MARK --set-xmark %s/0xffffffff" %
(rule["internal_ip"], hex(100 + int(device[len("eth"):])))])
self.fw.append(["nat", "front",
"-A PREROUTING -d %s/32 -j DNAT --to-destination %s" % (rule["public_ip"], rule["internal_ip"])])
self.fw.append(["nat", "front",

View File

@ -258,7 +258,7 @@ class CsIP:
def __init__(self, dev, config):
self.dev = dev
self.dnum = hex(int(dev[3:]))
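# Offset the device number by 100 (eth0 -> 0x64), likely so no interface maps to mark 0x0, which iptables cannot distinguish from "unmarked"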
self.dnum = hex(100 + int(dev[3:]))
self.iplist = {}
self.address = {}
self.list()
@ -518,12 +518,11 @@ class CsIP:
if method == "add":
if not self.config.is_vpc():
# treat the first IP on an interface as a special case to set up the routing rules
if self.get_type() in ["public"] and (len(self.iplist) == 1):
CsHelper.execute("sudo ip route add throw " + self.config.address().dbag['eth0'][0]['network'] + " table " + tableName + " proto static")
CsHelper.execute("sudo ip route add throw " + self.config.address().dbag['eth1'][0]['network'] + " table " + tableName + " proto static")
if self.get_type() in ["public"]:
route.set_route("table %s throw %s proto static" % (tableName, self.config.address().dbag['eth0'][0]['network']))
route.set_route("table %s throw %s proto static" % (tableName, self.config.address().dbag['eth1'][0]['network']))
# add 'defaul via gateway' rule in the device specific routing table
# add 'default via gateway' rule in the device specific routing table
if "gateway" in self.address and self.address["gateway"] and self.address["gateway"] != "None":
route.add_route(self.dev, self.address["gateway"])
if "network" in self.address and self.address["network"]:

View File

@ -16,6 +16,7 @@
# under the License.
import CsHelper
import logging
import os
from netaddr import *
from random import randint
from CsGuestNetwork import CsGuestNetwork
@ -46,8 +47,8 @@ class CsDhcp(CsDataBag):
for item in self.dbag:
if item == "id":
continue
self.add(self.dbag[item])
self.write_hosts()
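# Entries flagged for removal are skipped, so released leases drop out of the regenerated dhcphosts file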
if not self.dbag[item]['remove']:
self.add(self.dbag[item])
self.configure_server()
@ -64,6 +65,8 @@ class CsDhcp(CsDataBag):
if restart_dnsmasq:
self.delete_leases()
self.write_hosts()
if not self.cl.is_redundant() or self.cl.is_master():
if restart_dnsmasq:
CsHelper.service("dnsmasq", "restart")
@ -114,10 +117,26 @@ class CsDhcp(CsDataBag):
idx += 1
def delete_leases(self):
macs_dhcphosts = []
try:
open(LEASES, 'w').close()
except IOError:
return
logging.info("Attempting to delete entries from dnsmasq.leases file for VMs which are not on dhcphosts file")
for host in open(DHCP_HOSTS):
macs_dhcphosts.append(host.split(',')[0])
removed = 0
for leaseline in open(LEASES):
lease = leaseline.split(' ')
mac = lease[1]
ip = lease[2]
if mac not in macs_dhcphosts:
cmd = "dhcp_release $(ip route get %s | grep eth | head -1 | awk '{print $3}') %s %s" % (ip, ip, mac)
logging.info(cmd)
CsHelper.execute(cmd)
removed = removed + 1
self.del_host(ip)
logging.info("Deleted %s entries from dnsmasq.leases file" % str(removed))
except Exception as e:
logging.error("Caught error while trying to delete entries from dnsmasq.leases file: %s" % e)
def preseed(self):
self.add_host("127.0.0.1", "localhost %s" % CsHelper.get_hostname())
@ -170,3 +189,7 @@ class CsDhcp(CsDataBag):
def add_host(self, ip, hosts):
self.hosts[ip] = hosts
def del_host(self, ip):
if ip in self.hosts:
self.hosts.pop(ip)

View File

@ -351,6 +351,33 @@ class CsRedundant(object):
interfaces = [interface for interface in self.address.get_interfaces() if interface.is_public()]
CsHelper.reconfigure_interfaces(self.cl, interfaces)
public_devices = list(set([interface.get_device() for interface in interfaces]))
if len(public_devices) > 1:
# Handle failure modes specific to routers with multiple public interfaces
public_devices.sort()
# Ensure the default route is added, or outgoing traffic from VMs with static NAT on
# the subsequent interfaces will go out from the wrong IP
route = CsRoute()
dev = ''
for interface in interfaces:
if dev == interface.get_device():
continue
dev = interface.get_device()
gateway = interface.get_gateway()
if gateway:
route.add_route(dev, gateway)
# The first public interface has a static MAC address between VRs. Subsequent ones don't,
# so an ARP announcement is needed on failover
for device in public_devices[1:]:
logging.info("Sending garp messages for IPs on %s" % device)
for interface in interfaces:
if interface.get_device() == device:
CsHelper.execute("arping -I %s -U %s -c 1" % (device, interface.get_ip()))
logging.info("Router switched to master mode")
def _collect_ignore_ips(self):

View File

@ -15,6 +15,7 @@
# specific language governing permissions and limitations
# under the License.
import logging
from netaddr import *
@ -26,16 +27,14 @@ def merge(dbag, data):
del(dbag[data['ipv4_address']])
else:
remove_keys = set()
for key, entry in dbag.iteritems():
if key != 'id' and entry['host_name'] == data['host_name']:
remove_keys.add(key)
break
for key, entry in dbag.iteritems():
if key != 'id' and entry['mac_address'] == data['mac_address']:
remove_keys.add(key)
break
if data['remove'] and key not in remove_keys:
remove_keys.add(key)
for remove_key in remove_keys:
del(dbag[remove_key])
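The dedup pass queues for removal any existing entry that collides with the incoming record on host_name or on mac_address, before the incoming record is (re)inserted under its ipv4_address key later in merge(); iteritems() keeps this script on Python 2, which the router scripts still target. A worked example with hypothetical data:

# vm-1 moved from 10.1.1.5 to 10.1.1.9: the old entry matches on
# host_name, lands in remove_keys, and is deleted before the new
# record is stored under '10.1.1.9'.
dbag = {
    'id': 'dhcpentry',
    '10.1.1.5': {'host_name': 'vm-1', 'mac_address': '02:00:4c:8f:00:01'},
}
data = {'ipv4_address': '10.1.1.9', 'host_name': 'vm-1',
        'mac_address': '02:00:4c:8f:00:09', 'remove': False}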

View File

@ -0,0 +1,227 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test for Direct Downloads of Templates and ISOs
"""
# Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import (cleanup_resources)
from marvin.lib.base import (ServiceOffering,
NetworkOffering,
Network,
Template,
VirtualMachine)
from marvin.lib.common import (get_pod,
get_zone)
from nose.plugins.attrib import attr
from marvin.cloudstackAPI import uploadTemplateDirectDownloadCertificate
from marvin.lib.decoratorGenerators import skipTestIf
class TestUploadDirectDownloadCertificates(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestUploadDirectDownloadCertificates, cls).getClsTestClient()
cls.apiclient = cls.testClient.getApiClient()
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls.dbclient = cls.testClient.getDbConnection()
cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.pod = get_pod(cls.apiclient, cls.zone.id)
cls.services = cls.testClient.getParsedTestDataConfig()
cls._cleanup = []
cls.hypervisorNotSupported = False
if cls.hypervisor.lower() not in ['kvm', 'lxc']:
cls.hypervisorNotSupported = True
if not cls.hypervisorNotSupported:
cls.certificates = {
"expired": "MIIDSTCCAjECFDi8s70TWFhwVN9cj67RJoAF99c8MA0GCSqGSIb3DQEBCwUAMGExCzAJBgNVBAYTAkNTMQswCQYDVQQIDAJDUzELMAkGA1UEBwwCQ1MxCzAJBgNVBAoMAkNTMQswCQYDVQQLDAJDUzELMAkGA1UEAwwCQ1MxETAPBgkqhkiG9w0BCQEWAkNTMB4XDTE5MDQyNDE1NTQxM1oXDTE5MDQyMjE1NTQxM1owYTELMAkGA1UEBhMCQ1MxCzAJBgNVBAgMAkNTMQswCQYDVQQHDAJDUzELMAkGA1UECgwCQ1MxCzAJBgNVBAsMAkNTMQswCQYDVQQDDAJDUzERMA8GCSqGSIb3DQEJARYCQ1MwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrLS0XDBgqOvtoaI0TIxLropp1qdu8n2IQ1MPwo9NzCXvygjocLA6l/NkDSK/2zbS3RX9mSRFEHdPLCy5R3lMvcuWjnMbfUhsdo8aJajuDAS+wVt3ZJTtCL42hHwXhT+rnc/Go3lbq/4jz2W9hHXdM5V7h7w6M30IrL26biSJp01FcdEXglz9O+TwRr5akF1trhIbfhP8Nx/P9q62tyRVeiecXO+yskEqtrdftmVOzyYsv66aV3+a407zZusnX2oPP2r+5XILNp7XZCuJKAJpJkToQMDaJ5S+3A8SAvN3CjqjhYkF0sK9SIKC3+8wTWnyqE5Um/3r+mbdfAVxDCxqZAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAG/R9sJ2pFbu35MliIJIhWkwP7FeP/7gYCNvOXFt6vVGXmcOwuw9WGBxsmsGESQRB4+NnJFjyGQ1Ck+ps5XRRMizyvq6bCQxVuC5M+vYS4J0q8YoL0RJ20pN9iwTsosZjSEKmfUlVgsufqCG2nyusV71LSaQU6f/bylJcJkKwGUhThExh+PVLZ66H5cF4/SzuK6WzWnj5p6+YX8TP+qPUkXN1mapgVKfVMo6mqLsH+eLKH+zqdy5ZZ5znNSbJFgHufYbEFlutTaxHEvKNMEgMCFkFGiyPwRuD6oaPnZFquJLh/mBZOLogpxVD5v20AcUTANtbXSlPaqOnEQFcbiVCb8=",
"invalid": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"valid": "MIIDSzCCAjMCFDa0LoW+1O8/cEwCI0nIqfl8c1TLMA0GCSqGSIb3DQEBCwUAMGExCzAJBgNVBAYTAkNTMQswCQYDVQQIDAJDUzELMAkGA1UEBwwCQ1MxCzAJBgNVBAoMAkNTMQswCQYDVQQLDAJDUzELMAkGA1UEAwwCQ1MxETAPBgkqhkiG9w0BCQEWAkNTMCAXDTE5MDQyNDE1NTIzNVoYDzIwOTgwOTE1MTU1MjM1WjBhMQswCQYDVQQGEwJDUzELMAkGA1UECAwCQ1MxCzAJBgNVBAcMAkNTMQswCQYDVQQKDAJDUzELMAkGA1UECwwCQ1MxCzAJBgNVBAMMAkNTMREwDwYJKoZIhvcNAQkBFgJDUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKstLRcMGCo6+2hojRMjEuuimnWp27yfYhDUw/Cj03MJe/KCOhwsDqX82QNIr/bNtLdFf2ZJEUQd08sLLlHeUy9y5aOcxt9SGx2jxolqO4MBL7BW3dklO0IvjaEfBeFP6udz8ajeVur/iPPZb2Edd0zlXuHvDozfQisvbpuJImnTUVx0ReCXP075PBGvlqQXW2uEht+E/w3H8/2rra3JFV6J5xc77KyQSq2t1+2ZU7PJiy/rppXf5rjTvNm6ydfag8/av7lcgs2ntdkK4koAmkmROhAwNonlL7cDxIC83cKOqOFiQXSwr1IgoLf7zBNafKoTlSb/ev6Zt18BXEMLGpkCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAVS5uWZRz2m3yx7EUQm47RTMW5WMXU4pI8D+N5WZ9xubYOqtU3r2OAYpfL/QO8iT7jcqNYGoDqe8ZjEaNvfxiTG8cOI6TSXhKBG6hjSaSFQSHOZ5mfstM36y/3ENFh6JCJ2ao1rgWSbfDRyAaHuvt6aCkaV6zRq2OMEgoJqZSgwxLQO230xa2hYgKXOePMVZyHFA2oKJtSOc3jCke9Y8zDUwm0McGdMRBD8tVB0rcaOqQ0PlDLjB9sQuhhLu8vjdgbznmPbUmMG7JN0yhT1eJbIX5ImXyh0DoTwiaGcYwW6SqYodjXACsC37xaQXAPYBiaAs4iI80TJSx1DVFO1LV0g=="
}
return
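The "expired" blob is a well-formed base64 DER certificate whose notAfter (2019-04-22) precedes its notBefore (2019-04-24), so it can never be valid, while the "invalid" blob does not decode to a certificate at all; the API is expected to reject both. One way to inspect such blobs locally, assuming the third-party cryptography package is available (it is not used by this test):

import base64
from cryptography import x509  # assumption: third-party package installed

def cert_validity(cert_b64):
    """Decode a base64 DER certificate and return its validity window."""
    cert = x509.load_der_x509_certificate(base64.b64decode(cert_b64))
    return cert.not_valid_before, cert.not_valid_after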
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@skipTestIf("hypervisorNotSupported")
@attr(tags=["advanced", "basic", "eip", "advancedns", "sg"], required_hardware="false")
def test_01_sanity_check_on_certificates(self):
"""Test Verify certificates before uploading to KVM hosts
"""
# Validate the following
# 1. Invalid certificates cannot be uploaded to hosts for direct downloads
# 2. Expired certificates cannot be uploaded to hosts for direct downloads
cmd = uploadTemplateDirectDownloadCertificate.uploadTemplateDirectDownloadCertificateCmd()
cmd.hypervisor = self.hypervisor
cmd.name = "marvin-test-verify-certs"
cmd.certificate = self.certificates["invalid"]
invalid_cert_upload_fails = False
expired_cert_upload_fails = False
try:
self.apiclient.uploadTemplateDirectDownloadCertificate(cmd)
self.fail("Invalid certificate must not be uploaded")
except Exception as e:
invalid_cert_upload_fails = True
cmd.certificate = self.certificates["expired"]
try:
self.apiclient.uploadTemplateDirectDownloadCertificate(cmd)
self.fail("Expired certificate must not be uploaded")
except Exception as e:
expired_cert_upload_fails = True
self.assertTrue(invalid_cert_upload_fails and expired_cert_upload_fails,
"Invalid or expired certificates must not be uploaded")
return
@skipTestIf("hypervisorNotSupported")
@attr(tags=["advanced", "basic", "eip", "advancedns", "sg"], required_hardware="false")
def test_02_upload_direct_download_certificates(self):
"""Test Upload certificates to KVM hosts for direct download
"""
# Validate the following
# 1. Valid certificates are uploaded to hosts
cmd = uploadTemplateDirectDownloadCertificate.uploadTemplateDirectDownloadCertificateCmd()
cmd.hypervisor = self.hypervisor
cmd.name = "marvin-test-verify-certs"
cmd.certificate = self.certificates["valid"]
try:
self.apiclient.uploadTemplateDirectDownloadCertificate(cmd)
except Exception as e:
self.fail("Valid certificate must be uploaded")
return
class TestDirectDownloadTemplates(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestDirectDownloadTemplates, cls).getClsTestClient()
cls.apiclient = cls.testClient.getApiClient()
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls.dbclient = cls.testClient.getDbConnection()
cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.pod = get_pod(cls.apiclient, cls.zone.id)
cls.services = cls.testClient.getParsedTestDataConfig()
cls._cleanup = []
cls.hypervisorNotSupported = False
if cls.hypervisor.lower() not in ['kvm', 'lxc']:
cls.hypervisorNotSupported = True
if not cls.hypervisorNotSupported:
cls.services["test_templates"]["kvm"]["directdownload"] = "true"
cls.template = Template.register(cls.apiclient, cls.services["test_templates"]["kvm"],
zoneid=cls.zone.id, hypervisor=cls.hypervisor)
cls._cleanup.append(cls.template)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.services["virtual_machine"]["hypervisor"] = cls.hypervisor
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.services["service_offerings"]["tiny"]
)
cls._cleanup.append(cls.service_offering)
cls.network_offering = NetworkOffering.create(
cls.apiclient,
cls.services["l2-network_offering"],
)
cls.network_offering.update(cls.apiclient, state='Enabled')
cls.services["network"]["networkoffering"] = cls.network_offering.id
cls.l2_network = Network.create(
cls.apiclient,
cls.services["l2-network"],
zoneid=cls.zone.id,
networkofferingid=cls.network_offering.id
)
cls._cleanup.append(cls.l2_network)
cls._cleanup.append(cls.network_offering)
return
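With directdownload set to "true" at registration, the template is not staged on secondary storage; the KVM host fetches it directly from the source URL at deployment time. A quick sanity check could be appended here (sketch; it assumes the direct download flag is exposed as directdownload on the listTemplates response):

# Sketch: confirm the registered template carries the direct download flag.
templates = Template.list(cls.apiclient, id=cls.template.id,
                          templatefilter='self', zoneid=cls.zone.id)
assert templates and templates[0].directdownload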
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@skipTestIf("hypervisorNotSupported")
@attr(tags=["advanced", "basic", "eip", "advancedns", "sg"], required_hardware="false")
def test_01_deploy_vm_from_direct_download_template(self):
"""Test Deploy VM from direct download template
"""
# Validate the following
# 1. Register direct download template
# 2. Deploy VM from direct download template
vm = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
serviceofferingid=self.service_offering.id,
networkids=self.l2_network.id
)
self.assertEqual(
vm.state,
"Running",
"Check VM deployed from direct download template is running"
)
self.cleanup.append(vm)
return

Some files were not shown because too many files have changed in this diff.