CLOUDSTACK-6170

Mike Tutkowski 2014-03-11 13:08:00 -06:00
parent 5e4a4e8b34
commit d55c4dd804
10 changed files with 175 additions and 58 deletions

View File

@@ -482,41 +482,50 @@ public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd {
                     String minIops = (String)map.get("minIops");
                     String maxIops = (String)map.get("maxIops");

-                    if ((minIops != null && maxIops == null) || (minIops == null && maxIops != null)) {
-                        throw new InvalidParameterValueException("Either 'Min IOPS' and 'Max IOPS' must both be specified or neither be specified.");
-                    }
-
-                    long lMinIops;
-
-                    try {
-                        if (minIops != null) {
-                            lMinIops = Long.valueOf(minIops);
-                        }
-                        else {
-                            lMinIops = 0;
-                        }
-                    }
-                    catch (NumberFormatException ex) {
-                        throw new InvalidParameterValueException("'Min IOPS' must be a whole number.");
-                    }
-
-                    long lMaxIops;
-
-                    try {
-                        if (maxIops != null) {
-                            lMaxIops = Long.valueOf(maxIops);
-                        }
-                        else {
-                            lMaxIops = 0;
-                        }
-                    }
-                    catch (NumberFormatException ex) {
-                        throw new InvalidParameterValueException("'Max IOPS' must be a whole number.");
-                    }
-
-                    if (lMinIops > lMaxIops) {
-                        throw new InvalidParameterValueException("'Min IOPS' must be less than or equal to 'Max IOPS'.");
-                    }
+                    verifyMinAndMaxIops(minIops, maxIops);
+
+                    minIops = (String)map.get("minIopsDo");
+                    maxIops = (String)map.get("maxIopsDo");
+
+                    verifyMinAndMaxIops(minIops, maxIops);
                 }
             }
+
+    private void verifyMinAndMaxIops(String minIops, String maxIops) {
+        if ((minIops != null && maxIops == null) || (minIops == null && maxIops != null)) {
+            throw new InvalidParameterValueException("Either 'Min IOPS' and 'Max IOPS' must both be specified or neither be specified.");
+        }
+
+        long lMinIops;
+
+        try {
+            if (minIops != null) {
+                lMinIops = Long.valueOf(minIops);
+            }
+            else {
+                lMinIops = 0;
+            }
+        }
+        catch (NumberFormatException ex) {
+            throw new InvalidParameterValueException("'Min IOPS' must be a whole number.");
+        }
+
+        long lMaxIops;
+
+        try {
+            if (maxIops != null) {
+                lMaxIops = Long.valueOf(maxIops);
+            }
+            else {
+                lMaxIops = 0;
+            }
+        }
+        catch (NumberFormatException ex) {
+            throw new InvalidParameterValueException("'Max IOPS' must be a whole number.");
+        }
+
+        if (lMinIops > lMaxIops) {
+            throw new InvalidParameterValueException("'Min IOPS' must be less than or equal to 'Max IOPS'.");
+        }
+    }
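For reference, a minimal, self-contained sketch of the rules the new private verifyMinAndMaxIops(...) helper enforces, runnable outside CloudStack. The class name and the plain IllegalArgumentException stand in for the command class and InvalidParameterValueException and are not part of this commit. DeployVMCmd above calls the helper twice: once for the root-disk detail keys "minIops"/"maxIops" and once for the new data-disk keys "minIopsDo"/"maxIopsDo".

// Illustrative sketch only: mirrors the checks consolidated into verifyMinAndMaxIops(...).
public class MinMaxIopsValidationSketch {

    static void verifyMinAndMaxIops(String minIops, String maxIops) {
        // Both values must be supplied together, or both omitted.
        if ((minIops != null && maxIops == null) || (minIops == null && maxIops != null)) {
            throw new IllegalArgumentException("Either 'Min IOPS' and 'Max IOPS' must both be specified or neither be specified.");
        }

        long lMinIops;
        try {
            lMinIops = minIops != null ? Long.valueOf(minIops) : 0;
        } catch (NumberFormatException ex) {
            throw new IllegalArgumentException("'Min IOPS' must be a whole number.");
        }

        long lMaxIops;
        try {
            lMaxIops = maxIops != null ? Long.valueOf(maxIops) : 0;
        } catch (NumberFormatException ex) {
            throw new IllegalArgumentException("'Max IOPS' must be a whole number.");
        }

        if (lMinIops > lMaxIops) {
            throw new IllegalArgumentException("'Min IOPS' must be less than or equal to 'Max IOPS'.");
        }
    }

    public static void main(String[] args) {
        verifyMinAndMaxIops("500", "1000");  // ok: both supplied, min <= max
        verifyMinAndMaxIops(null, null);     // ok: IOPS not customized
        verifyMinAndMaxIops("1000", "500");  // throws: min greater than max
    }
}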

View File

@@ -36,7 +36,6 @@ import com.cloud.exception.OperationTimedoutException;
 import com.cloud.exception.ResourceUnavailableException;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.network.Network;
-import com.cloud.offering.DiskOffering;
 import com.cloud.offering.DiskOfferingInfo;
 import com.cloud.offering.ServiceOffering;
 import com.cloud.storage.StoragePool;
@@ -77,7 +76,7 @@ public interface VirtualMachineManager extends Manager {
      * @throws InsufficientCapacityException If there are insufficient capacity to deploy this vm.
      */
     void allocate(String vmInstanceName, VirtualMachineTemplate template, ServiceOffering serviceOffering, DiskOfferingInfo rootDiskOfferingInfo,
-        LinkedHashMap<? extends DiskOffering, Long> dataDiskOfferings, LinkedHashMap<? extends Network, List<? extends NicProfile>> auxiliaryNetworks, DeploymentPlan plan,
+        List<DiskOfferingInfo> dataDiskOfferings, LinkedHashMap<? extends Network, List<? extends NicProfile>> auxiliaryNetworks, DeploymentPlan plan,
         HypervisorType hyperType) throws InsufficientCapacityException;

     void allocate(String vmInstanceName, VirtualMachineTemplate template, ServiceOffering serviceOffering,
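As a usage note (not from this commit): callers of the reworked allocate(...) now hand over one DiskOfferingInfo per data disk instead of a LinkedHashMap of offerings to sizes. A rough fragment, assuming the CloudStack types above are on the classpath and that someDiskOffering was resolved elsewhere by the caller:

// Hypothetical fragment: building the dataDiskOfferings argument for allocate(...).
// The offering, size, and IOPS values are placeholders, not taken from the commit.
List<DiskOfferingInfo> dataDiskOfferings = new ArrayList<DiskOfferingInfo>();

DiskOfferingInfo dataDiskOfferingInfo = new DiskOfferingInfo();
dataDiskOfferingInfo.setDiskOffering(someDiskOffering); // a DiskOffering resolved by the caller
dataDiskOfferingInfo.setSize(20L);                       // requested size in GB
dataDiskOfferingInfo.setMinIops(500L);                   // null when IOPS are not customized
dataDiskOfferingInfo.setMaxIops(1000L);

dataDiskOfferings.add(dataDiskOfferingInfo);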

View File

@@ -86,7 +86,7 @@ public interface VolumeOrchestrationService {
     void destroyVolume(Volume volume);

-    DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, VirtualMachine vm, VirtualMachineTemplate template, Account owner);
+    DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner);

     VolumeInfo createVolumeOnPrimaryStorage(VirtualMachine vm, Volume rootVolumeOfVm, VolumeInfo volume, HypervisorType rootDiskHyperType) throws NoTransitionException;
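A similar caller-side fragment (hypothetical, not from the commit) for the widened allocateRawVolume(...) overload; passing null for minIops/maxIops falls back to the offering's own values, per the VolumeOrchestrator change further down:

// Hypothetical fragment: volumeMgr, diskOffering, vm, template and owner are assumed
// to be in scope; the literal size and IOPS values are placeholders.
DiskProfile dataDisk = volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + vm.getId(),
    diskOffering, 20L,   // size in GB
    500L, 1000L,         // minIops/maxIops, or null to use the offering's defaults
    vm, template, owner);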

View File

@@ -154,7 +154,6 @@ import com.cloud.network.NetworkModel;
 import com.cloud.network.dao.NetworkDao;
 import com.cloud.network.dao.NetworkVO;
 import com.cloud.network.rules.RulesManager;
-import com.cloud.offering.DiskOffering;
 import com.cloud.offering.DiskOfferingInfo;
 import com.cloud.offering.ServiceOffering;
 import com.cloud.org.Cluster;
@@ -376,7 +375,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
     @Override
     @DB
     public void allocate(String vmInstanceName, final VirtualMachineTemplate template, ServiceOffering serviceOffering,
-        final DiskOfferingInfo rootDiskOfferingInfo, LinkedHashMap<? extends DiskOffering, Long> dataDiskOfferings,
+        final DiskOfferingInfo rootDiskOfferingInfo, final List<DiskOfferingInfo> dataDiskOfferings,
         final LinkedHashMap<? extends Network, List<? extends NicProfile>> auxiliaryNetworks, DeploymentPlan plan, HypervisorType hyperType)
         throws InsufficientCapacityException {
@@ -393,8 +392,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         }

         assert (plan.getClusterId() == null && plan.getPoolId() == null) : "We currently don't support cluster and pool preset yet";

         final VMInstanceVO vmFinal = _vmDao.persist(vm);
-        final LinkedHashMap<? extends DiskOffering, Long> dataDiskOfferingsFinal =
-            dataDiskOfferings == null ? new LinkedHashMap<DiskOffering, Long>() : dataDiskOfferings;

         final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vmFinal, template, serviceOffering, null, null);
@@ -416,8 +413,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 }

                 if (template.getFormat() == ImageFormat.ISO) {
-                    volumeMgr.allocateRawVolume(Type.ROOT, "ROOT-" + vmFinal.getId(), rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(), vmFinal,
-                        template, owner);
+                    volumeMgr.allocateRawVolume(Type.ROOT, "ROOT-" + vmFinal.getId(), rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(),
+                        rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), vmFinal, template, owner);
                 } else if (template.getFormat() == ImageFormat.BAREMETAL) {
                     // Do nothing
                 } else {
@@ -425,8 +422,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                         rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), template, vmFinal, owner);
                 }

-                for (Map.Entry<? extends DiskOffering, Long> offering : dataDiskOfferingsFinal.entrySet()) {
-                    volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + vmFinal.getId(), offering.getKey(), offering.getValue(), vmFinal, template, owner);
-                }
+                if (dataDiskOfferings != null) {
+                    for (DiskOfferingInfo dataDiskOfferingInfo : dataDiskOfferings) {
+                        volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + vmFinal.getId(), dataDiskOfferingInfo.getDiskOffering(), dataDiskOfferingInfo.getSize(),
+                            dataDiskOfferingInfo.getMinIops(), dataDiskOfferingInfo.getMaxIops(), vmFinal, template, owner);
+                    }
+                }
             }
         });
@@ -439,7 +439,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
     @Override
     public void allocate(String vmInstanceName, VirtualMachineTemplate template, ServiceOffering serviceOffering,
         LinkedHashMap<? extends Network, List<? extends NicProfile>> networks, DeploymentPlan plan, HypervisorType hyperType) throws InsufficientCapacityException {
-        allocate(vmInstanceName, template, serviceOffering, new DiskOfferingInfo(serviceOffering), null, networks, plan, hyperType);
+        allocate(vmInstanceName, template, serviceOffering, new DiskOfferingInfo(serviceOffering), new ArrayList<DiskOfferingInfo>(), networks, plan, hyperType);
     }

     private VirtualMachineGuru getVmGuru(VirtualMachine vm) {

View File

@@ -45,7 +45,6 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.network.Network;
 import com.cloud.network.dao.NetworkDao;
 import com.cloud.network.dao.NetworkVO;
-import com.cloud.offering.DiskOffering;
 import com.cloud.offering.DiskOfferingInfo;
 import com.cloud.service.ServiceOfferingVO;
 import com.cloud.service.dao.ServiceOfferingDao;
@@ -181,14 +180,14 @@ public class CloudOrchestrator implements OrchestrationService {
         // Else, a disk offering is optional, and if present will be used to create the data disk

         DiskOfferingInfo rootDiskOfferingInfo = new DiskOfferingInfo();
-        LinkedHashMap<DiskOfferingVO, Long> dataDiskOfferings = new LinkedHashMap<DiskOfferingVO, Long>();
+        List<DiskOfferingInfo> dataDiskOfferings = new ArrayList<DiskOfferingInfo>();

-        ServiceOfferingVO offering = _serviceOfferingDao.findById(vm.getId(), vm.getServiceOfferingId());
+        ServiceOfferingVO computeOffering = _serviceOfferingDao.findById(vm.getId(), vm.getServiceOfferingId());

-        rootDiskOfferingInfo.setDiskOffering(offering);
+        rootDiskOfferingInfo.setDiskOffering(computeOffering);
         rootDiskOfferingInfo.setSize(rootDiskSize);

-        if (offering.isCustomizedIops() != null && offering.isCustomizedIops()) {
+        if (computeOffering.isCustomizedIops() != null && computeOffering.isCustomizedIops()) {
             Map<String, String> userVmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId());

             if (userVmDetails != null) {
@@ -213,10 +212,28 @@
                     }

                     _volumeMgr.validateVolumeSizeRange(size * 1024 * 1024 * 1024);
                 }

-                dataDiskOfferings.put(diskOffering, size);
+                DiskOfferingInfo dataDiskOfferingInfo = new DiskOfferingInfo();
+
+                dataDiskOfferingInfo.setDiskOffering(diskOffering);
+                dataDiskOfferingInfo.setSize(size);
+
+                if (diskOffering.isCustomizedIops() != null && diskOffering.isCustomizedIops()) {
+                    Map<String, String> userVmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId());
+
+                    if (userVmDetails != null) {
+                        String minIops = userVmDetails.get("minIopsDo");
+                        String maxIops = userVmDetails.get("maxIopsDo");
+
+                        dataDiskOfferingInfo.setMinIops(minIops != null && minIops.trim().length() > 0 ? Long.parseLong(minIops) : null);
+                        dataDiskOfferingInfo.setMaxIops(maxIops != null && maxIops.trim().length() > 0 ? Long.parseLong(maxIops) : null);
+                    }
+                }
+
+                dataDiskOfferings.add(dataDiskOfferingInfo);
             }

-            _itMgr.allocate(vm.getInstanceName(), _templateDao.findById(new Long(templateId)), offering, rootDiskOfferingInfo, dataDiskOfferings, networkIpMap, plan,
+            _itMgr.allocate(vm.getInstanceName(), _templateDao.findById(new Long(templateId)), computeOffering, rootDiskOfferingInfo, dataDiskOfferings, networkIpMap, plan,
                 hypervisorType);

         return vmEntity;
@@ -234,13 +251,12 @@
         //load vm instance and offerings and call virtualMachineManagerImpl
         VMInstanceVO vm = _vmDao.findByUuid(id);

-        ServiceOfferingVO offering = _serviceOfferingDao.findById(vm.getId(), vm.getServiceOfferingId());
+        ServiceOfferingVO computeOffering = _serviceOfferingDao.findById(vm.getId(), vm.getServiceOfferingId());

         DiskOfferingInfo rootDiskOfferingInfo = new DiskOfferingInfo();

-        rootDiskOfferingInfo.setDiskOffering(offering);
-
-        LinkedHashMap<DiskOffering, Long> dataDiskOfferings = new LinkedHashMap<DiskOffering, Long>();
+        rootDiskOfferingInfo.setDiskOffering(computeOffering);

         Long diskOfferingId = vm.getDiskOfferingId();
         if (diskOfferingId == null) {
             throw new InvalidParameterValueException("Installing from ISO requires a disk offering to be specified for the root disk.");
@@ -261,6 +277,18 @@
         rootDiskOfferingInfo.setDiskOffering(diskOffering);
         rootDiskOfferingInfo.setSize(size);

+        if (diskOffering.isCustomizedIops() != null && diskOffering.isCustomizedIops()) {
+            Map<String, String> userVmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId());
+
+            if (userVmDetails != null) {
+                String minIops = userVmDetails.get("minIopsDo");
+                String maxIops = userVmDetails.get("maxIopsDo");
+
+                rootDiskOfferingInfo.setMinIops(minIops != null && minIops.trim().length() > 0 ? Long.parseLong(minIops) : null);
+                rootDiskOfferingInfo.setMaxIops(maxIops != null && maxIops.trim().length() > 0 ? Long.parseLong(maxIops) : null);
+            }
+        }
+
         LinkedHashMap<Network, List<? extends NicProfile>> networkIpMap = new LinkedHashMap<Network, List<? extends NicProfile>>();
         for (String uuid : networkNicMap.keySet()) {
             NetworkVO network = _networkDao.findByUuid(uuid);
@@ -271,7 +299,7 @@
         HypervisorType hypervisorType = HypervisorType.valueOf(hypervisor);

-        _itMgr.allocate(vm.getInstanceName(), _templateDao.findById(new Long(isoId)), offering, rootDiskOfferingInfo, dataDiskOfferings, networkIpMap, plan, hypervisorType);
+        _itMgr.allocate(vm.getInstanceName(), _templateDao.findById(new Long(isoId)), computeOffering, rootDiskOfferingInfo, new ArrayList<DiskOfferingInfo>(), networkIpMap, plan, hypervisorType);

         return vmEntity;
     }
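A standalone sketch (not part of the commit) of the detail-string handling used above for the "minIopsDo"/"maxIopsDo" user VM details: blank or missing values become null so that downstream code falls back to the disk offering's IOPS. The helper name is hypothetical; the commit inlines the same ternary.

// Illustrative only: runnable without CloudStack on the classpath.
public class IopsDetailParseSketch {
    // Hypothetical helper; the commit writes this as an inline ternary expression.
    static Long parseIopsDetail(String value) {
        return value != null && value.trim().length() > 0 ? Long.parseLong(value) : null;
    }

    public static void main(String[] args) {
        System.out.println(parseIopsDetail("500"));   // 500
        System.out.println(parseIopsDetail("  "));    // null -> fall back to the offering
        System.out.println(parseIopsDetail(null));    // null -> fall back to the offering
    }
}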

View File

@@ -569,14 +569,17 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
     }

     @Override
-    public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, VirtualMachine vm, VirtualMachineTemplate template, Account owner) {
+    public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner) {
         if (size == null) {
             size = offering.getDiskSize();
         } else {
             size = (size * 1024 * 1024 * 1024);
         }
-        VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(), owner.getDomainId(), owner.getId(), offering.getId(), size, offering.getMinIops(), offering.getMaxIops(),
-            null);
+
+        minIops = minIops != null ? minIops : offering.getMinIops();
+        maxIops = maxIops != null ? maxIops : offering.getMaxIops();
+
+        VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(), owner.getDomainId(), owner.getId(), offering.getId(), size, minIops, maxIops, null);
+
         if (vm != null) {
             vol.setInstanceId(vm.getId());
         }
@@ -1151,7 +1154,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         StoragePoolVO storagePool = _storagePoolDao.findById(destPool.getId());

-        if (newVol.getVolumeType() == Type.DATADISK && storagePool.isManaged()) {
+        if (storagePool.isManaged()) {
             long hostId = vm.getVirtualMachine().getHostId();

             Host host = _hostDao.findById(hostId);
View File

@@ -6057,6 +6057,13 @@ label.error {
   border-radius: 4px;
 }

+.multi-wizard.instance-wizard .section.custom-iops-do {
+  position: relative;
+  background: #F4F4F4;
+  padding: 7px;
+  border-radius: 4px;
+}
+
 .multi-wizard.instance-wizard .section.custom-size input[type=radio] {
   float: left;
 }
@@ -6073,6 +6080,12 @@ label.error {
   margin: 6px -1px 0 8px;
 }

+.multi-wizard.instance-wizard .section.custom-iops-do input[type=text] {
+  float: left;
+  width: 28px;
+  margin: 6px -1px 0 8px;
+}
+
 .multi-wizard.instance-wizard .section.custom-size label.error {
   position: absolute;
   top: 29px;
@@ -6080,6 +6093,33 @@ label.error {
   font-size: 10px;
 }

+.instance-wizard .step.data-disk-offering.custom-iops-do .select-container {
+  height: 235px;
+}
+
+.instance-wizard .step.data-disk-offering .custom-iops-do {
+  display: none;
+}
+
+.instance-wizard .step.data-disk-offering.custom-iops-do .custom-iops-do {
+  display: block;
+}
+
+.instance-wizard .step.data-disk-offering .custom-iops-do .field {
+  width: 30%;
+  float: left;
+  margin-bottom: 13px;
+}
+
+.instance-wizard .step.data-disk-offering .custom-iops-do .field label {
+  text-indent: 20px;
+}
+
+.instance-wizard .step.data-disk-offering .custom-iops-do .field input {
+  width: 88%;
+  margin-left: 26px;
+}
+
 /*** Compute offering*/
 .instance-wizard .step.service-offering {
 }

View File

@@ -221,7 +221,7 @@
                     <input type="text" class="required disallowSpecialCharacters" name="compute-memory" />
                 </div>
             </div>
-            <!-- Custom iops slider -->
+            <!-- Custom iops -->
             <div class="section custom-iops">
                 <div class="field">
                     <label><fmt:message key="label.disk.iops.min"/></label>
@@ -259,6 +259,17 @@
                     <input type="text" class="required digits" name="size" value="1" />
                     <label class="size">GB</label>
                 </div>
+                <!-- Custom iops -->
+                <div class="section custom-iops-do">
+                    <div class="field">
+                        <label><fmt:message key="label.disk.iops.min"/></label>
+                        <input type="text" class="disallowSpecialCharacters" name="disk-min-iops-do" />
+                    </div>
+                    <div class="field">
+                        <label><fmt:message key="label.disk.iops.max"/></label>
+                        <input type="text" class="disallowSpecialCharacters" name="disk-max-iops-do" />
+                    </div>
+                </div>
             </div>
         </div>

View File

@@ -292,6 +292,9 @@
                         selectedHypervisor = args.currentData.hypervisorid;
                     }

+                    // if the user is leveraging a template, then we can show custom IOPS, if applicable
+                    var canShowCustomIopsForServiceOffering = (args.currentData["select-template"] != "select-iso" ? true : false);
+
                     $.ajax({
                         url: createURL("listServiceOfferings&issystem=false"),
                         dataType: "json",
@@ -299,6 +302,7 @@
                         success: function(json) {
                             serviceOfferingObjs = json.listserviceofferingsresponse.serviceoffering;
                             args.response.success({
+                                canShowCustomIops: canShowCustomIopsForServiceOffering,
                                 customFlag: 'iscustomized',
                                 //customFlag: 'offerha', //for testing only
                                 customIopsFlag: 'iscustomizediops',
@@ -322,6 +326,7 @@
                     args.response.success({
                         required: isRequred,
                         customFlag: 'iscustomized', // Field determines if custom slider is shown
+                        customIopsDoFlag: 'iscustomizediops',
                         data: {
                             diskOfferings: diskOfferingObjs
                         }
@@ -657,6 +662,20 @@
                             size : args.data.size
                         });
                     }
+
+                    if (selectedDiskOfferingObj.iscustomizediops == true) {
+                        if (args.$wizard.find('input[name=disk-min-iops-do]').val().length > 0) {
+                            $.extend(deployVmData, {
+                                'details[0].minIopsDo' : args.$wizard.find('input[name=disk-min-iops-do]').val()
+                            });
+                        }
+
+                        if (args.$wizard.find('input[name=disk-max-iops-do]').val().length > 0) {
+                            $.extend(deployVmData, {
+                                'details[0].maxIopsDo' : args.$wizard.find('input[name=disk-max-iops-do]').val()
+                            });
+                        }
+                    }
                 }

                 //step 5: select an affinity group

View File

@@ -460,7 +460,7 @@
                 var customIops = item[args.customIopsFlag];

-                if (customIops) {
+                if (customIops && args.canShowCustomIops) {
                     $step.addClass('custom-iops');
                 } else {
                     $step.removeClass('custom-iops');
@@ -556,6 +556,14 @@
                     $step.removeClass('custom-disk-size');
                 }

+                var customIops = item[args.customIopsDoFlag];
+
+                if (customIops) {
+                    $step.addClass('custom-iops-do');
+                } else {
+                    $step.removeClass('custom-iops-do');
+                }
+
                 return true;
             });