merge LTS branch 4.22 into main
commit 9032fe3fb5

24 agent/conf/uefi.properties.in Normal file
@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Configuration file for UEFI
+
+guest.nvram.template.legacy=@GUESTNVRAMTEMPLATELEGACY@
+guest.loader.legacy=@GUESTLOADERLEGACY@
+guest.nvram.template.secure=@GUESTNVRAMTEMPLATESECURE@
+guest.loader.secure=@GUESTLOADERSECURE@
+guest.nvram.path=@GUESTNVRAMPATH@
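Note: the @...@ tokens are build-time substitution placeholders; packaging fills them in before the file lands at /etc/cloudstack/agent/uefi.properties (see the debian hunks below). A minimal sketch of reading the resulting file with java.util.Properties — the class and the hard-coded path are illustrative, not the actual agent wiring:

import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

// Hypothetical reader for the installed uefi.properties; the real agent code may differ.
public class UefiPropertiesExample {
    public static Properties load(String path) throws IOException {
        Properties props = new Properties();
        try (FileInputStream in = new FileInputStream(path)) {
            props.load(in);
        }
        return props;
    }

    public static void main(String[] args) throws IOException {
        Properties p = load("/etc/cloudstack/agent/uefi.properties");
        // e.g. pick the secure-boot loader and NVRAM template for a secure-boot guest
        System.out.println(p.getProperty("guest.loader.secure"));
        System.out.println(p.getProperty("guest.nvram.template.secure"));
    }
}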
@@ -53,7 +53,7 @@ public class ListPublicIpAddressesCmd extends BaseListRetrieveOnlyResourceCountC
     @Parameter(name = ApiConstants.ALLOCATED_ONLY, type = CommandType.BOOLEAN, description = "limits search results to allocated public IP addresses")
     private Boolean allocatedOnly;

-    @Parameter(name = ApiConstants.STATE, type = CommandType.STRING, description = "lists all public IP addresses by state")
+    @Parameter(name = ApiConstants.STATE, type = CommandType.STRING, description = "lists all public IP addresses by state. A comma-separated list of states can be passed")
     private String state;

     @Parameter(name = ApiConstants.FOR_VIRTUAL_NETWORK, type = CommandType.BOOLEAN, description = "the virtual network for the IP address")
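With the broadened description, the state argument may now carry several values. A hedged sketch of how such a comma-separated value can be normalized before filtering; this helper is illustrative only and not the actual server-side handler:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

// Illustrative only: splits "Allocated,Free" into a trimmed list of state names.
public class StateParamExample {
    public static List<String> parseStates(String state) {
        if (state == null || state.isBlank()) {
            return List.of();
        }
        return Arrays.stream(state.split(","))
                .map(String::trim)
                .filter(s -> !s.isEmpty())
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        System.out.println(parseStates("Allocated, Reserved,Free")); // [Allocated, Reserved, Free]
    }
}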
1 debian/cloudstack-agent.install vendored

@@ -16,6 +16,7 @@
 # under the License.

 /etc/cloudstack/agent/agent.properties
+/etc/cloudstack/agent/uefi.properties
 /etc/cloudstack/agent/environment.properties
 /etc/cloudstack/agent/log4j-cloud.xml
 /etc/default/cloudstack-agent
2 debian/cloudstack-agent.postinst vendored

@@ -23,7 +23,7 @@ case "$1" in
     configure)
         OLDCONFDIR="/etc/cloud/agent"
         NEWCONFDIR="/etc/cloudstack/agent"
-        CONFFILES="agent.properties log4j.xml log4j-cloud.xml"
+        CONFFILES="agent.properties uefi.properties log4j.xml log4j-cloud.xml"

         mkdir -m 0755 -p /usr/share/cloudstack-agent/tmp
2 debian/control vendored

@@ -24,7 +24,7 @@ Description: CloudStack server library

 Package: cloudstack-agent
 Architecture: all
-Depends: ${python:Depends}, ${python3:Depends}, openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), lsb-base (>= 9), openssh-client, qemu-kvm (>= 2.5) | qemu-system-x86 (>= 5.2), libvirt-bin (>= 1.3) | libvirt-daemon-system (>= 3.0), iproute2, ebtables, vlan, ipset, python3-libvirt, ethtool, iptables, cryptsetup, rng-tools, rsync, lsb-release, ufw, apparmor, cpu-checker, libvirt-daemon-driver-storage-rbd, sysstat
+Depends: ${python:Depends}, ${python3:Depends}, openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), lsb-base (>= 9), openssh-client, qemu-kvm (>= 2.5) | qemu-system-x86 (>= 5.2), libvirt-bin (>= 1.3) | libvirt-daemon-system (>= 3.0), iproute2, ebtables, vlan, ipset, python3-libvirt, ethtool, iptables, cryptsetup, rng-tools, rsync, ovmf, swtpm, lsb-release, ufw, apparmor, cpu-checker, libvirt-daemon-driver-storage-rbd, sysstat
 Recommends: init-system-helpers
 Conflicts: cloud-agent, cloud-agent-libs, cloud-agent-deps, cloud-agent-scripts
 Description: CloudStack agent
@@ -94,6 +94,14 @@ public class UsageEventUtils {

     }

+    public static void publishUsageEvent(String usageType, long accountId, long zoneId, long resourceId, String resourceName, Long offeringId, Long templateId,
+                                         Long size, String entityType, String entityUUID, Long vmId, boolean displayResource) {
+        if (displayResource) {
+            saveUsageEvent(usageType, accountId, zoneId, resourceId, offeringId, templateId, size, vmId, resourceName);
+        }
+        publishUsageEvent(usageType, accountId, zoneId, entityType, entityUUID);
+    }
+
     public static void publishUsageEvent(String usageType, long accountId, long zoneId, long resourceId, String resourceName, Long offeringId, Long templateId,
                                          Long size, Long virtualSize, String entityType, String entityUUID, Map<String, String> details) {
         saveUsageEvent(usageType, accountId, zoneId, resourceId, resourceName, offeringId, templateId, size, virtualSize, details);

@@ -202,6 +210,10 @@ public class UsageEventUtils {
         s_usageEventDao.persist(new UsageEventVO(usageType, accountId, zoneId, vmId, securityGroupId));
     }

+    public static void saveUsageEvent(String usageType, long accountId, long zoneId, long resourceId, Long offeringId, Long templateId, Long size, Long vmId, String resourceName) {
+        s_usageEventDao.persist(new UsageEventVO(usageType, accountId, zoneId, resourceId, offeringId, templateId, size, vmId, resourceName));
+    }
+
     private static void publishUsageEvent(String usageEventType, Long accountId, Long zoneId, String resourceType, String resourceUUID) {
         String configKey = "publish.usage.events";
         String value = s_configDao.getValue(configKey);
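The new overload threads a vmId through to the persisted usage event; the VolumeOrchestrator hunks below switch their call sites to pass vol.getInstanceId(). A hypothetical call site against the new signature (all identifiers are placeholders, not real CloudStack code):

import com.cloud.event.EventTypes;
import com.cloud.event.UsageEventUtils;
import com.cloud.storage.Volume;

// Illustrative wrapper around the new overload; every ID here is a placeholder.
public class VolumeUsageExample {
    static void recordVolumeCreate(long accountId, long zoneId, long volumeId,
                                   String volumeName, String volumeUuid,
                                   Long offeringId, Long vmId, Long sizeInBytes) {
        UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE,
                accountId, zoneId, volumeId, volumeName, offeringId,
                null /* templateId */, sizeInBytes,
                Volume.class.getName(), volumeUuid,
                vmId /* new parameter: owning VM, may be null */,
                true /* displayResource */);
    }
}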
@@ -903,7 +903,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         // Save usage event and update resource count for user vm volumes
         if (vm.getType() == VirtualMachine.Type.User) {
             UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, vol.getAccountId(), vol.getDataCenterId(), vol.getId(), vol.getName(), offering.getId(), null, size,
-                    Volume.class.getName(), vol.getUuid(), vol.isDisplayVolume());
+                    Volume.class.getName(), vol.getUuid(), vol.getInstanceId(), vol.isDisplayVolume());
             _resourceLimitMgr.incrementVolumeResourceCount(vm.getAccountId(), vol.isDisplayVolume(), vol.getSize(), offering);
         }
         DiskProfile diskProfile = toDiskProfile(vol, offering);

@@ -981,7 +981,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         }

         UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, vol.getAccountId(), vol.getDataCenterId(), vol.getId(), vol.getName(), offeringId, vol.getTemplateId(), size,
-                Volume.class.getName(), vol.getUuid(), vol.isDisplayVolume());
+                Volume.class.getName(), vol.getUuid(), vol.getInstanceId(), vol.isDisplayVolume());

         _resourceLimitMgr.incrementVolumeResourceCount(vm.getAccountId(), vol.isDisplayVolume(), vol.getSize(), offering);
     }
@@ -75,6 +75,9 @@ public class UsageEventVO implements UsageEvent {
     @Column(name = "virtual_size")
     private Long virtualSize;

+    @Column(name = "vm_id")
+    private Long vmId;
+
     public UsageEventVO() {
     }

@@ -143,6 +146,18 @@
         this.offeringId = securityGroupId;
     }

+    public UsageEventVO(String usageType, long accountId, long zoneId, long resourceId, Long offeringId, Long templateId, Long size, Long vmId, String resourceName) {
+        this.type = usageType;
+        this.accountId = accountId;
+        this.zoneId = zoneId;
+        this.resourceId = resourceId;
+        this.offeringId = offeringId;
+        this.templateId = templateId;
+        this.size = size;
+        this.vmId = vmId;
+        this.resourceName = resourceName;
+    }
+
     @Override
     public long getId() {
         return id;

@@ -248,4 +263,11 @@
         this.virtualSize = virtualSize;
     }

+    public Long getVmId() {
+        return vmId;
+    }
+
+    public void setVmId(Long vmId) {
+        this.vmId = vmId;
+    }
 }
@@ -45,11 +45,11 @@ public class UsageEventDaoImpl extends GenericDaoBase<UsageEventVO, Long> implem
     private final SearchBuilder<UsageEventVO> latestEventsSearch;
     private final SearchBuilder<UsageEventVO> IpeventsSearch;
     private static final String COPY_EVENTS =
-        "INSERT INTO cloud_usage.usage_event (id, type, account_id, created, zone_id, resource_id, resource_name, offering_id, template_id, size, resource_type, virtual_size) "
-            + "SELECT id, type, account_id, created, zone_id, resource_id, resource_name, offering_id, template_id, size, resource_type, virtual_size FROM cloud.usage_event vmevt WHERE vmevt.id > ? and vmevt.id <= ? ";
+        "INSERT INTO cloud_usage.usage_event (id, type, account_id, created, zone_id, resource_id, resource_name, offering_id, template_id, size, resource_type, virtual_size, vm_id) "
+            + "SELECT id, type, account_id, created, zone_id, resource_id, resource_name, offering_id, template_id, size, resource_type, virtual_size, vm_id FROM cloud.usage_event vmevt WHERE vmevt.id > ? and vmevt.id <= ? ";
     private static final String COPY_ALL_EVENTS =
-        "INSERT INTO cloud_usage.usage_event (id, type, account_id, created, zone_id, resource_id, resource_name, offering_id, template_id, size, resource_type, virtual_size) "
-            + "SELECT id, type, account_id, created, zone_id, resource_id, resource_name, offering_id, template_id, size, resource_type, virtual_size FROM cloud.usage_event vmevt WHERE vmevt.id <= ?";
+        "INSERT INTO cloud_usage.usage_event (id, type, account_id, created, zone_id, resource_id, resource_name, offering_id, template_id, size, resource_type, virtual_size, vm_id) "
+            + "SELECT id, type, account_id, created, zone_id, resource_id, resource_name, offering_id, template_id, size, resource_type, virtual_size, vm_id FROM cloud.usage_event vmevt WHERE vmevt.id <= ?";
     private static final String COPY_EVENT_DETAILS = "INSERT INTO cloud_usage.usage_event_details (id, usage_event_id, name, value) "
         + "SELECT id, usage_event_id, name, value FROM cloud.usage_event_details vmevtDetails WHERE vmevtDetails.usage_event_id > ? and vmevtDetails.usage_event_id <= ? ";
     private static final String COPY_ALL_EVENT_DETAILS = "INSERT INTO cloud_usage.usage_event_details (id, usage_event_id, name, value) "
@@ -89,10 +89,11 @@ import com.cloud.upgrade.dao.Upgrade41900to41910;
 import com.cloud.upgrade.dao.Upgrade41910to42000;
 import com.cloud.upgrade.dao.Upgrade42000to42010;
 import com.cloud.upgrade.dao.Upgrade42010to42100;
-import com.cloud.upgrade.dao.Upgrade420to421;
 import com.cloud.upgrade.dao.Upgrade42100to42200;
+import com.cloud.upgrade.dao.Upgrade42200to42210;
+import com.cloud.upgrade.dao.Upgrade420to421;
 import com.cloud.upgrade.dao.Upgrade421to430;
-import com.cloud.upgrade.dao.Upgrade42200to42300;
+import com.cloud.upgrade.dao.Upgrade42210to42300;
 import com.cloud.upgrade.dao.Upgrade430to440;
 import com.cloud.upgrade.dao.Upgrade431to440;
 import com.cloud.upgrade.dao.Upgrade432to440;

@@ -237,7 +238,8 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
                 .next("4.20.0.0", new Upgrade42000to42010())
                 .next("4.20.1.0", new Upgrade42010to42100())
                 .next("4.21.0.0", new Upgrade42100to42200())
-                .next("4.22.0.0", new Upgrade42200to42300())
+                .next("4.22.0.0", new Upgrade42200to42210())
+                .next("4.22.1.0", new Upgrade42210to42300())
                 .build();
     }
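The upgrade chain maps each starting DB version to the step that advances it, so inserting 4.22.1.0 splits the former single 4.22.0.0 -> 4.23.0.0 hop in two. A toy model of that hop table (the real entries are DbUpgrade objects, not strings):

import java.util.LinkedHashMap;
import java.util.Map;

// Toy model of the checker's ordered hop table; illustrative only.
public class UpgradePathSketch {
    public static void main(String[] args) {
        Map<String, String> hop = new LinkedHashMap<>();
        hop.put("4.21.0.0", "4.22.0.0"); // Upgrade42100to42200
        hop.put("4.22.0.0", "4.22.1.0"); // Upgrade42200to42210 (was Upgrade42200to42300)
        hop.put("4.22.1.0", "4.23.0.0"); // Upgrade42210to42300 (new intermediate hop)

        String v = "4.21.0.0";
        while (hop.containsKey(v)) {
            System.out.println(v + " -> " + hop.get(v));
            v = hop.get(v);
        }
    }
}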
@@ -315,20 +317,20 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
     }

     protected void executeProcedureScripts() {
-        LOGGER.info(String.format("Executing Stored Procedure scripts that are under resource directory [%s].", PROCEDURES_DIRECTORY));
+        LOGGER.info("Executing Stored Procedure scripts that are under resource directory [{}].", PROCEDURES_DIRECTORY);
         List<String> filesPathUnderViewsDirectory = FileUtil.getFilesPathsUnderResourceDirectory(PROCEDURES_DIRECTORY);

         try (TransactionLegacy txn = TransactionLegacy.open("execute-procedure-scripts")) {
             Connection conn = txn.getConnection();

             for (String filePath : filesPathUnderViewsDirectory) {
-                LOGGER.debug(String.format("Executing PROCEDURE script [%s].", filePath));
+                LOGGER.debug("Executing PROCEDURE script [{}].", filePath);

                 InputStream viewScript = Thread.currentThread().getContextClassLoader().getResourceAsStream(filePath);
                 runScript(conn, viewScript);
             }

-            LOGGER.info(String.format("Finished execution of PROCEDURE scripts that are under resource directory [%s].", PROCEDURES_DIRECTORY));
+            LOGGER.info("Finished execution of PROCEDURE scripts that are under resource directory [{}].", PROCEDURES_DIRECTORY);
         } catch (SQLException e) {
             String message = String.format("Unable to execute PROCEDURE scripts due to [%s].", e.getMessage());
             LOGGER.error(message, e);
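Most remaining hunks in this class (and in TransactionLegacy further down) swap string concatenation and String.format() for Log4j 2 parameterized messages; the {} form defers argument rendering until the level is known to be enabled, which removes the need for manual guards around cheap arguments. In miniature:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class LoggingStyleExample {
    private static final Logger LOGGER = LogManager.getLogger(LoggingStyleExample.class);

    public static void main(String[] args) {
        String dir = "some/resource/dir"; // placeholder value
        // Before: the message String is built even if INFO is disabled.
        LOGGER.info(String.format("Executing scripts under [%s].", dir));
        // After: arguments are only rendered when the message is actually logged.
        LOGGER.info("Executing scripts under [{}].", dir);
    }
}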
@@ -337,7 +339,7 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
     }

     private DbUpgrade[] executeUpgrades(CloudStackVersion dbVersion, CloudStackVersion currentVersion) {
-        LOGGER.info("Database upgrade must be performed from " + dbVersion + " to " + currentVersion);
+        LOGGER.info("Database upgrade must be performed from {} to {}", dbVersion, currentVersion);

         final DbUpgrade[] upgrades = calculateUpgradePath(dbVersion, currentVersion);

@@ -350,8 +352,8 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {

     private VersionVO executeUpgrade(DbUpgrade upgrade) {
         VersionVO version;
-        LOGGER.debug("Running upgrade " + upgrade.getClass().getSimpleName() + " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade
-            .getUpgradableVersionRange()[1] + " to " + upgrade.getUpgradedVersion());
+        LOGGER.debug("Running upgrade {} to upgrade from {}-{} to {}", upgrade.getClass().getSimpleName(), upgrade.getUpgradableVersionRange()[0], upgrade
+            .getUpgradableVersionRange()[1], upgrade.getUpgradedVersion());
         TransactionLegacy txn = TransactionLegacy.open("Upgrade");
         txn.start();
         try {

@@ -394,8 +396,8 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
         // Run the corresponding '-cleanup.sql' script
         txn = TransactionLegacy.open("Cleanup");
         try {
-            LOGGER.info("Cleanup upgrade " + upgrade.getClass().getSimpleName() + " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade
-                .getUpgradableVersionRange()[1] + " to " + upgrade.getUpgradedVersion());
+            LOGGER.info("Cleanup upgrade {} to upgrade from {}-{} to {}", upgrade.getClass().getSimpleName(), upgrade.getUpgradableVersionRange()[0], upgrade
+                .getUpgradableVersionRange()[1], upgrade.getUpgradedVersion());

             txn.start();
             Connection conn;

@@ -410,7 +412,7 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
             if (scripts != null) {
                 for (InputStream script : scripts) {
                     runScript(conn, script);
-                    LOGGER.debug("Cleanup script " + upgrade.getClass().getSimpleName() + " is executed successfully");
+                    LOGGER.debug("Cleanup script {} is executed successfully", upgrade.getClass().getSimpleName());
                 }
             }
             txn.commit();

@@ -420,27 +422,27 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
             version.setUpdated(new Date());
             _dao.update(version.getId(), version);
             txn.commit();
-            LOGGER.debug("Upgrade completed for version " + version.getVersion());
+            LOGGER.debug("Upgrade completed for version {}", version.getVersion());
         } finally {
             txn.close();
         }
     }

     protected void executeViewScripts() {
-        LOGGER.info(String.format("Executing VIEW scripts that are under resource directory [%s].", VIEWS_DIRECTORY));
+        LOGGER.info("Executing VIEW scripts that are under resource directory [{}].", VIEWS_DIRECTORY);
         List<String> filesPathUnderViewsDirectory = FileUtil.getFilesPathsUnderResourceDirectory(VIEWS_DIRECTORY);

         try (TransactionLegacy txn = TransactionLegacy.open("execute-view-scripts")) {
             Connection conn = txn.getConnection();

             for (String filePath : filesPathUnderViewsDirectory) {
-                LOGGER.debug(String.format("Executing VIEW script [%s].", filePath));
+                LOGGER.debug("Executing VIEW script [{}].", filePath);

                 InputStream viewScript = Thread.currentThread().getContextClassLoader().getResourceAsStream(filePath);
                 runScript(conn, viewScript);
             }

-            LOGGER.info(String.format("Finished execution of VIEW scripts that are under resource directory [%s].", VIEWS_DIRECTORY));
+            LOGGER.info("Finished execution of VIEW scripts that are under resource directory [{}].", VIEWS_DIRECTORY);
         } catch (SQLException e) {
             String message = String.format("Unable to execute VIEW scripts due to [%s].", e.getMessage());
             LOGGER.error(message, e);

@@ -470,10 +472,10 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
         String csVersion = SystemVmTemplateRegistration.parseMetadataFile();
         final CloudStackVersion sysVmVersion = CloudStackVersion.parse(csVersion);
         final CloudStackVersion currentVersion = CloudStackVersion.parse(currentVersionValue);
-        SystemVmTemplateRegistration.CS_MAJOR_VERSION = String.valueOf(sysVmVersion.getMajorRelease()) + "." + String.valueOf(sysVmVersion.getMinorRelease());
+        SystemVmTemplateRegistration.CS_MAJOR_VERSION = sysVmVersion.getMajorRelease() + "." + sysVmVersion.getMinorRelease();
         SystemVmTemplateRegistration.CS_TINY_VERSION = String.valueOf(sysVmVersion.getPatchRelease());

-        LOGGER.info("DB version = " + dbVersion + " Code Version = " + currentVersion);
+        LOGGER.info("DB version = {} Code Version = {}", dbVersion, currentVersion);

         if (dbVersion.compareTo(currentVersion) > 0) {
             throw new CloudRuntimeException("Database version " + dbVersion + " is higher than management software version " + currentVersionValue);

@@ -522,7 +524,7 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
                 ResultSet result = pstmt.executeQuery()) {
             if (result.next()) {
                 String init = result.getString(1);
-                LOGGER.info("init = " + DBEncryptionUtil.decrypt(init));
+                LOGGER.info("init = {}", DBEncryptionUtil.decrypt(init));
             }
         }
     }
@@ -553,21 +555,11 @@
         return upgradedVersion;
     }

-    @Override
-    public boolean supportsRollingUpgrade() {
-        return false;
-    }
-
-    @Override
-    public InputStream[] getPrepareScripts() {
-        return new InputStream[0];
-    }
-
     @Override
     public void performDataMigration(Connection conn) {

     }

     @Override
     public InputStream[] getCleanupScripts() {
         return new InputStream[0];
@@ -0,0 +1,30 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.upgrade.dao;
+
+public class Upgrade42200to42210 extends DbUpgradeAbstractImpl implements DbUpgrade, DbUpgradeSystemVmTemplate {
+
+    @Override
+    public String[] getUpgradableVersionRange() {
+        return new String[] {"4.22.0.0", "4.22.1.0"};
+    }
+
+    @Override
+    public String getUpgradedVersion() {
+        return "4.22.1.0";
+    }
+}
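getUpgradableVersionRange() bounds the DB versions this step may start from, and the checker selects the step whose range covers the current version. A rough illustration of such a containment check — the real comparison goes through CloudStackVersion, and the exact boundary semantics here are an assumption:

// Rough sketch: dotted versions with equal-width segments compare correctly as strings.
public class RangeCheckSketch {
    static boolean inRange(String dbVersion, String[] range) {
        return dbVersion.compareTo(range[0]) >= 0 && dbVersion.compareTo(range[1]) < 0;
    }

    public static void main(String[] args) {
        String[] range = {"4.22.0.0", "4.22.1.0"};
        System.out.println(inRange("4.22.0.0", range)); // true
        System.out.println(inRange("4.22.1.0", range)); // false: handled by the next step
    }
}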
@@ -16,11 +16,11 @@
 // under the License.
 package com.cloud.upgrade.dao;

-public class Upgrade42200to42300 extends DbUpgradeAbstractImpl implements DbUpgrade, DbUpgradeSystemVmTemplate {
+public class Upgrade42210to42300 extends DbUpgradeAbstractImpl implements DbUpgrade, DbUpgradeSystemVmTemplate {

     @Override
     public String[] getUpgradableVersionRange() {
-        return new String[]{"4.22.0.0", "4.23.0.0"};
+        return new String[]{"4.22.1.0", "4.23.0.0"};
     }

     @Override
@@ -59,6 +59,9 @@ public class UsageVolumeVO implements InternalIdentity {
     @Column(name = "size")
     private long size;

+    @Column(name = "vm_id")
+    private Long vmId;
+
     @Column(name = "created")
     @Temporal(value = TemporalType.TIMESTAMP)
     private Date created = null;

@@ -70,13 +73,14 @@ public class UsageVolumeVO implements InternalIdentity {
     protected UsageVolumeVO() {
     }

-    public UsageVolumeVO(long id, long zoneId, long accountId, long domainId, Long diskOfferingId, Long templateId, long size, Date created, Date deleted) {
+    public UsageVolumeVO(long id, long zoneId, long accountId, long domainId, Long diskOfferingId, Long templateId, Long vmId, long size, Date created, Date deleted) {
         this.volumeId = id;
         this.zoneId = zoneId;
         this.accountId = accountId;
         this.domainId = domainId;
         this.diskOfferingId = diskOfferingId;
         this.templateId = templateId;
+        this.vmId = vmId;
         this.size = size;
         this.created = created;
         this.deleted = deleted;

@@ -126,4 +130,12 @@
     public long getVolumeId() {
         return volumeId;
     }

+    public Long getVmId() {
+        return vmId;
+    }
+
+    public void setVmId(Long vmId) {
+        this.vmId = vmId;
+    }
 }
@@ -57,6 +57,7 @@ public class UsageStorageDaoImpl extends GenericDaoBase<UsageStorageVO, Long> im
         IdSearch.and("accountId", IdSearch.entity().getAccountId(), SearchCriteria.Op.EQ);
         IdSearch.and("id", IdSearch.entity().getEntityId(), SearchCriteria.Op.EQ);
         IdSearch.and("type", IdSearch.entity().getStorageType(), SearchCriteria.Op.EQ);
+        IdSearch.and("deleted", IdSearch.entity().getDeleted(), SearchCriteria.Op.NULL);
         IdSearch.done();

         IdZoneSearch = createSearchBuilder();

@@ -74,6 +75,7 @@ public class UsageStorageDaoImpl extends GenericDaoBase<UsageStorageVO, Long> im
         sc.setParameters("accountId", accountId);
         sc.setParameters("id", id);
         sc.setParameters("type", type);
+        sc.setParameters("deleted", null);
         return listBy(sc, null);
     }
@@ -23,9 +23,7 @@ import com.cloud.usage.UsageVolumeVO;
 import com.cloud.utils.db.GenericDao;

 public interface UsageVolumeDao extends GenericDao<UsageVolumeVO, Long> {
-    public void removeBy(long userId, long id);
-
-    public void update(UsageVolumeVO usage);
-
     public List<UsageVolumeVO> getUsageRecords(Long accountId, Long domainId, Date startDate, Date endDate, boolean limit, int page);
+
+    List<UsageVolumeVO> listByVolumeId(long volumeId, long accountId);
 }
@@ -18,81 +18,46 @@ package com.cloud.usage.dao;

 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Date;
 import java.util.List;
 import java.util.TimeZone;

-import com.cloud.exception.CloudException;
+import javax.annotation.PostConstruct;

 import org.springframework.stereotype.Component;

 import com.cloud.usage.UsageVolumeVO;
 import com.cloud.utils.DateUtil;
 import com.cloud.utils.db.GenericDaoBase;
+import com.cloud.utils.db.SearchBuilder;
+import com.cloud.utils.db.SearchCriteria;
 import com.cloud.utils.db.TransactionLegacy;

 @Component
 public class UsageVolumeDaoImpl extends GenericDaoBase<UsageVolumeVO, Long> implements UsageVolumeDao {

-    protected static final String REMOVE_BY_USERID_VOLID = "DELETE FROM usage_volume WHERE account_id = ? AND volume_id = ?";
-    protected static final String UPDATE_DELETED = "UPDATE usage_volume SET deleted = ? WHERE account_id = ? AND volume_id = ? and deleted IS NULL";
-    protected static final String GET_USAGE_RECORDS_BY_ACCOUNT = "SELECT volume_id, zone_id, account_id, domain_id, disk_offering_id, template_id, size, created, deleted "
+    protected static final String GET_USAGE_RECORDS_BY_ACCOUNT = "SELECT volume_id, zone_id, account_id, domain_id, disk_offering_id, template_id, vm_id, size, created, deleted "
         + "FROM usage_volume " + "WHERE account_id = ? AND ((deleted IS NULL) OR (created BETWEEN ? AND ?) OR "
         + " (deleted BETWEEN ? AND ?) OR ((created <= ?) AND (deleted >= ?)))";
-    protected static final String GET_USAGE_RECORDS_BY_DOMAIN = "SELECT volume_id, zone_id, account_id, domain_id, disk_offering_id, template_id, size, created, deleted "
+    protected static final String GET_USAGE_RECORDS_BY_DOMAIN = "SELECT volume_id, zone_id, account_id, domain_id, disk_offering_id, template_id, vm_id, size, created, deleted "
         + "FROM usage_volume " + "WHERE domain_id = ? AND ((deleted IS NULL) OR (created BETWEEN ? AND ?) OR "
         + " (deleted BETWEEN ? AND ?) OR ((created <= ?) AND (deleted >= ?)))";
-    protected static final String GET_ALL_USAGE_RECORDS = "SELECT volume_id, zone_id, account_id, domain_id, disk_offering_id, template_id, size, created, deleted "
+    protected static final String GET_ALL_USAGE_RECORDS = "SELECT volume_id, zone_id, account_id, domain_id, disk_offering_id, template_id, vm_id, size, created, deleted "
         + "FROM usage_volume " + "WHERE (deleted IS NULL) OR (created BETWEEN ? AND ?) OR " + " (deleted BETWEEN ? AND ?) OR ((created <= ?) AND (deleted >= ?))";
+    private SearchBuilder<UsageVolumeVO> volumeSearch;

     public UsageVolumeDaoImpl() {
     }

-    @Override
-    public void removeBy(long accountId, long volId) {
-        TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
-        try {
-            txn.start();
-            try(PreparedStatement pstmt = txn.prepareStatement(REMOVE_BY_USERID_VOLID);) {
-                if (pstmt != null) {
-                    pstmt.setLong(1, accountId);
-                    pstmt.setLong(2, volId);
-                    pstmt.executeUpdate();
-                }
-            }catch (SQLException e) {
-                throw new CloudException("Error removing usageVolumeVO:"+e.getMessage(), e);
-            }
-            txn.commit();
-        } catch (Exception e) {
-            txn.rollback();
-            logger.warn("Error removing usageVolumeVO:"+e.getMessage(), e);
-        } finally {
-            txn.close();
-        }
-    }
-
-    @Override
-    public void update(UsageVolumeVO usage) {
-        TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
-        PreparedStatement pstmt = null;
-        try {
-            txn.start();
-            if (usage.getDeleted() != null) {
-                pstmt = txn.prepareAutoCloseStatement(UPDATE_DELETED);
-                pstmt.setString(1, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), usage.getDeleted()));
-                pstmt.setLong(2, usage.getAccountId());
-                pstmt.setLong(3, usage.getVolumeId());
-                pstmt.executeUpdate();
-            }
-            txn.commit();
-        } catch (Exception e) {
-            txn.rollback();
-            logger.warn("Error updating UsageVolumeVO", e);
-        } finally {
-            txn.close();
-        }
+    @PostConstruct
+    protected void init() {
+        volumeSearch = createSearchBuilder();
+        volumeSearch.and("accountId", volumeSearch.entity().getAccountId(), SearchCriteria.Op.EQ);
+        volumeSearch.and("volumeId", volumeSearch.entity().getVolumeId(), SearchCriteria.Op.EQ);
+        volumeSearch.and("deleted", volumeSearch.entity().getDeleted(), SearchCriteria.Op.NULL);
+        volumeSearch.done();
     }

     @Override

@@ -150,11 +115,15 @@ public class UsageVolumeDaoImpl extends GenericDaoBase<UsageVolumeVO, Long> impl
                 if (tId == 0) {
                     tId = null;
                 }
-                long size = Long.valueOf(rs.getLong(7));
+                Long vmId = Long.valueOf(rs.getLong(7));
+                if (vmId == 0) {
+                    vmId = null;
+                }
+                long size = Long.valueOf(rs.getLong(8));
                 Date createdDate = null;
                 Date deletedDate = null;
-                String createdTS = rs.getString(8);
-                String deletedTS = rs.getString(9);
+                String createdTS = rs.getString(9);
+                String deletedTS = rs.getString(10);

                 if (createdTS != null) {
                     createdDate = DateUtil.parseDateString(s_gmtTimeZone, createdTS);

@@ -163,7 +132,7 @@ public class UsageVolumeDaoImpl extends GenericDaoBase<UsageVolumeVO, Long> impl
                     deletedDate = DateUtil.parseDateString(s_gmtTimeZone, deletedTS);
                 }

-                usageRecords.add(new UsageVolumeVO(vId, zoneId, acctId, dId, doId, tId, size, createdDate, deletedDate));
+                usageRecords.add(new UsageVolumeVO(vId, zoneId, acctId, dId, doId, tId, vmId, size, createdDate, deletedDate));
             }
         } catch (Exception e) {
             txn.rollback();

@@ -174,4 +143,13 @@ public class UsageVolumeDaoImpl extends GenericDaoBase<UsageVolumeVO, Long> impl

         return usageRecords;
     }
+
+    @Override
+    public List<UsageVolumeVO> listByVolumeId(long volumeId, long accountId) {
+        SearchCriteria<UsageVolumeVO> sc = volumeSearch.create();
+        sc.setParameters("accountId", accountId);
+        sc.setParameters("volumeId", volumeId);
+        sc.setParameters("deleted", null);
+        return listBy(sc);
+    }
 }
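The raw-JDBC removeBy()/update() paths above give way to GenericDaoBase's SearchBuilder, prepared once in @PostConstruct and instantiated per query via create(). A self-contained analogue of that build-once/bind-per-call split (the entity and predicate below are illustrative stand-ins, not the CloudStack classes):

import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Collectors;

// Analogue of SearchBuilder/SearchCriteria: the query shape is fixed up front,
// parameters are bound per call.
public class VolumeSearchSketch {
    record UsageVolume(long accountId, long volumeId, String deleted) {}

    static Predicate<UsageVolume> byVolume(long accountId, long volumeId) {
        return v -> v.accountId() == accountId
                 && v.volumeId() == volumeId
                 && v.deleted() == null;  // mirrors SearchCriteria.Op.NULL on "deleted"
    }

    public static void main(String[] args) {
        List<UsageVolume> rows = List.of(
                new UsageVolume(1, 10, null),
                new UsageVolume(1, 10, "2025-01-01"),
                new UsageVolume(2, 10, null));
        // Only the live (deleted IS NULL) row for account 1, volume 10 survives.
        System.out.println(rows.stream().filter(byVolume(1, 10)).collect(Collectors.toList()));
    }
}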
@@ -101,7 +101,7 @@ public class UserVmDaoImpl extends GenericDaoBase<UserVmVO, Long> implements Use
     ReservationDao reservationDao;

     private static final String LIST_PODS_HAVING_VMS_FOR_ACCOUNT =
-        "SELECT pod_id FROM cloud.vm_instance WHERE data_center_id = ? AND account_id = ? AND pod_id IS NOT NULL AND (state = 'Running' OR state = 'Stopped') "
+        "SELECT pod_id FROM cloud.vm_instance WHERE data_center_id = ? AND account_id = ? AND pod_id IS NOT NULL AND state IN ('Starting', 'Running', 'Stopped') "
             + "GROUP BY pod_id HAVING count(id) > 0 ORDER BY count(id) DESC";

     private static final String VM_DETAILS = "select vm_instance.id, "
@@ -0,0 +1,20 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+--   http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade cleanup from 4.22.0.0 to 4.22.1.0
+--;
@@ -0,0 +1,27 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+--   http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+--;
+-- Schema upgrade from 4.22.0.0 to 4.22.1.0
+--;
+
+-- Add vm_id column to usage_event table for volume usage events
+CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.usage_event','vm_id', 'bigint UNSIGNED NULL COMMENT "VM ID associated with volume usage events"');
+CALL `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.usage_event','vm_id', 'bigint UNSIGNED NULL COMMENT "VM ID associated with volume usage events"');
+
+-- Add vm_id column to cloud_usage.usage_volume table
+CALL `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.usage_volume','vm_id', 'bigint UNSIGNED NULL COMMENT "VM ID associated with the volume usage"');
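IDEMPOTENT_ADD_COLUMN is CloudStack's stored procedure that adds a column only if it is absent, which keeps re-runs of the upgrade script safe. A hedged sketch of issuing the same CALL over plain JDBC — the URL and credentials are placeholders; the upgrade framework supplies the real connection:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class IdempotentColumnAddExample {
    public static void main(String[] args) throws SQLException {
        // Placeholder connection details; illustrative only.
        try (Connection conn = DriverManager.getConnection(
                     "jdbc:mysql://localhost:3306/cloud", "cloud", "secret");
             Statement stmt = conn.createStatement()) {
            // Running this twice is harmless: the procedure checks column existence first.
            stmt.execute("CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.usage_event','vm_id', "
                    + "'bigint UNSIGNED NULL COMMENT \"VM ID associated with volume usage events\"')");
        }
    }
}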
@@ -25,10 +25,7 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Savepoint;
 import java.sql.Statement;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Properties;
+import java.util.*;
 import java.util.concurrent.atomic.AtomicLong;

 import javax.sql.DataSource;
@@ -56,14 +53,12 @@ import com.zaxxer.hikari.HikariDataSource;
 /**
  * Transaction abstracts away the Connection object in JDBC. It allows the
  * following things that the Connection object does not.
  *
  * 1. Transaction can be started at an entry point and whether the DB
  *    actions should be auto-commit or not determined at that point.
  * 2. DB Connection is allocated only when it is needed.
  * 3. Code does not need to know if a transaction has been started or not.
  *    It just starts/ends a transaction and we resolve it correctly with
  *    the previous actions.
  *
  * Note that this class is not synchronous but it doesn't need to be because
  * it is stored with TLS and is one per thread. Use appropriately.
  */
@@ -73,7 +68,7 @@ public class TransactionLegacy implements Closeable {
     protected Logger lockLogger = LogManager.getLogger(Transaction.class.getName() + "." + "Lock");
     protected static Logger CONN_LOGGER = LogManager.getLogger(Transaction.class.getName() + "." + "Connection");

-    private static final ThreadLocal<TransactionLegacy> tls = new ThreadLocal<TransactionLegacy>();
+    private static final ThreadLocal<TransactionLegacy> tls = new ThreadLocal<>();
     private static final String START_TXN = "start_txn";
     private static final String CURRENT_TXN = "current_txn";
     private static final String CREATE_TXN = "create_txn";

@@ -103,7 +98,7 @@ public class TransactionLegacy implements Closeable {
     private final LinkedList<StackElement> _stack;
     private long _id;

-    private final LinkedList<Pair<String, Long>> _lockTimes = new LinkedList<Pair<String, Long>>();
+    private final LinkedList<Pair<String, Long>> _lockTimes = new LinkedList<>();

     private String _name;
     private Connection _conn;

@@ -160,7 +155,7 @@ public class TransactionLegacy implements Closeable {
         TransactionLegacy txn = tls.get();
         if (txn == null) {
             if (LOGGER.isTraceEnabled()) {
-                LOGGER.trace("Creating the transaction: " + name);
+                LOGGER.trace("Creating the transaction: {}", name);
             }
             txn = new TransactionLegacy(name, false, databaseId);
             tls.set(txn);

@@ -206,7 +201,7 @@ public class TransactionLegacy implements Closeable {

     public void registerLock(String sql) {
         if (_txn && lockLogger.isDebugEnabled()) {
-            Pair<String, Long> time = new Pair<String, Long>(sql, System.currentTimeMillis());
+            Pair<String, Long> time = new Pair<>(sql, System.currentTimeMillis());
             _lockTimes.add(time);
         }
     }

@@ -218,7 +213,7 @@ public class TransactionLegacy implements Closeable {
     public static Connection getStandaloneConnectionWithException() throws SQLException {
         Connection conn = s_ds.getConnection();
         if (CONN_LOGGER.isTraceEnabled()) {
-            CONN_LOGGER.trace("Retrieving a standalone connection: dbconn" + System.identityHashCode(conn));
+            CONN_LOGGER.trace("Retrieving a standalone connection: dbconn{}", System.identityHashCode(conn));
         }
         return conn;
     }

@@ -236,7 +231,7 @@ public class TransactionLegacy implements Closeable {
         try {
             Connection conn = s_usageDS.getConnection();
             if (CONN_LOGGER.isTraceEnabled()) {
-                CONN_LOGGER.trace("Retrieving a standalone connection for usage: dbconn" + System.identityHashCode(conn));
+                CONN_LOGGER.trace("Retrieving a standalone connection for usage: dbconn{}", System.identityHashCode(conn));
             }
             return conn;
         } catch (SQLException e) {

@@ -249,7 +244,7 @@ public class TransactionLegacy implements Closeable {
         try {
             Connection conn = s_simulatorDS.getConnection();
             if (CONN_LOGGER.isTraceEnabled()) {
-                CONN_LOGGER.trace("Retrieving a standalone connection for simulator: dbconn" + System.identityHashCode(conn));
+                CONN_LOGGER.trace("Retrieving a standalone connection for simulator: dbconn{}", System.identityHashCode(conn));
             }
             return conn;
         } catch (SQLException e) {

@@ -266,7 +261,7 @@ public class TransactionLegacy implements Closeable {
         Iterator<StackElement> it = _stack.descendingIterator();
         while (it.hasNext()) {
             StackElement element = it.next();
-            if (element.type == ATTACHMENT) {
+            if (Objects.equals(element.type, ATTACHMENT)) {
                 TransactionAttachment att = (TransactionAttachment)element.ref;
                 if (name.equals(att.getName())) {
                     it.remove();
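This hunk and several below replace reference equality on the String type constants (START_TXN, CURRENT_TXN, ATTACHMENT, ...) with Objects.equals. For compile-time constants, == happens to work through string interning, but Objects.equals compares content and tolerates nulls:

import java.util.Objects;

public class EqualsDemo {
    public static void main(String[] args) {
        String constant = "start_txn";
        String runtime = new String("start_txn"); // distinct object, same contents

        System.out.println(runtime == constant);               // false: different references
        System.out.println(Objects.equals(runtime, constant)); // true: compares contents
        System.out.println(Objects.equals(null, constant));    // false, and no NullPointerException
    }
}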
@@ -308,7 +303,7 @@ public class TransactionLegacy implements Closeable {
         }

         // relax stack structure for several places that @DB required injection is not in place
-        LOGGER.warn("Non-standard stack context that Transaction context is manaully placed into the calling chain. Stack chain: " + sb);
+        LOGGER.warn("Non-standard stack context that Transaction context is manaully placed into the calling chain. Stack chain: {}", sb);
         return true;
     }

@@ -344,7 +339,7 @@ public class TransactionLegacy implements Closeable {
     private TransactionLegacy(final String name, final boolean forLocking, final short databaseId) {
         _name = name;
         _conn = null;
-        _stack = new LinkedList<StackElement>();
+        _stack = new LinkedList<>();
         _txn = false;
         _dbId = databaseId;
         _id = s_id.incrementAndGet();

@@ -372,7 +367,7 @@ public class TransactionLegacy implements Closeable {
         final StringBuilder str = new StringBuilder((_name != null ? _name : ""));
         str.append(" : ");
         for (final StackElement se : _stack) {
-            if (se.type == CURRENT_TXN) {
+            if (Objects.equals(se.type, CURRENT_TXN)) {
                 str.append(se.ref).append(", ");
             }
         }

@@ -406,7 +401,7 @@ public class TransactionLegacy implements Closeable {
     @Deprecated
     public void start() {
         if (LOGGER.isTraceEnabled()) {
-            LOGGER.trace("txn: start requested by: " + buildName());
+            LOGGER.trace("txn: start requested by: {}", buildName());
         }

         _stack.push(new StackElement(START_TXN, null));

@@ -434,7 +429,7 @@ public class TransactionLegacy implements Closeable {
         if (_stmt != null) {
             try {
                 if (stmtLogger.isTraceEnabled()) {
-                    stmtLogger.trace("Closing: " + _stmt.toString());
+                    stmtLogger.trace("Closing: {}", _stmt.toString());
                 }
                 try {
                     ResultSet rs = _stmt.getResultSet();

@@ -446,7 +441,7 @@ public class TransactionLegacy implements Closeable {
                 }
                 _stmt.close();
             } catch (final SQLException e) {
-                stmtLogger.trace("Unable to close statement: " + _stmt.toString());
+                stmtLogger.trace("Unable to close statement: {}", _stmt.toString());
             } finally {
                 _stmt = null;
             }

@@ -474,7 +469,7 @@ public class TransactionLegacy implements Closeable {
         final Connection conn = getConnection();
         final PreparedStatement pstmt = conn.prepareStatement(sql);
         if (stmtLogger.isTraceEnabled()) {
-            stmtLogger.trace("Preparing: " + sql);
+            stmtLogger.trace("Preparing: {}", sql);
         }
         return pstmt;
     }

@@ -494,7 +489,7 @@ public class TransactionLegacy implements Closeable {
         final Connection conn = getConnection();
         final PreparedStatement pstmt = conn.prepareStatement(sql, autoGeneratedKeys);
         if (stmtLogger.isTraceEnabled()) {
-            stmtLogger.trace("Preparing: " + sql);
+            stmtLogger.trace("Preparing: {}", sql);
         }
         closePreviousStatement();
         _stmt = pstmt;

@@ -516,7 +511,7 @@ public class TransactionLegacy implements Closeable {
         final Connection conn = getConnection();
         final PreparedStatement pstmt = conn.prepareStatement(sql, columnNames);
         if (stmtLogger.isTraceEnabled()) {
-            stmtLogger.trace("Preparing: " + sql);
+            stmtLogger.trace("Preparing: {}", sql);
         }
         closePreviousStatement();
         _stmt = pstmt;

@@ -537,7 +532,7 @@ public class TransactionLegacy implements Closeable {
         final Connection conn = getConnection();
         final PreparedStatement pstmt = conn.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability);
         if (stmtLogger.isTraceEnabled()) {
-            stmtLogger.trace("Preparing: " + sql);
+            stmtLogger.trace("Preparing: {}", sql);
         }
         closePreviousStatement();
         _stmt = pstmt;

@@ -546,7 +541,6 @@ public class TransactionLegacy implements Closeable {

     /**
      * Returns the db connection.
-     *
      * Note: that you can call getConnection() but beaware that
      * all prepare statements from the Connection are not garbage
      * collected!
@@ -595,8 +589,7 @@ public class TransactionLegacy implements Closeable {
             //
             _stack.push(new StackElement(CREATE_CONN, null));
             if (CONN_LOGGER.isTraceEnabled()) {
-                CONN_LOGGER.trace("Creating a DB connection with " + (_txn ? " txn: " : " no txn: ") + " for " + _dbId + ": dbconn" + System.identityHashCode(_conn) +
-                    ". Stack: " + buildName());
+                CONN_LOGGER.trace("Creating a DB connection with {} for {}: dbconn{}. Stack: {}", _txn ? " txn: " : " no txn: ", _dbId, System.identityHashCode(_conn), buildName());
             }
         } else {
             LOGGER.trace("conn: Using existing DB connection");

@@ -615,33 +608,33 @@ public class TransactionLegacy implements Closeable {
     }

     protected boolean takeOver(final String name, final boolean create) {
-        if (_stack.size() != 0) {
+        if (!_stack.isEmpty()) {
             if (!create) {
                 // If it is not a create transaction, then let's just use the current one.
                 if (LOGGER.isTraceEnabled()) {
-                    LOGGER.trace("Using current transaction: " + toString());
+                    LOGGER.trace("Using current transaction: {}", this);
                 }
                 mark(name);
                 return false;
             }

             final StackElement se = _stack.getFirst();
-            if (se.type == CREATE_TXN) {
+            if (Objects.equals(se.type, CREATE_TXN)) {
                 // This create is called inside of another create. Which is ok?
                 // We will let that create be responsible for cleaning up.
                 if (LOGGER.isTraceEnabled()) {
-                    LOGGER.trace("Create using current transaction: " + toString());
+                    LOGGER.trace("Create using current transaction: {}", this);
                 }
                 mark(name);
                 return false;
             }

-            LOGGER.warn("Encountered a transaction that has leaked. Cleaning up. " + toString());
+            LOGGER.warn("Encountered a transaction that has leaked. Cleaning up. {}", this);
             cleanup();
         }

         if (LOGGER.isTraceEnabled()) {
-            LOGGER.trace("Took over the transaction: " + name);
+            LOGGER.trace("Took over the transaction: {}", name);
         }
         _stack.push(new StackElement(create ? CREATE_TXN : CURRENT_TXN, name));
         _name = name;

@@ -671,7 +664,7 @@ public class TransactionLegacy implements Closeable {
     public void close() {
         removeUpTo(CURRENT_TXN, null);

-        if (_stack.size() == 0) {
+        if (_stack.isEmpty()) {
             LOGGER.trace("Transaction is done");
             cleanup();
         }

@@ -687,7 +680,7 @@ public class TransactionLegacy implements Closeable {
     public boolean close(final String name) {
         if (_name == null) { // Already cleaned up.
             if (LOGGER.isTraceEnabled()) {
-                LOGGER.trace("Already cleaned up." + buildName());
+                LOGGER.trace("Already cleaned up.{}", buildName());
             }
             return true;
         }

@@ -698,7 +691,7 @@ public class TransactionLegacy implements Closeable {
         }

         if (LOGGER.isDebugEnabled() && _stack.size() > 2) {
-            LOGGER.debug("Transaction is not closed properly: " + toString() + ". Called by " + buildName());
+            LOGGER.debug("Transaction is not closed properly: {}. Called by {}", this, buildName());
         }

         cleanup();

@@ -714,7 +707,7 @@ public class TransactionLegacy implements Closeable {
     protected void clearLockTimes() {
         if (lockLogger.isDebugEnabled()) {
             for (Pair<String, Long> time : _lockTimes) {
-                lockLogger.trace("SQL " + time.first() + " took " + (System.currentTimeMillis() - time.second()));
+                lockLogger.trace("SQL {} took {}", time.first(), System.currentTimeMillis() - time.second());
             }
             _lockTimes.clear();
         }

@@ -722,14 +715,14 @@ public class TransactionLegacy implements Closeable {

     public boolean commit() {
         if (!_txn) {
-            LOGGER.warn("txn: Commit called when it is not a transaction: " + buildName());
+            LOGGER.warn("txn: Commit called when it is not a transaction: {}", buildName());
             return false;
         }

         Iterator<StackElement> it = _stack.iterator();
         while (it.hasNext()) {
             StackElement st = it.next();
-            if (st.type == START_TXN) {
+            if (Objects.equals(st.type, START_TXN)) {
                 it.remove();
                 break;
             }

@@ -737,7 +730,7 @@ public class TransactionLegacy implements Closeable {

         if (hasTxnInStack()) {
             if (LOGGER.isTraceEnabled()) {
-                LOGGER.trace("txn: Not committing because transaction started elsewhere: " + buildName() + " / " + toString());
+                LOGGER.trace("txn: Not committing because transaction started elsewhere: {} / {}", buildName(), this);
             }
             return false;
         }

@@ -746,7 +739,7 @@ public class TransactionLegacy implements Closeable {
         try {
             if (_conn != null) {
                 _conn.commit();
-                LOGGER.trace("txn: DB Changes committed. Time = " + (System.currentTimeMillis() - _txnTime));
+                LOGGER.trace("txn: DB Changes committed. Time = {}", System.currentTimeMillis() - _txnTime);
                 clearLockTimes();
                 closeConnection();
             }

@@ -773,7 +766,7 @@ public class TransactionLegacy implements Closeable {
         // we should only close db connection when it is not user managed
         if (_dbId != CONNECTED_DB) {
             if (CONN_LOGGER.isTraceEnabled()) {
-                CONN_LOGGER.trace("Closing DB connection: dbconn" + System.identityHashCode(_conn));
+                CONN_LOGGER.trace("Closing DB connection: dbconn{}", System.identityHashCode(_conn));
             }
             _conn.close();
             _conn = null;

@@ -797,13 +790,13 @@ public class TransactionLegacy implements Closeable {
                 break;
             }

-            if (item.type == CURRENT_TXN) {
+            if (Objects.equals(item.type, CURRENT_TXN)) {
                 if (LOGGER.isTraceEnabled()) {
-                    LOGGER.trace("Releasing the current txn: " + (item.ref != null ? item.ref : ""));
+                    LOGGER.trace("Releasing the current txn: {}", item.ref != null ? item.ref : "");
                 }
-            } else if (item.type == CREATE_CONN) {
+            } else if (Objects.equals(item.type, CREATE_CONN)) {
                 closeConnection();
-            } else if (item.type == START_TXN) {
+            } else if (Objects.equals(item.type, START_TXN)) {
                 if (item.ref == null) {
                     rollback = true;
                 } else {

@@ -814,10 +807,10 @@ public class TransactionLegacy implements Closeable {
                         LOGGER.warn("Unable to rollback Txn.", e);
                     }
                 }
-            } else if (item.type == STATEMENT) {
+            } else if (Objects.equals(item.type, STATEMENT)) {
                 try {
                     if (stmtLogger.isTraceEnabled()) {
-                        stmtLogger.trace("Closing: " + ref.toString());
+                        stmtLogger.trace("Closing: {}", ref.toString());
                     }
                     Statement stmt = (Statement)ref;
                     try {

@@ -830,17 +823,17 @@ public class TransactionLegacy implements Closeable {
                     }
                     stmt.close();
                 } catch (final SQLException e) {
-                    stmtLogger.trace("Unable to close statement: " + item);
+                    stmtLogger.trace("Unable to close statement: {}", item);
                 }
-            } else if (item.type == ATTACHMENT) {
+            } else if (Objects.equals(item.type, ATTACHMENT)) {
                 TransactionAttachment att = (TransactionAttachment)item.ref;
                 if (LOGGER.isTraceEnabled()) {
-                    LOGGER.trace("Cleaning up " + att.getName());
+                    LOGGER.trace("Cleaning up {}", att.getName());
                 }
                 att.cleanup();
             }
         } catch (Exception e) {
-            LOGGER.error("Unable to clean up " + item, e);
+            LOGGER.error("Unable to clean up {}", item, e);
        }
     }

@ -853,7 +846,7 @@ public class TransactionLegacy implements Closeable {
|
||||
closePreviousStatement();
|
||||
if (!_txn) {
|
||||
if (LOGGER.isTraceEnabled()) {
|
||||
LOGGER.trace("Rollback called for " + _name + " when there's no transaction: " + buildName());
|
||||
LOGGER.trace("Rollback called for {} when there's no transaction: {}", _name, buildName());
|
||||
}
|
||||
return;
|
||||
}
|
||||
@ -862,7 +855,7 @@ public class TransactionLegacy implements Closeable {
|
||||
try {
|
||||
if (_conn != null) {
|
||||
if (LOGGER.isDebugEnabled()) {
|
||||
LOGGER.debug("Rolling back the transaction: Time = " + (System.currentTimeMillis() - _txnTime) + " Name = " + _name + "; called by " + buildName());
|
||||
LOGGER.debug("Rolling back the transaction: Time = {} Name = {}; called by {}", System.currentTimeMillis() - _txnTime, _name, buildName());
|
||||
}
|
||||
_conn.rollback();
|
||||
}
|
||||
@ -879,7 +872,7 @@ public class TransactionLegacy implements Closeable {
|
||||
_conn.rollback(sp);
|
||||
}
|
||||
} catch (SQLException e) {
|
||||
LOGGER.warn("Unable to rollback to savepoint " + sp);
|
||||
LOGGER.warn("Unable to rollback to savepoint {}", sp);
|
||||
}
|
||||
|
||||
if (!hasTxnInStack()) {
|
||||
@ -892,7 +885,7 @@ public class TransactionLegacy implements Closeable {
|
||||
Iterator<StackElement> it = _stack.iterator();
|
||||
while (it.hasNext()) {
|
||||
StackElement st = it.next();
|
||||
if (st.type == START_TXN) {
|
||||
if (Objects.equals(st.type, START_TXN)) {
|
||||
if (st.ref == null) {
|
||||
it.remove();
|
||||
} else {
|
||||
@ -943,7 +936,7 @@ public class TransactionLegacy implements Closeable {
Iterator<StackElement> it = _stack.iterator();
while (it.hasNext()) {
StackElement se = it.next();
if (se.type == START_TXN && se.ref == sp) {
if (Objects.equals(se.type, START_TXN) && se.ref == sp) {
return true;
}
}
@ -960,7 +953,7 @@ public class TransactionLegacy implements Closeable {
Iterator<StackElement> it = _stack.iterator();
while (it.hasNext()) {
StackElement se = it.next();
if (se.type == START_TXN) {
if (Objects.equals(se.type, START_TXN)) {
it.remove();
if (se.ref == sp) {
return;
@ -993,7 +986,7 @@ public class TransactionLegacy implements Closeable {

@Override
protected void finalize() throws Throwable {
if (!(_conn == null && (_stack == null || _stack.size() == 0))) {
if (!(_conn == null && (_stack == null || _stack.isEmpty()))) {
assert (false) : "Oh Alex oh alex...something is wrong with how we're doing this";
LOGGER.error("Something went wrong that a transaction is orphaned before db connection is closed");
cleanup();
@ -1052,11 +1045,11 @@ public class TransactionLegacy implements Closeable {
@SuppressWarnings({"rawtypes", "unchecked"})
public static void initDataSource(Properties dbProps) {
try {
if (dbProps.size() == 0)
if (dbProps.isEmpty())
return;

s_dbHAEnabled = Boolean.valueOf(dbProps.getProperty("db.ha.enabled"));
LOGGER.info("Is Data Base High Availiability enabled? Ans : " + s_dbHAEnabled);
s_dbHAEnabled = Boolean.parseBoolean(dbProps.getProperty("db.ha.enabled"));
LOGGER.info("Is Data Base High Availiability enabled? Ans : {}", s_dbHAEnabled);
String loadBalanceStrategy = dbProps.getProperty("db.ha.loadBalanceStrategy");
// FIXME: If params are missing...default them????
final Integer cloudMaxActive = parseNumber(dbProps.getProperty("db.cloud.maxActive"), Integer.class);
@ -1082,7 +1075,7 @@ public class TransactionLegacy implements Closeable {
} else if (cloudIsolationLevel.equalsIgnoreCase("readuncommitted")) {
isolationLevel = Connection.TRANSACTION_READ_UNCOMMITTED;
} else {
LOGGER.warn("Unknown isolation level " + cloudIsolationLevel + ". Using read uncommitted");
LOGGER.warn("Unknown isolation level {}. Using read uncommitted", cloudIsolationLevel);
}

final boolean cloudTestOnBorrow = Boolean.parseBoolean(dbProps.getProperty("db.cloud.testOnBorrow"));
@ -1190,16 +1183,16 @@ public class TransactionLegacy implements Closeable {
driver = dbProps.getProperty(String.format("db.%s.driver", schema));
connectionUri = getPropertiesAndBuildConnectionUri(dbProps, loadBalanceStrategy, driver, useSSL, schema);
} else {
LOGGER.warn(String.format("db.%s.uri was set, ignoring the following properties for schema %s of db.properties: [host, port, name, driver, autoReconnect, url.params,"
LOGGER.warn("db.{}.uri was set, ignoring the following properties for schema {} of db.properties: [host, port, name, driver, autoReconnect, url.params,"
+ " replicas, ha.loadBalanceStrategy, ha.enable, failOverReadOnly, reconnectAtTxEnd, autoReconnectForPools, secondsBeforeRetrySource, queriesBeforeRetrySource, "
+ "initialTimeout].", schema, schema));
+ "initialTimeout].", schema, schema);

String[] splitUri = propertyUri.split(":");
driver = String.format("%s:%s", splitUri[0], splitUri[1]);

connectionUri = propertyUri;
}
LOGGER.info(String.format("Using the following URI to connect to %s database [%s].", schema, connectionUri));
LOGGER.info("Using the following URI to connect to {} database [{}].", schema, connectionUri);
return new Pair<>(connectionUri, driver);
}
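
As an aside on the branch above: when an explicit URI is configured, the driver prefix is recovered as the first two colon-separated segments of that URI. A tiny sketch under an assumed, illustrative URI value (not taken from this commit):

public class DriverPrefixSketch {
    public static void main(String[] args) {
        // Hypothetical explicit URI, such as one set via db.cloud.uri in db.properties.
        String propertyUri = "jdbc:mysql://10.1.1.1:3306/cloud";
        String[] splitUri = propertyUri.split(":");
        // Keep the scheme and sub-protocol, mirroring String.format("%s:%s", ...) above.
        String driver = String.format("%s:%s", splitUri[0], splitUri[1]);
        System.out.println(driver); // prints: jdbc:mysql
    }
}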

@ -1215,7 +1208,7 @@ public class TransactionLegacy implements Closeable {
if (s_dbHAEnabled) {
dbHaParams = getDBHAParams(schema, dbProps);
replicas = dbProps.getProperty(String.format("db.%s.replicas", schema));
LOGGER.info(String.format("The replicas configured for %s data base are %s.", schema, replicas));
LOGGER.info("The replicas configured for {} data base are {}.", schema, replicas);
}

return buildConnectionUri(loadBalanceStrategy, driver, useSSL, host, replicas, port, dbName, autoReconnect, urlParams, dbHaParams);
@ -1322,8 +1315,7 @@ public class TransactionLegacy implements Closeable {
config.addDataSourceProperty("elideSetAutoCommits", "true");
config.addDataSourceProperty("maintainTimeStats", "false");

HikariDataSource dataSource = new HikariDataSource(config);
return dataSource;
return new HikariDataSource(config);
}

private static DataSource createDbcpDataSource(String uri, String username, String password,
@ -1411,19 +1403,19 @@ public class TransactionLegacy implements Closeable {

private static String getDBHAParams(String dbName, Properties dbProps) {
StringBuilder sb = new StringBuilder();
sb.append("failOverReadOnly=" + dbProps.getProperty("db." + dbName + ".failOverReadOnly"));
sb.append("&").append("reconnectAtTxEnd=" + dbProps.getProperty("db." + dbName + ".reconnectAtTxEnd"));
sb.append("&").append("autoReconnectForPools=" + dbProps.getProperty("db." + dbName + ".autoReconnectForPools"));
sb.append("&").append("secondsBeforeRetrySource=" + dbProps.getProperty("db." + dbName + ".secondsBeforeRetrySource"));
sb.append("&").append("queriesBeforeRetrySource=" + dbProps.getProperty("db." + dbName + ".queriesBeforeRetrySource"));
sb.append("&").append("initialTimeout=" + dbProps.getProperty("db." + dbName + ".initialTimeout"));
sb.append("failOverReadOnly=").append(dbProps.getProperty("db." + dbName + ".failOverReadOnly"));
sb.append("&").append("reconnectAtTxEnd=").append(dbProps.getProperty("db." + dbName + ".reconnectAtTxEnd"));
sb.append("&").append("autoReconnectForPools=").append(dbProps.getProperty("db." + dbName + ".autoReconnectForPools"));
sb.append("&").append("secondsBeforeRetrySource=").append(dbProps.getProperty("db." + dbName + ".secondsBeforeRetrySource"));
sb.append("&").append("queriesBeforeRetrySource=").append(dbProps.getProperty("db." + dbName + ".queriesBeforeRetrySource"));
sb.append("&").append("initialTimeout=").append(dbProps.getProperty("db." + dbName + ".initialTimeout"));
return sb.toString();
}
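
The refactor above only swaps string concatenation inside append() for chained append() calls; the query-string fragment it builds is unchanged. A minimal, self-contained sketch of that fragment, trimmed to three of the six parameters, with illustrative property values (not values from this commit):

import java.util.Properties;

public class DbHaParamsSketch {
    // Same append-chain shape as getDBHAParams above, shortened for brevity.
    static String dbHaParams(String dbName, Properties dbProps) {
        StringBuilder sb = new StringBuilder();
        sb.append("failOverReadOnly=").append(dbProps.getProperty("db." + dbName + ".failOverReadOnly"));
        sb.append("&").append("reconnectAtTxEnd=").append(dbProps.getProperty("db." + dbName + ".reconnectAtTxEnd"));
        sb.append("&").append("initialTimeout=").append(dbProps.getProperty("db." + dbName + ".initialTimeout"));
        return sb.toString();
    }

    public static void main(String[] args) {
        Properties p = new Properties();
        p.setProperty("db.cloud.failOverReadOnly", "false"); // sample values, not defaults from the commit
        p.setProperty("db.cloud.reconnectAtTxEnd", "true");
        p.setProperty("db.cloud.initialTimeout", "3600");
        // prints: failOverReadOnly=false&reconnectAtTxEnd=true&initialTimeout=3600
        System.out.println(dbHaParams("cloud", p));
    }
}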

/**
* Used for unit testing primarily
*
* @param conn
* @param conn connection to use
*/
protected void setConnection(Connection conn) {
_conn = conn;
@ -1433,7 +1425,7 @@ public class TransactionLegacy implements Closeable {
* Receives a list of {@link PreparedStatement} and quietly closes all of them, which
* triggers also closing their dependent objects, like a {@link ResultSet}
*
* @param pstmt2Close
* @param pstmt2Close list of PreparedStatement to close
*/
public static void closePstmts(List<PreparedStatement> pstmt2Close) {
for (PreparedStatement pstmt : pstmt2Close) {

@ -59,3 +59,8 @@ USAGELOG=/var/log/cloudstack/usage/usage.log
USAGESYSCONFDIR=/etc/cloudstack/usage
PACKAGE=cloudstack
EXTENSIONSDEPLOYMENTMODE=production
GUESTNVRAMTEMPLATELEGACY=/usr/share/OVMF/OVMF_VARS_4M.fd
GUESTLOADERLEGACY=/usr/share/OVMF/OVMF_CODE_4M.fd
GUESTNVRAMTEMPLATESECURE=/usr/share/OVMF/OVMF_VARS_4M.ms.fd
GUESTLOADERSECURE=/usr/share/OVMF/OVMF_CODE_4M.secboot.fd
GUESTNVRAMPATH=/var/lib/libvirt/qemu/nvram/

@ -115,6 +115,8 @@ Requires: ipset
Requires: perl
Requires: rsync
Requires: cifs-utils
Requires: edk2-ovmf
Requires: swtpm
Requires: (python3-libvirt or python3-libvirt-python)
Requires: (qemu-img or qemu-tools)
Requires: qemu-kvm
@ -356,6 +358,7 @@ install -D packaging/systemd/cloudstack-agent.service ${RPM_BUILD_ROOT}%{_unitdi
install -D packaging/systemd/cloudstack-rolling-maintenance@.service ${RPM_BUILD_ROOT}%{_unitdir}/%{name}-rolling-maintenance@.service
install -D packaging/systemd/cloudstack-agent.default ${RPM_BUILD_ROOT}%{_sysconfdir}/default/%{name}-agent
install -D agent/target/transformed/agent.properties ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent/agent.properties
install -D agent/target/transformed/uefi.properties ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent/uefi.properties
install -D agent/target/transformed/environment.properties ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent/environment.properties
install -D agent/target/transformed/log4j-cloud.xml ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent/log4j-cloud.xml
install -D agent/target/transformed/cloud-setup-agent ${RPM_BUILD_ROOT}%{_bindir}/%{name}-setup-agent
@ -523,7 +526,7 @@ mkdir -m 0755 -p /usr/share/cloudstack-agent/tmp
/usr/bin/systemctl enable cloudstack-rolling-maintenance@p > /dev/null 2>&1 || true
/usr/bin/systemctl enable --now rngd > /dev/null 2>&1 || true

# if saved configs from upgrade exist, copy them over
# if saved agent.properties from upgrade exist, copy them over
if [ -f "%{_sysconfdir}/cloud.rpmsave/agent/agent.properties" ]; then
mv %{_sysconfdir}/%{name}/agent/agent.properties %{_sysconfdir}/%{name}/agent/agent.properties.rpmnew
cp -p %{_sysconfdir}/cloud.rpmsave/agent/agent.properties %{_sysconfdir}/%{name}/agent
@ -531,6 +534,14 @@ if [ -f "%{_sysconfdir}/cloud.rpmsave/agent/agent.properties" ]; then
mv %{_sysconfdir}/cloud.rpmsave/agent/agent.properties %{_sysconfdir}/cloud.rpmsave/agent/agent.properties.rpmsave
fi

# if saved uefi.properties from upgrade exist, copy them over
if [ -f "%{_sysconfdir}/cloud.rpmsave/agent/uefi.properties" ]; then
mv %{_sysconfdir}/%{name}/agent/uefi.properties %{_sysconfdir}/%{name}/agent/uefi.properties.rpmnew
cp -p %{_sysconfdir}/cloud.rpmsave/agent/uefi.properties %{_sysconfdir}/%{name}/agent
# make sure we only do this on the first install of this RPM, don't want to overwrite on a reinstall
mv %{_sysconfdir}/cloud.rpmsave/agent/uefi.properties %{_sysconfdir}/cloud.rpmsave/agent/uefi.properties.rpmsave
fi

systemctl daemon-reload

# Print help message

@ -58,3 +58,8 @@ USAGECLASSPATH=
USAGELOG=/var/log/cloudstack/usage/usage.log
USAGESYSCONFDIR=/etc/sysconfig
EXTENSIONSDEPLOYMENTMODE=production
GUESTNVRAMTEMPLATELEGACY=/usr/share/edk2/ovmf/OVMF_VARS.fd
GUESTLOADERLEGACY=/usr/share/edk2/ovmf/OVMF_CODE.cc.fd
GUESTNVRAMTEMPLATESECURE=/usr/share/edk2/ovmf/OVMF_VARS.secboot.fd
GUESTLOADERSECURE=/usr/share/edk2/ovmf/OVMF_CODE.secboot.fd
GUESTNVRAMPATH=/var/lib/libvirt/qemu/nvram/

@ -105,7 +105,8 @@ public class VeeamClient {
private static final String REPOSITORY_REFERENCE = "RepositoryReference";
private static final String RESTORE_POINT_REFERENCE = "RestorePointReference";
private static final String BACKUP_FILE_REFERENCE = "BackupFileReference";
private static final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
private static final ObjectMapper OBJECT_MAPPER = new XmlMapper();

private String veeamServerIp;
private final Integer veeamServerVersion;
@ -124,6 +125,8 @@ public class VeeamClient {
this.taskPollInterval = taskPollInterval;
this.taskPollMaxRetry = taskPollMaxRetry;

OBJECT_MAPPER.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

final RequestConfig config = RequestConfig.custom()
.setConnectTimeout(timeout * 1000)
.setConnectionRequestTimeout(timeout * 1000)
@ -233,8 +236,7 @@ public class VeeamClient {
private HttpResponse post(final String path, final Object obj) throws IOException {
String xml = null;
if (obj != null) {
XmlMapper xmlMapper = new XmlMapper();
xml = xmlMapper.writer()
xml = OBJECT_MAPPER.writer()
.with(ToXmlGenerator.Feature.WRITE_XML_DECLARATION)
.writeValueAsString(obj);
// Remove invalid/empty xmlns
@ -277,8 +279,7 @@ public class VeeamClient {
try {
final HttpResponse response = get("/hierarchyRoots");
checkResponseOK(response);
final ObjectMapper objectMapper = new XmlMapper();
final EntityReferences references = objectMapper.readValue(response.getEntity().getContent(), EntityReferences.class);
final EntityReferences references = OBJECT_MAPPER.readValue(response.getEntity().getContent(), EntityReferences.class);
for (final Ref ref : references.getRefs()) {
if (ref.getName().equals(vmwareDcName) && ref.getType().equals(HIERARCHY_ROOT_REFERENCE)) {
return ref.getUid();
@ -297,8 +298,7 @@ public class VeeamClient {
try {
final HttpResponse response = get(String.format("/lookup?host=%s&type=Vm&name=%s", hierarchyId, vmName));
checkResponseOK(response);
final ObjectMapper objectMapper = new XmlMapper();
final HierarchyItems items = objectMapper.readValue(response.getEntity().getContent(), HierarchyItems.class);
final HierarchyItems items = OBJECT_MAPPER.readValue(response.getEntity().getContent(), HierarchyItems.class);
if (items == null || items.getItems() == null || items.getItems().isEmpty()) {
throw new CloudRuntimeException("Could not find VM " + vmName + " in Veeam, please ask administrator to check Veeam B&R manager");
}
@ -316,14 +316,12 @@ public class VeeamClient {

private Task parseTaskResponse(HttpResponse response) throws IOException {
checkResponseOK(response);
final ObjectMapper objectMapper = new XmlMapper();
return objectMapper.readValue(response.getEntity().getContent(), Task.class);
return OBJECT_MAPPER.readValue(response.getEntity().getContent(), Task.class);
}

protected RestoreSession parseRestoreSessionResponse(HttpResponse response) throws IOException {
checkResponseOK(response);
final ObjectMapper objectMapper = new XmlMapper();
return objectMapper.readValue(response.getEntity().getContent(), RestoreSession.class);
return OBJECT_MAPPER.readValue(response.getEntity().getContent(), RestoreSession.class);
}

private boolean checkTaskStatus(final HttpResponse response) throws IOException {
@ -410,8 +408,7 @@ public class VeeamClient {
String repositoryName = getRepositoryNameFromJob(backupName);
final HttpResponse response = get(String.format("/backupServers/%s/repositories", backupServerId));
checkResponseOK(response);
final ObjectMapper objectMapper = new XmlMapper();
final EntityReferences references = objectMapper.readValue(response.getEntity().getContent(), EntityReferences.class);
final EntityReferences references = OBJECT_MAPPER.readValue(response.getEntity().getContent(), EntityReferences.class);
for (final Ref ref : references.getRefs()) {
if (ref.getType().equals(REPOSITORY_REFERENCE) && ref.getName().equals(repositoryName)) {
return ref;
@ -447,8 +444,7 @@ public class VeeamClient {
try {
final HttpResponse response = get("/backups");
checkResponseOK(response);
final ObjectMapper objectMapper = new XmlMapper();
final EntityReferences entityReferences = objectMapper.readValue(response.getEntity().getContent(), EntityReferences.class);
final EntityReferences entityReferences = OBJECT_MAPPER.readValue(response.getEntity().getContent(), EntityReferences.class);
for (final Ref ref : entityReferences.getRefs()) {
logger.debug("Veeam Backup found, name: " + ref.getName() + ", uid: " + ref.getUid() + ", type: " + ref.getType());
}
@ -463,8 +459,7 @@ public class VeeamClient {
try {
final HttpResponse response = get("/jobs");
checkResponseOK(response);
final ObjectMapper objectMapper = new XmlMapper();
final EntityReferences entityReferences = objectMapper.readValue(response.getEntity().getContent(), EntityReferences.class);
final EntityReferences entityReferences = OBJECT_MAPPER.readValue(response.getEntity().getContent(), EntityReferences.class);
final List<BackupOffering> policies = new ArrayList<>();
if (entityReferences == null || entityReferences.getRefs() == null) {
return policies;
@ -486,9 +481,7 @@ public class VeeamClient {
final HttpResponse response = get(String.format("/jobs/%s?format=Entity",
jobId.replace("urn:veeam:Job:", "")));
checkResponseOK(response);
final ObjectMapper objectMapper = new XmlMapper();
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
return objectMapper.readValue(response.getEntity().getContent(), Job.class);
return OBJECT_MAPPER.readValue(response.getEntity().getContent(), Job.class);
} catch (final IOException e) {
logger.error("Failed to list Veeam jobs due to:", e);
checkResponseTimeOut(e);
@ -568,9 +561,7 @@ public class VeeamClient {
final String veeamVmRefId = lookupVM(hierarchyId, vmwareInstanceName);
final HttpResponse response = get(String.format("/jobs/%s/includes", jobId));
checkResponseOK(response);
final ObjectMapper objectMapper = new XmlMapper();
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
final ObjectsInJob jobObjects = objectMapper.readValue(response.getEntity().getContent(), ObjectsInJob.class);
final ObjectsInJob jobObjects = OBJECT_MAPPER.readValue(response.getEntity().getContent(), ObjectsInJob.class);
if (jobObjects == null || jobObjects.getObjects() == null) {
logger.warn("No objects found in the Veeam job " + jobId);
return false;
@ -710,8 +701,7 @@ public class VeeamClient {
protected Map<String, Backup.Metric> processHttpResponseForBackupMetrics(final InputStream content) {
Map<String, Backup.Metric> metrics = new HashMap<>();
try {
final ObjectMapper objectMapper = new XmlMapper();
final BackupFiles backupFiles = objectMapper.readValue(content, BackupFiles.class);
final BackupFiles backupFiles = OBJECT_MAPPER.readValue(content, BackupFiles.class);
if (backupFiles == null || CollectionUtils.isEmpty(backupFiles.getBackupFiles())) {
throw new CloudRuntimeException("Could not get backup metrics via Veeam B&R API");
}
@ -855,8 +845,7 @@ public class VeeamClient {
public List<Backup.RestorePoint> processHttpResponseForVmRestorePoints(InputStream content, String vmwareDcName, String vmInternalName, Map<String, Backup.Metric> metricsMap) {
List<Backup.RestorePoint> vmRestorePointList = new ArrayList<>();
try {
final ObjectMapper objectMapper = new XmlMapper();
final VmRestorePoints vmRestorePoints = objectMapper.readValue(content, VmRestorePoints.class);
final VmRestorePoints vmRestorePoints = OBJECT_MAPPER.readValue(content, VmRestorePoints.class);
final String hierarchyId = findDCHierarchy(vmwareDcName);
final String hierarchyUuid = StringUtils.substringAfterLast(hierarchyId, ":");
if (vmRestorePoints == null) {
@ -907,7 +896,7 @@ public class VeeamClient {
}

private Date formatDate(String date) throws ParseException {
return dateFormat.parse(StringUtils.substring(date, 0, 19));
return DATE_FORMAT.parse(StringUtils.substring(date, 0, 19));
}

public Pair<Boolean, String> restoreVMToDifferentLocation(String restorePointId, String restoreLocation, String hostIp, String dataStoreUuid) {

@ -483,7 +483,9 @@ public class VeeamClientTest {
" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\"\n" +
" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n" +
" xmlns=\"http://www.veeam.com/ent/v1.0\">\n" +
" <VmRestorePoint Href=\"https://10.0.3.142:9398/api/vmRestorePoints/f6d504cf-eafe-4cd2-8dfc-e9cfe2f1e977?format=Entity\" Type=\"VmRestorePoint\" Name=\"i-2-4-VM@2023-11-03 16:26:12.209913\" UID=\"urn:veeam:VmRestorePoint:f6d504cf-eafe-4cd2-8dfc-e9cfe2f1e977\" VmDisplayName=\"i-2-4-VM\">\n" +
" <VmRestorePoint Href=\"https://10.0.3.142:9398/api/vmRestorePoints/f6d504cf-eafe-4cd2-8dfc-e9cfe2f1e977?format=Entity\"" +
" Type=\"VmRestorePoint\" Name=\"i-2-4-VM@2023-11-03 16:26:12.209913\" UID=\"urn:veeam:VmRestorePoint:f6d504cf-eafe-4cd2-8dfc-e9cfe2f1e977\"" +
" VmDisplayName=\"i-2-4-VM\" SqlInfo=\"SqlInfo\">\n" +
" <Links>\n" +
" <Link Href=\"https://10.0.3.142:9398/api/vmRestorePoints/f6d504cf-eafe-4cd2-8dfc-e9cfe2f1e977?action=restore\" Rel=\"Restore\" />\n" +
" <Link Href=\"https://10.0.3.142:9398/api/backupServers/18cc2a81-1ff0-42cd-8389-62f2bbcc6b7f\" Name=\"10.0.3.142\" Type=\"BackupServerReference\" Rel=\"Up\" />\n" +

@ -3293,25 +3293,25 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
setGuestLoader(bootMode, SECURE, guest, GuestDef.GUEST_LOADER_SECURE);
setGuestLoader(bootMode, LEGACY, guest, GuestDef.GUEST_LOADER_LEGACY);

if (isUefiPropertieNotNull(GuestDef.GUEST_NVRAM_PATH)) {
if (isUefiPropertyNotNull(GuestDef.GUEST_NVRAM_PATH)) {
guest.setNvram(uefiProperties.getProperty(GuestDef.GUEST_NVRAM_PATH));
}

if (isSecureBoot && isUefiPropertieNotNull(GuestDef.GUEST_NVRAM_TEMPLATE_SECURE) && SECURE.equalsIgnoreCase(bootMode)) {
if (isSecureBoot && isUefiPropertyNotNull(GuestDef.GUEST_NVRAM_TEMPLATE_SECURE) && SECURE.equalsIgnoreCase(bootMode)) {
guest.setNvramTemplate(uefiProperties.getProperty(GuestDef.GUEST_NVRAM_TEMPLATE_SECURE));
} else if (isUefiPropertieNotNull(GuestDef.GUEST_NVRAM_TEMPLATE_LEGACY)) {
} else if (isUefiPropertyNotNull(GuestDef.GUEST_NVRAM_TEMPLATE_LEGACY)) {
guest.setNvramTemplate(uefiProperties.getProperty(GuestDef.GUEST_NVRAM_TEMPLATE_LEGACY));
}
}

private void setGuestLoader(String bootMode, String mode, GuestDef guest, String propertie) {
if (isUefiPropertieNotNull(propertie) && mode.equalsIgnoreCase(bootMode)) {
guest.setLoader(uefiProperties.getProperty(propertie));
private void setGuestLoader(String bootMode, String mode, GuestDef guest, String property) {
if (isUefiPropertyNotNull(property) && mode.equalsIgnoreCase(bootMode)) {
guest.setLoader(uefiProperties.getProperty(property));
}
}

private boolean isUefiPropertieNotNull(String propertie) {
return uefiProperties.getProperty(propertie) != null;
private boolean isUefiPropertyNotNull(String property) {
return uefiProperties.getProperty(property) != null;
}

public boolean isGuestAarch64() {

@ -695,7 +695,12 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
updateTemplateRef(templateId, poolId, templatePath, templateSize);
return templateId;
} else {
return volumeVO.getTemplateId();
Long templateId = volumeVO.getTemplateId();
if (templateId == null && volumeVO.getInstanceId() != null) {
VMInstanceVO vmInstanceVO = vmDao.findByIdIncludingRemoved(volumeVO.getInstanceId());
return vmInstanceVO.getTemplateId();
}
return templateId;
}
}
}

@ -61,7 +61,7 @@ public interface KubernetesClusterService extends PluggableService, Configurable
"cloud.kubernetes.cluster.network.offering",
"DefaultNetworkOfferingforKubernetesService",
"Name of the network offering that will be used to create isolated network in which Kubernetes cluster VMs will be launched",
false,
true,
KubernetesServiceEnabled.key());
static final ConfigKey<Long> KubernetesClusterStartTimeout = new ConfigKey<Long>("Advanced", Long.class,
"cloud.kubernetes.cluster.start.timeout",

@ -49,7 +49,7 @@ public class ADLdapUserManagerImpl extends OpenLdapUserManagerImpl implements Ld
searchControls.setReturningAttributes(_ldapConfiguration.getReturnAttributes(domainId));

NamingEnumeration<SearchResult> results = context.search(basedn, generateADGroupSearchFilter(groupName, domainId), searchControls);
final List<LdapUser> users = new ArrayList<LdapUser>();
final List<LdapUser> users = new ArrayList<>();
while (results.hasMoreElements()) {
final SearchResult result = results.nextElement();
users.add(createUser(result, domainId));
@ -58,10 +58,8 @@ public class ADLdapUserManagerImpl extends OpenLdapUserManagerImpl implements Ld
}

String generateADGroupSearchFilter(String groupName, Long domainId) {
final StringBuilder userObjectFilter = new StringBuilder();
userObjectFilter.append("(objectClass=");
userObjectFilter.append(_ldapConfiguration.getUserObject(domainId));
userObjectFilter.append(")");

final StringBuilder userObjectFilter = getUserObjectFilter(domainId);

final StringBuilder memberOfFilter = new StringBuilder();
String groupCnName = _ldapConfiguration.getCommonNameAttribute() + "=" +groupName + "," + _ldapConfiguration.getBaseDn(domainId);
@ -75,10 +73,18 @@ public class ADLdapUserManagerImpl extends OpenLdapUserManagerImpl implements Ld
result.append(memberOfFilter);
result.append(")");

logger.debug("group search filter = " + result);
logger.debug("group search filter = {}", result);
return result.toString();
}

StringBuilder getUserObjectFilter(Long domainId) {
final StringBuilder userObjectFilter = new StringBuilder();
userObjectFilter.append("(&(objectCategory=person)");
userObjectFilter.append(super.getUserObjectFilter(domainId));
userObjectFilter.append(")");
return userObjectFilter;
}

protected boolean isUserDisabled(SearchResult result) throws NamingException {
boolean isDisabledUser = false;
String userAccountControl = LdapUtils.getAttributeValue(result.getAttributes(), _ldapConfiguration.getUserAccountControlAttribute());

@ -75,23 +75,15 @@ public class OpenLdapUserManagerImpl implements LdapUserManager {
}

private String generateSearchFilter(final String username, Long domainId) {
final StringBuilder userObjectFilter = new StringBuilder();
userObjectFilter.append("(objectClass=");
userObjectFilter.append(_ldapConfiguration.getUserObject(domainId));
userObjectFilter.append(")");
final StringBuilder userObjectFilter = getUserObjectFilter(domainId);

final StringBuilder usernameFilter = new StringBuilder();
usernameFilter.append("(");
usernameFilter.append(_ldapConfiguration.getUsernameAttribute(domainId));
usernameFilter.append("=");
usernameFilter.append((username == null ? "*" : LdapUtils.escapeLDAPSearchFilter(username)));
usernameFilter.append(")");
final StringBuilder usernameFilter = getUsernameFilter(username, domainId);

String memberOfAttribute = getMemberOfAttribute(domainId);
StringBuilder ldapGroupsFilter = new StringBuilder();
// this should get the trustmaps for this domain
List<String> ldapGroups = getMappedLdapGroups(domainId);
if (null != ldapGroups && ldapGroups.size() > 0) {
if (!ldapGroups.isEmpty()) {
ldapGroupsFilter.append("(|");
for (String ldapGroup : ldapGroups) {
ldapGroupsFilter.append(getMemberOfGroupString(ldapGroup, memberOfAttribute));
@ -104,21 +96,35 @@ public class OpenLdapUserManagerImpl implements LdapUserManager {
if (null != pricipleGroup) {
principleGroupFilter.append(getMemberOfGroupString(pricipleGroup, memberOfAttribute));
}
final StringBuilder result = new StringBuilder();
result.append("(&");
result.append(userObjectFilter);
result.append(usernameFilter);
result.append(ldapGroupsFilter);
result.append(principleGroupFilter);
result.append(")");

String returnString = result.toString();
if (logger.isTraceEnabled()) {
logger.trace("constructed ldap query: " + returnString);
}
String returnString = "(&" +
userObjectFilter +
usernameFilter +
ldapGroupsFilter +
principleGroupFilter +
")";
logger.trace("constructed ldap query: {}", returnString);
return returnString;
}

private StringBuilder getUsernameFilter(String username, Long domainId) {
final StringBuilder usernameFilter = new StringBuilder();
usernameFilter.append("(");
usernameFilter.append(_ldapConfiguration.getUsernameAttribute(domainId));
usernameFilter.append("=");
usernameFilter.append((username == null ? "*" : LdapUtils.escapeLDAPSearchFilter(username)));
usernameFilter.append(")");
return usernameFilter;
}

StringBuilder getUserObjectFilter(Long domainId) {
final StringBuilder userObjectFilter = new StringBuilder();
userObjectFilter.append("(objectClass=");
userObjectFilter.append(_ldapConfiguration.getUserObject(domainId));
userObjectFilter.append(")");
return userObjectFilter;
}

private List<String> getMappedLdapGroups(Long domainId) {
List <String> ldapGroups = new ArrayList<>();
// first get the trustmaps
@ -134,37 +140,31 @@ public class OpenLdapUserManagerImpl implements LdapUserManager {
private String getMemberOfGroupString(String group, String memberOfAttribute) {
final StringBuilder memberOfFilter = new StringBuilder();
if (null != group) {
if(logger.isDebugEnabled()) {
logger.debug("adding search filter for '" + group +
"', using '" + memberOfAttribute + "'");
}
memberOfFilter.append("(" + memberOfAttribute + "=");
memberOfFilter.append(group);
memberOfFilter.append(")");
logger.debug("adding search filter for '{}', using '{}'", group, memberOfAttribute);
memberOfFilter.append("(")
.append(memberOfAttribute)
.append("=")
.append(group)
.append(")");
}
return memberOfFilter.toString();
}

private String generateGroupSearchFilter(final String groupName, Long domainId) {
final StringBuilder groupObjectFilter = new StringBuilder();
groupObjectFilter.append("(objectClass=");
groupObjectFilter.append(_ldapConfiguration.getGroupObject(domainId));
groupObjectFilter.append(")");
String groupObjectFilter = "(objectClass=" +
_ldapConfiguration.getGroupObject(domainId) +
")";

final StringBuilder groupNameFilter = new StringBuilder();
groupNameFilter.append("(");
groupNameFilter.append(_ldapConfiguration.getCommonNameAttribute());
groupNameFilter.append("=");
groupNameFilter.append((groupName == null ? "*" : LdapUtils.escapeLDAPSearchFilter(groupName)));
groupNameFilter.append(")");
String groupNameFilter = "(" +
_ldapConfiguration.getCommonNameAttribute() +
"=" +
(groupName == null ? "*" : LdapUtils.escapeLDAPSearchFilter(groupName)) +
")";

final StringBuilder result = new StringBuilder();
result.append("(&");
result.append(groupObjectFilter);
result.append(groupNameFilter);
result.append(")");

return result.toString();
return "(&" +
groupObjectFilter +
groupNameFilter +
")";
}

@Override
@ -186,17 +186,9 @@ public class OpenLdapUserManagerImpl implements LdapUserManager {
basedn = _ldapConfiguration.getBaseDn(domainId);
}

final StringBuilder userObjectFilter = new StringBuilder();
userObjectFilter.append("(objectClass=");
userObjectFilter.append(_ldapConfiguration.getUserObject(domainId));
userObjectFilter.append(")");
final StringBuilder userObjectFilter = getUserObjectFilter(domainId);

final StringBuilder usernameFilter = new StringBuilder();
usernameFilter.append("(");
usernameFilter.append(_ldapConfiguration.getUsernameAttribute(domainId));
usernameFilter.append("=");
usernameFilter.append((username == null ? "*" : LdapUtils.escapeLDAPSearchFilter(username)));
usernameFilter.append(")");
final StringBuilder usernameFilter = getUsernameFilter(username, domainId);

final StringBuilder memberOfFilter = new StringBuilder();
if ("GROUP".equals(type)) {
@ -205,18 +197,17 @@ public class OpenLdapUserManagerImpl implements LdapUserManager {
memberOfFilter.append(")");
}

final StringBuilder searchQuery = new StringBuilder();
searchQuery.append("(&");
searchQuery.append(userObjectFilter);
searchQuery.append(usernameFilter);
searchQuery.append(memberOfFilter);
searchQuery.append(")");
String searchQuery = "(&" +
userObjectFilter +
usernameFilter +
memberOfFilter +
")";

return searchUser(basedn, searchQuery.toString(), context, domainId);
return searchUser(basedn, searchQuery, context, domainId);
}

protected String getMemberOfAttribute(final Long domainId) {
return _ldapConfiguration.getUserMemberOfAttribute(domainId);
return LdapConfiguration.getUserMemberOfAttribute(domainId);
}

@Override
@ -243,7 +234,7 @@ public class OpenLdapUserManagerImpl implements LdapUserManager {

NamingEnumeration<SearchResult> result = context.search(_ldapConfiguration.getBaseDn(domainId), generateGroupSearchFilter(groupName, domainId), controls);

final List<LdapUser> users = new ArrayList<LdapUser>();
final List<LdapUser> users = new ArrayList<>();
//Expecting only one result which has all the users
if (result.hasMoreElements()) {
Attribute attribute = result.nextElement().getAttributes().get(attributeName);
@ -254,7 +245,7 @@ public class OpenLdapUserManagerImpl implements LdapUserManager {
try{
users.add(getUserForDn(userdn, context, domainId));
} catch (NamingException e){
logger.info("Userdn: " + userdn + " Not Found:: Exception message: " + e.getMessage());
logger.info("Userdn: {} Not Found:: Exception message: {}", userdn, e.getMessage());
}
}
}
@ -286,17 +277,15 @@ public class OpenLdapUserManagerImpl implements LdapUserManager {
return false;
}

public LdapUser searchUser(final String basedn, final String searchString, final LdapContext context, Long domainId) throws NamingException, IOException {
public LdapUser searchUser(final String basedn, final String searchString, final LdapContext context, Long domainId) throws NamingException {
final SearchControls searchControls = new SearchControls();

searchControls.setSearchScope(_ldapConfiguration.getScope());
searchControls.setReturningAttributes(_ldapConfiguration.getReturnAttributes(domainId));

NamingEnumeration<SearchResult> results = context.search(basedn, searchString, searchControls);
if(logger.isDebugEnabled()) {
logger.debug("searching user(s) with filter: \"" + searchString + "\"");
}
final List<LdapUser> users = new ArrayList<LdapUser>();
logger.debug("searching user(s) with filter: \"{}\"", searchString);
final List<LdapUser> users = new ArrayList<>();
while (results.hasMoreElements()) {
final SearchResult result = results.nextElement();
users.add(createUser(result, domainId));
@ -324,7 +313,7 @@ public class OpenLdapUserManagerImpl implements LdapUserManager {
byte[] cookie = null;
int pageSize = _ldapConfiguration.getLdapPageSize(domainId);
context.setRequestControls(new Control[]{new PagedResultsControl(pageSize, Control.NONCRITICAL)});
final List<LdapUser> users = new ArrayList<LdapUser>();
final List<LdapUser> users = new ArrayList<>();
NamingEnumeration<SearchResult> results;
do {
results = context.search(basedn, generateSearchFilter(username, domainId), searchControls);

@ -54,9 +54,8 @@ public class ADLdapUserManagerImplTest {
String [] groups = {"dev", "dev-hyd"};
for (String group: groups) {
String result = adLdapUserManager.generateADGroupSearchFilter(group, 1L);
assertTrue(("(&(objectClass=user)(memberOf:1.2.840.113556.1.4.1941:=CN=" + group + ",DC=cloud,DC=citrix,DC=com))").equals(result));
assertTrue(("(&(&(objectCategory=person)(objectClass=user))(memberOf:1.2.840.113556.1.4.1941:=CN=" + group + ",DC=cloud,DC=citrix,DC=com))").equals(result));
}

}

@Test
@ -69,7 +68,7 @@ public class ADLdapUserManagerImplTest {
String [] groups = {"dev", "dev-hyd"};
for (String group: groups) {
String result = adLdapUserManager.generateADGroupSearchFilter(group, 1L);
assertTrue(("(&(objectClass=user)(memberOf=CN=" + group + ",DC=cloud,DC=citrix,DC=com))").equals(result));
assertTrue(("(&(&(objectCategory=person)(objectClass=user))(memberOf=CN=" + group + ",DC=cloud,DC=citrix,DC=com))").equals(result));
}
}

4
pom.xml
@ -1038,15 +1038,19 @@
<exclude>dist/console-proxy/js/jquery.js</exclude>
<exclude>engine/schema/dist/**</exclude>
<exclude>plugins/hypervisors/hyperv/conf/agent.properties</exclude>
<exclude>plugins/hypervisors/hyperv/conf/uefi.properties</exclude>
<exclude>plugins/hypervisors/hyperv/DotNet/ServerResource/**</exclude>
<exclude>scripts/installer/windows/acs_license.rtf</exclude>
<exclude>scripts/vm/systemvm/id_rsa.cloud</exclude>
<exclude>services/console-proxy/server/conf/agent.properties</exclude>
<exclude>services/console-proxy/server/conf/uefi.properties</exclude>
<exclude>services/console-proxy/server/conf/environment.properties</exclude>
<exclude>services/console-proxy/server/js/jquery.js</exclude>
<exclude>services/secondary-storage/conf/agent.properties</exclude>
<exclude>services/secondary-storage/conf/uefi.properties</exclude>
<exclude>services/secondary-storage/conf/environment.properties</exclude>
<exclude>systemvm/agent/conf/agent.properties</exclude>
<exclude>systemvm/agent/conf/uefi.properties</exclude>
<exclude>systemvm/agent/conf/environment.properties</exclude>
<exclude>systemvm/agent/js/jquery.js</exclude>
<exclude>systemvm/agent/js/jquery.flot.navigate.js</exclude>

@ -4302,6 +4302,9 @@ public class ApiResponseHelper implements ResponseGenerator {
if (volume != null) {
builder.append("for ").append(volume.getName()).append(" (").append(volume.getUuid()).append(")");
}
if (vmInstance != null) {
builder.append(" attached to VM ").append(vmInstance.getHostName()).append(" (").append(vmInstance.getUuid()).append(")");
}
if (diskOff != null) {
builder.append(" with disk offering ").append(diskOff.getName()).append(" (").append(diskOff.getUuid()).append(")");
}

@ -39,6 +39,7 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Arrays;
import java.util.Map;
import java.util.Set;
import java.util.TimeZone;
@ -251,6 +252,12 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer
@Inject
private MessageBus messageBus;

private static final Set<String> sensitiveFields = new HashSet<>(Arrays.asList(
"password", "secretkey", "apikey", "token",
"sessionkey", "accesskey", "signature",
"authorization", "credential", "secret"
));

private static final ConfigKey<Integer> IntegrationAPIPort = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED
, Integer.class
, "integration.api.port"
@ -624,10 +631,23 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer
logger.error("invalid request, no command sent");
if (logger.isTraceEnabled()) {
logger.trace("dumping request parameters");
for (final Object key : params.keySet()) {
final String keyStr = (String)key;
final String[] value = (String[])params.get(key);
logger.trace(" key: " + keyStr + ", value: " + ((value == null) ? "'null'" : value[0]));

for (final Object key : params.keySet()) {
final String keyStr = (String) key;
final String[] value = (String[]) params.get(key);

String lowerKeyStr = keyStr.toLowerCase();
boolean isSensitive = sensitiveFields.stream()
.anyMatch(lowerKeyStr::contains);

String logValue;
if (isSensitive) {
logValue = "******"; // mask sensitive values
} else {
logValue = (value == null) ? "'null'" : value[0];
}

logger.trace(" key: " + keyStr + ", value: " + logValue);
}
}
throw new ServerApiException(ApiErrorCode.UNSUPPORTED_ACTION_ERROR, "Invalid request, no command sent");
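
For context, a self-contained sketch of the masking rule added above. The field set and the contains() match mirror the hunk; the request keys in main() are hypothetical:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class SensitiveParamMaskSketch {
    private static final Set<String> SENSITIVE_FIELDS = new HashSet<>(Arrays.asList(
            "password", "secretkey", "apikey", "token",
            "sessionkey", "accesskey", "signature",
            "authorization", "credential", "secret"));

    // A parameter is masked when its lower-cased name contains any of the sensitive tokens.
    static String loggableValue(String key, String value) {
        String lowerKey = key.toLowerCase();
        boolean isSensitive = SENSITIVE_FIELDS.stream().anyMatch(lowerKey::contains);
        return isSensitive ? "******" : value;
    }

    public static void main(String[] args) {
        System.out.println(loggableValue("userApiKey", "abc123")); // ****** ("userapikey" contains "apikey")
        System.out.println(loggableValue("zoneid", "1"));          // 1
    }
}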

@ -21,6 +21,8 @@ import com.cloud.agent.api.to.DataObjectType;
import com.cloud.agent.api.to.NicTO;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.configuration.ConfigurationManagerImpl;
import com.cloud.event.EventTypes;
import com.cloud.event.UsageEventUtils;
import com.cloud.host.HostVO;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao;
@ -372,6 +374,8 @@ public class KVMGuru extends HypervisorGuruBase implements HypervisorGuru {
_volumeDao.update(volume.getId(), volume);
_volumeDao.attachVolume(volume.getId(), vm.getId(), getNextAvailableDeviceId(vmVolumes));
}
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_ATTACH, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(),
volume.getDiskOfferingId(), volume.getTemplateId(), volume.getSize(), Volume.class.getName(), volume.getUuid(), vm.getId(), volume.isDisplay());
}
} catch (Exception e) {
throw new RuntimeException("Could not restore VM " + vm.getName() + " due to : " + e.getMessage());
@ -389,6 +393,8 @@ public class KVMGuru extends HypervisorGuruBase implements HypervisorGuru {
_volumeDao.attachVolume(restoredVolume.getId(), vm.getId(), getNextAvailableDeviceId(vmVolumes));
restoredVolume.setState(Volume.State.Ready);
_volumeDao.update(restoredVolume.getId(), restoredVolume);
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_ATTACH, restoredVolume.getAccountId(), restoredVolume.getDataCenterId(), restoredVolume.getId(), restoredVolume.getName(),
restoredVolume.getDiskOfferingId(), restoredVolume.getTemplateId(), restoredVolume.getSize(), Volume.class.getName(), restoredVolume.getUuid(), vm.getId(), restoredVolume.isDisplay());
return true;
} catch (Exception e) {
restoredVolume.setDisplay(false);

@ -823,6 +823,7 @@ import com.cloud.user.dao.AccountDao;
import com.cloud.user.dao.SSHKeyPairDao;
import com.cloud.user.dao.UserDao;
import com.cloud.user.dao.UserDataDao;
import com.cloud.utils.EnumUtils;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.Pair;
import com.cloud.utils.PasswordGenerator;
@ -1410,7 +1411,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
if (vmInstanceDetailVO != null &&
(ApiConstants.BootMode.LEGACY.toString().equalsIgnoreCase(vmInstanceDetailVO.getValue()) ||
ApiConstants.BootMode.SECURE.toString().equalsIgnoreCase(vmInstanceDetailVO.getValue()))) {
logger.info(" Live Migration of UEFI enabled VM : " + vm.getInstanceName() + " is not supported");
logger.debug("{} VM is UEFI enabled, Checking for other UEFI enabled hosts as it can be live migrated to UEFI enabled host only.", vm.getInstanceName());
if (CollectionUtils.isEmpty(filteredHosts)) {
filteredHosts = new ArrayList<>(allHosts);
}
@ -1420,6 +1421,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
return new Pair<>(false, null);
}
filteredHosts.removeIf(host -> !uefiEnabledHosts.contains(host.getId()));
if (filteredHosts.isEmpty()) {
logger.warn("No UEFI enabled hosts are available for the live migration of VM {}", vm.getInstanceName());
}
return new Pair<>(!filteredHosts.isEmpty(), filteredHosts);
}
return new Pair<>(true, filteredHosts);
@ -2416,6 +2420,22 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
return new Pair<>(result.first(), result.second());
}

protected List<IpAddress.State> getStatesForIpAddressSearch(final ListPublicIpAddressesCmd cmd) {
final String statesStr = cmd.getState();
final List<IpAddress.State> states = new ArrayList<>();
if (StringUtils.isBlank(statesStr)) {
return states;
}
for (String s : StringUtils.split(statesStr, ",")) {
IpAddress.State state = EnumUtils.getEnumIgnoreCase(IpAddress.State.class, s.trim());
if (state == null) {
throw new InvalidParameterValueException("Invalid state: " + s);
}
states.add(state);
}
return states;
}
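
The new helper turns the comma-separated state parameter into a typed list: blank input yields an empty list, tokens are matched case-insensitively, and an unknown token fails fast. A runnable sketch of the same parsing, substituting a stand-in enum and IllegalArgumentException for CloudStack's IpAddress.State and InvalidParameterValueException:

import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang3.EnumUtils;
import org.apache.commons.lang3.StringUtils;

public class StateListParseSketch {
    enum State { Allocated, Free, Reserved } // stand-in subset for illustration

    static List<State> parse(String statesStr) {
        List<State> states = new ArrayList<>();
        if (StringUtils.isBlank(statesStr)) {
            return states; // no state filter requested
        }
        for (String s : StringUtils.split(statesStr, ",")) {
            // case-insensitive match of each comma-separated token
            State state = EnumUtils.getEnumIgnoreCase(State.class, s.trim());
            if (state == null) {
                throw new IllegalArgumentException("Invalid state: " + s);
            }
            states.add(state);
        }
        return states;
    }

    public static void main(String[] args) {
        System.out.println(parse("free, Reserved")); // [Free, Reserved]
    }
}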
|
||||
|
||||
@Override
|
||||
public Pair<List<? extends IpAddress>, Integer> searchForIPAddresses(final ListPublicIpAddressesCmd cmd) {
|
||||
final Long associatedNetworkId = cmd.getAssociatedNetworkId();
|
||||
@ -2426,20 +2446,20 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
||||
final Long networkId = cmd.getNetworkId();
|
||||
final Long vpcId = cmd.getVpcId();
|
||||
|
||||
final String state = cmd.getState();
|
||||
final List<IpAddress.State> states = getStatesForIpAddressSearch(cmd);
|
||||
Boolean isAllocated = cmd.isAllocatedOnly();
|
||||
if (isAllocated == null) {
|
||||
if (state != null && (state.equalsIgnoreCase(IpAddress.State.Free.name()) || state.equalsIgnoreCase(IpAddress.State.Reserved.name()))) {
|
||||
if (states.contains(IpAddress.State.Free) || states.contains(IpAddress.State.Reserved)) {
|
||||
isAllocated = Boolean.FALSE;
|
||||
} else {
|
||||
isAllocated = Boolean.TRUE; // default
|
||||
}
|
||||
} else {
|
||||
if (state != null && (state.equalsIgnoreCase(IpAddress.State.Free.name()) || state.equalsIgnoreCase(IpAddress.State.Reserved.name()))) {
|
||||
if (states.contains(IpAddress.State.Free) || states.contains(IpAddress.State.Reserved)) {
|
||||
if (isAllocated) {
|
||||
throw new InvalidParameterValueException("Conflict: allocatedonly is true but state is Free");
|
||||
}
|
||||
} else if (state != null && state.equalsIgnoreCase(IpAddress.State.Allocated.name())) {
|
||||
} else if (states.contains(IpAddress.State.Allocated)) {
|
||||
isAllocated = Boolean.TRUE;
|
||||
}
|
||||
}
|
||||
@ -2518,10 +2538,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
||||
Boolean isRecursive = cmd.isRecursive();
|
||||
final List<Long> permittedAccounts = new ArrayList<>();
|
||||
ListProjectResourcesCriteria listProjectResourcesCriteria = null;
|
||||
boolean isAllocatedOrReserved = false;
|
||||
if (isAllocated || IpAddress.State.Reserved.name().equalsIgnoreCase(state)) {
|
||||
isAllocatedOrReserved = true;
|
||||
}
|
||||
boolean isAllocatedOrReserved = isAllocated ||
|
||||
(states.size() == 1 && IpAddress.State.Reserved.equals(states.get(0)));
|
||||
if (isAllocatedOrReserved || (vlanType == VlanType.VirtualNetwork && (caller.getType() != Account.Type.ADMIN || cmd.getDomainId() != null))) {
|
||||
final Ternary<Long, Boolean, ListProjectResourcesCriteria> domainIdRecursiveListProject = new Ternary<>(cmd.getDomainId(), cmd.isRecursive(),
|
||||
null);
|
||||
@ -2535,7 +2553,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
||||
buildParameters(sb, cmd, vlanType == VlanType.VirtualNetwork ? true : isAllocated);
|
||||
|
||||
SearchCriteria<IPAddressVO> sc = sb.create();
|
||||
setParameters(sc, cmd, vlanType, isAllocated);
|
||||
setParameters(sc, cmd, vlanType, isAllocated, states);
|
||||
|
||||
if (isAllocatedOrReserved || (vlanType == VlanType.VirtualNetwork && (caller.getType() != Account.Type.ADMIN || cmd.getDomainId() != null))) {
|
||||
_accountMgr.buildACLSearchCriteria(sc, domainId, isRecursive, permittedAccounts, listProjectResourcesCriteria);
|
||||
@ -2603,7 +2621,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
||||
buildParameters(searchBuilder, cmd, false);
|
||||
|
||||
SearchCriteria<IPAddressVO> searchCriteria = searchBuilder.create();
|
||||
setParameters(searchCriteria, cmd, vlanType, false);
|
||||
setParameters(searchCriteria, cmd, vlanType, false, states);
|
||||
searchCriteria.setParameters("state", IpAddress.State.Free.name());
|
||||
addrs.addAll(_publicIpAddressDao.search(searchCriteria, searchFilter)); // Free IPs on shared network
|
||||
}
|
||||
@ -2616,7 +2634,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
||||
sb2.and("quarantinedPublicIpsIdsNIN", sb2.entity().getId(), SearchCriteria.Op.NIN);
|
||||
|
||||
SearchCriteria<IPAddressVO> sc2 = sb2.create();
|
||||
setParameters(sc2, cmd, vlanType, isAllocated);
|
||||
setParameters(sc2, cmd, vlanType, isAllocated, states);
|
||||
sc2.setParameters("ids", freeAddrIds.toArray());
|
||||
_publicIpAddressDao.buildQuarantineSearchCriteria(sc2);
|
||||
addrs.addAll(_publicIpAddressDao.search(sc2, searchFilter)); // Allocated + Free
|
||||
@ -2646,7 +2664,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
||||
sb.and("isSourceNat", sb.entity().isSourceNat(), SearchCriteria.Op.EQ);
|
||||
sb.and("isStaticNat", sb.entity().isOneToOneNat(), SearchCriteria.Op.EQ);
|
||||
sb.and("vpcId", sb.entity().getVpcId(), SearchCriteria.Op.EQ);
|
||||
sb.and("state", sb.entity().getState(), SearchCriteria.Op.EQ);
|
||||
sb.and("state", sb.entity().getState(), SearchCriteria.Op.IN);
|
||||
sb.and("display", sb.entity().isDisplay(), SearchCriteria.Op.EQ);
|
||||
sb.and(FOR_SYSTEMVMS, sb.entity().isForSystemVms(), SearchCriteria.Op.EQ);
|
||||
|
||||
@ -2689,7 +2707,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
||||
}
|
||||
}
|
||||
|
||||
protected void setParameters(SearchCriteria<IPAddressVO> sc, final ListPublicIpAddressesCmd cmd, VlanType vlanType, Boolean isAllocated) {
|
||||
protected void setParameters(SearchCriteria<IPAddressVO> sc, final ListPublicIpAddressesCmd cmd, VlanType vlanType,
|
||||
Boolean isAllocated, List<IpAddress.State> states) {
|
||||
final Object keyword = cmd.getKeyword();
|
||||
final Long physicalNetworkId = cmd.getPhysicalNetworkId();
|
||||
final Long sourceNetworkId = cmd.getNetworkId();
|
||||
@ -2700,7 +2719,6 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
||||
final Boolean sourceNat = cmd.isSourceNat();
|
||||
final Boolean staticNat = cmd.isStaticNat();
|
||||
final Boolean forDisplay = cmd.getDisplay();
|
||||
final String state = cmd.getState();
|
||||
final Boolean forSystemVms = cmd.getForSystemVMs();
|
||||
final boolean forProvider = cmd.isForProvider();
|
||||
final Map<String, String> tags = cmd.getTags();
|
||||
@ -2757,13 +2775,14 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
||||
sc.setParameters("display", forDisplay);
|
||||
}
|
||||
|
||||
if (state != null) {
|
||||
sc.setParameters("state", state);
|
||||
if (CollectionUtils.isNotEmpty(states)) {
|
||||
sc.setParameters("state", states.toArray());
|
||||
} else if (isAllocated != null && isAllocated) {
|
||||
sc.setParameters("state", IpAddress.State.Allocated);
|
||||
}
|
||||
|
||||
if (IpAddressManagerImpl.getSystemvmpublicipreservationmodestrictness().value() && IpAddress.State.Free.name().equalsIgnoreCase(state)) {
|
||||
if (IpAddressManagerImpl.getSystemvmpublicipreservationmodestrictness().value() &&
|
||||
states.contains(IpAddress.State.Free)) {
|
||||
sc.setParameters(FOR_SYSTEMVMS, false);
|
||||
} else {
|
||||
sc.setParameters(FOR_SYSTEMVMS, forSystemVms);
|
||||
|
||||
@ -1004,7 +1004,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
if (snapshotId == null && displayVolume) {
|
||||
// for volume created from snapshot, create usage event after volume creation
|
||||
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), diskOfferingId, null, size,
|
||||
Volume.class.getName(), volume.getUuid(), displayVolume);
|
||||
Volume.class.getName(), volume.getUuid(), volume.getInstanceId(), displayVolume);
|
||||
}
|
||||
|
||||
if (volume != null && details != null) {
|
||||
@ -1106,7 +1106,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
createdVolume = _volumeMgr.createVolumeFromSnapshot(volume, snapshot, vm);
|
||||
VolumeVO volumeVo = _volsDao.findById(createdVolume.getId());
|
||||
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, createdVolume.getAccountId(), createdVolume.getDataCenterId(), createdVolume.getId(), createdVolume.getName(),
|
||||
createdVolume.getDiskOfferingId(), null, createdVolume.getSize(), Volume.class.getName(), createdVolume.getUuid(), volumeVo.isDisplayVolume());
|
||||
createdVolume.getDiskOfferingId(), null, createdVolume.getSize(), Volume.class.getName(), createdVolume.getUuid(), volume.getInstanceId(), volumeVo.isDisplayVolume());
|
||||
|
||||
return volumeVo;
|
||||
}
|
||||
@ -1578,6 +1578,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
}
|
||||
}
|
||||
|
||||
volume = _volsDao.findById(volumeId);
|
||||
if (newDiskOfferingId != null) {
|
||||
volume.setDiskOfferingId(newDiskOfferingId);
|
||||
_volumeMgr.saveVolumeDetails(newDiskOfferingId, volume.getId());
|
||||
@ -1592,7 +1593,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
}
|
||||
|
||||
// Update size if volume has same size as before, else it is already updated
|
||||
volume = _volsDao.findById(volumeId);
|
||||
if (currentSize == volume.getSize() && currentSize != newSize) {
|
||||
volume.setSize(newSize);
|
||||
} else if (volume.getSize() != newSize) {
|
||||
@ -1903,7 +1903,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
}
|
||||
UsageEventUtils
|
||||
.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), offeringId,
|
||||
volume.getTemplateId(), volume.getSize(), Volume.class.getName(), volume.getUuid(), volume.isDisplay());
|
||||
volume.getTemplateId(), volume.getSize(), Volume.class.getName(), volume.getUuid(), volume.getInstanceId(), volume.isDisplay());
|
||||
|
||||
logger.debug("Volume [{}] has been successfully recovered, thus a new usage event {} has been published.", volume, EventTypes.EVENT_VOLUME_CREATE);
|
||||
}
|
||||
@ -2998,7 +2998,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
if (displayVolume) {
|
||||
// flag turned 1 equivalent to freshly created volume
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), volume.getDiskOfferingId(),
volume.getTemplateId(), volume.getSize(), Volume.class.getName(), volume.getUuid());
volume.getTemplateId(), volume.getSize(), Volume.class.getName(), volume.getUuid(), volume.getInstanceId(), displayVolume);
} else {
// flag turned 0 equivalent to deleting a volume
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_DELETE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), Volume.class.getName(),

@@ -3259,6 +3259,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
handleTargetsForVMware(hostId, volumePool.getHostAddress(), volumePool.getPort(), volume.get_iScsiName());
}

UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_DETACH, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(),
volume.getDiskOfferingId(), null, volume.getSize(), Volume.class.getName(), volume.getUuid(), null, volume.isDisplay());
return _volsDao.findById(volumeId);
} else {

@@ -4339,7 +4341,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
diskOfferingVO);
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(),
volume.getDiskOfferingId(), volume.getTemplateId(), volume.getSize(), Volume.class.getName(),
volume.getUuid(), volume.isDisplayVolume());
volume.getUuid(), volume.getInstanceId(), volume.isDisplayVolume());

volService.moveVolumeOnSecondaryStorageToAnotherAccount(volume, oldAccount, newAccount);
}

@@ -4863,6 +4865,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
if (attached) {
ev = Volume.Event.OperationSucceeded;
logger.debug("Volume: {} successfully attached to VM: {}", volInfo.getVolume(), volInfo.getAttachedVM());
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_ATTACH, volumeToAttach.getAccountId(), volumeToAttach.getDataCenterId(), volumeToAttach.getId(), volumeToAttach.getName(),
volumeToAttach.getDiskOfferingId(), volumeToAttach.getTemplateId(), volumeToAttach.getSize(), Volume.class.getName(), volumeToAttach.getUuid(), vm.getId(), volumeToAttach.isDisplay());

provideVMInfo(dataStore, vm.getId(), volInfo.getId());
} else {
logger.debug("Volume: {} failed to attach to VM: {}", volInfo.getVolume(), volInfo.getAttachedVM());

@@ -81,7 +81,7 @@ public class VolumeStateListener implements StateListener<State, Event, Volume>
// For the Resize Volume Event, this publishes an event with an incorrect disk offering ID, so do nothing for now
} else {
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, vol.getAccountId(), vol.getDataCenterId(), vol.getId(), vol.getName(), vol.getDiskOfferingId(), null, vol.getSize(),
Volume.class.getName(), vol.getUuid(), vol.isDisplayVolume());
Volume.class.getName(), vol.getUuid(), instanceId, vol.isDisplayVolume());
}
} else if (transition.getToState() == State.Destroy && vol.getVolumeType() != Volume.Type.ROOT) { //Do not Publish Usage Event for ROOT Disk as it would have been published already while destroying a VM
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_DELETE, vol.getAccountId(), vol.getDataCenterId(), vol.getId(), vol.getName(),
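
The hunks above all migrate callers to a publishUsageEvent overload that also carries the id of the VM the volume is attached to (passed as null for unattached volumes, as in the DETACH call). A minimal sketch of the assumed overload shape; the real signature lives in UsageEventUtils and is not shown in this diff, so parameter names here are illustrative:

// Sketch only: same parameters as the pre-existing overload, plus a nullable
// virtualMachineId inserted before the display flag, mirroring the call sites above.
public static void publishUsageEvent(String eventType, long accountId, long zoneId,
        long resourceId, String resourceName, Long offeringId, Long templateId, Long size,
        String entityType, String entityUuid, Long virtualMachineId, boolean displayResource) {
    // delegates to the existing event machinery, persisting virtualMachineId with the event
}
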
@@ -2408,6 +2408,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
if (Volume.State.Destroy.equals(volume.getState())) {
_volumeService.recoverVolume(volume.getId());
_volsDao.attachVolume(volume.getId(), vmId, ROOT_DEVICE_ID);
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_ATTACH, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(),
volume.getDiskOfferingId(), volume.getTemplateId(), volume.getSize(), Volume.class.getName(), volume.getUuid(), vmId, volume.isDisplay());
} else {
_volumeService.publishVolumeCreationUsageEvent(volume);
}

@@ -8156,7 +8158,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir

logger.trace("Generating a create volume event for volume [{}].", volume);
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(),
volume.getDiskOfferingId(), volume.getTemplateId(), volume.getSize(), Volume.class.getName(), volume.getUuid(), volume.isDisplayVolume());
volume.getDiskOfferingId(), volume.getTemplateId(), volume.getSize(), Volume.class.getName(), volume.getUuid(), volume.getInstanceId(), volume.isDisplayVolume());
}
}

@@ -8959,6 +8961,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
handleManagedStorage(vm, root);

_volsDao.attachVolume(newVol.getId(), vmId, newVol.getDeviceId());
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_ATTACH, newVol.getAccountId(), newVol.getDataCenterId(), newVol.getId(), newVol.getName(),
newVol.getDiskOfferingId(), newVol.getTemplateId(), newVol.getSize(), Volume.class.getName(), newVol.getUuid(), vmId, newVol.isDisplay());

// Detach, destroy and create the usage event for the old root volume.
_volsDao.detachVolume(root.getId());
@@ -26,6 +26,7 @@ import static org.mockito.Mockito.when;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import org.apache.cloudstack.annotation.dao.AnnotationDao;

@@ -258,14 +259,14 @@ public class ManagementServerImplTest {
Mockito.when(cmd.getId()).thenReturn(null);
Mockito.when(cmd.isSourceNat()).thenReturn(null);
Mockito.when(cmd.isStaticNat()).thenReturn(null);
Mockito.when(cmd.getState()).thenReturn(IpAddress.State.Free.name());
Mockito.when(cmd.getTags()).thenReturn(null);
spy.setParameters(sc, cmd, VlanType.VirtualNetwork, Boolean.FALSE);
List<IpAddress.State> states = Collections.singletonList(IpAddress.State.Free);
spy.setParameters(sc, cmd, VlanType.VirtualNetwork, Boolean.FALSE, states);

Mockito.verify(sc, Mockito.times(1)).setJoinParameters("vlanSearch", "vlanType", VlanType.VirtualNetwork);
Mockito.verify(sc, Mockito.times(1)).setParameters("display", false);
Mockito.verify(sc, Mockito.times(1)).setParameters("sourceNetworkId", 10L);
Mockito.verify(sc, Mockito.times(1)).setParameters("state", "Free");
Mockito.verify(sc, Mockito.times(1)).setParameters("state", states.toArray());
Mockito.verify(sc, Mockito.times(1)).setParameters("forsystemvms", false);
}

@@ -281,14 +282,14 @@ public class ManagementServerImplTest {
Mockito.when(cmd.getId()).thenReturn(null);
Mockito.when(cmd.isSourceNat()).thenReturn(null);
Mockito.when(cmd.isStaticNat()).thenReturn(null);
Mockito.when(cmd.getState()).thenReturn(IpAddress.State.Free.name());
Mockito.when(cmd.getTags()).thenReturn(null);
spy.setParameters(sc, cmd, VlanType.VirtualNetwork, Boolean.FALSE);
List<IpAddress.State> states = Collections.singletonList(IpAddress.State.Free);
spy.setParameters(sc, cmd, VlanType.VirtualNetwork, Boolean.FALSE, states);

Mockito.verify(sc, Mockito.times(1)).setJoinParameters("vlanSearch", "vlanType", VlanType.VirtualNetwork);
Mockito.verify(sc, Mockito.times(1)).setParameters("display", false);
Mockito.verify(sc, Mockito.times(1)).setParameters("sourceNetworkId", 10L);
Mockito.verify(sc, Mockito.times(1)).setParameters("state", "Free");
Mockito.verify(sc, Mockito.times(1)).setParameters("state", states.toArray());
Mockito.verify(sc, Mockito.times(1)).setParameters("forsystemvms", false);
}

@@ -304,13 +305,13 @@ public class ManagementServerImplTest {
Mockito.when(cmd.getId()).thenReturn(null);
Mockito.when(cmd.isSourceNat()).thenReturn(null);
Mockito.when(cmd.isStaticNat()).thenReturn(null);
Mockito.when(cmd.getState()).thenReturn(null);
Mockito.when(cmd.getTags()).thenReturn(null);
spy.setParameters(sc, cmd, VlanType.VirtualNetwork, Boolean.TRUE);
spy.setParameters(sc, cmd, VlanType.VirtualNetwork, Boolean.TRUE, Collections.emptyList());

Mockito.verify(sc, Mockito.times(1)).setJoinParameters("vlanSearch", "vlanType", VlanType.VirtualNetwork);
Mockito.verify(sc, Mockito.times(1)).setParameters("display", false);
Mockito.verify(sc, Mockito.times(1)).setParameters("sourceNetworkId", 10L);
Mockito.verify(sc, Mockito.times(1)).setParameters("state", IpAddress.State.Allocated);
Mockito.verify(sc, Mockito.times(1)).setParameters("forsystemvms", false);
}

@@ -326,13 +327,13 @@ public class ManagementServerImplTest {
Mockito.when(cmd.getId()).thenReturn(null);
Mockito.when(cmd.isSourceNat()).thenReturn(null);
Mockito.when(cmd.isStaticNat()).thenReturn(null);
Mockito.when(cmd.getState()).thenReturn(null);
Mockito.when(cmd.getTags()).thenReturn(null);
spy.setParameters(sc, cmd, VlanType.VirtualNetwork, Boolean.TRUE);
spy.setParameters(sc, cmd, VlanType.VirtualNetwork, Boolean.TRUE, Collections.emptyList());

Mockito.verify(sc, Mockito.times(1)).setJoinParameters("vlanSearch", "vlanType", VlanType.VirtualNetwork);
Mockito.verify(sc, Mockito.times(1)).setParameters("display", false);
Mockito.verify(sc, Mockito.times(1)).setParameters("sourceNetworkId", 10L);
Mockito.verify(sc, Mockito.times(1)).setParameters("state", IpAddress.State.Allocated);
Mockito.verify(sc, Mockito.times(1)).setParameters("forsystemvms", false);
}
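
The updated tests replace the single-state setParameters overload with one that accepts a list and verify setParameters("state", states.toArray()), which points at an IN match on state rather than the old EQ match. A hedged sketch of that wiring, with the DAO and entity names chosen for illustration rather than quoted from the patch:

// Assumed filter construction: an IN condition lets one query match several states.
SearchBuilder<IPAddressVO> sb = ipAddressDao.createSearchBuilder();
sb.and("state", sb.entity().getState(), SearchCriteria.Op.IN);
SearchCriteria<IPAddressVO> sc = sb.create();
if (states != null && !states.isEmpty()) {
    sc.setParameters("state", states.toArray()); // the exact call the tests verify
}
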
@@ -1033,4 +1034,49 @@ public class ManagementServerImplTest {
Assert.assertNotNull(spy.getExternalVmConsole(virtualMachine, host));
Mockito.verify(extensionManager).getInstanceConsole(virtualMachine, host);
}

@Test
public void getStatesForIpAddressSearchReturnsValidStates() {
ListPublicIpAddressesCmd cmd = Mockito.mock(ListPublicIpAddressesCmd.class);
Mockito.when(cmd.getState()).thenReturn("Allocated ,free");
List<IpAddress.State> result = spy.getStatesForIpAddressSearch(cmd);
Assert.assertEquals(2, result.size());
Assert.assertTrue(result.contains(IpAddress.State.Allocated));
Assert.assertTrue(result.contains(IpAddress.State.Free));
}

@Test
public void getStatesForIpAddressSearchReturnsEmptyListForNullState() {
ListPublicIpAddressesCmd cmd = Mockito.mock(ListPublicIpAddressesCmd.class);
Mockito.when(cmd.getState()).thenReturn(null);
List<IpAddress.State> result = spy.getStatesForIpAddressSearch(cmd);
Assert.assertTrue(result.isEmpty());
}

@Test
public void getStatesForIpAddressSearchReturnsEmptyListForBlankState() {
ListPublicIpAddressesCmd cmd = Mockito.mock(ListPublicIpAddressesCmd.class);
Mockito.when(cmd.getState()).thenReturn(" ");
List<IpAddress.State> result = spy.getStatesForIpAddressSearch(cmd);
Assert.assertTrue(result.isEmpty());
}

@Test(expected = InvalidParameterValueException.class)
public void getStatesForIpAddressSearchThrowsExceptionForInvalidState() {
ListPublicIpAddressesCmd cmd = Mockito.mock(ListPublicIpAddressesCmd.class);
Mockito.when(cmd.getState()).thenReturn("InvalidState");
spy.getStatesForIpAddressSearch(cmd);
}

@Test
public void getStatesForIpAddressSearchHandlesMixedValidAndInvalidStates() {
ListPublicIpAddressesCmd cmd = Mockito.mock(ListPublicIpAddressesCmd.class);
Mockito.when(cmd.getState()).thenReturn("Allocated,InvalidState");
try {
spy.getStatesForIpAddressSearch(cmd);
Assert.fail("Expected InvalidParameterValueException to be thrown");
} catch (InvalidParameterValueException e) {
Assert.assertEquals("Invalid state: InvalidState", e.getMessage());
}
}
}
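
Taken together, the five tests pin down getStatesForIpAddressSearch: null or blank input yields an empty list, entries are trimmed and matched case-insensitively, and any unknown entry aborts with "Invalid state: <value>". A reconstruction consistent with those assertions (the real method lives in ManagementServerImpl; this sketch assumes commons-lang3 StringUtils and the EnumUtils helper introduced later in this commit):

protected List<IpAddress.State> getStatesForIpAddressSearch(ListPublicIpAddressesCmd cmd) {
    List<IpAddress.State> states = new ArrayList<>();
    if (StringUtils.isBlank(cmd.getState())) {
        return states; // null or blank -> empty list, per the two empty-list tests
    }
    for (String stateStr : cmd.getState().split(",")) {
        // case-insensitive lookup, so "Allocated ,free" parses to [Allocated, Free]
        IpAddress.State state = EnumUtils.getEnumIgnoreCase(IpAddress.State.class, stateStr.trim());
        if (state == null) {
            throw new InvalidParameterValueException("Invalid state: " + stateStr.trim());
        }
        states.add(state);
    }
    return states;
}
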
@@ -1545,7 +1545,7 @@ public class VolumeApiServiceImplTest {
volumeApiServiceImpl.publishVolumeCreationUsageEvent(volumeVoMock);

usageEventUtilsMocked.verify(() -> UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volumeVoMock.getAccountId(), volumeVoMock.getDataCenterId(), volumeVoMock.getId(), volumeVoMock.getName(),
null, volumeVoMock.getTemplateId(), volumeVoMock.getSize(), Volume.class.getName(), volumeVoMock.getUuid(), volumeVoMock.isDisplay()));
null, volumeVoMock.getTemplateId(), volumeVoMock.getSize(), Volume.class.getName(), volumeVoMock.getUuid(), volumeVoMock.getInstanceId(), volumeVoMock.isDisplay()));
}
}

@@ -1558,7 +1558,7 @@ public class VolumeApiServiceImplTest {
volumeApiServiceImpl.publishVolumeCreationUsageEvent(volumeVoMock);

usageEventUtilsMocked.verify(() -> UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volumeVoMock.getAccountId(), volumeVoMock.getDataCenterId(), volumeVoMock.getId(), volumeVoMock.getName(),
null, volumeVoMock.getTemplateId(), volumeVoMock.getSize(), Volume.class.getName(), volumeVoMock.getUuid(), volumeVoMock.isDisplay()));
null, volumeVoMock.getTemplateId(), volumeVoMock.getSize(), Volume.class.getName(), volumeVoMock.getUuid(), volumeVoMock.getInstanceId(), volumeVoMock.isDisplay()));
}
}

@@ -1573,7 +1573,7 @@ public class VolumeApiServiceImplTest {
volumeApiServiceImpl.publishVolumeCreationUsageEvent(volumeVoMock);

usageEventUtilsMocked.verify(() -> UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volumeVoMock.getAccountId(), volumeVoMock.getDataCenterId(), volumeVoMock.getId(), volumeVoMock.getName(),
null, volumeVoMock.getTemplateId(), volumeVoMock.getSize(), Volume.class.getName(), volumeVoMock.getUuid(), volumeVoMock.isDisplay()));
null, volumeVoMock.getTemplateId(), volumeVoMock.getSize(), Volume.class.getName(), volumeVoMock.getUuid(), volumeVoMock.getInstanceId(), volumeVoMock.isDisplay()));
}
}

@@ -1589,7 +1589,7 @@ public class VolumeApiServiceImplTest {
volumeApiServiceImpl.publishVolumeCreationUsageEvent(volumeVoMock);

usageEventUtilsMocked.verify(() -> UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volumeVoMock.getAccountId(), volumeVoMock.getDataCenterId(), volumeVoMock.getId(), volumeVoMock.getName(),
offeringMockId, volumeVoMock.getTemplateId(), volumeVoMock.getSize(), Volume.class.getName(), volumeVoMock.getUuid(), volumeVoMock.isDisplay()));
offeringMockId, volumeVoMock.getTemplateId(), volumeVoMock.getSize(), Volume.class.getName(), volumeVoMock.getUuid(), volumeVoMock.getInstanceId(), volumeVoMock.isDisplay()));
}
}
@@ -1120,10 +1120,12 @@ public class UserVmManagerImplTest {
public void recoverRootVolumeTestDestroyState() {
Mockito.doReturn(Volume.State.Destroy).when(volumeVOMock).getState();

userVmManagerImpl.recoverRootVolume(volumeVOMock, vmId);
try (MockedStatic<UsageEventUtils> ignored = Mockito.mockStatic(UsageEventUtils.class)) {
userVmManagerImpl.recoverRootVolume(volumeVOMock, vmId);

Mockito.verify(volumeApiService).recoverVolume(volumeVOMock.getId());
Mockito.verify(volumeDaoMock).attachVolume(volumeVOMock.getId(), vmId, UserVmManagerImpl.ROOT_DEVICE_ID);
Mockito.verify(volumeApiService).recoverVolume(volumeVOMock.getId());
Mockito.verify(volumeDaoMock).attachVolume(volumeVOMock.getId(), vmId, UserVmManagerImpl.ROOT_DEVICE_ID);
}
}

@Test(expected = InvalidParameterValueException.class)
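
The test now wraps recoverRootVolume in a MockedStatic because the production hunk earlier makes it publish an EVENT_VOLUME_ATTACH usage event through the static UsageEventUtils; without the mock the unit test would hit the real event machinery. The generic pattern, shown here as a standard Mockito illustration rather than project code:

// Inside the try-with-resources block, static calls on UsageEventUtils are stubbed,
// so the new publishUsageEvent call becomes a no-op for the unit test.
try (MockedStatic<UsageEventUtils> mocked = Mockito.mockStatic(UsageEventUtils.class)) {
    // exercise code under test that calls UsageEventUtils.publishUsageEvent(...)
}
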
@@ -199,6 +199,10 @@ for full help
self.info("No mysql root user specified, will not create Cloud DB schema\n", None)
return

if self.areCloudDatabasesCreated() and not self.options.schemaonly and not self.options.forcerecreate:
self.errorAndExit("Aborting script as the databases (cloud, cloud_usage) already exist.\n" \
"Please use the --force-recreate parameter if you want to recreate the databases and schemas, or use --schema-only if you only want to create the schemas only.")

replacements = (
("CREATE USER cloud identified by 'cloud';",
"CREATE USER %s@`localhost` identified by '%s'; CREATE USER %s@`%%` identified by '%s';"%(

@@ -239,10 +243,6 @@ for full help
("DROP USER 'cloud'@'%' ;", "DO NULL;")
)

if self.areCloudDatabasesCreated() and not self.options.forcerecreate:
self.errorAndExit("Aborting script as the databases (cloud, cloud_usage) already exist.\n" \
"Please use the --force-recreate parameter if you want to recreate the schemas.")

scriptsToRun = ["create-database","create-schema", "create-database-premium","create-schema-premium"]
if self.options.schemaonly:
scriptsToRun = ["create-schema", "create-schema-premium"]

@@ -617,11 +617,11 @@ for example:
self.parser.add_option("-d", "--deploy-as", action="store", type="string", dest="rootcreds", default="",
help="Colon-separated user name and password of a MySQL user with administrative privileges")
self.parser.add_option("-s", "--schema-only", action="store_true", dest="schemaonly", default=False,
help="Creates the db schema without having to pass root credentials - " \
help="Creates the db schema only without having to pass root credentials - " \
"Please note: The databases (cloud, cloud_usage) and user (cloud) has to be configured " \
"manually prior to running this script when using this flag.")
self.parser.add_option("--force-recreate", action="store_true", dest="forcerecreate", default=False,
help="Force recreation of the existing DB schemas. This option is disabled by default." \
help="Force recreation of the existing DB databases and schemas. This option is disabled by default." \
"Please note: The databases (cloud, cloud_usage) and its tables data will be lost and recreated.")

self.parser.add_option("-a", "--auto", action="store", type="string", dest="serversetup", default="",
@@ -60,6 +60,7 @@
<include>log4j-cloud.xml</include>
<include>consoleproxy.properties</include>
<include>agent.properties</include>
<include>uefi.properties</include>
</includes>
</fileSet>
<fileSet>
@@ -3121,7 +3121,7 @@
"message.change.offering.confirm": "Please confirm that you wish to change the service offering of this virtual Instance.",
"message.change.offering.for.volume": "Successfully changed offering for the volume",
"message.change.offering.for.volume.failed": "Change offering for the volume failed",
"message.change.offering.processing": "Changing offering for the volume...",
"message.change.offering.for.volume.processing": "Changing offering for the volume...",
"message.change.password": "Please change your password.",
"message.change.scope.failed": "Scope change failed",
"message.change.scope.processing": "Scope change in progress",
@@ -71,9 +71,14 @@ export default {
if (this.$route.meta.name === 'iso') {
this.imageApi = 'listIsos'
}
setTimeout(() => {
this.fetchData()
}, 100)
this.fetchData()
},
watch: {
resource (newValue) {
if (newValue?.id) {
this.fetchData()
}
}
},
computed: {
allowed () {

@@ -82,23 +87,22 @@ export default {
}
},
methods: {
arrayHasItems (array) {
return array !== null && array !== undefined && Array.isArray(array) && array.length > 0
},
fetchData () {
this.fetchResourceData()
},
fetchResourceData () {
const params = {}
params.id = this.resource.id
params.templatefilter = 'executable'
params.listall = true
params.page = this.page
params.pagesize = this.pageSize
if (!this.resource || !this.resource.id) {
return
}
const params = {
id: this.resource.id,
templatefilter: 'executable',
listall: true
}

this.dataSource = []
this.itemCount = 0
this.fetchLoading = true
this.loading = true
this.zones = []
getAPI(this.imageApi, params).then(json => {
const imageResponse = json?.[this.imageApi.toLowerCase() + 'response']?.[this.$route.meta.name] || []

@@ -108,8 +112,8 @@ export default {
}))
}).catch(error => {
this.$notifyError(error)
this.loading = false
}).finally(() => {
this.loading = false
if (this.zones.length !== 0) {
this.$emit('update-zones', this.zones)
}

@@ -122,7 +126,8 @@ export default {
}
const zoneids = this.zones.map(z => z.id)
this.loading = true
getAPI('listZones', { showicon: true, ids: zoneids.join(',') }).then(json => {
const params = { showicon: true, ids: zoneids.join(',') }
getAPI('listZones', params).then(json => {
this.zones = json.listzonesresponse.zone || []
}).finally(() => {
this.loading = false
@@ -25,7 +25,8 @@
@search="handleSearch" />
<ConfigurationTable
:columns="columns"
:config="items" />
:config="items"
:resource="resource" />
</a-col>
</div>
</template>
@@ -43,6 +43,7 @@
- defaultOption (Object, optional): Preselected object to include initially
- showIcon (Boolean, optional): Whether to show icon for the options. Default is true
- defaultIcon (String, optional): Icon to be shown when there is no resource icon for the option. Default is 'cloud-outlined'
- autoSelectFirstOption (Boolean, optional): Whether to automatically select the first option when options are loaded. Default is false

Events:
- @change-option-value (Function): Emits the selected option value(s) when value(s) changes. Do not use @change as it will give warnings and may not work

@@ -81,7 +82,7 @@
<resource-icon v-if="option.icon && option.icon.base64image" :image="option.icon.base64image" size="1x" style="margin-right: 5px"/>
<render-icon v-else :icon="defaultIcon" style="margin-right: 5px" />
</span>
<span>{{ option[optionLabelKey] }}</span>
<span>{{ optionLabelFn ? optionLabelFn(option) : option[optionLabelKey] }}</span>
</span>
</a-select-option>
</a-select>

@@ -120,6 +121,10 @@ export default {
type: String,
default: 'name'
},
optionLabelFn: {
type: Function,
default: null
},
defaultOption: {
type: Object,
default: null

@@ -135,6 +140,10 @@ export default {
pageSize: {
type: Number,
default: null
},
autoSelectFirstOption: {
type: Boolean,
default: false
}
},
data () {

@@ -147,11 +156,12 @@ export default {
searchTimer: null,
scrollHandlerAttached: false,
preselectedOptionValue: null,
successiveFetches: 0
successiveFetches: 0,
canSelectFirstOption: false
}
},
created () {
this.addDefaultOptionIfNeeded(true)
this.addDefaultOptionIfNeeded()
},
mounted () {
this.preselectedOptionValue = this.$attrs.value

@@ -208,6 +218,7 @@ export default {
}).catch(error => {
this.$notifyError(error)
}).finally(() => {
this.canSelectFirstOption = true
if (this.successiveFetches === 0) {
this.loading = false
}

@@ -218,6 +229,12 @@ export default {
(Array.isArray(this.preselectedOptionValue) && this.preselectedOptionValue.length === 0) ||
this.successiveFetches >= this.maxSuccessiveFetches) {
this.resetPreselectedOptionValue()
if (!this.canSelectFirstOption && this.autoSelectFirstOption && this.options.length > 0) {
this.$nextTick(() => {
this.preselectedOptionValue = this.options[0][this.optionValueKey]
this.onChange(this.preselectedOptionValue)
})
}
return
}
const matchValue = Array.isArray(this.preselectedOptionValue) ? this.preselectedOptionValue[0] : this.preselectedOptionValue

@@ -239,6 +256,7 @@ export default {
},
addDefaultOptionIfNeeded () {
if (this.defaultOption) {
this.canSelectFirstOption = true
this.options.push(this.defaultOption)
}
},
@@ -29,7 +29,7 @@
</template>
<template #select-option="{ item }">
<span>
<resource-icon v-if="item.icon && zone1.icon.base64image" :image="item.icon.base64image" size="2x" style="margin-right: 5px"/>
<resource-icon v-if="item.icon && item.icon.base64image" :image="item.icon.base64image" size="2x" style="margin-right: 5px"/>
<global-outlined v-else style="margin-right: 5px" />
{{ item.name }}
</span>
@@ -148,20 +148,17 @@
<a-alert :message="$t('message.action.acquire.ip')" type="warning" />
<a-form layout="vertical" style="margin-top: 10px">
<a-form-item :label="$t('label.ipaddress')">
<a-select
<infinite-scroll-select
v-focus="true"
style="width: 100%;"
v-model:value="acquireIp"
showSearch
optionFilterProp="label"
:filterOption="(input, option) => {
return option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0
}" >
<a-select-option
v-for="ip in listPublicIpAddress"
:key="ip.ipaddress"
:label="ip.ipaddress + '(' + ip.state + ')'">{{ ip.ipaddress }} ({{ ip.state }})</a-select-option>
</a-select>
api="listPublicIpAddresses"
:apiParams="listApiParamsForAssociate"
resourceType="publicipaddress"
optionValueKey="ipaddress"
:optionLabelFn="ip => ip.ipaddress + ' (' + ip.state + ')'"
defaultIcon="environment-outlined"
:autoSelectFirstOption="true"
@change-option-value="(ip) => acquireIp = ip" />
</a-form-item>
<div :span="24" class="action-button">
<a-button @click="onCloseModal">{{ $t('label.cancel') }}</a-button>

@@ -212,13 +209,15 @@ import Status from '@/components/widgets/Status'
import TooltipButton from '@/components/widgets/TooltipButton'
import BulkActionView from '@/components/view/BulkActionView'
import eventBus from '@/config/eventBus'
import InfiniteScrollSelect from '@/components/widgets/InfiniteScrollSelect'

export default {
name: 'IpAddressesTab',
components: {
Status,
TooltipButton,
BulkActionView
BulkActionView,
InfiniteScrollSelect
},
props: {
resource: {

@@ -281,7 +280,6 @@ export default {
showAcquireIp: false,
acquireLoading: false,
acquireIp: null,
listPublicIpAddress: [],
changeSourceNat: false,
zoneExtNetProvider: ''
}

@@ -302,6 +300,26 @@ export default {
}
},
inject: ['parentFetchData'],
computed: {
listApiParams () {
const params = {
zoneid: this.resource.zoneid,
domainid: this.resource.domainid,
account: this.resource.account,
forvirtualnetwork: true,
allocatedonly: false
}
if (['nsx', 'netris'].includes(this.zoneExtNetProvider?.toLowerCase())) {
params.forprovider = true
}
return params
},
listApiParamsForAssociate () {
const params = this.listApiParams
params.state = 'Free,Reserved'
return params
}
},
methods: {
fetchData () {
const params = {

@@ -344,19 +362,9 @@ export default {
}).catch(reject)
})
},
fetchListPublicIpAddress () {
fetchListPublicIpAddress (state) {
return new Promise((resolve, reject) => {
const params = {
zoneid: this.resource.zoneid,
domainid: this.resource.domainid,
account: this.resource.account,
forvirtualnetwork: true,
allocatedonly: false
}
if (['nsx', 'netris'].includes(this.zoneExtNetProvider?.toLowerCase())) {
params.forprovider = true
}
getAPI('listPublicIpAddresses', params).then(json => {
getAPI('listPublicIpAddresses', this.listApiParams).then(json => {
const listPublicIps = json.listpublicipaddressesresponse.publicipaddress || []
resolve(listPublicIps)
}).catch(reject)

@@ -554,30 +562,6 @@ export default {
},
async onShowAcquireIp () {
this.showAcquireIp = true
this.acquireLoading = true
this.listPublicIpAddress = []

try {
const listPublicIpAddress = await this.fetchListPublicIpAddress()
listPublicIpAddress.forEach(item => {
if (item.state === 'Free' || item.state === 'Reserved') {
this.listPublicIpAddress.push({
ipaddress: item.ipaddress,
state: item.state
})
}
})
this.listPublicIpAddress.sort(function (a, b) {
if (a.ipaddress < b.ipaddress) { return -1 }
if (a.ipaddress > b.ipaddress) { return 1 }
return 0
})
this.acquireIp = this.listPublicIpAddress && this.listPublicIpAddress.length > 0 ? this.listPublicIpAddress[0].ipaddress : null
this.acquireLoading = false
} catch (e) {
this.acquireLoading = false
this.$notifyError(e)
}
},
onCloseModal () {
this.showAcquireIp = false
@@ -32,7 +32,7 @@
<b> {{record.displaytext }} </b> {{ ' (' + record.name + ')' }} <br/> {{ record.description }}
</template>
<template v-if="column.key === 'value'">
<ConfigurationValue :configrecord="record" />
<ConfigurationValue :configrecord="record" :resource="resource" />
</template>
</template>
</a-table>

@@ -85,6 +85,10 @@ export default {
pagesize: {
type: Number,
default: 20
},
resource: {
type: Object,
required: false
}
},
data () {
@@ -217,6 +217,10 @@ export default {
actions: {
type: Array,
default: () => []
},
resource: {
type: Object,
required: false
}
},
data () {

@@ -254,6 +258,12 @@ export default {
this.setConfigData()
},
watch: {
configrecord: {
handler () {
this.setConfigData()
},
deep: true
}
},
methods: {
setConfigData () {

@@ -280,6 +290,9 @@ export default {
name: configrecord.name,
value: newValue
}
if (this.scopeKey === 'domainid' && !params[this.scopeKey]) {
params[this.scopeKey] = this.resource?.id
}
postAPI('updateConfiguration', params).then(json => {
this.editableValue = this.getEditableValue(json.updateconfigurationresponse.configuration)
this.actualValue = this.editableValue

@@ -315,6 +328,9 @@ export default {
[this.scopeKey]: this.$route.params?.id,
name: configrecord.name
}
if (this.scopeKey === 'domainid' && !params[this.scopeKey]) {
params[this.scopeKey] = this.resource?.id
}
postAPI('resetConfiguration', params).then(json => {
this.editableValue = this.getEditableValue(json.resetconfigurationresponse.configuration)
this.actualValue = this.editableValue
@@ -1008,7 +1008,12 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna

private boolean isVolumeEvent(String eventType) {
return eventType != null &&
(eventType.equals(EventTypes.EVENT_VOLUME_CREATE) || eventType.equals(EventTypes.EVENT_VOLUME_DELETE) || eventType.equals(EventTypes.EVENT_VOLUME_RESIZE) || eventType.equals(EventTypes.EVENT_VOLUME_UPLOAD));
(eventType.equals(EventTypes.EVENT_VOLUME_CREATE) ||
eventType.equals(EventTypes.EVENT_VOLUME_DELETE) ||
eventType.equals(EventTypes.EVENT_VOLUME_RESIZE) ||
eventType.equals(EventTypes.EVENT_VOLUME_UPLOAD) ||
eventType.equals(EventTypes.EVENT_VOLUME_ATTACH) ||
eventType.equals(EventTypes.EVENT_VOLUME_DETACH));
}

private boolean isTemplateEvent(String eventType) {

@@ -1424,92 +1429,112 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna
}
}

private void deleteExistingSecondaryStorageUsageForVolume(long volId, long accountId, Date deletedDate) {
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(accountId, volId, StorageTypes.VOLUME);
for (UsageStorageVO storageVO : storageVOs) {
logger.debug("Setting the volume with id: {} to 'deleted' in the usage_storage table for account: {}.", volId, accountId);
storageVO.setDeleted(deletedDate);
_usageStorageDao.update(storageVO);
}
}

private void deleteExistingInstanceVolumeUsage(long volId, long accountId, Date deletedDate) {
List<UsageVolumeVO> volumesVOs = _usageVolumeDao.listByVolumeId(volId, accountId);
for (UsageVolumeVO volumesVO : volumesVOs) {
if (volumesVO.getVmId() != null) {
logger.debug("Setting the volume with id: {} for instance id: {} to 'deleted' in the usage_volume table for account {}.",
volumesVO.getVolumeId(), volumesVO.getVmId(), accountId);
volumesVO.setDeleted(deletedDate);
_usageVolumeDao.update(volumesVO.getId(), volumesVO);
}
}
}

private void deleteExistingVolumeUsage(long volId, long accountId, Date deletedDate) {
List<UsageVolumeVO> volumesVOs = _usageVolumeDao.listByVolumeId(volId, accountId);
for (UsageVolumeVO volumesVO : volumesVOs) {
logger.debug("Setting the volume with id: {} to 'deleted' in the usage_volume table for account: {}.", volId, accountId);
volumesVO.setDeleted(deletedDate);
_usageVolumeDao.update(volumesVO.getId(), volumesVO);
}
}

private void createVolumeHelperEvent(UsageEventVO event) {

long volId = event.getResourceId();
Account acct = _accountDao.findByIdIncludingRemoved(event.getAccountId());
List<UsageVolumeVO> volumesVOs;
UsageVolumeVO volumeVO;

if (EventTypes.EVENT_VOLUME_CREATE.equals(event.getType())) {
//For volumes which are 'attached' successfully, set the 'deleted' column in the usage_storage table,
switch (event.getType()) {
case EventTypes.EVENT_VOLUME_CREATE:
//For volumes which are 'attached' successfully from uploaded state, set the 'deleted' column in the usage_storage table,
//so that the secondary storage should stop accounting and only primary will be accounted.
SearchCriteria<UsageStorageVO> sc = _usageStorageDao.createSearchCriteria();
sc.addAnd("entityId", SearchCriteria.Op.EQ, volId);
sc.addAnd("storageType", SearchCriteria.Op.EQ, StorageTypes.VOLUME);
List<UsageStorageVO> volumesVOs = _usageStorageDao.search(sc, null);
if (volumesVOs != null) {
if (volumesVOs.size() == 1) {
logger.debug("Setting the volume with id: " + volId + " to 'deleted' in the usage_storage table.");
volumesVOs.get(0).setDeleted(event.getCreateDate());
_usageStorageDao.update(volumesVOs.get(0));
}
}
}
if (EventTypes.EVENT_VOLUME_CREATE.equals(event.getType()) || EventTypes.EVENT_VOLUME_RESIZE.equals(event.getType())) {
SearchCriteria<UsageVolumeVO> sc = _usageVolumeDao.createSearchCriteria();
sc.addAnd("accountId", SearchCriteria.Op.EQ, event.getAccountId());
sc.addAnd("volumeId", SearchCriteria.Op.EQ, volId);
sc.addAnd("deleted", SearchCriteria.Op.NULL);
List<UsageVolumeVO> volumesVOs = _usageVolumeDao.search(sc, null);
deleteExistingSecondaryStorageUsageForVolume(volId, event.getAccountId(), event.getCreateDate());

volumesVOs = _usageVolumeDao.listByVolumeId(volId, event.getAccountId());
if (volumesVOs.size() > 0) {
//This is a safeguard to avoid double counting of volumes.
logger.error("Found duplicate usage entry for volume: " + volId + " assigned to account: " + event.getAccountId() + "; marking as deleted...");
deleteExistingVolumeUsage(volId, event.getAccountId(), event.getCreateDate());
}
//an entry exists if it is a resize volume event. marking the existing deleted and creating a new one in the case of resize.
for (UsageVolumeVO volumesVO : volumesVOs) {
if (logger.isDebugEnabled()) {
logger.debug("deleting volume: " + volumesVO.getId() + " from account: " + volumesVO.getAccountId());
}
volumesVO.setDeleted(event.getCreateDate());
_usageVolumeDao.update(volumesVO);
}
if (logger.isDebugEnabled()) {
logger.debug("create volume with id : " + volId + " for account: " + event.getAccountId());
}
Account acct = _accountDao.findByIdIncludingRemoved(event.getAccountId());
UsageVolumeVO volumeVO = new UsageVolumeVO(volId, event.getZoneId(), event.getAccountId(), acct.getDomainId(), event.getOfferingId(), event.getTemplateId(), event.getSize(), event.getCreateDate(), null);

logger.debug("Creating a new entry in usage_volume for volume with id: {} for account: {}", volId, event.getAccountId());
volumeVO = new UsageVolumeVO(volId, event.getZoneId(), event.getAccountId(), acct.getDomainId(), event.getOfferingId(), event.getTemplateId(), null, event.getSize(), event.getCreateDate(), null);
_usageVolumeDao.persist(volumeVO);
} else if (EventTypes.EVENT_VOLUME_DELETE.equals(event.getType())) {
SearchCriteria<UsageVolumeVO> sc = _usageVolumeDao.createSearchCriteria();
sc.addAnd("accountId", SearchCriteria.Op.EQ, event.getAccountId());
sc.addAnd("volumeId", SearchCriteria.Op.EQ, volId);
sc.addAnd("deleted", SearchCriteria.Op.NULL);
List<UsageVolumeVO> volumesVOs = _usageVolumeDao.search(sc, null);
if (volumesVOs.size() > 1) {
logger.warn("More that one usage entry for volume: " + volId + " assigned to account: " + event.getAccountId() + "; marking them all as deleted...");

if (event.getVmId() != null) {
volumeVO = new UsageVolumeVO(volId, event.getZoneId(), event.getAccountId(), acct.getDomainId(), event.getOfferingId(), event.getTemplateId(), event.getVmId(), event.getSize(), event.getCreateDate(), null);
_usageVolumeDao.persist(volumeVO);
}
break;

case EventTypes.EVENT_VOLUME_RESIZE:
volumesVOs = _usageVolumeDao.listByVolumeId(volId, event.getAccountId());
for (UsageVolumeVO volumesVO : volumesVOs) {
if (logger.isDebugEnabled()) {
logger.debug("deleting volume: " + volumesVO.getId() + " from account: " + volumesVO.getAccountId());
}
volumesVO.setDeleted(event.getCreateDate()); // there really shouldn't be more than one
_usageVolumeDao.update(volumesVO);
}
} else if (EventTypes.EVENT_VOLUME_UPLOAD.equals(event.getType())) {
//For Upload event add an entry to the usage_storage table.
SearchCriteria<UsageStorageVO> sc = _usageStorageDao.createSearchCriteria();
sc.addAnd("accountId", SearchCriteria.Op.EQ, event.getAccountId());
sc.addAnd("entityId", SearchCriteria.Op.EQ, volId);
sc.addAnd("storageType", SearchCriteria.Op.EQ, StorageTypes.VOLUME);
sc.addAnd("deleted", SearchCriteria.Op.NULL);
List<UsageStorageVO> volumesVOs = _usageStorageDao.search(sc, null);

if (volumesVOs.size() > 0) {
//This is a safeguard to avoid double counting of volumes.
logger.error("Found duplicate usage entry for volume: " + volId + " assigned to account: " + event.getAccountId() + "; marking as deleted...");
}
for (UsageStorageVO volumesVO : volumesVOs) {
if (logger.isDebugEnabled()) {
logger.debug("deleting volume: " + volumesVO.getId() + " from account: " + volumesVO.getAccountId());
String delete_msg = String.format("Setting the volume with id: %s to 'deleted' in the usage_volume table for account: %s.", volId, event.getAccountId());
String create_msg = String.format("Creating a new entry in usage_volume for volume with id: %s after resize for account: %s", volId, event.getAccountId());
Long vmId = volumesVO.getVmId();
if (vmId != null) {
delete_msg = String.format("Setting the volume with id: %s for instance id: %s to 'deleted' in the usage_volume table for account: %s.",
volId, vmId, event.getAccountId());
create_msg = String.format("Creating a new entry in usage_volume for volume with id: %s and instance id: %s after resize for account: %s",
volId, vmId, event.getAccountId());
}
logger.debug(delete_msg);
volumesVO.setDeleted(event.getCreateDate());
_usageStorageDao.update(volumesVO);
}
_usageVolumeDao.update(volumesVO.getId(), volumesVO);

if (logger.isDebugEnabled()) {
logger.debug("create volume with id : " + volId + " for account: " + event.getAccountId());
logger.debug(create_msg);
volumeVO = new UsageVolumeVO(volId, event.getZoneId(), event.getAccountId(), acct.getDomainId(), event.getOfferingId(), event.getTemplateId(), vmId, event.getSize(), event.getCreateDate(), null);
_usageVolumeDao.persist(volumeVO);
}
Account acct = _accountDao.findByIdIncludingRemoved(event.getAccountId());
UsageStorageVO volumeVO = new UsageStorageVO(volId, event.getZoneId(), event.getAccountId(), acct.getDomainId(), StorageTypes.VOLUME, event.getTemplateId(), event.getSize(), event.getCreateDate(), null);
_usageStorageDao.persist(volumeVO);
break;

case EventTypes.EVENT_VOLUME_DELETE:
deleteExistingVolumeUsage(volId, event.getAccountId(), event.getCreateDate());
break;

case EventTypes.EVENT_VOLUME_ATTACH:
deleteExistingInstanceVolumeUsage(event.getResourceId(), event.getAccountId(), event.getCreateDate());

logger.debug("Creating a new entry in usage_volume for volume with id: {}, and instance id: {} for account: {}",
volId, event.getVmId(), event.getAccountId());
volumeVO = new UsageVolumeVO(volId, event.getZoneId(), event.getAccountId(), acct.getDomainId(), event.getOfferingId(), event.getTemplateId(), event.getVmId(), event.getSize(), event.getCreateDate(), null);
_usageVolumeDao.persist(volumeVO);
break;

case EventTypes.EVENT_VOLUME_DETACH:
deleteExistingInstanceVolumeUsage(event.getResourceId(), event.getAccountId(), event.getCreateDate());
break;

case EventTypes.EVENT_VOLUME_UPLOAD:
deleteExistingSecondaryStorageUsageForVolume(volId, event.getAccountId(), event.getCreateDate());

logger.debug("Creating a new entry in usage_storage for volume with id : {} for account: {}", volId, event.getAccountId());
UsageStorageVO storageVO = new UsageStorageVO(volId, event.getZoneId(), event.getAccountId(), acct.getDomainId(), StorageTypes.VOLUME, event.getTemplateId(), event.getSize(), event.getCreateDate(), null);
_usageStorageDao.persist(storageVO);
break;
}
}
|
||||
for (UsageVolumeVO usageVol : usageUsageVols) {
|
||||
long volId = usageVol.getVolumeId();
|
||||
Long doId = usageVol.getDiskOfferingId();
|
||||
Long vmId = usageVol.getVmId();
|
||||
long zoneId = usageVol.getZoneId();
|
||||
Long templateId = usageVol.getTemplateId();
|
||||
long size = usageVol.getSize();
|
||||
String key = volId + "-" + doId + "-" + size;
|
||||
String key = volId + "-" + doId + "-" + vmId + "-" + size;
|
||||
|
||||
diskOfferingMap.put(key, new VolInfo(volId, zoneId, doId, templateId, size));
|
||||
diskOfferingMap.put(key, new VolInfo(volId, zoneId, doId, templateId, size, vmId));
|
||||
|
||||
Date volCreateDate = usageVol.getCreated();
|
||||
Date volDeleteDate = usageVol.getDeleted();
|
||||
@ -110,7 +111,7 @@ public class VolumeUsageParser extends UsageParser {
|
||||
if (useTime > 0L) {
|
||||
VolInfo info = diskOfferingMap.get(volIdKey);
|
||||
createUsageRecord(UsageTypes.VOLUME, useTime, startDate, endDate, account, info.getVolumeId(), info.getZoneId(), info.getDiskOfferingId(),
|
||||
info.getTemplateId(), info.getSize());
|
||||
info.getTemplateId(), info.getVmId(), info.getSize());
|
||||
}
|
||||
}
|
||||
|
||||
@ -130,7 +131,7 @@ public class VolumeUsageParser extends UsageParser {
|
||||
}
|
||||
|
||||
private void createUsageRecord(int type, long runningTime, Date startDate, Date endDate, AccountVO account, long volId, long zoneId, Long doId,
|
||||
Long templateId, long size) {
|
||||
Long templateId, Long vmId, long size) {
|
||||
// Our smallest increment is hourly for now
|
||||
logger.debug("Total running time {} ms", runningTime);
|
||||
|
||||
@ -152,7 +153,11 @@ public class VolumeUsageParser extends UsageParser {
|
||||
usageDesc += " (DiskOffering: " + doId + ")";
|
||||
}
|
||||
|
||||
UsageVO usageRecord = new UsageVO(zoneId, account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", type, new Double(usage), null, null, doId, templateId, volId,
|
||||
if (vmId != null) {
|
||||
usageDesc += " (VM: " + vmId + ")";
|
||||
}
|
||||
|
||||
UsageVO usageRecord = new UsageVO(zoneId, account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", type, new Double(usage), vmId, null, doId, templateId, volId,
|
||||
size, startDate, endDate);
|
||||
usageDao.persist(usageRecord);
|
||||
}
|
||||
@ -163,13 +168,15 @@ public class VolumeUsageParser extends UsageParser {
|
||||
private Long diskOfferingId;
|
||||
private Long templateId;
|
||||
private long size;
|
||||
private Long vmId;
|
||||
|
||||
public VolInfo(long volId, long zoneId, Long diskOfferingId, Long templateId, long size) {
|
||||
public VolInfo(long volId, long zoneId, Long diskOfferingId, Long templateId, long size, Long vmId) {
|
||||
this.volId = volId;
|
||||
this.zoneId = zoneId;
|
||||
this.diskOfferingId = diskOfferingId;
|
||||
this.templateId = templateId;
|
||||
this.size = size;
|
||||
this.vmId = vmId;
|
||||
}
|
||||
|
||||
public long getZoneId() {
|
||||
@ -191,5 +198,9 @@ public class VolumeUsageParser extends UsageParser {
|
||||
public long getSize() {
|
||||
return size;
|
||||
}
|
||||
|
||||
public Long getVmId() {
|
||||
return vmId;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
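
With the vmId folded into the aggregation key, hours a volume spends attached to a given VM accumulate separately from hours it spends detached (where vmId is null). A small illustration of the key shapes, with made-up values:

// Mirrors the parser's key expression volId + "-" + doId + "-" + vmId + "-" + size.
Long vmId = 99L;
String detachedKey = 42L + "-" + 7L + "-" + null + "-" + 10737418240L; // "42-7-null-10737418240"
String attachedKey = 42L + "-" + 7L + "-" + vmId + "-" + 10737418240L; // "42-7-99-10737418240"
// Two distinct keys, hence two separate usage records for the same volume.
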
@@ -19,7 +19,7 @@

package com.cloud.utils;

public class EnumUtils {
public class EnumUtils extends org.apache.commons.lang3.EnumUtils {
public static String listValues(Enum<?>[] enums) {
StringBuilder b = new StringBuilder("[");
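
Extending org.apache.commons.lang3.EnumUtils makes the commons-lang3 helpers available through the existing com.cloud.utils.EnumUtils import; notably getEnumIgnoreCase, which fits the case-insensitive state parsing exercised by the tests earlier in this commit ("Allocated ,free"). Illustrative usage:

// Case-insensitive, null-safe enum lookup inherited from commons-lang3.
IpAddress.State free = EnumUtils.getEnumIgnoreCase(IpAddress.State.class, "free"); // State.Free
IpAddress.State none = EnumUtils.getEnumIgnoreCase(IpAddress.State.class, "nope"); // null
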
@@ -67,11 +67,13 @@ public final class ProcessRunner {
public ProcessRunner(ExecutorService executor) {
this.executor = executor;
commandLogReplacements.add(new Ternary<>("ipmitool", "-P\\s+\\S+", "-P *****"));
commandLogReplacements.add(new Ternary<>("ipmitool", "(?i)password\\s+\\S+\\s+\\S+", "password **** ****"));
}

/**
* Executes a process with provided list of commands with a max default timeout
* of 5 minutes
*
* @param commands list of string commands
* @return returns process result
*/

@@ -82,6 +84,7 @@ public final class ProcessRunner {
/**
* Executes a process with provided list of commands with a given timeout that is less
* than or equal to DEFAULT_MAX_TIMEOUT
*
* @param commands list of string commands
* @param timeOut timeout duration
* @return returns process result

@@ -109,14 +112,16 @@ public final class ProcessRunner {
}
});
try {
logger.debug("Waiting for a response from command [{}]. Defined timeout: [{}].", commandLog, timeOut.getStandardSeconds());
logger.debug("Waiting for a response from command [{}]. Defined timeout: [{}].", commandLog,
timeOut.getStandardSeconds());
retVal = processFuture.get(timeOut.getStandardSeconds(), TimeUnit.SECONDS);
} catch (ExecutionException e) {
logger.warn("Failed to complete the requested command [{}] due to execution error.", commands, e);
logger.warn("Failed to complete the requested command [{}] due to execution error.", commandLog, e);
retVal = -2;
stdError = e.getMessage();
} catch (TimeoutException e) {
logger.warn("Failed to complete the requested command [{}] within timeout. Defined timeout: [{}].", commandLog, timeOut.getStandardSeconds(), e);
logger.warn("Failed to complete the requested command [{}] within timeout. Defined timeout: [{}].",
commandLog, timeOut.getStandardSeconds(), e);
retVal = -1;
stdError = "Operation timed out, aborted.";
} finally {
@@ -60,4 +60,16 @@ public class ProcessRunnerTest {
Assert.assertTrue(log.contains(password));
Assert.assertEquals(1, countSubstringOccurrences(log, password));
}

@Test
public void testRemoveCommandSensitiveInfoForLoggingIpmiPasswordCommand() {
String userId = "3";
String newPassword = "Sup3rSecr3t!";
String command = String.format("/usr/bin/ipmitool user set password %s %s", userId, newPassword);
String log = processRunner.removeCommandSensitiveInfoForLogging(command);

Assert.assertFalse(log.contains(userId));
Assert.assertFalse(log.contains(newPassword));
Assert.assertTrue(log.contains("password **** ****"));
}
}
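
The new Ternary masks both tokens that follow "password" in an ipmitool command, so neither the user id nor the new password reaches the logs. The replacement can be checked in isolation with plain String.replaceAll, using the same regex the test exercises:

String cmd = "/usr/bin/ipmitool user set password 3 Sup3rSecr3t!";
String masked = cmd.replaceAll("(?i)password\\s+\\S+\\s+\\S+", "password **** ****");
// masked == "/usr/bin/ipmitool user set password **** ****"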