mirror of https://github.com/apache/cloudstack.git
Merge pull request #654 from DaanHoogland/CLOUDSTACK-8656
CLOUDSTACK-8656: do away with more silently ignoring exceptions. A lot of messages added; some restructuring for test exception assertions and try-with-resource blocks.

* pr/654: (29 commits)
CLOUDSTACK-8656: more logging instead of sysout
CLOUDSTACK-8656: use catch block for validation
CLOUDSTACK-8656: class in json specified not found
CLOUDSTACK-8656: removed unused classes
CLOUDSTACK-8656: restructure of tests
CLOUDSTACK-8656: reorganise synchronized block
CLOUDSTACK-8656: restructure tests to ensure exception throwing
CLOUDSTACK-8656: validate the throwing of ServerApiException
CLOUDSTACK-8656: logging ignored exceptions
CLOUDSTACK-8656: try-w-r removes need for empty catch block
CLOUDSTACK-8656: try-w-r instead of clunky close-except
CLOUDSTACK-8656: deal with empty SQLException catch block by try-w-r
CLOUDSTACK-8656: unnecessary close construct removed
CLOUDSTACK-8656: message about timed buffer logging
CLOUDSTACK-8656: message about invalid number from store
CLOUDSTACK-8656: move cli test tool to separate file
CLOUDSTACK-8656: exception is the rule for some tests
CLOUDSTACK-8656: network related exception logging
CLOUDSTACK-8656: reporting ignored exceptions in server
CLOUDSTACK-8656: log in case we are on a platform not supporting UTF8
...

Signed-off-by: Remi Bergsma <github@remi.nl>
commit 64ff67da55
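The recurring change across the hunks below replaces the close-in-finally idiom, with its empty catch (SQLException e) {} blocks, by try-with-resources. A minimal before/after sketch of the pattern (illustrative only; the connection and query are hypothetical, not code from this PR):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

class TryWithResourcesSketch {
    // Before: manual cleanup; an exception thrown by close() is silently swallowed.
    static void oldStyle(Connection conn) throws SQLException {
        PreparedStatement pstmt = null;
        try {
            pstmt = conn.prepareStatement("SELECT 1");
            pstmt.executeQuery();
        } finally {
            try {
                if (pstmt != null) {
                    pstmt.close();
                }
            } catch (SQLException e) {
                // empty catch block -- the silent ignore this PR removes
            }
        }
    }

    // After: the statement is closed automatically; an exception from close()
    // is recorded as a suppressed exception instead of being lost.
    static void newStyle(Connection conn) throws SQLException {
        try (PreparedStatement pstmt = conn.prepareStatement("SELECT 1")) {
            pstmt.executeQuery();
        }
    }
}

In the old style a failing close() vanishes; with try-with-resources it surfaces as a suppressed exception on the primary one, which is why so many empty catch blocks below can simply be deleted.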
@@ -82,23 +82,14 @@ public class Upgrade40to41 implements DbUpgrade {
if (regionId != null) {
region_id = Integer.parseInt(regionId);
}
PreparedStatement pstmt = null;
try {
try (PreparedStatement pstmt = conn.prepareStatement("update `cloud`.`region` set id = ?");) {
//Update regionId in region table
s_logger.debug("Updating region table with Id: " + region_id);
pstmt = conn.prepareStatement("update `cloud`.`region` set id = ?");
pstmt.setInt(1, region_id);
pstmt.executeUpdate();

} catch (SQLException e) {
throw new CloudRuntimeException("Error while updating region entries", e);
} finally {
try {
if (pstmt != null) {
pstmt.close();
}
} catch (SQLException e) {
}
}
}
@@ -33,7 +33,6 @@ import java.util.Map;
import java.util.UUID;

import org.apache.log4j.Logger;

import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;

@@ -1396,12 +1395,13 @@ public class Upgrade410to420 implements DbUpgrade {
// Corrects upgrade for deployment with F5 and SRX devices (pre 3.0) to network offering &
// network service provider paradigm
private void correctExternalNetworkDevicesSetup(Connection conn) {
PreparedStatement zoneSearchStmt = null, pNetworkStmt = null, f5DevicesStmt = null, srxDevicesStmt = null;
ResultSet zoneResults = null, pNetworksResults = null, f5DevicesResult = null, srxDevicesResult = null;
PreparedStatement pNetworkStmt = null, f5DevicesStmt = null, srxDevicesStmt = null;
ResultSet pNetworksResults = null, f5DevicesResult = null, srxDevicesResult = null;

try {
zoneSearchStmt = conn.prepareStatement("SELECT id, networktype FROM `cloud`.`data_center`");
zoneResults = zoneSearchStmt.executeQuery();
try (
PreparedStatement zoneSearchStmt = conn.prepareStatement("SELECT id, networktype FROM `cloud`.`data_center`");
ResultSet zoneResults = zoneSearchStmt.executeQuery();
){
while (zoneResults.next()) {
long zoneId = zoneResults.getLong(1);
String networkType = zoneResults.getString(2);
@@ -1438,12 +1438,13 @@ public class Upgrade410to420 implements DbUpgrade {
}
}

PreparedStatement fetchSRXNspStmt =
boolean hasSrxNsp = false;
try (PreparedStatement fetchSRXNspStmt =
conn.prepareStatement("SELECT id from `cloud`.`physical_network_service_providers` where physical_network_id=" + physicalNetworkId +
" and provider_name = 'JuniperSRX'");
ResultSet rsSRXNSP = fetchSRXNspStmt.executeQuery();
boolean hasSrxNsp = rsSRXNSP.next();
fetchSRXNspStmt.close();
ResultSet rsSRXNSP = fetchSRXNspStmt.executeQuery();) {
hasSrxNsp = rsSRXNSP.next();
}

// if there is no 'JuniperSRX' physical network service provider added into physical network then
// add 'JuniperSRX' as network service provider and add the entry in 'external_firewall_devices'
@@ -1466,24 +1467,8 @@ public class Upgrade410to420 implements DbUpgrade {
// not the network service provider has been provisioned in to physical network, mark all guest network
// to be using network offering 'Isolated with external providers'
fixZoneUsingExternalDevices(conn);

if (zoneResults != null) {
try {
zoneResults.close();
} catch (SQLException e) {
}
}

if (zoneSearchStmt != null) {
try {
zoneSearchStmt.close();
} catch (SQLException e) {
}
}
} catch (SQLException e) {
throw new CloudRuntimeException("Exception while adding PhysicalNetworks", e);
} finally {

}
}
@@ -1762,40 +1747,38 @@ public class Upgrade410to420 implements DbUpgrade {

// migrate secondary storages NFS from host tables to image_store table
private void migrateSecondaryStorageToImageStore(Connection conn) {
PreparedStatement storeInsert = null;
PreparedStatement storeDetailInsert = null;
PreparedStatement nfsQuery = null;
PreparedStatement pstmt = null;
ResultSet rs = null;
ResultSet storeInfo = null;
String sqlSelectS3Count = "select count(*) from `cloud`.`s3`";
String sqlSelectSwiftCount = "select count(*) from `cloud`.`swift`";
String sqlInsertStoreDetail = "INSERT INTO `cloud`.`image_store_details` (store_id, name, value) values(?, ?, ?)";
String sqlUpdateHostAsRemoved = "UPDATE `cloud`.`host` SET removed = now() WHERE type = 'SecondaryStorage' and removed is null";

s_logger.debug("Migrating secondary storage to image store");
boolean hasS3orSwift = false;
try {
try (
PreparedStatement pstmtSelectS3Count = conn.prepareStatement(sqlSelectS3Count);
PreparedStatement pstmtSelectSwiftCount = conn.prepareStatement(sqlSelectSwiftCount);
PreparedStatement storeDetailInsert = conn.prepareStatement(sqlInsertStoreDetail);
PreparedStatement storeInsert =
conn.prepareStatement("INSERT INTO `cloud`.`image_store` (id, uuid, name, image_provider_name, protocol, url, data_center_id, scope, role, parent, total_size, created) values(?, ?, ?, 'NFS', 'nfs', ?, ?, 'ZONE', ?, ?, ?, ?)");
PreparedStatement nfsQuery =
conn.prepareStatement("select id, uuid, url, data_center_id, parent, total_size, created from `cloud`.`host` where type = 'SecondaryStorage' and removed is null");
PreparedStatement pstmtUpdateHostAsRemoved = conn.prepareStatement(sqlUpdateHostAsRemoved);
ResultSet rsSelectS3Count = pstmtSelectS3Count.executeQuery();
ResultSet rsSelectSwiftCount = pstmtSelectSwiftCount.executeQuery();
ResultSet rsNfs = nfsQuery.executeQuery();
) {
s_logger.debug("Checking if we need to migrate NFS secondary storage to image store or staging store");
int numRows = 0;
pstmt = conn.prepareStatement("select count(*) from `cloud`.`s3`");
rs = pstmt.executeQuery();
if (rs.next()) {
numRows = rs.getInt(1);
if (rsSelectS3Count.next()) {
numRows = rsSelectS3Count.getInt(1);
}
rs.close();
pstmt.close();
if (numRows > 0) {
hasS3orSwift = true;
} else {
// check if there is swift storage
pstmt = conn.prepareStatement("select count(*) from `cloud`.`swift`");
rs = pstmt.executeQuery();
if (rs.next()) {
numRows = rs.getInt(1);
if (rsSelectSwiftCount.next()) {
numRows += rsSelectSwiftCount.getInt(1);
}
rs.close();
pstmt.close();
if (numRows > 0) {
hasS3orSwift = true;
}
}

String store_role = "Image";
if (hasS3orSwift) {
@@ -1804,23 +1787,15 @@ public class Upgrade410to420 implements DbUpgrade {

s_logger.debug("Migrating NFS secondary storage to " + store_role + " store");

storeDetailInsert = conn.prepareStatement("INSERT INTO `cloud`.`image_store_details` (store_id, name, value) values(?, ?, ?)");

// migrate NFS secondary storage, for nfs, keep previous host_id as the store_id
storeInsert =
conn.prepareStatement("INSERT INTO `cloud`.`image_store` (id, uuid, name, image_provider_name, protocol, url, data_center_id, scope, role, parent, total_size, created) values(?, ?, ?, 'NFS', 'nfs', ?, ?, 'ZONE', ?, ?, ?, ?)");
nfsQuery =
conn.prepareStatement("select id, uuid, url, data_center_id, parent, total_size, created from `cloud`.`host` where type = 'SecondaryStorage' and removed is null");
rs = nfsQuery.executeQuery();

while (rs.next()) {
Long nfs_id = rs.getLong("id");
String nfs_uuid = rs.getString("uuid");
String nfs_url = rs.getString("url");
String nfs_parent = rs.getString("parent");
int nfs_dcid = rs.getInt("data_center_id");
Long nfs_totalsize = rs.getObject("total_size") != null ? rs.getLong("total_size") : null;
Date nfs_created = rs.getDate("created");
while (rsNfs.next()) {
Long nfs_id = rsNfs.getLong("id");
String nfs_uuid = rsNfs.getString("uuid");
String nfs_url = rsNfs.getString("url");
String nfs_parent = rsNfs.getString("parent");
int nfs_dcid = rsNfs.getInt("data_center_id");
Long nfs_totalsize = rsNfs.getObject("total_size") != null ? rsNfs.getLong("total_size") : null;
Date nfs_created = rsNfs.getDate("created");

// insert entry in image_store table and image_store_details
// table and store host_id and store_id mapping
@@ -1841,36 +1816,11 @@ public class Upgrade410to420 implements DbUpgrade {
}

s_logger.debug("Marking NFS secondary storage in host table as removed");
pstmt = conn.prepareStatement("UPDATE `cloud`.`host` SET removed = now() WHERE type = 'SecondaryStorage' and removed is null");
pstmt.executeUpdate();
pstmt.close();
pstmtUpdateHostAsRemoved.executeUpdate();
} catch (SQLException e) {
String msg = "Unable to migrate secondary storages." + e.getMessage();
s_logger.error(msg);
throw new CloudRuntimeException(msg, e);
} finally {
try {
if (rs != null) {
rs.close();
}
if (storeInfo != null) {
storeInfo.close();
}

if (storeInsert != null) {
storeInsert.close();
}
if (storeDetailInsert != null) {
storeDetailInsert.close();
}
if (nfsQuery != null) {
nfsQuery.close();
}
if (pstmt != null) {
pstmt.close();
}
} catch (SQLException e) {
}
}
s_logger.debug("Completed migrating secondary storage to image store");
}
@@ -1947,26 +1897,21 @@ public class Upgrade410to420 implements DbUpgrade {

// migrate secondary storages S3 from s3 tables to image_store table
private void migrateS3ToImageStore(Connection conn) {
PreparedStatement storeInsert = null;
PreparedStatement storeDetailInsert = null;
PreparedStatement storeQuery = null;
PreparedStatement s3Query = null;
ResultSet rs = null;
ResultSet storeInfo = null;
Long storeId = null;
Map<Long, Long> s3_store_id_map = new HashMap<Long, Long>();

s_logger.debug("Migrating S3 to image store");
try {
storeQuery = conn.prepareStatement("select id from `cloud`.`image_store` where uuid = ?");
storeDetailInsert = conn.prepareStatement("INSERT INTO `cloud`.`image_store_details` (store_id, name, value) values(?, ?, ?)");
try (
PreparedStatement storeQuery = conn.prepareStatement("select id from `cloud`.`image_store` where uuid = ?");
PreparedStatement storeDetailInsert = conn.prepareStatement("INSERT INTO `cloud`.`image_store_details` (store_id, name, value) values(?, ?, ?)");

// migrate S3 to image_store
storeInsert = conn.prepareStatement("INSERT INTO `cloud`.`image_store` (uuid, name, image_provider_name, protocol, scope, role, created) " +
PreparedStatement storeInsert = conn.prepareStatement("INSERT INTO `cloud`.`image_store` (uuid, name, image_provider_name, protocol, scope, role, created) " +
"values(?, ?, 'S3', ?, 'REGION', 'Image', ?)");
s3Query = conn.prepareStatement("select id, uuid, access_key, secret_key, end_point, bucket, https, connection_timeout, " +
PreparedStatement s3Query = conn.prepareStatement("select id, uuid, access_key, secret_key, end_point, bucket, https, connection_timeout, " +
"max_error_retry, socket_timeout, created from `cloud`.`s3`");
rs = s3Query.executeQuery();
ResultSet rs = s3Query.executeQuery();
) {

while (rs.next()) {
Long s3_id = rs.getLong("id");
@@ -1991,10 +1936,11 @@ public class Upgrade410to420 implements DbUpgrade {
storeInsert.executeUpdate();

storeQuery.setString(1, s3_uuid);
storeInfo = storeQuery.executeQuery();
try (ResultSet storeInfo = storeQuery.executeQuery();) {
if (storeInfo.next()) {
storeId = storeInfo.getLong("id");
}
}

Map<String, String> detailMap = new HashMap<String, String>();
detailMap.put(ApiConstants.S3_ACCESS_KEY, s3_accesskey);
@@ -2027,29 +1973,6 @@ public class Upgrade410to420 implements DbUpgrade {
String msg = "Unable to migrate S3 secondary storages." + e.getMessage();
s_logger.error(msg);
throw new CloudRuntimeException(msg, e);
} finally {
try {
if (rs != null) {
rs.close();
}
if (storeInfo != null) {
storeInfo.close();
}

if (storeInsert != null) {
storeInsert.close();
}
if (storeDetailInsert != null) {
storeDetailInsert.close();
}
if (storeQuery != null) {
storeQuery.close();
}
if (s3Query != null) {
s3Query.close();
}
} catch (SQLException e) {
}
}

s_logger.debug("Migrating template_s3_ref to template_store_ref");
@@ -2162,26 +2085,20 @@ public class Upgrade410to420 implements DbUpgrade {

// migrate secondary storages Swift from swift tables to image_store table
private void migrateSwiftToImageStore(Connection conn) {
PreparedStatement storeInsert = null;
PreparedStatement storeDetailInsert = null;
PreparedStatement storeQuery = null;
PreparedStatement swiftQuery = null;
ResultSet rs = null;
ResultSet storeInfo = null;
Long storeId = null;
Map<Long, Long> swift_store_id_map = new HashMap<Long, Long>();

s_logger.debug("Migrating Swift to image store");
try {
storeQuery = conn.prepareStatement("select id from `cloud`.`image_store` where uuid = ?");
storeDetailInsert = conn.prepareStatement("INSERT INTO `cloud`.`image_store_details` (store_id, name, value) values(?, ?, ?)");
try (
PreparedStatement storeQuery = conn.prepareStatement("select id from `cloud`.`image_store` where uuid = ?");
PreparedStatement storeDetailInsert = conn.prepareStatement("INSERT INTO `cloud`.`image_store_details` (store_id, name, value) values(?, ?, ?)");

// migrate SWIFT secondary storage
storeInsert =
PreparedStatement storeInsert =
conn.prepareStatement("INSERT INTO `cloud`.`image_store` (uuid, name, image_provider_name, protocol, url, scope, role, created) values(?, ?, 'Swift', 'http', ?, 'REGION', 'Image', ?)");
swiftQuery = conn.prepareStatement("select id, uuid, url, account, username, swift.key, created from `cloud`.`swift`");
rs = swiftQuery.executeQuery();

PreparedStatement swiftQuery = conn.prepareStatement("select id, uuid, url, account, username, swift.key, created from `cloud`.`swift`");
ResultSet rs = swiftQuery.executeQuery();
) {
while (rs.next()) {
Long swift_id = rs.getLong("id");
String swift_uuid = rs.getString("uuid");
@@ -2200,10 +2117,11 @@ public class Upgrade410to420 implements DbUpgrade {
storeInsert.executeUpdate();

storeQuery.setString(1, swift_uuid);
storeInfo = storeQuery.executeQuery();
try (ResultSet storeInfo = storeQuery.executeQuery();) {
if (storeInfo.next()) {
storeId = storeInfo.getLong("id");
}
}

Map<String, String> detailMap = new HashMap<String, String>();
detailMap.put(ApiConstants.ACCOUNT, swift_account);
@@ -2225,29 +2143,6 @@ public class Upgrade410to420 implements DbUpgrade {
String msg = "Unable to migrate swift secondary storages." + e.getMessage();
s_logger.error(msg);
throw new CloudRuntimeException(msg, e);
} finally {
try {
if (rs != null) {
rs.close();
}
if (storeInfo != null) {
storeInfo.close();
}

if (storeInsert != null) {
storeInsert.close();
}
if (storeDetailInsert != null) {
storeDetailInsert.close();
}
if (storeQuery != null) {
storeQuery.close();
}
if (swiftQuery != null) {
swiftQuery.close();
}
} catch (SQLException e) {
}
}

s_logger.debug("Migrating template_swift_ref to template_store_ref");
@@ -2261,16 +2156,13 @@ public class Upgrade410to420 implements DbUpgrade {

// migrate template_s3_ref to template_store_ref
private void migrateTemplateSwiftRef(Connection conn, Map<Long, Long> swiftStoreMap) {
PreparedStatement tmplStoreInsert = null;
PreparedStatement s3Query = null;
ResultSet rs = null;
s_logger.debug("Updating template_store_ref table from template_swift_ref table");
try {
tmplStoreInsert =
try (
PreparedStatement tmplStoreInsert =
conn.prepareStatement("INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, download_pct, size, physical_size, download_state, local_path, install_path, update_count, ref_cnt, store_role, state) values(?, ?, ?, 100, ?, ?, 'DOWNLOADED', '?', '?', 0, 0, 'Image', 'Ready')");
s3Query = conn.prepareStatement("select swift_id, template_id, created, path, size, physical_size from `cloud`.`template_swift_ref`");
rs = s3Query.executeQuery();

PreparedStatement s3Query = conn.prepareStatement("select swift_id, template_id, created, path, size, physical_size from `cloud`.`template_swift_ref`");
ResultSet rs = s3Query.executeQuery();
) {
while (rs.next()) {
Long swift_id = rs.getLong("swift_id");
Long tmpl_id = rs.getLong("template_id");
@@ -2300,19 +2192,6 @@ public class Upgrade410to420 implements DbUpgrade {
String msg = "Unable to migrate template_swift_ref." + e.getMessage();
s_logger.error(msg);
throw new CloudRuntimeException(msg, e);
} finally {
try {
if (rs != null) {
rs.close();
}
if (tmplStoreInsert != null) {
tmplStoreInsert.close();
}
if (s3Query != null) {
s3Query.close();
}
} catch (SQLException e) {
}
}
s_logger.debug("Completed migrating template_swift_ref table.");
}
@@ -2575,10 +2454,10 @@ public class Upgrade410to420 implements DbUpgrade {

private void upgradeResourceCount(Connection conn) {
s_logger.debug("upgradeResourceCount start");
ResultSet rsAccount = null;
try( PreparedStatement sel_dom_pstmt = conn.prepareStatement("select id, domain_id FROM `cloud`.`account` where removed is NULL ");)
{
rsAccount = sel_dom_pstmt.executeQuery();
try(
PreparedStatement sel_dom_pstmt = conn.prepareStatement("select id, domain_id FROM `cloud`.`account` where removed is NULL ");
ResultSet rsAccount = sel_dom_pstmt.executeQuery();
) {
while (rsAccount.next()) {
long account_id = rsAccount.getLong(1);
long domain_id = rsAccount.getLong(2);
@@ -2706,13 +2585,6 @@ public class Upgrade410to420 implements DbUpgrade {
s_logger.debug("upgradeResourceCount finish");
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to upgrade resource count (cpu,memory,primary_storage,secondary_storage) ", e);
} finally {
try {
if (rsAccount != null) {
rsAccount.close();
}
} catch (SQLException e) {
}
}
}
@@ -131,80 +131,84 @@ public class Upgrade420to421 implements DbUpgrade {

private void upgradeResourceCount(Connection conn) {
s_logger.debug("upgradeResourceCount start");
PreparedStatement pstmt1 = null;
PreparedStatement pstmt2 = null;
PreparedStatement pstmt3 = null;
PreparedStatement pstmt4 = null;
PreparedStatement pstmt5 = null;
ResultSet rsAccount = null;
ResultSet rsCount = null;
try {
pstmt1 = conn.prepareStatement("select id, domain_id FROM `cloud`.`account` where removed is NULL ");
rsAccount = pstmt1.executeQuery();
String sqlSelectAccountIds = "select id, domain_id FROM `cloud`.`account` where removed is NULL ";
String sqlSelectOfferingTotals = "SELECT SUM(service_offering.cpu), SUM(service_offering.ram_size)"
+ " FROM `cloud`.`vm_instance`, `cloud`.`service_offering`"
+ " WHERE vm_instance.service_offering_id = service_offering.id AND vm_instance.account_id = ?"
+ " AND vm_instance.removed is NULL"
+ " AND vm_instance.vm_type='User' AND state not in ('Destroyed', 'Error', 'Expunging')";
String sqlSelectTotalVolumeSize =
"SELECT sum(size) FROM `cloud`.`volumes` WHERE account_id= ?"
+ " AND (path is not NULL OR state in ('Allocated')) AND removed is NULL"
+ " AND instance_id IN (SELECT id FROM `cloud`.`vm_instance` WHERE vm_type='User')";
String sqlSelectTotalPathlessVolumeSize =
"SELECT sum(size) FROM `cloud`.`volumes` WHERE account_id= ?"
+ " AND path is NULL AND state not in ('Allocated') AND removed is NULL";
String sqlSelectTotalSnapshotSize = "SELECT sum(size) FROM `cloud`.`snapshots` WHERE account_id= ? AND removed is NULL";
String sqlSelectTotalTemplateStoreSize = "SELECT sum(template_store_ref.size) FROM `cloud`.`template_store_ref`,`cloud`.`vm_template` WHERE account_id = ?"
+ " AND template_store_ref.template_id = vm_template.id AND download_state = 'DOWNLOADED' AND destroyed = false AND removed is NULL";
String sqlSelectDomainIds = "select id FROM `cloud`.`domain`";
String sqlSelectAccountCount = "select account.domain_id,sum(resource_count.count) from `cloud`.`account` left join `cloud`.`resource_count` on account.id=resource_count.account_id "
+ "where resource_count.type=? group by account.domain_id;";

try (
PreparedStatement pstmtSelectAccountIds = conn.prepareStatement(sqlSelectAccountIds);
PreparedStatement pstmtSelectOfferingTotals = conn.prepareStatement(sqlSelectOfferingTotals);
PreparedStatement pstmtSelectTotalVolumeSize = conn.prepareStatement(sqlSelectTotalVolumeSize);
PreparedStatement pstmtSelectTotalPathlessVolumeSize = conn.prepareStatement(sqlSelectTotalPathlessVolumeSize);
PreparedStatement pstmtSelectTotalSnapshotSize = conn.prepareStatement(sqlSelectTotalSnapshotSize);
PreparedStatement pstmtSelectTotalTemplateStoreSize = conn.prepareStatement(sqlSelectTotalTemplateStoreSize);
PreparedStatement pstmtSelectDomainIds = conn.prepareStatement(sqlSelectDomainIds);
PreparedStatement pstmtSelectAccountCount = conn.prepareStatement(sqlSelectAccountCount);
ResultSet rsAccount = pstmtSelectAccountIds.executeQuery();
) {
while (rsAccount.next()) {
long account_id = rsAccount.getLong(1);
long domain_id = rsAccount.getLong(2);
// 1. update cpu,memory for all accounts
pstmt2 =
conn.prepareStatement("SELECT SUM(service_offering.cpu), SUM(service_offering.ram_size)" + " FROM `cloud`.`vm_instance`, `cloud`.`service_offering`"
+ " WHERE vm_instance.service_offering_id = service_offering.id AND vm_instance.account_id = ?" + " AND vm_instance.removed is NULL"
+ " AND vm_instance.vm_type='User' AND state not in ('Destroyed', 'Error', 'Expunging')");
pstmt2.setLong(1, account_id);
rsCount = pstmt2.executeQuery();
if (rsCount.next()) {
upgradeResourceCountforAccount(conn, account_id, domain_id, "cpu", rsCount.getLong(1));
upgradeResourceCountforAccount(conn, account_id, domain_id, "memory", rsCount.getLong(2));
pstmtSelectOfferingTotals.setLong(1, account_id);
try (ResultSet rsOfferingTotals = pstmtSelectOfferingTotals.executeQuery();) {
if (rsOfferingTotals.next()) {
upgradeResourceCountforAccount(conn, account_id, domain_id, "cpu", rsOfferingTotals.getLong(1));
upgradeResourceCountforAccount(conn, account_id, domain_id, "memory", rsOfferingTotals.getLong(2));
} else {
upgradeResourceCountforAccount(conn, account_id, domain_id, "cpu", 0L);
upgradeResourceCountforAccount(conn, account_id, domain_id, "memory", 0L);
}
rsCount.close();
}

// 2. update primary_storage for all accounts
pstmt3 =
conn.prepareStatement("SELECT sum(size) FROM `cloud`.`volumes` WHERE account_id= ?"
+ " AND (path is not NULL OR state in ('Allocated')) AND removed is NULL"
+ " AND instance_id IN (SELECT id FROM `cloud`.`vm_instance` WHERE vm_type='User')");
pstmt3.setLong(1, account_id);
rsCount = pstmt3.executeQuery();
if (rsCount.next()) {
upgradeResourceCountforAccount(conn, account_id, domain_id, "primary_storage", rsCount.getLong(1));
pstmtSelectTotalVolumeSize.setLong(1, account_id);
try (ResultSet rsTotalVolumeSize = pstmtSelectTotalVolumeSize.executeQuery();) {
if (rsTotalVolumeSize.next()) {
upgradeResourceCountforAccount(conn, account_id, domain_id, "primary_storage", rsTotalVolumeSize.getLong(1));
} else {
upgradeResourceCountforAccount(conn, account_id, domain_id, "primary_storage", 0L);
}
rsCount.close();
}

// 3. update secondary_storage for all accounts
long totalVolumesSize = 0;
long totalSnapshotsSize = 0;
long totalTemplatesSize = 0;
pstmt4 =
conn.prepareStatement("SELECT sum(size) FROM `cloud`.`volumes` WHERE account_id= ?"
+ " AND path is NULL AND state not in ('Allocated') AND removed is NULL");
pstmt4.setLong(1, account_id);
rsCount = pstmt4.executeQuery();
if (rsCount.next()) {
totalVolumesSize = rsCount.getLong(1);
pstmtSelectTotalPathlessVolumeSize.setLong(1, account_id);
try (ResultSet rsTotalPathlessVolumeSize = pstmtSelectTotalPathlessVolumeSize.executeQuery();) {
if (rsTotalPathlessVolumeSize.next()) {
totalVolumesSize = rsTotalPathlessVolumeSize.getLong(1);
}
rsCount.close();
pstmt4.close();

pstmt4 = conn.prepareStatement("SELECT sum(size) FROM `cloud`.`snapshots` WHERE account_id= ? AND removed is NULL");
pstmt4.setLong(1, account_id);
rsCount = pstmt4.executeQuery();
if (rsCount.next()) {
totalSnapshotsSize = rsCount.getLong(1);
}
rsCount.close();
pstmt4.close();

pstmt4 =
conn.prepareStatement("SELECT sum(template_store_ref.size) FROM `cloud`.`template_store_ref`,`cloud`.`vm_template` WHERE account_id = ?"
+ " AND template_store_ref.template_id = vm_template.id AND download_state = 'DOWNLOADED' AND destroyed = false AND removed is NULL");
pstmt4.setLong(1, account_id);
rsCount = pstmt4.executeQuery();
if (rsCount.next()) {
totalTemplatesSize = rsCount.getLong(1);
pstmtSelectTotalSnapshotSize.setLong(1, account_id);
try (ResultSet rsTotalSnapshotSize = pstmtSelectTotalSnapshotSize.executeQuery();) {
if (rsTotalSnapshotSize.next()) {
totalSnapshotsSize = rsTotalSnapshotSize.getLong(1);
}
}
pstmtSelectTotalTemplateStoreSize.setLong(1, account_id);
try (ResultSet rsTotalTemplateStoreSize = pstmtSelectTotalTemplateStoreSize.executeQuery();) {
if (rsTotalTemplateStoreSize.next()) {
totalTemplatesSize = rsTotalTemplateStoreSize.getLong(1);
}
}
upgradeResourceCountforAccount(conn, account_id, domain_id, "secondary_storage", totalVolumesSize + totalSnapshotsSize + totalTemplatesSize);
}
@@ -212,56 +216,29 @@ public class Upgrade420to421 implements DbUpgrade {

// 4. upgrade cpu,memory,primary_storage,secondary_storage for domains
String resource_types[] = {"cpu", "memory", "primary_storage", "secondary_storage"};
pstmt5 = conn.prepareStatement("select id FROM `cloud`.`domain`");
rsAccount = pstmt5.executeQuery();
while (rsAccount.next()) {
long domain_id = rsAccount.getLong(1);
try (ResultSet rsDomainIds = pstmtSelectDomainIds.executeQuery();) {
while (rsDomainIds.next()) {
long domain_id = rsDomainIds.getLong(1);
for (int count = 0; count < resource_types.length; count++) {
String resource_type = resource_types[count];
upgradeResourceCountforDomain(conn, domain_id, resource_type, 0L); // reset value to 0 before statistics
}
}
}
for (int count = 0; count < resource_types.length; count++) {
String resource_type = resource_types[count];
pstmt5 =
conn.prepareStatement("select account.domain_id,sum(resource_count.count) from `cloud`.`account` left join `cloud`.`resource_count` on account.id=resource_count.account_id "
+ "where resource_count.type=? group by account.domain_id;");
pstmt5.setString(1, resource_type);
rsCount = pstmt5.executeQuery();
while (rsCount.next()) {
long domain_id = rsCount.getLong(1);
long resource_count = rsCount.getLong(2);
pstmtSelectAccountCount.setString(1, resource_type);
try (ResultSet rsAccountCount = pstmtSelectAccountCount.executeQuery();) {
while (rsAccountCount.next()) {
long domain_id = rsAccountCount.getLong(1);
long resource_count = rsAccountCount.getLong(2);
upgradeResourceCountforDomain(conn, domain_id, resource_type, resource_count);
}
}
}
s_logger.debug("upgradeResourceCount finish");
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to upgrade resource count (cpu,memory,primary_storage,secondary_storage) ", e);
} finally {
try {
if (rsAccount != null) {
rsAccount.close();
}
if (rsCount != null) {
rsCount.close();
}
if (pstmt1 != null) {
pstmt1.close();
}
if (pstmt2 != null) {
pstmt2.close();
}
if (pstmt3 != null) {
pstmt3.close();
}
if (pstmt4 != null) {
pstmt4.close();
}
if (pstmt5 != null) {
pstmt5.close();
}
} catch (SQLException e) {
}
}
}
@@ -32,6 +32,7 @@ import java.util.Set;

import com.cloud.hypervisor.Hypervisor;
import com.cloud.utils.crypt.DBEncryptionUtil;

import org.apache.log4j.Logger;

import com.cloud.utils.exception.CloudRuntimeException;
@@ -76,31 +77,20 @@ public class Upgrade442to450 implements DbUpgrade {
}

private void updateMaxRouterSizeConfig(Connection conn) {
PreparedStatement updatePstmt = null;
try {
String sqlUpdateConfig = "UPDATE `cloud`.`configuration` SET value=? WHERE name='router.ram.size' AND category='Hidden'";
try (PreparedStatement updatePstmt = conn.prepareStatement(sqlUpdateConfig);){
String encryptedValue = DBEncryptionUtil.encrypt("256");
updatePstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value=? WHERE name='router.ram.size' AND category='Hidden'");
updatePstmt.setBytes(1, encryptedValue.getBytes("UTF-8"));
updatePstmt.executeUpdate();
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to upgrade max ram size of router in config.", e);
} catch (UnsupportedEncodingException e) {
throw new CloudRuntimeException("Unable to encrypt configuration values ", e);
} finally {
try {
if (updatePstmt != null) {
updatePstmt.close();
}
} catch (SQLException e) {
}
}
s_logger.debug("Done updating router.ram.size config to 256");
}

private void upgradeMemoryOfVirtualRoutervmOffering(Connection conn) {
PreparedStatement updatePstmt = null;
PreparedStatement selectPstmt = null;
ResultSet selectResultSet = null;
int newRamSize = 256; //256MB
long serviceOfferingId = 0;

@@ -109,10 +99,11 @@ public class Upgrade442to450 implements DbUpgrade {
* We should not update/modify any user-defined offering.
*/

try {
selectPstmt = conn.prepareStatement("SELECT id FROM `cloud`.`service_offering` WHERE vm_type='domainrouter'");
updatePstmt = conn.prepareStatement("UPDATE `cloud`.`service_offering` SET ram_size=? WHERE id=?");
selectResultSet = selectPstmt.executeQuery();
try (
PreparedStatement selectPstmt = conn.prepareStatement("SELECT id FROM `cloud`.`service_offering` WHERE vm_type='domainrouter'");
PreparedStatement updatePstmt = conn.prepareStatement("UPDATE `cloud`.`service_offering` SET ram_size=? WHERE id=?");
ResultSet selectResultSet = selectPstmt.executeQuery();
) {
if(selectResultSet.next()) {
serviceOfferingId = selectResultSet.getLong("id");
}
@@ -122,19 +113,6 @@ public class Upgrade442to450 implements DbUpgrade {
updatePstmt.executeUpdate();
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to upgrade ram_size of service offering for domain router. ", e);
} finally {
try {
if (selectPstmt != null) {
selectPstmt.close();
}
if (selectResultSet != null) {
selectResultSet.close();
}
if (updatePstmt != null) {
updatePstmt.close();
}
} catch (SQLException e) {
}
}
s_logger.debug("Done upgrading RAM for service offering of domain router to " + newRamSize);
}
@@ -134,15 +134,4 @@ public class ClusterServiceServletImpl implements ClusterService {
return s_client;
}

// for test purpose only
public static void main(final String[] args) {
/*
ClusterServiceServletImpl service = new ClusterServiceServletImpl("http://localhost:9090/clusterservice", 300);
try {
String result = service.execute("test", 1, "{ p1:v1, p2:v2 }", true);
System.out.println(result);
} catch (RemoteException e) {
}
*/
}
}
@@ -142,26 +142,17 @@ public class ConnectionConcierge {
}

protected String testValidity(String name, Connection conn) {
PreparedStatement pstmt = null;
try {
if (conn != null) {
synchronized (conn) {
pstmt = conn.prepareStatement("SELECT 1");
try (PreparedStatement pstmt = conn.prepareStatement("SELECT 1");) {
pstmt.executeQuery();
}
}
return null;
} catch (Throwable th) {
s_logger.error("Unable to keep the db connection for " + name, th);
return th.toString();
} finally {
if (pstmt != null) {
try {
pstmt.close();
} catch (SQLException e) {
}
}
}
return null;
}

@Override
@@ -43,6 +43,8 @@ import javax.persistence.Transient;

import org.apache.log4j.Logger;

import static com.cloud.utils.AutoCloseableUtil.closeAutoCloseable;

public class DbUtil {
protected final static Logger s_logger = Logger.getLogger(DbUtil.class);

@@ -280,16 +282,4 @@ public class DbUtil {
closeAutoCloseable(connection, "exception while close connection.");
}

public static void closeAutoCloseable(AutoCloseable ac, String message) {
try {

if (ac != null) {
ac.close();
}

} catch (Exception e) {
s_logger.warn("[ignored] " + message, e);
}
}

}
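The closeAutoCloseable helper, statically imported above from AutoCloseableUtil and removed from DbUtil itself, is what later hunks (ManagementNetworkGuru, ExternalIpAddressAllocator) call in place of hand-rolled close/catch blocks: it logs the ignored exception instead of dropping it. A minimal usage sketch (the properties file and caller are hypothetical, not code from this PR):

import static com.cloud.utils.AutoCloseableUtil.closeAutoCloseable;

import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

class CloseHelperSketch {
    static Properties readConfig(String path) throws IOException {
        Properties props = new Properties();
        FileInputStream inputFile = new FileInputStream(path);
        try {
            props.load(inputFile);
        } finally {
            // logs "[ignored] <message>" if close() throws, instead of an empty catch block
            closeAutoCloseable(inputFile, "error closing config file");
        }
        return props;
    }
}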
@@ -38,32 +38,17 @@ public class TransactionTest {

@BeforeClass
public static void oneTimeSetup() {
Connection conn = null;
PreparedStatement pstmt = null;
try {
conn = TransactionLegacy.getStandaloneConnection();

pstmt =
try (
Connection conn = TransactionLegacy.getStandaloneConnection();
PreparedStatement pstmt =
conn.prepareStatement("CREATE TABLE `cloud`.`test` (" + "`id` bigint unsigned NOT NULL UNIQUE AUTO_INCREMENT," + "`fld_int` int unsigned,"
+ "`fld_long` bigint unsigned," + "`fld_string` varchar(255)," + "PRIMARY KEY (`id`)" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8;");
) {

pstmt.execute();

} catch (SQLException e) {
throw new CloudRuntimeException("Problem with sql", e);
} finally {
if (pstmt != null) {
try {
pstmt.close();
} catch (SQLException e) {
}
}
if (conn != null) {
try {
conn.close();
} catch (SQLException e) {
}
}
}
}

@@ -157,57 +142,25 @@ public class TransactionTest {
* Delete all records after each test, but table is still kept
*/
public void tearDown() {
Connection conn = null;
PreparedStatement pstmt = null;
try {
conn = TransactionLegacy.getStandaloneConnection();

pstmt = conn.prepareStatement("truncate table `cloud`.`test`");
try (
Connection conn = TransactionLegacy.getStandaloneConnection();
PreparedStatement pstmt = conn.prepareStatement("truncate table `cloud`.`test`");
) {
pstmt.execute();

} catch (SQLException e) {
throw new CloudRuntimeException("Problem with sql", e);
} finally {
if (pstmt != null) {
try {
pstmt.close();
} catch (SQLException e) {
}
}
if (conn != null) {
try {
conn.close();
} catch (SQLException e) {
}
}
}
}

@AfterClass
public static void oneTimeTearDown() {
Connection conn = null;
PreparedStatement pstmt = null;
try {
conn = TransactionLegacy.getStandaloneConnection();

pstmt = conn.prepareStatement("DROP TABLE IF EXISTS `cloud`.`test`");
try (
Connection conn = TransactionLegacy.getStandaloneConnection();
PreparedStatement pstmt = conn.prepareStatement("DROP TABLE IF EXISTS `cloud`.`test`");
) {
pstmt.execute();

} catch (SQLException e) {
throw new CloudRuntimeException("Problem with sql", e);
} finally {
if (pstmt != null) {
try {
pstmt.close();
} catch (SQLException e) {
}
}
if (conn != null) {
try {
conn.close();
} catch (SQLException e) {
}
}
}
}
}
@@ -125,6 +125,7 @@ public class OnwireClassRegistry {
} catch (IOException e) {
s_logger.debug("Encountered IOException", e);
} catch (ClassNotFoundException e) {
s_logger.info("[ignored] class not found", e);
}
return classes;
}
@@ -139,6 +140,7 @@ public class OnwireClassRegistry {
Class<?> clazz = Class.forName(name);
classes.add(clazz);
} catch (ClassNotFoundException e) {
s_logger.info("[ignored] class not found in directory " + directory, e);
} catch (Exception e) {
s_logger.debug("Encountered unexpected exception! ", e);
}
@@ -1,58 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.framework.codestyle;

import org.apache.cloudstack.framework.rpc.RpcCallbackDispatcher;
import org.apache.cloudstack.framework.rpc.RpcClientCall;
import org.apache.cloudstack.framework.rpc.RpcException;
import org.apache.cloudstack.framework.rpc.RpcIOException;
import org.apache.cloudstack.framework.rpc.RpcProvider;
import org.apache.cloudstack.framework.rpc.RpcTimeoutException;

public class ClientOnlyEventDrivenStyle {
RpcProvider _rpcProvider;

public void AsyncCallRpcService() {
String cmd = new String();
RpcCallbackDispatcher<ClientOnlyEventDrivenStyle> callbackDispatcher = RpcCallbackDispatcher.create(this);
callbackDispatcher.setCallback(callbackDispatcher.getTarget().OnAsyncCallRpcServiceCallback(null, null));
_rpcProvider.newCall("host-2")
.setCommand("TestCommand")
.setCommandArg(cmd)
.setTimeout(10000)
.setCallbackDispatcher(callbackDispatcher)
.setContext("Context Object")
// save context object for callback handler
.apply();
}

public Void OnAsyncCallRpcServiceCallback(RpcClientCall call, String context) {
try {
String answer = call.get();

} catch (RpcTimeoutException e) {

} catch (RpcIOException e) {

} catch (RpcException e) {
}

return null;
}
}
@@ -1,58 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.framework.codestyle;

import org.apache.cloudstack.framework.rpc.RpcCallbackListener;
import org.apache.cloudstack.framework.rpc.RpcClientCall;
import org.apache.cloudstack.framework.rpc.RpcException;
import org.apache.cloudstack.framework.rpc.RpcIOException;
import org.apache.cloudstack.framework.rpc.RpcProvider;
import org.apache.cloudstack.framework.rpc.RpcTimeoutException;

public class ClientOnlyListenerStyle {

RpcProvider _rpcProvider;

public void AsyncCallRpcService() {
String cmd = new String();
_rpcProvider.newCall("host-2").setCommand("TestCommand").setCommandArg(cmd).setTimeout(10000).addCallbackListener(new RpcCallbackListener<String>() {
@Override
public void onSuccess(String result) {
}

@Override
public void onFailure(RpcException e) {
}
}).apply();
}

public void SyncCallRpcService() {
String cmd = new String();
RpcClientCall call = _rpcProvider.newCall("host-2").setCommand("TestCommand").setCommandArg(cmd).setTimeout(10000).apply();

try {
String answer = call.get();
} catch (RpcTimeoutException e) {

} catch (RpcIOException e) {

} catch (RpcException e) {
}
}
}
@@ -103,16 +103,19 @@ public class ModuleBasedContextFactoryTest {
assertEquals(parent, parentBean);
}

int notfound = 0;
for (String notThere : notTheres) {
try {
context.getBean(notThere, String.class);
fail();
} catch (NoSuchBeanDefinitionException e) {
notfound++;
}
}

int count = context.getBean("count", InstantiationCounter.class).getCount();

assertEquals(notTheres.length, notfound);
assertEquals(order, count);
}
@@ -2363,10 +2363,8 @@ public class HypervDirectConnectResource extends ServerResourceBase implements S
// VM patching/rebooting time that may need
int retry = _retry;
while (System.currentTimeMillis() - startTick <= _opsTimeout || --retry > 0) {
SocketChannel sch = null;
try {
s_logger.info("Trying to connect to " + ipAddress);
sch = SocketChannel.open();
try (SocketChannel sch = SocketChannel.open();) {
sch.configureBlocking(true);
sch.socket().setSoTimeout(5000);
// we need to connect to the control ip address to check the status of the system vm
@@ -2385,13 +2383,6 @@ public class HypervDirectConnectResource extends ServerResourceBase implements S
s_logger.debug("[ignored] interrupted while waiting to retry connecting to vm after exception: "+e.getLocalizedMessage());
}
}
} finally {
if (sch != null) {
try {
sch.close();
} catch (IOException e) {
}
}
}

try {
@@ -136,6 +136,7 @@ public class AgentRoutingResource extends AgentStorageResource {
try {
clz = Class.forName(objectType);
} catch (ClassNotFoundException e) {
s_logger.info("[ignored] ping returned class", e);
}
if (clz != null) {
StringReader reader = new StringReader(objectData);
@@ -18,6 +18,7 @@ package com.cloud.hypervisor.vmware.resource;

import java.io.File;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.net.URI;
@@ -33,12 +34,10 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.TimeZone;
import java.util.UUID;
import java.io.UnsupportedEncodingException;

import javax.naming.ConfigurationException;

@@ -216,8 +215,8 @@ import com.cloud.dc.Vlan;
import com.cloud.exception.CloudException;
import com.cloud.exception.InternalErrorException;
import com.cloud.host.Host.Type;
import com.cloud.hypervisor.guru.VMwareGuru;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.hypervisor.guru.VMwareGuru;
import com.cloud.hypervisor.vmware.manager.VmwareHostService;
import com.cloud.hypervisor.vmware.manager.VmwareManager;
import com.cloud.hypervisor.vmware.manager.VmwareStorageMount;
@@ -566,7 +565,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
// we need to spawn a worker VM to attach the volume to and
// resize the volume.
useWorkerVm = true;
vmName = this.getWorkerName(getServiceContext(), cmd, 0);
vmName = getWorkerName(getServiceContext(), cmd, 0);

morDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolId);
dsMo = new DatastoreMO(hyperHost.getContext(), morDS);
@@ -4803,10 +4802,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
// VM patching/rebooting time that may need
int retry = _retry;
while (System.currentTimeMillis() - startTick <= _opsTimeout || --retry > 0) {
SocketChannel sch = null;
try {
s_logger.info("Trying to connect to " + ipAddress);
sch = SocketChannel.open();
try (SocketChannel sch = SocketChannel.open();) {
sch.configureBlocking(true);
sch.socket().setSoTimeout(5000);

@@ -4825,13 +4822,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
s_logger.debug("[ignored] interrupted while waiting to retry connect after failure.", e);
}
}
} finally {
if (sch != null) {
try {
sch.close();
} catch (IOException e) {
}
}
}

try {
@@ -17,6 +17,8 @@

package org.apache.cloudstack.network.contrail.management;

import static com.cloud.utils.AutoCloseableUtil.closeAutoCloseable;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
@@ -38,7 +40,6 @@ import com.cloud.network.dao.NetworkVO;
import com.cloud.offering.NetworkOffering;
import com.cloud.user.Account;
import com.cloud.utils.PropertiesUtil;

/**
* ManagementNetworkGuru
*
@@ -81,10 +82,7 @@ public class ManagementNetworkGuru extends ContrailGuru {
s_logger.error(e.getMessage());
throw new ConfigurationException(e.getMessage());
} finally {
try {
inputFile.close();
} catch (IOException e) {
}
closeAutoCloseable(inputFile, "error closing config file");
}
_mgmtCidr = configProps.getProperty("management.cidr");
_mgmtGateway = configProps.getProperty("management.gateway");
@@ -22,6 +22,8 @@ import java.io.Serializable;
import java.lang.ref.WeakReference;
import java.util.TreeSet;

import org.apache.log4j.Logger;

import com.cloud.exception.InternalErrorException;

/**
@@ -43,6 +45,7 @@ public interface ModelObject {
public static class ModelReference implements Comparable<ModelReference>, Serializable {

private static final long serialVersionUID = -2019113974956703526L;
private static final Logger s_logger = Logger.getLogger(ModelReference.class);

/*
* WeakReference class is not serializable by definition. So, we cannot enforce its serialization unless we write the implementation of
@@ -86,9 +89,10 @@ public interface ModelObject {
ModelReference rhs = (ModelReference)other;
return compareTo(rhs) == 0;
} catch (ClassCastException ex) {
}
// not this class, so
return false;
}
}

public ModelObject get() {
return reference.get();
@@ -29,6 +29,8 @@ import java.util.UUID;

import javax.inject.Inject;

import org.apache.log4j.Logger;

import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
@@ -40,7 +42,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.apache.log4j.Logger;

import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
@@ -189,6 +190,7 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl implements PrimaryDataStore
try {
hostPath = URLDecoder.decode(uri.getPath(), "UTF-8");
} catch (UnsupportedEncodingException e) {
s_logger.error("[ignored] we are on a platform not supporting \"UTF-8\"!?!", e);
}
if (hostPath == null) { // if decoding fails, use getPath() anyway
hostPath = uri.getPath();
@@ -94,6 +94,7 @@ public class SAML2LogoutAPIAuthenticatorCmd extends BaseCmd implements APIAuthen
try {
resp.sendRedirect(SAML2AuthManager.SAMLCloudStackRedirectionUrl.value());
} catch (IOException ignored) {
s_logger.info("[ignored] sending redirect failed.", ignored);
}
return responseString;
}
@@ -123,6 +124,7 @@ public class SAML2LogoutAPIAuthenticatorCmd extends BaseCmd implements APIAuthen
try {
resp.sendRedirect(SAML2AuthManager.SAMLCloudStackRedirectionUrl.value());
} catch (IOException ignored) {
s_logger.info("[ignored] second redirect sending failed.", ignored);
}
return responseString;
}
@@ -134,6 +136,7 @@ public class SAML2LogoutAPIAuthenticatorCmd extends BaseCmd implements APIAuthen
try {
resp.sendRedirect(SAML2AuthManager.SAMLCloudStackRedirectionUrl.value());
} catch (IOException ignored) {
s_logger.info("[ignored] final redirect failed.", ignored);
}
return responseString;
}
@@ -288,18 +288,21 @@ public class SAML2AuthManagerImpl extends AdapterBase implements SAML2AuthManage
try {
idpMetadata.setSigningCertificate(KeyInfoHelper.getCertificates(kd.getKeyInfo()).get(0));
} catch (CertificateException ignored) {
s_logger.info("[ignored] encountered invalid certificate signing.", ignored);
}
}
if (kd.getUse() == UsageType.ENCRYPTION) {
try {
idpMetadata.setEncryptionCertificate(KeyInfoHelper.getCertificates(kd.getKeyInfo()).get(0));
} catch (CertificateException ignored) {
s_logger.info("[ignored] encountered invalid certificate encryption.", ignored);
}
}
if (kd.getUse() == UsageType.UNSPECIFIED) {
try {
unspecifiedKey = KeyInfoHelper.getCertificates(kd.getKeyInfo()).get(0);
} catch (CertificateException ignored) {
s_logger.info("[ignored] encountered invalid certificate.", ignored);
}
}
}
@@ -19,12 +19,15 @@

package org.apache.cloudstack.api.command;

import static org.junit.Assert.assertFalse;

import com.cloud.domain.Domain;
import com.cloud.user.AccountService;
import com.cloud.user.DomainManager;
import com.cloud.user.UserAccountVO;
import com.cloud.user.dao.UserAccountDao;
import com.cloud.utils.HttpUtils;

import org.apache.cloudstack.api.ApiServerService;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.ServerApiException;
@@ -64,6 +67,7 @@ import org.opensaml.saml2.core.impl.SubjectBuilder;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;

import java.lang.reflect.Field;
import java.security.KeyPair;
import java.security.cert.X509Certificate;
@@ -154,8 +158,6 @@ public class SAML2LoginAPIAuthenticatorCmdTest {
userAccountDaoField.setAccessible(true);
userAccountDaoField.set(cmd, userAccountDao);

String spId = "someSPID";
String url = "someUrl";
KeyPair kp = SAMLUtils.generateRandomKeyPair();
X509Certificate cert = SAMLUtils.generateRandomX509Certificate(kp);

@@ -187,10 +189,13 @@ public class SAML2LoginAPIAuthenticatorCmdTest {
// SSO SAMLResponse verification test, this should throw ServerApiException for auth failure
params.put(SAMLPluginConstants.SAML_RESPONSE, new String[]{"Some String"});
Mockito.stub(cmd.processSAMLResponse(Mockito.anyString())).toReturn(buildMockResponse());
boolean failing = true;
try {
cmd.authenticate("command", params, session, InetAddress.getByName("127.0.0.1"), HttpUtils.RESPONSE_TYPE_JSON, new StringBuilder(), req, resp);
} catch (ServerApiException ignored) {
failing = false;
}
assertFalse("authentication should not have succeeded", failing);
Mockito.verify(userAccountDao, Mockito.times(0)).getUserAccount(Mockito.anyString(), Mockito.anyLong());
Mockito.verify(apiServer, Mockito.times(0)).verifyUser(Mockito.anyLong());
}
|
||||
@ -16,6 +16,8 @@
// under the License.
package com.cloud.network;

import static com.cloud.utils.AutoCloseableUtil.closeAutoCloseable;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
@ -81,12 +83,7 @@ public class ExternalIpAddressAllocator extends AdapterBase implements IpAddrAll
        } catch (IOException e) {
            return new IpAddr();
        } finally {
            if (in != null) {
                try {
                    in.close();
                } catch (IOException e) {
                }
            }
            closeAutoCloseable(in, "closing buffered reader");
        }

    }
@ -121,12 +118,7 @@ public class ExternalIpAddressAllocator extends AdapterBase implements IpAddrAll
        } catch (IOException e) {
            return false;
        } finally {
            if (in != null) {
                try {
                    in.close();
                } catch (IOException e) {
                }
            }
            closeAutoCloseable(in, "buffered reader close");
        }
    }

@ -693,6 +693,7 @@ public class LoadBalancingRulesManagerImpl<Type> extends ManagerBase implements
                if (backupState.equals(FirewallRule.State.Active))
                    applyLoadBalancerConfig(cmd.getLbRuleId());
            } catch (ResourceUnavailableException e1) {
                s_logger.info("[ignored] failed to apply load balancer config.", e1);
            } finally {
                loadBalancer.setState(backupState);
                _lbDao.persist(loadBalancer);

@ -43,7 +43,6 @@ import javax.crypto.SecretKey;
import javax.inject.Inject;
import javax.naming.ConfigurationException;

import com.cloud.utils.nio.Link;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang.StringUtils;
@ -118,6 +117,7 @@ import com.cloud.utils.db.TransactionLegacy;
import com.cloud.utils.db.TransactionStatus;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.NetUtils;
import com.cloud.utils.nio.Link;
import com.cloud.utils.script.Script;

public class ConfigurationServerImpl extends ManagerBase implements ConfigurationServer {
@ -757,6 +757,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
        try (DataInputStream dis = new DataInputStream(new FileInputStream(privkeyfile))) {
            dis.readFully(arr1);
        } catch (EOFException e) {
            s_logger.info("[ignored] eof reached");
        } catch (Exception e) {
            s_logger.error("Cannot read the private key file", e);
            throw new CloudRuntimeException("Cannot read the private key file");
@ -766,6 +767,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
        try (DataInputStream dis = new DataInputStream(new FileInputStream(pubkeyfile))) {
            dis.readFully(arr2);
        } catch (EOFException e) {
            s_logger.info("[ignored] eof reached");
        } catch (Exception e) {
            s_logger.warn("Cannot read the public key file", e);
            throw new CloudRuntimeException("Cannot read the public key file");
@ -902,7 +904,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
        } else {
            command = new Script("/bin/bash", s_logger);
        }
        if (this.isOnWindows()) {
        if (isOnWindows()) {
            scriptPath = scriptPath.replaceAll("\\\\" ,"/" );
            systemVmIsoPath = systemVmIsoPath.replaceAll("\\\\" ,"/" );
            publicKeyPath = publicKeyPath.replaceAll("\\\\" ,"/" );

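The two try-with-resources blocks above still need an explicit EOFException branch because readFully insists on filling the whole array. A shorter alternative, sketched here on the assumption that the entire key file should be read and that privkeyfile is a path string (java.nio.file has been available since Java 7):

    // Sketch: read the whole key file in one call; no stream to close, no EOF special case.
    // An IOException here would still need the CloudRuntimeException wrapping shown above.
    byte[] arr1 = java.nio.file.Files.readAllBytes(java.nio.file.Paths.get(privkeyfile));
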
@ -228,12 +228,14 @@ public class ConsoleProxyServlet extends HttpServlet {
        try {
            w = Integer.parseInt(value);
        } catch (NumberFormatException e) {
            s_logger.info("[ignored] not a number: " + value);
        }

        value = req.getParameter("h");
        try {
            h = Integer.parseInt(value);
        } catch (NumberFormatException e) {
            s_logger.info("[ignored] not a number: " + value);
        }

        try {

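The width and height parameters are parsed with the same log-and-keep-default pattern; a small helper would keep that logic in one place. A minimal sketch, assuming the surrounding servlet (the helper name parseIntParam is hypothetical):

    // Hypothetical helper: parse a request parameter, falling back to a default on bad input.
    private int parseIntParam(final HttpServletRequest req, final String name, final int defaultValue) {
        final String value = req.getParameter(name);
        try {
            return Integer.parseInt(value);
        } catch (NumberFormatException e) {
            s_logger.info("[ignored] not a number: " + value);
            return defaultValue;
        }
    }
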
@ -41,6 +41,7 @@ import com.cloud.utils.component.ComponentContext;
public class VpcApiUnitTest extends TestCase {
    @Inject
    VpcManagerImpl _vpcService = null;
    VpcVO _vo = new VpcVO(1, "new vpc", "new vpc", 1, 1, 1, "0.0.0.0/0", "vpc domain", false, false, false);

    @Override
    @Before
@ -81,93 +82,68 @@ public class VpcApiUnitTest extends TestCase {
        }
    }

    //1) correct network offering
    @Test
    public void validateNtwkOffForVpc() {
        //validate network offering
        //1) correct network offering
        VpcVO vo = new VpcVO(1, "new vpc", "new vpc", 1, 1, 1, "0.0.0.0/0", "vpc domain", false, false, false);
        boolean result = false;
        try {
            _vpcService.validateNtwkOffForNtwkInVpc(2L, 1, "0.0.0.0", "111-", vo, "10.1.1.1", new AccountVO(), null);
            _vpcService.validateNtwkOffForNtwkInVpc(2L, 1, "0.0.0.0", "111-", _vo, "10.1.1.1", new AccountVO(), null);
            result = true;
        } catch (Exception ex) {
        } finally {
            assertTrue("Validate network offering: Test passed: the offering is valid for vpc creation", result);
        }

    }

    //2) invalid offering - source nat is not included
        result = false;
    @Test(expected=InvalidParameterValueException.class)
    public void validateNtwkOffForVpcInvalidMissingSourceNat() {
        boolean result = false;
        try {
            _vpcService.validateNtwkOffForNtwkInVpc(2L, 2, "0.0.0.0", "111-", vo, "10.1.1.1", new AccountVO(), null);
            _vpcService.validateNtwkOffForNtwkInVpc(2L, 2, "0.0.0.0", "111-", _vo, "10.1.1.1", new AccountVO(), null);
            result = true;
        } catch (InvalidParameterValueException ex) {
        } finally {
            assertFalse("Validate network offering: TEST FAILED, can't use network offering without SourceNat service", result);
        }

    }

    //3) invalid offering - conserve mode is off
        result = false;
    @Test(expected=InvalidParameterValueException.class)
    public void validateNtwkOffForVpcInvalidNoConserveMode() {
        boolean result = false;
        try {
            _vpcService.validateNtwkOffForNtwkInVpc(2L, 3, "0.0.0.0", "111-", vo, "10.1.1.1", new AccountVO(), null);
            _vpcService.validateNtwkOffForNtwkInVpc(2L, 3, "0.0.0.0", "111-", _vo, "10.1.1.1", new AccountVO(), null);
            result = true;
        } catch (InvalidParameterValueException ex) {
        } finally {
            assertFalse("Validate network offering: TEST FAILED, can't use network offering without conserve mode = true", result);
        }

    }

    //4) invalid offering - guest type shared
        result = false;
    @Test(expected=InvalidParameterValueException.class)
    public void validateNtwkOffForVpcInvalidTypeIsGuest() {
        boolean result = false;
        try {
            _vpcService.validateNtwkOffForNtwkInVpc(2L, 4, "0.0.0.0", "111-", vo, "10.1.1.1", new AccountVO(), null);
            _vpcService.validateNtwkOffForNtwkInVpc(2L, 4, "0.0.0.0", "111-", _vo, "10.1.1.1", new AccountVO(), null);
            result = true;
        } catch (InvalidParameterValueException ex) {
        } finally {
            assertFalse("Validate network offering: TEST FAILED, can't use network offering with guest type = Shared", result);
        }

    }

    //5) Invalid offering - no redundant router support
        result = false;
    @Test(expected=InvalidParameterValueException.class)
    public void validateNtwkOffForVpcInvalidNoRVRSupport() {
        boolean result = false;
        try {
            _vpcService.validateNtwkOffForNtwkInVpc(2L, 5, "0.0.0.0", "111-", vo, "10.1.1.1", new AccountVO(), null);
            _vpcService.validateNtwkOffForNtwkInVpc(2L, 5, "0.0.0.0", "111-", _vo, "10.1.1.1", new AccountVO(), null);
            result = true;
        } catch (InvalidParameterValueException ex) {
        } finally {
            assertFalse("TEST FAILED, can't use network offering without redundant router support", result);
        }
    }

    // public void destroyVpc() {
    // boolean result = false;
    // try {
    // result = _vpcService.destroyVpc(vo, new AccountVO(), 1L);
    // } catch (Exception ex) {
    // s_logger.debug(ex);
    // } finally {
    // assertTrue("Failed to destroy VPC", result);
    // }
    // }
    //
    // public void deleteVpc() {
    // //delete existing offering
    // boolean result = false;
    // try {
    // List<String> svcs = new ArrayList<String>();
    // svcs.add(Service.SourceNat.getName());
    // result = _vpcService.deleteVpc(1);
    // } catch (Exception ex) {
    // } finally {
    // assertTrue("Delete vpc: TEST FAILED, vpc failed to delete" + result, result);
    // }
    //
    // //delete non-existing offering
    // result = false;
    // try {
    // List<String> svcs = new ArrayList<String>();
    // svcs.add(Service.SourceNat.getName());
    // result = _vpcService.deleteVpc(100);
    // } catch (Exception ex) {
    // } finally {
    // assertFalse("Delete vpc: TEST FAILED, true is returned when try to delete non existing vpc" + result, result);
    // }
    // }
}

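With @Test(expected=...) carrying the assertion, the result flag and the finally/assertFalse bookkeeping still visible above become redundant; a minimal sketch, assuming JUnit 4, of where this restructuring can end up:

    // Sketch: the expected attribute fails the test unless the exception is thrown.
    @Test(expected = InvalidParameterValueException.class)
    public void validateNtwkOffForVpcInvalidMissingSourceNat() {
        _vpcService.validateNtwkOffForNtwkInVpc(2L, 2, "0.0.0.0", "111-", _vo, "10.1.1.1", new AccountVO(), null);
    }
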
@ -125,15 +125,12 @@ public class CreateNetworkOfferingTest extends TestCase {
        assertNotNull("Shared network offering with specifyVlan=true failed to create ", off);
    }

    @Test
    @Test(expected=InvalidParameterValueException.class)
    public void createSharedNtwkOffWithNoVlan() {
        try {
            NetworkOfferingVO off =
                configMgr.createNetworkOffering("shared", "shared", TrafficType.Guest, null, false, Availability.Optional, 200, null, false, Network.GuestType.Shared,
                    false, null, false, null, true, false, null, false, null, true);
            assertNull("Shared network offering with specifyVlan=false was created", off);
        } catch (InvalidParameterValueException ex) {
        }
    }

    @Test
@ -145,15 +142,12 @@ public class CreateNetworkOfferingTest extends TestCase {
        assertNotNull("Shared network offering with specifyIpRanges=true failed to create ", off);
    }

    @Test
    @Test(expected=InvalidParameterValueException.class)
    public void createSharedNtwkOffWithoutSpecifyIpRanges() {
        try {
            NetworkOfferingVO off =
                configMgr.createNetworkOffering("shared", "shared", TrafficType.Guest, null, true, Availability.Optional, 200, null, false, Network.GuestType.Shared,
                    false, null, false, null, false, false, null, false, null, true);
            assertNull("Shared network offering with specifyIpRanges=false was created", off);
        } catch (InvalidParameterValueException ex) {
        }
    }

    //Test Isolated network offerings
@ -183,9 +177,8 @@ public class CreateNetworkOfferingTest extends TestCase {

    }

    @Test
    @Test(expected=InvalidParameterValueException.class)
    public void createIsolatedNtwkOffWithSpecifyIpRangesAndSourceNat() {
        try {
            Map<Service, Set<Provider>> serviceProviderMap = new HashMap<Network.Service, Set<Network.Provider>>();
            Set<Network.Provider> vrProvider = new HashSet<Network.Provider>();
            vrProvider.add(Provider.VirtualRouter);
@ -194,8 +187,6 @@ public class CreateNetworkOfferingTest extends TestCase {
                configMgr.createNetworkOffering("isolated", "isolated", TrafficType.Guest, null, false, Availability.Optional, 200, serviceProviderMap, false,
                    Network.GuestType.Isolated, false, null, false, null, true, false, null, false, null, true);
            assertNull("Isolated network offering with specifyIpRanges=true and source nat service enabled, was created", off);
        } catch (InvalidParameterValueException ex) {
        }
    }

    @Test

@ -21,12 +21,15 @@ import java.awt.image.DataBuffer;
import java.awt.image.DataBufferInt;
import java.util.Arrays;

import org.apache.log4j.Logger;

import streamer.BaseElement;
import streamer.ByteBuffer;
import streamer.Element;
import streamer.Link;

public class BufferedImagePixelsAdapter extends BaseElement {
    private static final Logger s_logger = Logger.getLogger(BufferedImagePixelsAdapter.class);

    public static final String TARGET_X = "x";
    public static final String TARGET_Y = "y";
@ -55,7 +58,7 @@ public class BufferedImagePixelsAdapter extends BaseElement {
    @Override
    public void handleData(ByteBuffer buf, Link link) {
        if (verbose)
            System.out.println("[" + this + "] INFO: Data received: " + buf + ".");
        s_logger.debug("[" + this + "] INFO: Data received: " + buf + ".");

        int x = (Integer)buf.getMetadata(TARGET_X);
        int y = (Integer)buf.getMetadata(TARGET_Y);
@ -100,6 +103,7 @@ public class BufferedImagePixelsAdapter extends BaseElement {
                try {
                    System.arraycopy(intArray, srcLine * rectWidth, imageBuffer, x + dstLine * imageWidth, rectWidth);
                } catch (IndexOutOfBoundsException e) {
                    s_logger.info("[ignored] copy error", e);
                }
            }
            break;
@ -141,7 +145,7 @@ public class BufferedImagePixelsAdapter extends BaseElement {
        String actualData = Arrays.toString(((DataBufferInt)canvas.getOfflineImage().getRaster().getDataBuffer()).getData());
        String expectedData = Arrays.toString(pixelsLE);
        if (!actualData.equals(expectedData))
            System.err.println("Actual image: " + actualData + "\nExpected image: " + expectedData + ".");
        s_logger.error("Actual image: " + actualData + "\nExpected image: " + expectedData + ".");

    }

@ -16,11 +16,14 @@
// under the License.
package streamer;

import org.apache.log4j.Logger;

/**
 * Link to transfer data in bounds of single thread (synchronized transfer).
 * Must not be used to send data to elements served in different threads.
 */
public class SyncLink implements Link {
    private static final Logger s_logger = Logger.getLogger(SyncLink.class);

    /**
     * When null packet is pulled from source element, then make slight delay to
@ -112,7 +115,7 @@ public class SyncLink implements Link {
    @Override
    public void pushBack(ByteBuffer buf) {
        if (verbose)
            System.out.println("[" + this + "] INFO: Buffer pushed back: " + buf + ".");
        s_logger.debug("[" + this + "] INFO: Buffer pushed back: " + buf + ".");

        if (cacheBuffer != null) {
            ByteBuffer tmp = cacheBuffer.join(buf);
@ -151,7 +154,7 @@ public class SyncLink implements Link {
            throw new RuntimeException("[" + this + "] ERROR: link is not in push mode.");

        if (verbose)
            System.out.println("[" + this + "] INFO: Incoming buffer: " + buf + ".");
        s_logger.debug("[" + this + "] INFO: Incoming buffer: " + buf + ".");

        if (buf == null && cacheBuffer == null)
            return;
@ -172,7 +175,7 @@ public class SyncLink implements Link {
        while (cacheBuffer != null) {
            if (paused || hold) {
                if (verbose)
                    System.out.println("[" + this + "] INFO: Transfer is paused. Data in cache buffer: " + cacheBuffer + ".");
                s_logger.debug("[" + this + "] INFO: Transfer is paused. Data in cache buffer: " + cacheBuffer + ".");

                // Wait until rest of packet will be read
                return;
@ -180,7 +183,7 @@ public class SyncLink implements Link {

            if (expectedPacketSize > 0 && cacheBuffer.length < expectedPacketSize) {
                if (verbose)
                    System.out.println("[" + this + "] INFO: Transfer is suspended because available data is less than expected packet size. Expected packet size: "
                s_logger.debug("[" + this + "] INFO: Transfer is suspended because available data is less than expected packet size. Expected packet size: "
                    + expectedPacketSize + ", data in cache buffer: " + cacheBuffer + ".");

                // Wait until rest of packet will be read
@ -207,7 +210,7 @@ public class SyncLink implements Link {
    public void sendEvent(Event event, Direction direction) {

        if (verbose)
            System.out.println("[" + this + "] INFO: Event " + event + " is received.");
        s_logger.debug("[" + this + "] INFO: Event " + event + " is received.");

        // Shutdown main loop (if any) when STREAM_CLOSE event is received.
        switch (event) {
@ -254,13 +257,14 @@ public class SyncLink implements Link {

        if (paused) {
            if (verbose)
                System.out.println("[" + this + "] INFO: Cannot pull, link is paused.");
            s_logger.debug("[" + this + "] INFO: Cannot pull, link is paused.");

            // Make slight delay in such case, to avoid consuming 100% of CPU
            if (block) {
                try {
                    Thread.sleep(100);
                } catch (InterruptedException e) {
                    s_logger.info("[ignored] interrupted during pull", e);
                }
            }

@ -271,7 +275,7 @@ public class SyncLink implements Link {
        // then return it instead of asking for more data from source
        if (cacheBuffer != null && (expectedPacketSize == 0 || (expectedPacketSize > 0 && cacheBuffer.length >= expectedPacketSize))) {
            if (verbose)
                System.out.println("[" + this + "] INFO: Data pulled from cache buffer: " + cacheBuffer + ".");
            s_logger.debug("[" + this + "] INFO: Data pulled from cache buffer: " + cacheBuffer + ".");

            ByteBuffer tmp = cacheBuffer;
            cacheBuffer = null;
@ -290,7 +294,7 @@ public class SyncLink implements Link {
        // Can return something only when data was stored in buffer
        if (cacheBuffer != null && (expectedPacketSize == 0 || (expectedPacketSize > 0 && cacheBuffer.length >= expectedPacketSize))) {
            if (verbose)
                System.out.println("[" + this + "] INFO: Data pulled from source: " + cacheBuffer + ".");
            s_logger.debug("[" + this + "] INFO: Data pulled from source: " + cacheBuffer + ".");

            ByteBuffer tmp = cacheBuffer;
            cacheBuffer = null;
@ -366,7 +370,7 @@ public class SyncLink implements Link {
        sendEvent(Event.LINK_SWITCH_TO_PULL_MODE, Direction.IN);

        if (verbose)
            System.out.println("[" + this + "] INFO: Starting pull loop.");
        s_logger.debug("[" + this + "] INFO: Starting pull loop.");

        // Pull source in loop
        while (!shutdown) {
@ -382,7 +386,7 @@ public class SyncLink implements Link {
        }

        if (verbose)
            System.out.println("[" + this + "] INFO: Pull loop finished.");
        s_logger.debug("[" + this + "] INFO: Pull loop finished.");

    }

@ -397,7 +401,7 @@ public class SyncLink implements Link {
    @Override
    public void setPullMode() {
        if (verbose)
            System.out.println("[" + this + "] INFO: Switching to PULL mode.");
        s_logger.debug("[" + this + "] INFO: Switching to PULL mode.");

        pullMode = true;
    }

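Every println in this class is replaced by s_logger.debug while the old if (verbose) guard stays in place. An alternative worth noting (a sketch, not what this change does) is to guard on the logger's own level, which also skips the string concatenation when debug output is off:

    if (s_logger.isDebugEnabled()) {
        s_logger.debug("[" + this + "] INFO: Switching to PULL mode.");
    }
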
@ -16,6 +16,8 @@
// under the License.
package streamer.debug;

import org.apache.log4j.Logger;

import streamer.BaseElement;
import streamer.ByteBuffer;
import streamer.Direction;
@ -25,6 +27,7 @@ import streamer.Link;
import streamer.SyncLink;

public class FakeSource extends BaseElement {
    private static final Logger s_logger = Logger.getLogger(FakeSource.class);

    /**
     * Delay for null packets in poll method when blocking is requested, in
@ -66,6 +69,7 @@ public class FakeSource extends BaseElement {
        try {
            Thread.sleep(delay);
        } catch (InterruptedException e) {
            s_logger.info("[ignored] interrupted while creating latency", e);
        }
    }

@ -16,6 +16,8 @@
// under the License.
package com.cloud.consoleproxy;

import static com.cloud.utils.AutoCloseableUtil.closeAutoCloseable;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
@ -201,10 +203,7 @@ public class ConsoleProxyAjaxHandler implements HttpHandler {
            s_logger.warn("Exception while reading request body: ", e);
        } finally {
            if (closeStreamAfterRead) {
                try {
                    is.close();
                } catch (IOException e) {
                }
                closeAutoCloseable(is, "error closing stream after read");
            }
        }
        return sb.toString();

@ -25,6 +25,7 @@ import java.util.List;

import com.cloud.consoleproxy.ConsoleProxyRdpClient;
import com.cloud.consoleproxy.util.ImageHelper;
import com.cloud.consoleproxy.util.Logger;
import com.cloud.consoleproxy.util.TileInfo;
import com.cloud.consoleproxy.vnc.FrameBufferCanvas;

@ -35,6 +36,7 @@ public class RdpBufferedImageCanvas extends BufferedImageCanvas implements Frame
     *
     */
    private static final long serialVersionUID = 1L;
    private static final Logger s_logger = Logger.getLogger(RdpBufferedImageCanvas.class);

    private final ConsoleProxyRdpClient _rdpClient;

@ -66,6 +68,7 @@ public class RdpBufferedImageCanvas extends BufferedImageCanvas implements Frame
        try {
            imgBits = ImageHelper.jpegFromImage(bufferedImage);
        } catch (IOException e) {
            s_logger.info("[ignored] error encoding image to jpeg", e);
        }

        return imgBits;
@ -91,6 +94,7 @@ public class RdpBufferedImageCanvas extends BufferedImageCanvas implements Frame
        try {
            imgBits = ImageHelper.jpegFromImage(bufferedImage);
        } catch (IOException e) {
            s_logger.info("[ignored] error encoding image tiles to jpeg", e);
        }
        return imgBits;
    }

@ -27,6 +27,7 @@ import java.io.IOException;
import java.util.List;

import com.cloud.consoleproxy.util.ImageHelper;
import com.cloud.consoleproxy.util.Logger;
import com.cloud.consoleproxy.util.TileInfo;

/**
@ -35,6 +36,7 @@ import com.cloud.consoleproxy.util.TileInfo;
 */
public class BufferedImageCanvas extends Canvas implements FrameBufferCanvas {
    private static final long serialVersionUID = 1L;
    private static final Logger s_logger = Logger.getLogger(BufferedImageCanvas.class);

    // Offline screen buffer
    private BufferedImage offlineImage;
@ -42,7 +44,7 @@ public class BufferedImageCanvas extends Canvas implements FrameBufferCanvas {
    // Cached Graphics2D object for offline screen buffer
    private Graphics2D graphics;

    private PaintNotificationListener listener;
    private final PaintNotificationListener listener;

    public BufferedImageCanvas(PaintNotificationListener listener, int width, int height) {
        super();
@ -59,7 +61,7 @@ public class BufferedImageCanvas extends Canvas implements FrameBufferCanvas {
    }

    public void setCanvasSize(int width, int height) {
        this.offlineImage = new BufferedImage(width, height, BufferedImage.TYPE_INT_RGB);
        offlineImage = new BufferedImage(width, height, BufferedImage.TYPE_INT_RGB);
        graphics = offlineImage.createGraphics();

        setSize(offlineImage.getWidth(), offlineImage.getHeight());
@ -121,6 +123,7 @@ public class BufferedImageCanvas extends Canvas implements FrameBufferCanvas {
        try {
            imgBits = ImageHelper.jpegFromImage(bufferedImage);
        } catch (IOException e) {
            s_logger.info("[ignored] error encoding image to jpeg", e);
        }
        return imgBits;
    }
@ -144,6 +147,7 @@ public class BufferedImageCanvas extends Canvas implements FrameBufferCanvas {
        try {
            imgBits = ImageHelper.jpegFromImage(bufferedImage);
        } catch (IOException e) {
            s_logger.info("[ignored] error encoding image tiles to jpeg", e);
        }
        return imgBits;
    }

@ -23,9 +23,11 @@ import java.awt.image.DataBufferInt;
import java.io.DataInputStream;
import java.io.IOException;

import com.cloud.consoleproxy.util.Logger;
import com.cloud.consoleproxy.vnc.VncScreenDescription;

public class RawRect extends AbstractRect {
    private static final Logger s_logger = Logger.getLogger(RawRect.class);
    private final int[] buf;

    public RawRect(VncScreenDescription screen, int x, int y, int width, int height, DataInputStream is) throws IOException {
@ -63,6 +65,7 @@ public class RawRect extends AbstractRect {
            try {
                System.arraycopy(buf, srcLine * width, imageBuffer, x + dstLine * imageWidth, width);
            } catch (IndexOutOfBoundsException e) {
                s_logger.info("[ignored] buffer overflow!?!", e);
            }
        }
        break;

@ -89,6 +89,7 @@ public class UsageServer implements Daemon {
            try {
                Log4jConfigurer.initLogging(file.getAbsolutePath());
            } catch (FileNotFoundException e) {
                s_logger.info("[ignored] log initialisation failed: " + e.getLocalizedMessage(), e);
            }
            DOMConfigurator.configureAndWatch(file.getAbsolutePath());

@ -99,6 +100,7 @@ public class UsageServer implements Daemon {
            try {
                Log4jConfigurer.initLogging(file.getAbsolutePath());
            } catch (FileNotFoundException e) {
                s_logger.info("[ignored] log properties initialisation failed: " + e.getLocalizedMessage(), e);
            }
            PropertyConfigurator.configureAndWatch(file.getAbsolutePath());
        }

36
utils/src/com/cloud/utils/AutoCloseableUtil.java
Normal file
@ -0,0 +1,36 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.utils;

import org.apache.log4j.Logger;

public class AutoCloseableUtil {
    private final static Logger s_logger = Logger.getLogger(AutoCloseableUtil.class);

    public static void closeAutoCloseable(AutoCloseable ac, String message) {
        try {

            if (ac != null) {
                ac.close();
            }

        } catch (Exception e) {
            s_logger.warn("[ignored] " + message, e);
        }
    }

}

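This helper is what the clunky close-with-empty-catch constructs elsewhere in this change collapse into. A minimal usage sketch, assuming a reader that was opened earlier in the method, as in ExternalIpAddressAllocator above:

    BufferedReader in = null;
    try {
        in = new BufferedReader(new InputStreamReader(url.openStream()));
        // ... read from in ...
    } catch (IOException e) {
        return new IpAddr();
    } finally {
        // One call replaces the null check, the nested try and the empty catch block.
        closeAutoCloseable(in, "closing buffered reader");
    }

Note the design choice: closeAutoCloseable logs at warn and swallows the close failure, so it suits cleanup paths; code that must react to close failures is better served by try-with-resources.
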
@ -271,30 +271,4 @@ public class DateUtil {
        return (dateCalendar1.getTimeInMillis() - dateCalendar2.getTimeInMillis() )/1000;

    }

    // test only
    public static void main(String[] args) {
        TimeZone localTimezone = Calendar.getInstance().getTimeZone();
        TimeZone gmtTimezone = TimeZone.getTimeZone("GMT");
        TimeZone estTimezone = TimeZone.getTimeZone("EST");

        Date time = new Date();
        System.out.println("local time :" + getDateDisplayString(localTimezone, time));
        System.out.println("GMT time :" + getDateDisplayString(gmtTimezone, time));
        System.out.println("EST time :" + getDateDisplayString(estTimezone, time));
        //Test next run time. Expects interval and schedule as arguments
        if (args.length == 2) {
            System.out.println("Next run time: " + getNextRunTime(IntervalType.getIntervalType(args[0]), args[1], "GMT", time).toString());
        }

        time = new Date();
        DateFormat dfDate = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'Z");
        String str = dfDate.format(time);
        System.out.println("Formated TZ time string : " + str);
        try {
            Date dtParsed = DateUtil.parseTZDateString(str);
            System.out.println("Parsed TZ time string : " + dtParsed.toString());
        } catch (ParseException e) {
        }
    }
}

@ -19,7 +19,7 @@

package com.cloud.utils.net;

import com.cloud.utils.NumbersUtil;
import static com.cloud.utils.AutoCloseableUtil.closeAutoCloseable;

import java.io.BufferedReader;
import java.io.File;
@ -29,12 +29,17 @@ import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Formatter;

import org.apache.log4j.Logger;

import com.cloud.utils.NumbersUtil;

/**
 * copied from the public domain utility from John Burkard.
 * @author <a href="mailto:jb@eaio.com">Johann Burkard</a>
 * @version 2.1.3
 **/
public class MacAddress {
    private static final Logger s_logger = Logger.getLogger(MacAddress.class);
    private long _addr = 0;

    protected MacAddress() {
@ -124,23 +129,14 @@ public class MacAddress {
        }

        } catch (SecurityException ex) {
            s_logger.info("[ignored] security exception in static initializer of MacAddress", ex);
        } catch (IOException ex) {
            s_logger.info("[ignored] io exception in static initializer of MacAddress", ex);
        } finally {
            if (p != null) {
                if (in != null) {
                    try {
                        in.close();
                    } catch (IOException ex) {
                    }
                }
                try {
                    p.getErrorStream().close();
                } catch (IOException ex) {
                }
                try {
                    p.getOutputStream().close();
                } catch (IOException ex) {
                }
                closeAutoCloseable(in, "closing init process input stream");
                closeAutoCloseable(p.getErrorStream(), "closing init process error output stream");
                closeAutoCloseable(p.getOutputStream(), "closing init process std output stream");
                p.destroy();
            }
        }
@ -184,20 +180,9 @@ public class MacAddress {
            return reader.readLine();
        } finally {
            if (p != null) {
                if (reader != null) {
                    try {
                        reader.close();
                    } catch (IOException ex) {
                    }
                }
                try {
                    p.getErrorStream().close();
                } catch (IOException ex) {
                }
                try {
                    p.getOutputStream().close();
                } catch (IOException ex) {
                }
                closeAutoCloseable(reader, "closing process input stream");
                closeAutoCloseable(p.getErrorStream(), "closing process error output stream");
                closeAutoCloseable(p.getOutputStream(), "closing process std output stream");
                p.destroy();
            }
        }

@ -19,6 +19,8 @@

package com.cloud.utils.nio;

import static com.cloud.utils.AutoCloseableUtil.closeAutoCloseable;

import java.io.IOException;
import java.net.ConnectException;
import java.net.InetSocketAddress;
@ -41,10 +43,10 @@ import java.util.concurrent.TimeUnit;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;

import org.apache.cloudstack.utils.security.SSLUtils;

import org.apache.log4j.Logger;

import org.apache.cloudstack.utils.security.SSLUtils;

import com.cloud.utils.concurrency.NamedThreadFactory;

/**
@ -208,11 +210,8 @@ public abstract class NioConnection implements Runnable {
            if (s_logger.isTraceEnabled()) {
                s_logger.trace("Socket " + socket + " closed on read. Probably -1 returned: " + e.getMessage());
            }
            try {
                socketChannel.close();
                socket.close();
            } catch (IOException ignore) {
            }
            closeAutoCloseable(socketChannel, "accepting socketChannel");
            closeAutoCloseable(socket, "opened socket");
            return;
        }

@ -334,6 +333,7 @@ public abstract class NioConnection implements Runnable {
        try {
            ((SocketChannel)(todo.key)).close();
        } catch (IOException ignore) {
            s_logger.info("[ignored] failed to close socket channel");
        } finally {
            Link link = (Link)todo.att;
            link.terminated();
@ -420,6 +420,7 @@ public abstract class NioConnection implements Runnable {
                channel.close();
            }
        } catch (IOException ignore) {
            s_logger.info("[ignored] failed to close channel");
        }
    }
}

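A side note on the first hunk: closing a SocketChannel is specified to close its associated socket as well, so the second closeAutoCloseable call is defensive rather than required; the pair could in principle collapse to one call (sketch):

    // Closing the channel also closes the socket obtained from it.
    closeAutoCloseable(socketChannel, "accepting socketChannel");
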
@ -19,11 +19,11 @@

package com.cloud.utils.script;

import org.apache.log4j.Logger;

import java.io.BufferedReader;
import java.io.IOException;

import org.apache.log4j.Logger;

/**
 */
public abstract class OutputInterpreter {
@ -50,6 +50,7 @@ public abstract class OutputInterpreter {
    };

    public static class TimedOutLogger extends OutputInterpreter {
        private static final Logger s_logger = Logger.getLogger(TimedOutLogger.class);
        Process _process;

        public TimedOutLogger(Process process) {
@ -76,6 +77,7 @@ public abstract class OutputInterpreter {
                buff.append(reader.readLine());
            }
        } catch (IOException e) {
            s_logger.info("[ignored] cannot append line to buffer", e);
        }

        return buff.toString();

@ -26,7 +26,11 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.log4j.Logger;

public class Decoder {
    private static final Logger s_logger = Logger.getLogger(Decoder.class);

    private static Map<String, String> getParameters(URI uri) {
        String parameters = uri.getQuery();
        Map<String, String> params = new HashMap<String, String>();
@ -52,7 +56,7 @@ public class Decoder {
        try {
            size = Long.parseLong(params.get(EncodingType.SIZE.toString()));
        } catch (NumberFormatException e) {

            s_logger.info("[ignored] number not recognised", e);
        }
        DecodedDataObject obj =
            new DecodedDataObject(params.get(EncodingType.OBJTYPE.toString()), size, params.get(EncodingType.NAME.toString()), params.get(EncodingType.PATH.toString()),

60
utils/test/com/cloud/utils/DateUtilTest.java
Normal file
@ -0,0 +1,60 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.utils;

import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.TimeZone;

import com.cloud.utils.DateUtil.IntervalType;


public class DateUtilTest {

    // command line test tool
    public static void main(String[] args) {
        TimeZone localTimezone = Calendar.getInstance().getTimeZone();
        TimeZone gmtTimezone = TimeZone.getTimeZone("GMT");
        TimeZone estTimezone = TimeZone.getTimeZone("EST");

        Date time = new Date();
        System.out.println("local time :" + DateUtil.getDateDisplayString(localTimezone, time));
        System.out.println("GMT time :" + DateUtil.getDateDisplayString(gmtTimezone, time));
        System.out.println("EST time :" + DateUtil.getDateDisplayString(estTimezone, time));
        //Test next run time. Expects interval and schedule as arguments
        if (args.length == 2) {
            System.out.println("Next run time: " + DateUtil.getNextRunTime(IntervalType.getIntervalType(args[0]), args[1], "GMT", time).toString());
        }

        time = new Date();
        DateFormat dfDate = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'Z");
        String str = dfDate.format(time);
        System.out.println("Formatted TZ time string : " + str);
        try {
            Date dtParsed = DateUtil.parseTZDateString(str);
            System.out.println("Parsed TZ time string : " + dtParsed.toString());
        } catch (ParseException e) {
            System.err.println("Parsing failed\n string : " + str + "\nexception :" + e.getLocalizedMessage());
        }
    }

}

@ -19,6 +19,7 @@

package com.cloud.utils.exception;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.FileNotFoundException;
@ -35,6 +36,7 @@ public class ExceptionUtilTest {
            ExceptionUtil.rethrow(fnfe, IOException.class);
            fail();
        } catch (IOException e) {
            assertTrue("we won !?!", true);
        }

        ExceptionUtil.rethrow(fnfe, ClassNotFoundException.class);
@ -43,6 +45,7 @@ public class ExceptionUtilTest {
            ExceptionUtil.rethrow(fnfe, FileNotFoundException.class);
            fail();
        } catch (FileNotFoundException e) {
            assertTrue("we won !?!", true);
        }
    }

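From these assertions alone, rethrow(t, clazz) throws t when it is an instance of clazz and simply returns otherwise; a minimal sketch of such a helper (an illustration of the contract, not the actual CloudStack implementation):

    // Sketch: rethrow t typed as T when it matches, otherwise fall through.
    public static <T extends Throwable> void rethrow(final Throwable t, final Class<T> clazz) throws T {
        if (clazz.isInstance(t)) {
            throw clazz.cast(t);
        }
    }
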
@ -31,7 +31,7 @@ import org.apache.log4j.Logger;
public class SnapshotDescriptor {
    private static final Logger s_logger = Logger.getLogger(SnapshotDescriptor.class);

    private Properties _properties = new Properties();
    private final Properties _properties = new Properties();

    public SnapshotDescriptor() {
    }
@ -90,11 +90,9 @@ public class SnapshotDescriptor {
    }

    public byte[] getVmsdContent() {
        BufferedWriter out = null;
        ByteArrayOutputStream bos = new ByteArrayOutputStream();

        try {
            out = new BufferedWriter(new OutputStreamWriter(bos, "UTF-8"));
        try (BufferedWriter out = new BufferedWriter(new OutputStreamWriter(bos, "UTF-8"));) {

            out.write(".encoding = \"UTF-8\"");
            out.newLine();
@ -165,13 +163,6 @@ public class SnapshotDescriptor {
        } catch (IOException e) {
            assert (false);
            s_logger.error("Unexpected exception ", e);
        } finally {
            if (out != null) {
                try {
                    out.close();
                } catch (IOException e) {
                }
            }
        }

        return bos.toByteArray();
@ -288,8 +279,8 @@ public class SnapshotDescriptor {
    }

    public static class DiskInfo {
        private String _diskFileName;
        private String _deviceName;
        private final String _diskFileName;
        private final String _deviceName;

        public DiskInfo(String diskFileName, String deviceName) {
            _diskFileName = diskFileName;