diff --git a/engine/schema/src/com/cloud/upgrade/dao/Upgrade2214to30.java b/engine/schema/src/com/cloud/upgrade/dao/Upgrade2214to30.java
index 62fdcf10dd2..58dd916ea6e 100644
--- a/engine/schema/src/com/cloud/upgrade/dao/Upgrade2214to30.java
+++ b/engine/schema/src/com/cloud/upgrade/dao/Upgrade2214to30.java
@@ -34,6 +34,7 @@ import org.apache.log4j.Logger;
 import com.cloud.offering.NetworkOffering;
 import com.cloud.utils.crypt.DBEncryptionUtil;
 import com.cloud.utils.crypt.EncryptionSecretKeyChecker;
+import com.cloud.utils.db.TransactionLegacy;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.script.Script;
 
@@ -110,19 +111,6 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade {
         return new File[] {new File(script)};
     }
 
-    protected void closePstmts(List<PreparedStatement> pstmt2Close) {
-        for (PreparedStatement pstmt : pstmt2Close) {
-            try {
-                if (pstmt != null && !pstmt.isClosed()) {
-                    pstmt.close();
-                }
-            } catch (SQLException e) {
-                // It's not possible to recover from this and we need to continue closing
-                e.printStackTrace();
-            }
-        }
-    }
-
     private void setupPhysicalNetworks(Connection conn) {
         /**
          * for each zone:
@@ -374,7 +362,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade {
         } catch (SQLException e) {
             throw new CloudRuntimeException("Exception while adding PhysicalNetworks", e);
         } finally {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
     }
 
@@ -454,7 +442,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade {
         } catch (UnsupportedEncodingException e) {
             throw new CloudRuntimeException("Unable encrypt host_details values ", e);
         } finally {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
         s_logger.debug("Done encrypting host details");
     }
 
@@ -500,7 +488,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade {
         } catch (UnsupportedEncodingException e) {
             throw new CloudRuntimeException("Unable encrypt vm_instance vnc_password ", e);
         } finally {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
         s_logger.debug("Done encrypting vm_instance vnc_password");
     }
 
@@ -533,7 +521,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade {
         } catch (UnsupportedEncodingException e) {
             throw new CloudRuntimeException("Unable encrypt user secret key ", e);
         } finally {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
         s_logger.debug("Done encrypting user keys");
     }
 
@@ -566,7 +554,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade {
         } catch (UnsupportedEncodingException e) {
             throw new CloudRuntimeException("Unable encrypt vpn_users password ", e);
         } finally {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
        }
         s_logger.debug("Done encrypting vpn_users password");
     }
 
@@ -687,7 +675,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade {
         } catch (SQLException e) {
             throw new CloudRuntimeException("Unable to create service/provider map for network offerings", e);
         } finally {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
     }
 
@@ -725,7 +713,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade {
         } catch (SQLException e) {
             throw new CloudRuntimeException("Unable to update domain network ref", e);
         } finally {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
     }
 
@@ -759,7 +747,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade {
         } catch (SQLException e) {
             throw new CloudRuntimeException("Unable to create service/provider map for networks", e);
         } finally {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
     }
 
@@ -873,7 +861,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade {
                 pstmt.close();
             } catch (SQLException e) {
             }
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
     }
 
@@ -904,7 +892,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade {
         } catch (SQLException e) {
             throw new CloudRuntimeException("Unable to update op_host_capacity table. ", e);
         } finally {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
     }
 
@@ -1011,7 +999,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade {
                 pstmt.close();
             } catch (SQLException e) {
             }
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
     }
 
@@ -1084,7 +1072,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade {
                 zoneIds.add(rs.getLong(1));
             }
         } catch (SQLException e) {
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
             throw new CloudRuntimeException("Unable to switch networks to the new network offering", e);
         }
 
@@ -1167,7 +1155,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade {
                 pstmt.close();
             } catch (SQLException e) {
             }
-            closePstmts(pstmt2Close);
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
     }
 
diff --git a/framework/db/src/com/cloud/utils/db/TransactionLegacy.java b/framework/db/src/com/cloud/utils/db/TransactionLegacy.java
index b36c02746a6..ac0ea21d1f1 100755
--- a/framework/db/src/com/cloud/utils/db/TransactionLegacy.java
+++ b/framework/db/src/com/cloud/utils/db/TransactionLegacy.java
@@ -769,7 +769,7 @@ public class TransactionLegacy {
         try {
             // we should only close db connection when it is not user managed
-            if (this._dbId != CONNECTED_DB) {
+            if (_dbId != CONNECTED_DB) {
                 if (s_connLogger.isTraceEnabled()) {
                     s_connLogger.trace("Closing DB connection: dbconn" + System.identityHashCode(_conn));
                 }
@@ -1212,6 +1212,26 @@ public class TransactionLegacy {
      * @param conn
      */
     protected void setConnection(Connection conn) {
-        this._conn = conn;
+        _conn = conn;
     }
+
+    /**
+     * Receives a list of {@link PreparedStatement} and quietly closes all of them, which
+     * triggers also closing their dependent objects, like a {@link ResultSet}
+     *
+     * @param pstmt2Close
+     */
+    public static void closePstmts(List<PreparedStatement> pstmt2Close) {
+        for (PreparedStatement pstmt : pstmt2Close) {
+            try {
+                if (pstmt != null && !pstmt.isClosed()) {
+                    pstmt.close();
+                }
+            } catch (SQLException e) {
+                // It's not possible to recover from this and we need to continue closing
+                e.printStackTrace();
+            }
+        }
+    }
+
 }
diff --git a/usage/pom.xml b/usage/pom.xml
index a0055ccc12c..ecd765eabaa 100644
--- a/usage/pom.xml
+++ b/usage/pom.xml
@@ -44,6 +44,13 @@
     <dependency>
       <groupId>commons-daemon</groupId>
       <artifactId>commons-daemon</artifactId>
     </dependency>
+
+    <dependency>
+      <groupId>org.dbunit</groupId>
+      <artifactId>dbunit</artifactId>
+      <version>2.4.9</version>
+      <scope>test</scope>
+    </dependency>
@@ -213,5 +220,24 @@
     </profile>
+    <profile>
+      <id>integration</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-failsafe-plugin</artifactId>
+            <executions>
+              <execution>
+                <goals>
+                  <goal>integration-test</goal>
+                  <goal>verify</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
   </profiles>
 </project>
diff --git a/usage/src/com/cloud/usage/UsageSanityChecker.java b/usage/src/com/cloud/usage/UsageSanityChecker.java
index 34032bcbd74..5e6123bc777 100644
--- a/usage/src/com/cloud/usage/UsageSanityChecker.java
+++ b/usage/src/com/cloud/usage/UsageSanityChecker.java
@@ -25,191 +25,182 @@ import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.log4j.Logger;
 
 import com.cloud.utils.db.TransactionLegacy;
 
+/**
+ * This class must not be used concurrently because its state changes often during
+ * execution in a non synchronized way
+ */
 public class UsageSanityChecker {
 
-    private StringBuffer errors;
-    private String lastCheckId = "";
-    private final String lastCheckFile = "/usr/local/libexec/sanity-check-last-id";
+    protected static final Logger s_logger = Logger.getLogger(UsageSanityChecker.class);
+    protected static final int DEFAULT_AGGREGATION_RANGE = 1440;
+    protected StringBuilder errors;
+    protected List<CheckCase> checkCases;
+    protected String lastCheckFile = "/usr/local/libexec/sanity-check-last-id";
+    protected String lastCheckId = "";
+    protected int lastId = -1;
+    protected int maxId = -1;
+    protected Connection conn;
 
-    private boolean checkMaxUsage(Connection conn) throws SQLException {
+    protected void reset() {
+        errors = new StringBuilder();
+        checkCases = new ArrayList<CheckCase>();
+    }
 
-        PreparedStatement pstmt = conn.prepareStatement("SELECT value FROM `cloud`.`configuration` where name = 'usage.stats.job.aggregation.range'");
-        ResultSet rs = pstmt.executeQuery();
+    protected boolean checkItemCountByPstmt() throws SQLException {
+        boolean checkOk = true;
 
-        int aggregationRange = 1440;
-        if (rs.next()) {
-            aggregationRange = rs.getInt(1);
-        } else {
-            System.out.println("Failed to retrieve aggregation range. Using default : " + aggregationRange);
+        for(CheckCase check : checkCases) {
+            checkOk &= checkItemCountByPstmt(check);
+        }
+
+        return checkOk;
+    }
+
+    protected boolean checkItemCountByPstmt(CheckCase checkCase) throws SQLException {
+        List<PreparedStatement> pstmt2Close = new ArrayList<PreparedStatement>();
+        boolean checkOk = true;
+
+        /*
+         * Check for item usage records which are created after it is removed
+         */
+        PreparedStatement pstmt;
+        try {
+            pstmt = conn.prepareStatement(checkCase.sqlTemplate);
+            if(checkCase.checkId) {
+                pstmt.setInt(1, lastId);
+                pstmt.setInt(2, maxId);
+            }
+
+            pstmt2Close.add(pstmt);
+            ResultSet rs = pstmt.executeQuery();
+            if (rs.next() && (rs.getInt(1) > 0)) {
+                errors.append(String.format("Error: Found %s %s\n", rs.getInt(1), checkCase.itemName));
+                checkOk = false;
+            }
+        } catch (SQLException e) {
+            throw e;
+        } finally {
+            TransactionLegacy.closePstmts(pstmt2Close);
+        }
+        return checkOk;
+    }
+
+    protected void checkMaxUsage() throws SQLException {
+        int aggregationRange = DEFAULT_AGGREGATION_RANGE;
+        List<PreparedStatement> pstmt2Close = new ArrayList<PreparedStatement>();
+        try {
+            PreparedStatement pstmt = conn.prepareStatement(
+                    "SELECT value FROM `cloud`.`configuration` where name = 'usage.stats.job.aggregation.range'");
+            pstmt2Close.add(pstmt);
+            ResultSet rs = pstmt.executeQuery();
+
+            if (rs.next()) {
+                aggregationRange = rs.getInt(1);
+            } else {
+                s_logger.debug("Failed to retrieve aggregation range. Using default : " + aggregationRange);
+            }
+        } catch (SQLException e) {
+            throw e;
+        } finally {
+            TransactionLegacy.closePstmts(pstmt2Close);
         }
 
         int aggregationHours = aggregationRange / 60;
 
-        /*
-         * Check for usage records with raw_usage > aggregationHours
-         */
-        pstmt =
-            conn.prepareStatement("SELECT count(*) FROM `cloud_usage`.`cloud_usage` cu where usage_type not in (4,5) and raw_usage > " + aggregationHours + lastCheckId);
-        rs = pstmt.executeQuery();
-        if (rs.next() && (rs.getInt(1) > 0)) {
-            errors.append("Error: Found " + rs.getInt(1) + " usage records with raw_usage > " + aggregationHours);
-            errors.append("\n");
-            return false;
-        }
-        return true;
+        addCheckCase("SELECT count(*) FROM `cloud_usage`.`cloud_usage` cu where usage_type not in (4,5) and raw_usage > "
+                + aggregationHours,
+                "usage records with raw_usage > " + aggregationHours,
+                lastCheckId);
     }
 
-    private boolean checkVmUsage(Connection conn) throws SQLException {
-        boolean success = true;
-        /*
-         * Check for Vm usage records which are created after the vm is destroyed
-         */
-        PreparedStatement pstmt =
-            conn.prepareStatement("select count(*) from cloud_usage.cloud_usage cu inner join cloud.vm_instance vm where vm.type = 'User' " +
-                "and cu.usage_type in (1 , 2) and cu.usage_id = vm.id and cu.start_date > vm.removed" + lastCheckId);
-        ResultSet rs = pstmt.executeQuery();
-        if (rs.next() && (rs.getInt(1) > 0)) {
-            errors.append("Error: Found " + rs.getInt(1) + " Vm usage records which are created after Vm is destroyed");
-            errors.append("\n");
-            success = false;
-        }
+    protected void checkVmUsage() {
+        addCheckCase("select count(*) from cloud_usage.cloud_usage cu inner join cloud.vm_instance vm " +
+                "where vm.type = 'User' and cu.usage_type in (1 , 2) " +
+                "and cu.usage_id = vm.id and cu.start_date > vm.removed ",
+                "Vm usage records which are created after Vm is destroyed",
+                lastCheckId);
 
-        /*
-         * Check for Vms which have multiple running vm records in helper table
-         */
-        pstmt =
-            conn.prepareStatement("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_vm_instance where usage_type =1 "
-                + "and end_date is null group by vm_instance_id having count(vm_instance_id) > 1) c ;");
-        rs = pstmt.executeQuery();
-        if (rs.next() && (rs.getInt(1) > 0)) {
-            errors.append("Error: Found " + rs.getInt(1) + " duplicate running Vm entries in vm usage helper table");
-            errors.append("\n");
-            success = false;
-        }
+        addCheckCase("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_vm_instance " +
+                "where usage_type =1 and end_date is null group by vm_instance_id " +
+                "having count(vm_instance_id) > 1) c ;",
+                "duplicate running Vm entries in vm usage helper table");
 
-        /*
-         * Check for Vms which have multiple allocated vm records in helper table
-         */
-        pstmt =
-            conn.prepareStatement("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_vm_instance where usage_type =2 "
-                + "and end_date is null group by vm_instance_id having count(vm_instance_id) > 1) c ;");
-        rs = pstmt.executeQuery();
-        if (rs.next() && (rs.getInt(1) > 0)) {
-            errors.append("Error: Found " + rs.getInt(1) + " duplicate allocated Vm entries in vm usage helper table");
-            errors.append("\n");
-            success = false;
-        }
+        addCheckCase("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_vm_instance " +
+                "where usage_type =2 and end_date is null group by vm_instance_id " +
+                "having count(vm_instance_id) > 1) c ;",
+                "duplicate allocated Vm entries in vm usage helper table");
 
-        /*
-         * Check for Vms which have running vm entry without allocated vm entry in helper table
-         */
-        pstmt =
-            conn.prepareStatement("select count(vm_instance_id) from cloud_usage.usage_vm_instance o where o.end_date is null and o.usage_type=1 and not exists "
-                + "(select 1 from cloud_usage.usage_vm_instance i where i.vm_instance_id=o.vm_instance_id and usage_type=2 and i.end_date is null)");
-        rs = pstmt.executeQuery();
-        if (rs.next() && (rs.getInt(1) > 0)) {
-            errors.append("Error: Found " + rs.getInt(1) + " running Vm entries without corresponding allocated entries in vm usage helper table");
-            errors.append("\n");
-            success = false;
-        }
-        return success;
+        addCheckCase("select count(vm_instance_id) from cloud_usage.usage_vm_instance o " +
+                "where o.end_date is null and o.usage_type=1 and not exists " +
+                "(select 1 from cloud_usage.usage_vm_instance i where " +
+                "i.vm_instance_id=o.vm_instance_id and usage_type=2 and i.end_date is null)",
+                "running Vm entries without corresponding allocated entries in vm usage helper table");
     }
 
-    private boolean checkVolumeUsage(Connection conn) throws SQLException {
-        boolean success = true;
-        /*
-         * Check for Volume usage records which are created after the volume is removed
-         */
-        PreparedStatement pstmt =
-            conn.prepareStatement("select count(*) from cloud_usage.cloud_usage cu inner join cloud.volumes v " +
-                "where cu.usage_type = 6 and cu.usage_id = v.id and cu.start_date > v.removed" + lastCheckId);
-        ResultSet rs = pstmt.executeQuery();
-        if (rs.next() && (rs.getInt(1) > 0)) {
-            errors.append("Error: Found " + rs.getInt(1) + " volume usage records which are created after volume is removed");
-            errors.append("\n");
-            success = false;
-        }
+    protected void checkVolumeUsage() {
+        addCheckCase("select count(*) from cloud_usage.cloud_usage cu inner join cloud.volumes v where " +
+                "cu.usage_type = 6 and cu.usage_id = v.id and cu.start_date > v.removed ",
+                "volume usage records which are created after volume is removed",
+                lastCheckId);
 
-        /*
-         * Check for duplicate records in volume usage helper table
-         */
-        pstmt =
-            conn.prepareStatement("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_volume "
-                + "where deleted is null group by id having count(id) > 1) c;");
-        rs = pstmt.executeQuery();
-        if (rs.next() && (rs.getInt(1) > 0)) {
-            errors.append("Error: Found " + rs.getInt(1) + " duplicate records is volume usage helper table");
-            errors.append("\n");
-            success = false;
-        }
-        return success;
+        addCheckCase("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_volume " +
+                "where deleted is null group by id having count(id) > 1) c;",
+                "duplicate records in volume usage helper table");
     }
 
-    private boolean checkTemplateISOUsage(Connection conn) throws SQLException {
-        /*
-         * Check for Template/ISO usage records which are created after it is removed
-         */
-        PreparedStatement pstmt =
-            conn.prepareStatement("select count(*) from cloud_usage.cloud_usage cu inner join cloud.template_zone_ref tzr " +
-                "where cu.usage_id = tzr.template_id and cu.zone_id = tzr.zone_id and cu.usage_type in (7,8) and cu.start_date > tzr.removed" + lastCheckId);
-        ResultSet rs = pstmt.executeQuery();
-        if (rs.next() && (rs.getInt(1) > 0)) {
-            errors.append("Error: Found " + rs.getInt(1) + " template/ISO usage records which are created after it is removed");
-            errors.append("\n");
-            return false;
-        }
-        return true;
+    protected void checkTemplateISOUsage() {
+        addCheckCase("select count(*) from cloud_usage.cloud_usage cu inner join cloud.template_zone_ref tzr where " +
"cu.usage_id = tzr.template_id and cu.zone_id = tzr.zone_id and cu.usage_type in (7,8) and cu.start_date > tzr.removed ", + "template/ISO usage records which are created after it is removed", + lastCheckId); } - private boolean checkSnapshotUsage(Connection conn) throws SQLException { - /* - * Check for snapshot usage records which are created after snapshot is removed - */ - PreparedStatement pstmt = - conn.prepareStatement("select count(*) from cloud_usage.cloud_usage cu inner join cloud.snapshots s " + - "where cu.usage_id = s.id and cu.usage_type = 9 and cu.start_date > s.removed" + lastCheckId); - ResultSet rs = pstmt.executeQuery(); - if (rs.next() && (rs.getInt(1) > 0)) { - errors.append("Error: Found " + rs.getInt(1) + " snapshot usage records which are created after snapshot is removed"); - errors.append("\n"); - return false; - } - return true; + protected void checkSnapshotUsage() { + addCheckCase("select count(*) from cloud_usage.cloud_usage cu inner join cloud.snapshots s where " + + "cu.usage_id = s.id and cu.usage_type = 9 and cu.start_date > s.removed ", + "snapshot usage records which are created after it is removed", + lastCheckId); } - public String runSanityCheck() throws SQLException { + protected void readLastCheckId(){ + BufferedReader reader = null; try { - BufferedReader reader = new BufferedReader(new FileReader(lastCheckFile)); - String last_id = null; - if ((reader != null) && (last_id = reader.readLine()) != null) { - int lastId = Integer.parseInt(last_id); - if (lastId > 0) { - lastCheckId = " and cu.id > " + last_id; - } + reader = new BufferedReader(new FileReader(lastCheckFile)); + String lastIdText = null; + lastId = -1; + if ((reader != null) && (lastIdText = reader.readLine()) != null) { + lastId = Integer.parseInt(lastIdText); + } + } catch (IOException e) { + s_logger.error(e); + } finally { + try { + reader.close(); + } catch (IOException e) { + s_logger.error(e); } - reader.close(); - } catch (Exception e) { - // Error while reading last check id } + } - Connection conn = TransactionLegacy.getStandaloneConnection(); - int maxId = 0; + protected void readMaxId() throws SQLException { PreparedStatement pstmt = conn.prepareStatement("select max(id) from cloud_usage.cloud_usage"); ResultSet rs = pstmt.executeQuery(); + maxId = -1; if (rs.next() && (rs.getInt(1) > 0)) { maxId = rs.getInt(1); - lastCheckId += " and cu.id <= " + maxId; + lastCheckId += " and cu.id <= ?"; } - errors = new StringBuffer(); - checkMaxUsage(conn); - checkVmUsage(conn); - checkVolumeUsage(conn); - checkTemplateISOUsage(conn); - checkSnapshotUsage(conn); - FileWriter fstream; + } + + protected void updateNewMaxId() { + FileWriter fstream = null; try { fstream = new FileWriter(lastCheckFile); BufferedWriter out = new BufferedWriter(fstream); @@ -217,20 +208,89 @@ public class UsageSanityChecker { out.close(); } catch (IOException e) { // Error while writing last check id + } finally { + if (fstream != null) { + try { + fstream.close(); + } catch (IOException e) { + s_logger.error(e); + } + } } + } + + public String runSanityCheck() throws SQLException { + + readLastCheckId(); + if (lastId > 0) { + lastCheckId = " and cu.id > ?"; + } + + conn = getConnection(); + readMaxId(); + + reset(); + + checkMaxUsage(); + checkVmUsage(); + checkVolumeUsage(); + checkTemplateISOUsage(); + checkSnapshotUsage(); + + checkItemCountByPstmt(); + return errors.toString(); } + /** + * Local acquisition of {@link Connection} to remove static cling + * @return + */ + protected Connection getConnection() 
+        return TransactionLegacy.getStandaloneConnection();
+    }
+
     public static void main(String args[]) {
         UsageSanityChecker usc = new UsageSanityChecker();
         String sanityErrors;
         try {
             sanityErrors = usc.runSanityCheck();
             if (sanityErrors.length() > 0) {
-                System.out.println(sanityErrors.toString());
+                s_logger.error(sanityErrors.toString());
             }
         } catch (SQLException e) {
             e.printStackTrace();
         }
     }
+
+    protected void addCheckCase(String sqlTemplate, String itemName, String lastCheckId) {
+        checkCases.add(new CheckCase(sqlTemplate, itemName, lastCheckId));
+    }
+
+    protected void addCheckCase(String sqlTemplate, String itemName) {
+        checkCases.add(new CheckCase(sqlTemplate, itemName));
+    }
 }
+
+
+/**
+ * Just an abstraction of the kind of check to repeat across these cases
+ * encapsulating what change for each specific case
+ */
+class CheckCase {
+    public String sqlTemplate;
+    public String itemName;
+    public boolean checkId = false;
+
+    public CheckCase(String sqlTemplate, String itemName, String lastCheckId) {
+        checkId = true;
+        this.sqlTemplate = sqlTemplate + lastCheckId;
+        this.itemName = itemName;
+    }
+
+    public CheckCase(String sqlTemplate, String itemName) {
+        checkId = false;
+        this.sqlTemplate = sqlTemplate;
+        this.itemName = itemName;
+    }
+}
\ No newline at end of file
diff --git a/usage/test/com/cloud/usage/UsageSanityCheckerIT.java b/usage/test/com/cloud/usage/UsageSanityCheckerIT.java
new file mode 100644
index 00000000000..412bebc6106
--- /dev/null
+++ b/usage/test/com/cloud/usage/UsageSanityCheckerIT.java
@@ -0,0 +1,144 @@
+package com.cloud.usage;
+
+import org.dbunit.DatabaseUnitException;
+import org.dbunit.dataset.DataSetException;
+import org.dbunit.dataset.IDataSet;
+import org.dbunit.dataset.xml.FlatXmlDataSetBuilder;
+import org.dbunit.ext.mysql.MySqlConnection;
+import org.dbunit.operation.DatabaseOperation;
+
+import static org.junit.Assert.assertEquals;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import org.mockito.Mockito;
+
+import com.cloud.utils.PropertiesUtil;
+
+import java.io.FileNotFoundException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Properties;
+
+@RunWith(Parameterized.class)
+public class UsageSanityCheckerIT {
+
+    protected Connection cloudConn;
+
+    protected Connection usageConn;
+
+    protected MySqlConnection dbuUsageConn;
+
+    protected MySqlConnection dbuCloudConn;
+
+    protected Properties properties = new Properties();
+
+    protected IDataSet cloudDataSet;
+
+    protected IDataSet usageDataSet;
+
+    protected String cloudDbuFileName;
+
+    protected String usageDbuFileName;
+
+    protected String expectedErrors;
+
+    protected static final String EXPECTED_ERRORS_1 = "Error: Found 2 usage records with raw_usage > 10\n" +
+            "Error: Found 1 Vm usage records which are created after Vm is destroyed\n" +
+            "Error: Found 2 duplicate allocated Vm entries in vm usage helper table\n" +
+            "Error: Found 1 running Vm entries without corresponding allocated entries in vm usage helper table\n" +
+            "Error: Found 1 volume usage records which are created after volume is removed\n" +
+            "Error: Found 1 template/ISO usage records which are created after it is removed\n" +
+            "Error: Found 1 snapshot usage records which are created after it is removed\n";
+
EXPECTED_ERRORS_2 = "Error: Found 3 usage records with raw_usage > 10\n" + + "Error: Found 1 Vm usage records which are created after Vm is destroyed\n" + + "Error: Found 8 duplicate running Vm entries in vm usage helper table\n" + + "Error: Found 4 duplicate allocated Vm entries in vm usage helper table\n" + + "Error: Found 4 running Vm entries without corresponding allocated entries in vm usage helper table\n" + + "Error: Found 2 volume usage records which are created after volume is removed\n" + + "Error: Found 6 duplicate records in volume usage helper table\n" + + "Error: Found 2 template/ISO usage records which are created after it is removed\n" + + "Error: Found 1 snapshot usage records which are created after it is removed\n"; + + protected static final String EXPECTED_ERRORS_3 = ""; + + + public UsageSanityCheckerIT(String cloudDbuFileName, String usageDbuFileName, + String expectedErrors) { + this.cloudDbuFileName = cloudDbuFileName; + this.usageDbuFileName = usageDbuFileName; + this.expectedErrors = expectedErrors; + } + + @Parameters + public static Collection data() { + Object [][] data = new Object[][] { + {"cloud1.xml", "cloud_usage1.xml", EXPECTED_ERRORS_1}, + {"cloud2.xml", "cloud_usage2.xml", EXPECTED_ERRORS_2}, + {"cloud3.xml", "cloud_usage3.xml", EXPECTED_ERRORS_3} + }; + return Arrays.asList(data); + } + + protected Connection createConnection(String dbSchema) throws SQLException { + String cloudDbUrl = "jdbc:mysql://"+properties.getProperty("db."+dbSchema+".host") + + ":" + properties.getProperty("db."+dbSchema+".port") + "/" + + properties.getProperty("db."+dbSchema+".name"); + return DriverManager.getConnection(cloudDbUrl, properties.getProperty("db."+dbSchema+".username"), + properties.getProperty("db."+dbSchema+".password")); + } + + @Before + public void setUp() throws Exception { + PropertiesUtil.loadFromFile(properties, PropertiesUtil.findConfigFile("db.properties")); + + Class.forName("com.mysql.jdbc.Driver"); + cloudConn = createConnection("cloud"); + usageConn = createConnection("usage"); + + dbuCloudConn = new MySqlConnection(cloudConn, properties.getProperty("db.cloud.name")); + dbuUsageConn = new MySqlConnection(usageConn, properties.getProperty("db.usage.name")); + cloudDataSet = getCloudDataSet(); + usageDataSet = getUsageDataSet(); + DatabaseOperation.CLEAN_INSERT.execute(dbuCloudConn, cloudDataSet); + DatabaseOperation.CLEAN_INSERT.execute(dbuUsageConn, usageDataSet); + } + + @After + public void tearDown() throws DataSetException, FileNotFoundException, DatabaseUnitException, SQLException { + DatabaseOperation.DELETE_ALL.execute(dbuCloudConn, getCloudDataSet()); + DatabaseOperation.DELETE_ALL.execute(dbuUsageConn, getUsageDataSet()); + } + + @Test + public void testRunSanityCheck() throws SQLException, ClassNotFoundException, FileNotFoundException, DatabaseUnitException { + // Prepare + UsageSanityChecker checker = Mockito.spy(new UsageSanityChecker()); + Mockito.doReturn(cloudConn).when(checker).getConnection(); + Mockito.doNothing().when(checker).readLastCheckId(); + Mockito.doNothing().when(checker).updateNewMaxId(); + checker.lastId = 2; + + // Execute + String actualErrors = checker.runSanityCheck(); + + // Assert + assertEquals("Expected errors not found", expectedErrors, actualErrors); + } + + protected IDataSet getCloudDataSet() throws DataSetException, FileNotFoundException { + return new FlatXmlDataSetBuilder().build(PropertiesUtil.openStreamFromURL(cloudDbuFileName)); + } + + protected IDataSet getUsageDataSet() throws DataSetException, 
+    protected IDataSet getUsageDataSet() throws DataSetException, FileNotFoundException {
+        return new FlatXmlDataSetBuilder().build(PropertiesUtil.openStreamFromURL(usageDbuFileName));
+    }
+}
diff --git a/usage/test/com/cloud/usage/UsageSanityCheckerTest.java b/usage/test/com/cloud/usage/UsageSanityCheckerTest.java
new file mode 100644
index 00000000000..346a8ad3cdb
--- /dev/null
+++ b/usage/test/com/cloud/usage/UsageSanityCheckerTest.java
@@ -0,0 +1,52 @@
+package com.cloud.usage;
+
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import junit.framework.TestCase;
+
+public class UsageSanityCheckerTest extends TestCase {
+
+    @Test
+    public void testCheckItemCountByPstmt() throws SQLException {
+        // Prepare
+        // Mock dependencies to exclude from the test
+        String sqlTemplate1 = "SELECT * FROM mytable1";
+        String sqlTemplate2 = "SELECT * FROM mytable2";
+
+        Connection conn = Mockito.mock(Connection.class);
+        PreparedStatement pstmt = Mockito.mock(PreparedStatement.class);
+        ResultSet rs = Mockito.mock(ResultSet.class);
+
+        Mockito.when(conn.prepareStatement(sqlTemplate1)).thenReturn(pstmt);
+        Mockito.when(conn.prepareStatement(sqlTemplate2)).thenReturn(pstmt);
+        Mockito.when(pstmt.executeQuery()).thenReturn(rs, rs);
+
+        // First if: true -> 8
+        // Second loop: true -> 16
+        Mockito.when(rs.next()).thenReturn(true, true);
+        Mockito.when(rs.getInt(1)).thenReturn(8, 8, 16, 16);
+
+        // Prepare class under test
+        UsageSanityChecker checker = new UsageSanityChecker();
+        checker.conn = conn;
+        checker.reset();
+        checker.addCheckCase(sqlTemplate1, "item1");
+        checker.addCheckCase(sqlTemplate2, "item2");
+
+        // Execute
+        checker.checkItemCountByPstmt();
+
+        // Verify
+        Pattern pattern = Pattern.compile(".*8.*item1.*\n.*16.*item2.*");
+        Matcher matcher = pattern.matcher(checker.errors);
+        assertTrue("Didn't create complete errors. It should create 2 errors: 8 item1 and 16 item2", matcher.find());
+    }
+}
diff --git a/usage/test/resources/cloud1.xml b/usage/test/resources/cloud1.xml
new file mode 100644
index 00000000000..e56ed073e01
--- /dev/null
+++ b/usage/test/resources/cloud1.xml
@@ -0,0 +1,15 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/usage/test/resources/cloud2.xml b/usage/test/resources/cloud2.xml
new file mode 100644
index 00000000000..e56ed073e01
--- /dev/null
+++ b/usage/test/resources/cloud2.xml
@@ -0,0 +1,15 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/usage/test/resources/cloud3.xml b/usage/test/resources/cloud3.xml
new file mode 100644
index 00000000000..6e7a7453d06
--- /dev/null
+++ b/usage/test/resources/cloud3.xml
@@ -0,0 +1,4 @@
+
+
+
+
diff --git a/usage/test/resources/cloud_usage1.xml b/usage/test/resources/cloud_usage1.xml
new file mode 100644
index 00000000000..aaad17d3322
--- /dev/null
+++ b/usage/test/resources/cloud_usage1.xml
@@ -0,0 +1,17 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/usage/test/resources/cloud_usage2.xml b/usage/test/resources/cloud_usage2.xml
new file mode 100644
index 00000000000..d87d05a8734
--- /dev/null
+++ b/usage/test/resources/cloud_usage2.xml
@@ -0,0 +1,34 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/usage/test/resources/cloud_usage3.xml b/usage/test/resources/cloud_usage3.xml
new file mode 100644
index 00000000000..332ddfc2496
--- /dev/null
+++ b/usage/test/resources/cloud_usage3.xml
@@ -0,0 +1,4 @@
+
+
+
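
Usage sketch (hypothetical, not part of the patch): the snippet below only illustrates how the refactored UsageSanityChecker is meant to be driven, mirroring the main() method added above; the SanityCheckDriver class name is invented for illustration, and the surrounding connection/file handling is whatever runSanityCheck() itself performs via getConnection(), readLastCheckId() and the CheckCase list.

    // Hypothetical driver, mirroring UsageSanityChecker.main() from the patch above.
    package com.cloud.usage;

    public class SanityCheckDriver {
        public static void main(String[] args) throws Exception {
            UsageSanityChecker checker = new UsageSanityChecker();
            // runSanityCheck() reads the last checked id, obtains a standalone DB connection,
            // registers the CheckCase queries and evaluates them via checkItemCountByPstmt().
            String errors = checker.runSanityCheck();
            if (errors.length() > 0) {
                System.err.println(errors); // the patched main() reports through s_logger.error() instead
            }
        }
    }

The dbunit-backed UsageSanityCheckerIT above is wired to the new maven-failsafe "integration" profile in usage/pom.xml, so it would presumably be run with that profile enabled (for example, mvn -P integration verify), assuming db.properties points at a disposable MySQL instance.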