mirror of https://github.com/apache/cloudstack.git (synced 2025-11-03 04:12:31 +01:00)

network tagging changes

commit 0ab12edd6c, parent 67ff27496d
@@ -301,6 +301,11 @@ public class Request {
}
}
}

@Override
public String toString() {
return log("", true, Level.DEBUG);
}

protected String log(String msg, boolean logContent, Level level) {
StringBuilder content = new StringBuilder();
@@ -120,8 +120,8 @@ import com.cloud.agent.api.StartupStorageCommand;
import com.cloud.agent.api.StopAnswer;
import com.cloud.agent.api.StopCommand;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.agent.api.UpgradeSnapshotCommand;
import com.cloud.agent.api.UpdateHostPasswordCommand;
import com.cloud.agent.api.UpgradeSnapshotCommand;
import com.cloud.agent.api.VmStatsEntry;
import com.cloud.agent.api.check.CheckSshAnswer;
import com.cloud.agent.api.check.CheckSshCommand;
@@ -302,7 +302,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe

if (VmPowerState.HALTED.equals(vmRec.powerState) && vmRec.affinity.equals(host)) {
try {
vm.destroy(conn);
vm.destroy(conn);
} catch (Exception e) {
s_logger.warn("Catch Exception " + e.getClass().getName() + ": unable to destroy VM " + vmRec.nameLabel + " due to " + e.toString());
success = false;
@@ -318,7 +318,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe

@Override
public void disconnected() {
}
}

protected Pair<VM, VM.Record> getVmByNameLabel(Connection conn, Host host, String nameLabel, boolean getRecord) throws XmlRpcException, XenAPIException {
Set<VM> vms = host.getResidentVMs(conn);
@@ -499,11 +499,18 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
}
}

Pair<Network, String> getNativeNetworkForTraffic(Connection conn, TrafficType type) throws XenAPIException, XmlRpcException {
Pair<Network, String> getNativeNetworkForTraffic(Connection conn, TrafficType type, String tag) throws XenAPIException, XmlRpcException {
if (tag != null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Looking for network named " + tag);
}
Network network = getNetworkByName(conn, tag);
}

if (type == TrafficType.Guest) {
return new Pair<Network, String>(Network.getByUuid(conn, _host.guestNetwork), _host.guestPif);
} else if (type == TrafficType.Control) {
setupLinkLocalNetwork(conn);
setupLinkLocalNetwork(conn);
return new Pair<Network, String>(Network.getByUuid(conn, _host.linkLocalNetwork), null);
} else if (type == TrafficType.Management) {
return new Pair<Network, String>(Network.getByUuid(conn, _host.privateNetwork), _host.privatePif);
@@ -524,8 +531,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
* then you will get an expection that is "REQUIRED_NETWROK" when you start a
* vm with this network. The soultion is, create a vif of dom0 and plug it in
* network, xenserver will create the bridge on behalf of you
* @throws XmlRpcException
* @throws XenAPIException
* @throws XmlRpcException
* @throws XenAPIException
*/
private void enableXenServerNetwork(Connection conn, Network nw,
String vifNameLabel, String networkDesc) throws XenAPIException, XmlRpcException {
@@ -575,7 +582,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
enableXenServerNetwork(conn, vswitchNw, "vswitch",
"vswicth network");
_host.vswitchNetwork = vswitchNw;
}
}
return _host.vswitchNetwork;
} catch (Exception e) {
e.printStackTrace();
@@ -609,7 +616,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe


protected Network getNetwork(Connection conn, NicTO nic) throws XenAPIException, XmlRpcException {
Pair<Network, String> network = getNativeNetworkForTraffic(conn, nic.getType());
String[] tags = nic.getTags();
Pair<Network, String> network = getNativeNetworkForTraffic(conn, nic.getType(), tags != null && tags.length > 0 ? tags[0] : null);
if (nic.getBroadcastUri() != null && nic.getBroadcastUri().toString().contains("untagged")) {
return network.first();
} else if (nic.getBroadcastType() == BroadcastDomainType.Vlan) {
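The two hunks above carry the commit's core change: getNetwork() now reads the NIC's first tag (if any) and hands it to getNativeNetworkForTraffic(), which gained a tag parameter and looks the tag up as a network name label before falling back to the per-traffic-type defaults. A minimal sketch of that selection order, reusing the getNetworkByName helper and Pair type visible in the hunk; the exact fall-through behaviour of the final method may differ from this illustration:

```java
// Sketch only: prefer a network whose name label matches the NIC's tag,
// otherwise fall back to the native network for the traffic type.
Pair<Network, String> pickNetwork(Connection conn, NicTO nic) throws XenAPIException, XmlRpcException {
    String[] tags = nic.getTags();
    String tag = (tags != null && tags.length > 0) ? tags[0] : null;
    if (tag != null) {
        Network named = getNetworkByName(conn, tag);   // lookup by name label, as in the hunk
        if (named != null) {
            return new Pair<Network, String>(named, null);  // no dedicated PIF for a tagged network here
        }
    }
    // no usable tag: defer to the Guest/Control/Management branches shown above
    return getNativeNetworkForTraffic(conn, nic.getType(), null);
}
```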
@@ -753,12 +761,12 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
} else if (volume.getType() == Volume.Type.ROOT) {
vbdr.mode = Types.VbdMode.RW;
vbdr.type = Types.VbdType.DISK;
vbdr.unpluggable = false;
vbdr.unpluggable = false;
} else {
vbdr.mode = Types.VbdMode.RW;
vbdr.type = Types.VbdType.DISK;
vbdr.unpluggable = true;
}
}
VBD vbd = VBD.create(conn, vbdr);

if (s_logger.isDebugEnabled()) {
@@ -774,7 +782,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
assert templates.size() == 1 : "Should only have 1 template but found " + templates.size();
VM template = templates.iterator().next();

VM vm = template.createClone(conn, vmSpec.getName());
VM vm = template.createClone(conn, vmSpec.getName());
VM.Record vmr = vm.getRecord(conn);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Created VM " + vmr.uuid + " for " + vmSpec.getName());
@@ -931,7 +939,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
}
if( _host.systemvmisouuid == null ) {
throw new CloudRuntimeException("can not find systemvmiso");
}
}
}

VBD.Record cdromVBDR = new VBD.Record();
@@ -961,7 +969,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
String result = connect(conn, cmd.getName(), privateIp, cmdPort);
if (result != null) {
return new CheckSshAnswer(cmd, "Can not ping System vm " + vmName + "due to:" + result);
}
}
} catch (Exception e) {
return new CheckSshAnswer(cmd, e);
}
@@ -995,7 +1003,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe

private void cleanUpTmpDomVif(Connection conn) {
List<VIF> vifs;
synchronized(_tmpDom0Vif) {
synchronized(_tmpDom0Vif) {
vifs = _tmpDom0Vif;
_tmpDom0Vif = new ArrayList<VIF>();
}
@@ -1018,8 +1026,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
public StartAnswer execute(StartCommand cmd) {
Connection conn = getConnection();
VirtualMachineTO vmSpec = cmd.getVirtualMachine();
String vmName = vmSpec.getName();
State state = State.Stopped;
String vmName = vmSpec.getName();
State state = State.Stopped;
VM vm = null;
try {

@@ -1040,7 +1048,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
synchronized (_vms) {
_vms.put(vmName, State.Starting);
}
Host host = Host.getByUuid(conn, _host.uuid);
Host host = Host.getByUuid(conn, _host.uuid);
vm = createVmFromTemplate(conn, vmSpec, host);

for (VolumeTO disk : vmSpec.getDisks()) {
@@ -1087,7 +1095,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
} else {
//For user vm, program the rules for each nic if the isolation uri scheme is ec2
NicTO[] nics = vmSpec.getNics();
for (NicTO nic : nics) {
for (NicTO nic : nics) {
if (nic.getIsolationUri() != null && nic.getIsolationUri().getScheme().equalsIgnoreCase(IsolationType.Ec2.toString())) {
result = callHostPlugin(conn, "vmops", "default_network_rules", "vmName", vmName, "vmIP", nic.getIp(), "vmMAC", nic.getMac(), "vmID", Long.toString(vmSpec.getId()));

@@ -1095,10 +1103,10 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
s_logger.warn("Failed to program default network rules for " + vmName+" on nic with ip:"+nic.getIp()+" mac:"+nic.getMac());
} else {
s_logger.info("Programmed default network rules for " + vmName+" on nic with ip:"+nic.getIp()+" mac:"+nic.getMac());
}
}
}
}
}
}
}

state = State.Running;
@@ -1243,7 +1251,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
int i = 0;
for (StaticNatRuleTO rule : cmd.getRules()) {
//1:1 NAT needs instanceip;publicip;domrip;op
args += rule.revoked() ? " -D " : " -A ";
args += rule.revoked() ? " -D " : " -A ";
args += " -l " + rule.getSrcIp();
args += " -r " + rule.getDstIp();
args += " -P " + rule.getProtocol().toLowerCase();
@@ -1543,7 +1551,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
String routerName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME);
String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP);
try {
IpAddressTO[] ips = cmd.getIpAddresses();
IpAddressTO[] ips = cmd.getIpAddresses();
for (IpAddressTO ip : ips) {

assignPublicIpAddress(conn, routerName, routerIp, ip.getPublicIp(), ip.isAdd(), ip.isFirstIP(), ip.isSourceNat(), ip.getVlanId(),
@ -1676,16 +1684,16 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
hostStats.setCpuUtilization(hostStats.getCpuUtilization() + getDataAverage(dataNode, col, numRows));
|
||||
}
|
||||
|
||||
/*
|
||||
/*
|
||||
if (param.contains("loadavg")) {
|
||||
hostStats.setAverageLoad((hostStats.getAverageLoad() + getDataAverage(dataNode, col, numRows)));
|
||||
}
|
||||
*/
|
||||
*/
|
||||
}
|
||||
}
|
||||
|
||||
// add the host cpu utilization
|
||||
/*
|
||||
/*
|
||||
if (hostStats.getNumCpus() != 0) {
|
||||
hostStats.setCpuUtilization(hostStats.getCpuUtilization() / hostStats.getNumCpus());
|
||||
s_logger.debug("Host cpu utilization " + hostStats.getCpuUtilization());
|
||||
@ -1701,7 +1709,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
HashMap<String, VmStatsEntry> vmStatsNameMap = new HashMap<String, VmStatsEntry>();
|
||||
if( vmNames.size() == 0 ) {
|
||||
return new GetVmStatsAnswer(cmd, vmStatsNameMap);
|
||||
}
|
||||
}
|
||||
try {
|
||||
|
||||
// Determine the UUIDs of the requested VMs
|
||||
@ -1799,8 +1807,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
}
|
||||
|
||||
vmStatsAnswer.setCPUUtilization(vmStatsAnswer.getCPUUtilization()*100);
|
||||
if(s_logger.isDebugEnabled())
|
||||
s_logger.debug("Vm cpu utilization " + vmStatsAnswer.getCPUUtilization());
|
||||
if(s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Vm cpu utilization " + vmStatsAnswer.getCPUUtilization());
|
||||
}
|
||||
}
|
||||
|
||||
return vmResponseMap;
|
||||
@ -1912,7 +1921,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
s_logger.warn("Found an invalid value (infinity/NaN) in getDataAverage(), numRows>0");
|
||||
return dummy;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@ -2173,9 +2182,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
snapshotvdi.setNameLabel(conn, "Template " + cmd.getName());
|
||||
tmpl.destroy(conn);
|
||||
poolsr.scan(conn);
|
||||
try{
|
||||
try{
|
||||
Thread.sleep(5000);
|
||||
} catch (Exception e) {
|
||||
} catch (Exception e) {
|
||||
}
|
||||
String parentuuid = getVhdParent(conn, pUuid, snapshotUuid, isISCSI);
|
||||
VDI parent = getVDIbyUuid(conn, parentuuid);
|
||||
@ -2355,7 +2364,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
break;
|
||||
}
|
||||
}
|
||||
vm.poolMigrate(conn, dsthost, new HashMap<String, String>());
|
||||
vm.poolMigrate(conn, dsthost, new HashMap<String, String>());
|
||||
vm.setAffinity(conn, dsthost);
|
||||
state = State.Stopping;
|
||||
}
|
||||
@ -2704,7 +2713,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
if( System.currentTimeMillis() - beginTime > timeout){
|
||||
String msg = "Async " + timeout/1000 + " seconds timeout for task " + task.toString();
|
||||
s_logger.warn(msg);
|
||||
task.cancel(c);
|
||||
task.cancel(c);
|
||||
throw new Types.BadAsyncResult(msg);
|
||||
}
|
||||
}
|
||||
@ -2750,7 +2759,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
try {
|
||||
task.destroy(conn);
|
||||
} catch (Exception e1) {
|
||||
s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.uuid +") due to " + e1.toString());
|
||||
s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.uuid +") due to " + e1.toString());
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2816,7 +2825,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
try {
|
||||
task.destroy(conn);
|
||||
} catch (Exception e1) {
|
||||
s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.uuid +") due to " + e1.toString());
|
||||
s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.uuid +") due to " + e1.toString());
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2846,7 +2855,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
try {
|
||||
task.destroy(conn);
|
||||
} catch (Exception e1) {
|
||||
s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.uuid +") due to " + e1.toString());
|
||||
s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.uuid +") due to " + e1.toString());
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2880,8 +2889,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
"op", "download", "hostname", swift.getHostName(), "account", swift.getAccount(),
|
||||
"username", swift.getUserName(), "token", swift.getToken(), "rfilename", rfilename,
|
||||
"lfilename", lfilename);
|
||||
if( result != null && result.equals("true"))
|
||||
if( result != null && result.equals("true")) {
|
||||
return true;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("swift download failed due to " + e.toString());
|
||||
}
|
||||
@ -2895,8 +2905,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
"op", "upload", "hostname", swift.getHostName(), "account", swift.getAccount(),
|
||||
"username", swift.getUserName(), "token", swift.getToken(), "rfilename", rfilename,
|
||||
"lfilename", lfilename);
|
||||
if( result != null && result.equals("true"))
|
||||
if( result != null && result.equals("true")) {
|
||||
return true;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("swift download failed due to " + e.toString());
|
||||
}
|
||||
@ -2909,8 +2920,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
result = callHostPlugin(conn, "swift", "swift",
|
||||
"op", "delete", "hostname", swift.getHostName(), "account", swift.getAccount(),
|
||||
"username", swift.getUserName(), "token", swift.getToken(), "rfilename", rfilename);
|
||||
if( result != null && result.equals("true"))
|
||||
if( result != null && result.equals("true")) {
|
||||
return true;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("swift download failed due to " + e.toString());
|
||||
}
|
||||
@ -3109,7 +3121,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
}
|
||||
}
|
||||
return new StopAnswer(cmd, "Stop VM " + vmName + " Succeed", 0, bytesSent, bytesRcvd);
|
||||
}
|
||||
}
|
||||
} catch (XenAPIException e) {
|
||||
String msg = "VM destroy failed in Stop " + vmName + " Command due to " + e.toString();
|
||||
s_logger.warn(msg, e);
|
||||
@ -3145,11 +3157,11 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
}
|
||||
} catch (XenAPIException e) {
|
||||
String msg = "getVdis can not get VPD due to " + e.toString();
|
||||
s_logger.warn(msg, e);
|
||||
s_logger.warn(msg, e);
|
||||
} catch (XmlRpcException e) {
|
||||
String msg = "getVdis can not get VPD due to " + e.getMessage();
|
||||
s_logger.warn(msg, e);
|
||||
}
|
||||
}
|
||||
return vdis;
|
||||
}
|
||||
|
||||
@ -3267,7 +3279,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
return true;
|
||||
}
|
||||
|
||||
protected Nic getManageMentNetwork(Connection conn) throws XmlRpcException, XenAPIException {
|
||||
protected Nic getManageMentNetwork(Connection conn) throws XmlRpcException, XenAPIException {
|
||||
PIF mgmtPif = null;
|
||||
PIF.Record mgmtPifRec = null;
|
||||
Host host = Host.getByUuid(conn, _host.uuid);
|
||||
@ -3429,66 +3441,11 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return found;
|
||||
}
|
||||
|
||||
protected Network enableVlanNetwork(Connection conn, long tag, String pifUuid) throws XenAPIException, XmlRpcException {
|
||||
// In XenServer, vlan is added by
|
||||
// 1. creating a network.
|
||||
// 2. creating a vlan associating network with the pif.
|
||||
// We always create
|
||||
// 1. a network with VLAN[vlan id in decimal]
|
||||
// 2. a vlan associating the network created with the pif to private
|
||||
// network.
|
||||
Network vlanNetwork = null;
|
||||
String name = "VLAN" + Long.toString(tag);
|
||||
|
||||
synchronized (name.intern()) {
|
||||
vlanNetwork = getNetworkByName(conn, name);
|
||||
if (vlanNetwork == null) { // Can't find it, then create it.
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Creating VLAN network for " + tag + " on host " + _host.ip);
|
||||
}
|
||||
Network.Record nwr = new Network.Record();
|
||||
nwr.nameLabel = name;
|
||||
nwr.bridge = name;
|
||||
vlanNetwork = Network.create(conn, nwr);
|
||||
}
|
||||
|
||||
PIF nPif = PIF.getByUuid(conn, pifUuid);
|
||||
PIF.Record nPifr = nPif.getRecord(conn);
|
||||
|
||||
Network.Record vlanNetworkr = vlanNetwork.getRecord(conn);
|
||||
if (vlanNetworkr.PIFs != null) {
|
||||
for (PIF pif : vlanNetworkr.PIFs) {
|
||||
PIF.Record pifr = pif.getRecord(conn);
|
||||
if(pifr.host.equals(nPifr.host)) {
|
||||
if (pifr.device.equals(nPifr.device) ) {
|
||||
pif.plug(conn);
|
||||
return vlanNetwork;
|
||||
} else {
|
||||
throw new CloudRuntimeException("Creating VLAN " + tag + " on " + nPifr.device + " failed due to this VLAN is already created on " + pifr.device);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Creating VLAN " + tag + " on host " + _host.ip + " on device " + nPifr.device);
|
||||
}
|
||||
VLAN vlan = VLAN.create(conn, nPif, tag, vlanNetwork);
|
||||
PIF untaggedPif = vlan.getUntaggedPIF(conn);
|
||||
if (!untaggedPif.getCurrentlyAttached(conn)) {
|
||||
untaggedPif.plug(conn);
|
||||
}
|
||||
}
|
||||
|
||||
return vlanNetwork;
|
||||
}
|
||||
|
||||
protected Network enableVlanNetwork(Connection conn, long tag, Network network, String pifUuid) throws XenAPIException, XmlRpcException {
|
||||
// In XenServer, vlan is added by
|
||||
// 1. creating a network.
|
||||
@ -3541,7 +3498,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
}
|
||||
|
||||
protected SR getLocalLVMSR(Connection conn) {
|
||||
try {
|
||||
try {
|
||||
Map<SR, SR.Record> map = SR.getAllRecords(conn);
|
||||
for (Map.Entry<SR, SR.Record> entry : map.entrySet()) {
|
||||
SR.Record srRec = entry.getValue();
|
||||
@ -3620,7 +3577,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
s_logger.warn(" can not ping xenserver " + _host.uuid);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
}
|
||||
HashMap<String, State> newStates = sync(conn);
|
||||
if (newStates == null) {
|
||||
s_logger.warn("Unable to get current status from sync");
|
||||
@ -3678,7 +3635,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
break;
|
||||
}
|
||||
Nic privateNic = getManageMentNetwork(conn);
|
||||
_privateNetworkName = privateNic.nr.nameLabel;
|
||||
_privateNetworkName = privateNic.nr.nameLabel;
|
||||
_host.privatePif = privateNic.pr.uuid;
|
||||
_host.privateNetwork = privateNic.nr.uuid;
|
||||
|
||||
@ -3900,7 +3857,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
String tag = it.next();
|
||||
if (tag.startsWith("vmops-version-")) {
|
||||
if (tag.contains(version)) {
|
||||
s_logger.info(logX(host, "Host " + hr.address + " is already setup."));
|
||||
s_logger.info(logX(host, "Host " + hr.address + " is already setup."));
|
||||
return;
|
||||
} else {
|
||||
it.remove();
|
||||
@ -3911,7 +3868,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection(hr.address, 22);
|
||||
try {
|
||||
sshConnection.connect(null, 60000, 60000);
|
||||
if (!sshConnection.authenticateWithPassword(_username, _password.peek())) {
|
||||
if (!sshConnection.authenticateWithPassword(_username, _password.peek())) {
|
||||
throw new CloudRuntimeException("Unable to authenticate");
|
||||
}
|
||||
|
||||
@ -4070,7 +4027,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
PBD.Record pbdr = pbd.getRecord(conn);
|
||||
if (host.equals(pbdr.host)) {
|
||||
if (!pbdr.currentlyAttached) {
|
||||
pbdPlug(conn, pbd, pbdr.uuid);
|
||||
pbdPlug(conn, pbd, pbdr.uuid);
|
||||
}
|
||||
found = true;
|
||||
break;
|
||||
@ -4081,7 +4038,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
pbdr.host = host;
|
||||
pbdr.uuid = "";
|
||||
PBD pbd = PBD.create(conn, pbdr);
|
||||
pbdPlug(conn, pbd, pbd.getUuid(conn));
|
||||
pbdPlug(conn, pbd, pbd.getUuid(conn));
|
||||
}
|
||||
} else {
|
||||
for (PBD pbd : pbds) {
|
||||
@ -4117,7 +4074,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
String msg = "Catch Exception " + e.getClass().getName() + ", create StoragePool failed due to " + e.toString() + " on host:" + _host.uuid + " pool: " + pool.getHost() + pool.getPath();
|
||||
s_logger.warn(msg, e);
|
||||
return new Answer(cmd, false, msg);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@ -4229,7 +4186,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
|
||||
}
|
||||
|
||||
protected boolean can_bridge_firewall(Connection conn) {
|
||||
protected boolean can_bridge_firewall(Connection conn) {
|
||||
return Boolean.valueOf(callHostPlugin(conn, "vmops", "can_bridge_firewall", "host_uuid", _host.uuid, "instance", _instance));
|
||||
}
|
||||
|
||||
@ -4369,7 +4326,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
_isOvs = true;
|
||||
|
||||
Connection conn = getConnection();
|
||||
String bridge = "unkonwn";
|
||||
String bridge = "unkonwn";
|
||||
try {
|
||||
Network nw = setupvSwitchNetwork(conn);
|
||||
bridge = nw.getBridge(conn);
|
||||
@ -4383,7 +4340,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
_host.ip, bridge);
|
||||
} else {
|
||||
return new OvsCreateGreTunnelAnswer(cmd, true, result, _host.ip, bridge, Integer.parseInt(res[1]));
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
@ -4456,7 +4413,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
}
|
||||
if (_publicNetworkName != null) {
|
||||
details.put("public.network.device", _publicNetworkName);
|
||||
}
|
||||
}
|
||||
if (_guestNetworkName != null) {
|
||||
details.put("guest.network.device", _guestNetworkName);
|
||||
}
|
||||
@ -4610,7 +4567,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
|
||||
private void CheckXenHostInfo() throws ConfigurationException {
|
||||
Connection conn = _connPool.slaveConnect(_host.ip, _username, _password);
|
||||
if( conn == null ) {
|
||||
if( conn == null ) {
|
||||
throw new ConfigurationException("Can not create slave connection to " + _host.ip);
|
||||
}
|
||||
try {
|
||||
@ -4630,7 +4587,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
throw new ConfigurationException(msg);
|
||||
}
|
||||
} finally {
|
||||
try {
|
||||
try {
|
||||
Session.localLogout(conn);
|
||||
} catch (Exception e) {
|
||||
}
|
||||
@ -4674,7 +4631,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
vdir = vdi.getRecord(conn);
|
||||
s_logger.debug("Succesfully created VDI for " + cmd + ". Uuid = " + vdir.uuid);
|
||||
|
||||
VolumeTO vol = new VolumeTO(cmd.getVolumeId(), dskch.getType(), pool.getType(), pool.getUuid(), vdir.nameLabel,
|
||||
VolumeTO vol = new VolumeTO(cmd.getVolumeId(), dskch.getType(), pool.getType(), pool.getUuid(), vdir.nameLabel,
|
||||
pool.getPath(), vdir.uuid, vdir.virtualSize, null);
|
||||
return new CreateAnswer(cmd, vol);
|
||||
} catch (Exception e) {
|
||||
@ -4731,7 +4688,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
SR sr = SR.create(conn, host, deviceConfig, new Long(0), name, uri.getHost() + uri.getPath(), SRType.NFS.toString(), "user", shared, new HashMap<String, String>());
|
||||
if( !checkSR(conn, sr) ) {
|
||||
throw new Exception("no attached PBD");
|
||||
}
|
||||
}
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug(logX(sr, "Created a SR; UUID is " + sr.getUuid(conn) + " device config is " + deviceConfig));
|
||||
}
|
||||
@ -4850,7 +4807,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
continue;
|
||||
}
|
||||
if (target.equals(dc.get("target")) && targetiqn.equals(dc.get("targetIQN")) && lunid.equals(dc.get("lunid"))) {
|
||||
throw new CloudRuntimeException("There is a SR using the same configuration target:" + dc.get("target") + ", targetIQN:"
|
||||
throw new CloudRuntimeException("There is a SR using the same configuration target:" + dc.get("target") + ", targetIQN:"
|
||||
+ dc.get("targetIQN") + ", lunid:" + dc.get("lunid") + " for pool " + pool.getUuid() + "on host:" + _host.uuid);
|
||||
}
|
||||
}
|
||||
@ -4894,7 +4851,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
s_logger.warn(msg, e);
|
||||
throw new CloudRuntimeException(msg, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
deviceConfig.put("SCSIid", scsiid);
|
||||
|
||||
String result = SR.probe(conn, host, deviceConfig, type , smConfig);
|
||||
@ -4906,9 +4863,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
sr = SR.create(conn, host, deviceConfig, new Long(0), pool.getUuid(), poolId, type, "user", true,
|
||||
smConfig);
|
||||
} else {
|
||||
sr = SR.introduce(conn, pooluuid, pool.getUuid(), poolId,
|
||||
sr = SR.introduce(conn, pooluuid, pool.getUuid(), poolId,
|
||||
type, "user", true, smConfig);
|
||||
Pool.Record pRec = XenServerConnectionPool.getPoolRecord(conn);
|
||||
Pool.Record pRec = XenServerConnectionPool.getPoolRecord(conn);
|
||||
PBD.Record rec = new PBD.Record();
|
||||
rec.deviceConfig = deviceConfig;
|
||||
rec.host = pRec.master;
|
||||
@ -4964,7 +4921,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
}
|
||||
|
||||
if (server.equals(dc.get("server")) && serverpath.equals(dc.get("serverpath"))) {
|
||||
throw new CloudRuntimeException("There is a SR using the same configuration server:" + dc.get("server") + ", serverpath:"
|
||||
throw new CloudRuntimeException("There is a SR using the same configuration server:" + dc.get("server") + ", serverpath:"
|
||||
+ dc.get("serverpath") + " for pool " + pool.getUuid() + "on host:" + _host.uuid);
|
||||
}
|
||||
|
||||
@ -5035,7 +4992,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
String volumeFolder = String.valueOf(cmd.getVolumeId()) + "/";
|
||||
String mountpoint = remoteVolumesMountPath + volumeFolder;
|
||||
SR primaryStoragePool = getStorageRepository(conn, poolTO);
|
||||
String srUuid = primaryStoragePool.getUuid(conn);
|
||||
String srUuid = primaryStoragePool.getUuid(conn);
|
||||
if (toSecondaryStorage) {
|
||||
// Create the volume folder
|
||||
if (!createSecondaryStorageFolder(conn, remoteVolumesMountPath, volumeFolder)) {
|
||||
@ -5058,7 +5015,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
removeSR(conn, secondaryStorage);
|
||||
}
|
||||
} else {
|
||||
String uuid = copy_vhd_to_secondarystorage(conn, mountpoint, volumeUUID, srUuid);
|
||||
String uuid = copy_vhd_to_secondarystorage(conn, mountpoint, volumeUUID, srUuid);
|
||||
return new CopyVolumeAnswer(cmd, true, null, null, uuid);
|
||||
}
|
||||
} else {
|
||||
@ -5118,12 +5075,12 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
if( deviceId != null ) {
|
||||
if( deviceId.longValue() == 3 ) {
|
||||
String msg = "Device 3 is reserved for CD-ROM, choose other device";
|
||||
return new AttachVolumeAnswer(cmd,msg);
|
||||
return new AttachVolumeAnswer(cmd,msg);
|
||||
}
|
||||
if(isDeviceUsed(conn, vm, deviceId)) {
|
||||
String msg = "Device " + deviceId + " is used in VM " + vmName;
|
||||
return new AttachVolumeAnswer(cmd,msg);
|
||||
}
|
||||
}
|
||||
diskNumber = deviceId.toString();
|
||||
} else {
|
||||
diskNumber = getUnusedDeviceNum(conn, vm);
|
||||
@ -5474,7 +5431,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
result = postCreatePrivateTemplate(conn, templatePath, tmpltFilename, tmpltUuid, userSpecifiedName, null, physicalSize, virtualSize, newTemplateId);
|
||||
if (!result) {
|
||||
throw new CloudRuntimeException("Could not create the template.properties file on secondary storage dir: " + templatePath);
|
||||
}
|
||||
}
|
||||
installPath = installPath + "/" + tmpltFilename;
|
||||
return new CreatePrivateTemplateAnswer(cmd, true, null, installPath, virtualSize, physicalSize, tmpltUuid, ImageFormat.VHD);
|
||||
} catch (Exception e) {
|
||||
@ -5512,7 +5469,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
protected BackupSnapshotAnswer execute(final BackupSnapshotCommand cmd) {
|
||||
Connection conn = getConnection();
|
||||
@ -5540,7 +5497,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
String secondaryStorageMountPath = uri.getHost() + ":" + uri.getPath();
|
||||
VDI snapshotVdi = getVDIbyUuid(conn, snapshotUuid);
|
||||
if ( prevBackupUuid != null ) {
|
||||
try {
|
||||
try {
|
||||
String snapshotPaUuid = getVhdParent(conn, psUuid, snapshotUuid, isISCSI);
|
||||
if( snapshotPaUuid != null ) {
|
||||
String snashotPaPaPaUuid = getVhdParent(conn, psUuid, snapshotPaUuid, isISCSI);
|
||||
@ -5549,7 +5506,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
fullbackup = false;
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
} catch (Exception e) {
|
||||
}
|
||||
}
|
||||
String filename = volumeId + "_" + cmd.getSnapshotId() + "_" + cmd.getSnapshotUuid();
|
||||
@ -5636,7 +5593,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
// Get the absolute path of the snapshot on the secondary storage.
|
||||
URI snapshotURI = new URI(secondaryStoragePoolURL + "/snapshots/" + accountId + "/" + volumeId );
|
||||
String snapshotPath = snapshotURI.getHost() + ":" + snapshotURI.getPath() + "/" + backedUpSnapshotUuid + ".vhd";
|
||||
String srUuid = primaryStorageSR.getUuid(conn);
|
||||
String srUuid = primaryStorageSR.getUuid(conn);
|
||||
volumeUUID = copy_vhd_from_secondarystorage(conn, snapshotPath, srUuid);
|
||||
result = true;
|
||||
} catch (XenAPIException e) {
|
||||
@ -5935,10 +5892,10 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
}
|
||||
|
||||
return success;
|
||||
}
|
||||
}
|
||||
|
||||
protected String getVhdParent(Connection conn, String primaryStorageSRUuid, String snapshotUuid, Boolean isISCSI) {
|
||||
String parentUuid = callHostPlugin(conn, "vmopsSnapshot", "getVhdParent", "primaryStorageSRUuid", primaryStorageSRUuid,
|
||||
String parentUuid = callHostPlugin(conn, "vmopsSnapshot", "getVhdParent", "primaryStorageSRUuid", primaryStorageSRUuid,
|
||||
"snapshotUuid", snapshotUuid, "isISCSI", isISCSI.toString());
|
||||
|
||||
if (parentUuid == null || parentUuid.isEmpty() || parentUuid.equalsIgnoreCase("None")) {
|
||||
@ -6058,7 +6015,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
}
|
||||
} catch (XenAPIException e) {
|
||||
String msg = "Unable to eject host " + _host.uuid + " due to " + e.toString();
|
||||
s_logger.warn(msg);
|
||||
s_logger.warn(msg);
|
||||
host.destroy(conn);
|
||||
}
|
||||
return new Answer(cmd);
|
||||
@ -6070,7 +6027,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
String msg = "Exception Unable to destroy host " + _host.uuid + " in xenserver database due to " + e.getMessage();
|
||||
s_logger.warn(msg, e);
|
||||
return new Answer(cmd, false, msg);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private Answer execute(CleanupNetworkRulesCmd cmd) {
|
||||
|
||||
@@ -42,8 +42,13 @@ public class RequestTest extends TestCase {
private static final Logger s_logger = Logger.getLogger(RequestTest.class);

public void testSerDeser() {
s_logger.info("Testing serializing and deserializing works as expected");

s_logger.info("UpdateHostPasswordCommand should have two parameters that doesn't show in logging");
UpdateHostPasswordCommand cmd1 = new UpdateHostPasswordCommand("abc", "def");
s_logger.info("SecStorageFirewallCfgCommand has a context map that shouldn't show up in debug level");
SecStorageFirewallCfgCommand cmd2 = new SecStorageFirewallCfgCommand();
s_logger.info("GetHostStatsCommand should not show up at all in debug level");
GetHostStatsCommand cmd3 = new GetHostStatsCommand("hostguid", "hostname", 101);
cmd2.addPortConfig("abc", "24", true, "eth0");
cmd2.addPortConfig("127.0.0.1", "44", false, "eth1");
@@ -113,6 +118,7 @@ public class RequestTest extends TestCase {
}

public void testDownload() {
s_logger.info("Testing Download answer");
VMTemplateVO template = new VMTemplateVO(1, "templatename", ImageFormat.QCOW2, true, true, true, TemplateType.USER, "url", true, 32, 1, "chksum", "displayText", true, 30, true,
HypervisorType.KVM);
DownloadCommand cmd = new DownloadCommand("secUrl", template, 30000000l);
@@ -128,6 +134,7 @@ public class RequestTest extends TestCase {
}

public void testLogging() {
s_logger.info("Testing Logging");
GetHostStatsCommand cmd3 = new GetHostStatsCommand("hostguid", "hostname", 101);
Request sreq = new Request(2, 3, new Command[] { cmd3 }, true, true);
sreq.setSequence(1);
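These test hunks exercise the Request logging change from the first hunk of the commit, where toString() delegates to log("", true, Level.DEBUG). A short usage sketch of the pattern the tests and the AgentManagerImpl hunk below rely on; the command and request constructors are the ones shown in testLogging, and the behaviour described in the comments is only what the test log messages above claim:

```java
// Sketch: implicit toString() and an explicit debug dump go through the same filter.
GetHostStatsCommand cmd = new GetHostStatsCommand("hostguid", "hostname", 101);
Request req = new Request(2, 3, new Command[] { cmd }, true, true);
req.setSequence(1);

s_logger.debug("Processing " + req);          // req.toString() -> log("", true, Level.DEBUG)
req.logD("Processing the first command ");    // logD(...) as used in AgentManagerImpl below
```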
@@ -2153,7 +2153,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager {

Response response = null;
if (attache == null) {
s_logger.debug("Processing sequence " + request.getSequence() + ": Processing " + request.toString());
request.logD("Processing the first command ");
if (!(cmd instanceof StartupCommand)) {
s_logger.warn("Throwing away a request because it came through as the first command on a connect: " + request.toString());
return;

@@ -280,7 +280,7 @@ public class StoragePoolDaoImpl extends GenericDaoBase<StoragePoolVO, Long> imp
sql.delete(sql.length() - 4, sql.length());
sql.append(DetailsSqlSuffix);
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = s_initStmt;
PreparedStatement pstmt = null;
try {
pstmt = txn.prepareAutoCloseStatement(sql.toString());
int i = 1;
@@ -297,7 +297,7 @@ public class StoragePoolDaoImpl extends GenericDaoBase<StoragePoolVO, Long> imp
}
return pools;
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to execute " + pstmt.toString(), e);
throw new CloudRuntimeException("Unable to execute " + pstmt, e);
}
}

@@ -351,7 +351,7 @@ public class StoragePoolDaoImpl extends GenericDaoBase<StoragePoolVO, Long> imp

while (rs.next()) {
tags.add(rs.getString("name"));
}
}
return tags;
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to execute " + pstmt.toString(), e);

@@ -137,7 +137,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase<ConsoleProxyVO, Long> im
HostUpSearch = createSearchBuilder();
HostUpSearch.and("host", HostUpSearch.entity().getHostId(), SearchCriteria.Op.EQ);
HostUpSearch.and("states", HostUpSearch.entity().getState(), SearchCriteria.Op.NIN);
HostUpSearch.done();
HostUpSearch.done();

StateChangeSearch = createSearchBuilder();
StateChangeSearch.and("id", StateChangeSearch.entity().getId(), SearchCriteria.Op.EQ);
@@ -171,7 +171,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase<ConsoleProxyVO, Long> im
UpdateBuilder ub = getUpdateBuilder(proxy);
ub.set(proxy, "state", State.Destroyed);
ub.set(proxy, "privateIpAddress", null);
update(id, ub);
update(id, ub, proxy);

boolean result = super.remove(id);
txn.commit();
@@ -204,7 +204,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase<ConsoleProxyVO, Long> im
public List<ConsoleProxyVO> listUpByHostId(long hostId) {
SearchCriteria<ConsoleProxyVO> sc = HostUpSearch.create();
sc.setParameters("host", hostId);
sc.setParameters("states", new Object[] {State.Destroyed, State.Stopped, State.Expunging});
sc.setParameters("states", new Object[] {State.Destroyed, State.Stopped, State.Expunging});
return listBy(sc);
}
@@ -29,9 +29,9 @@ import com.cloud.network.dao.NetworkDaoImpl;
import com.cloud.network.router.VirtualRouter.Role;
import com.cloud.utils.component.ComponentLocator;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.JoinBuilder.JoinType;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.JoinBuilder.JoinType;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.UpdateBuilder;
@@ -83,7 +83,7 @@ public class DomainRouterDaoImpl extends GenericDaoBase<DomainRouterVO, Long> im
router.setPublicIpAddress(null);
UpdateBuilder ub = getUpdateBuilder(router);
ub.set(router, "state", State.Destroyed);
update(id, ub);
update(id, ub, router);

boolean result = super.remove(id);
txn.commit();
@@ -113,7 +113,7 @@ public class SecondaryStorageVmDaoImpl extends GenericDaoBase<SecondaryStorageVm
UpdateBuilder ub = getUpdateBuilder(proxy);
ub.set(proxy, "state", State.Destroyed);
ub.set(proxy, "privateIpAddress", null);
update(id, ub);
update(id, ub, proxy);

boolean result = super.remove(id);
txn.commit();
@@ -125,8 +125,9 @@ public class SecondaryStorageVmDaoImpl extends GenericDaoBase<SecondaryStorageVm
SearchCriteria<SecondaryStorageVmVO> sc = DataCenterStatusSearch.create();
sc.setParameters("states", (Object[])states);
sc.setParameters("dc", dataCenterId);
if(role != null)
sc.setParameters("role", role);
if(role != null) {
sc.setParameters("role", role);
}
return listBy(sc);
}

@@ -134,8 +135,9 @@ public class SecondaryStorageVmDaoImpl extends GenericDaoBase<SecondaryStorageVm
public List<SecondaryStorageVmVO> getSecStorageVmListInStates(SecondaryStorageVm.Role role, State... states) {
SearchCriteria<SecondaryStorageVmVO> sc = StateSearch.create();
sc.setParameters("states", (Object[])states);
if(role != null)
sc.setParameters("role", role);
if(role != null) {
sc.setParameters("role", role);
}

return listBy(sc);
}
@@ -144,8 +146,9 @@ public class SecondaryStorageVmDaoImpl extends GenericDaoBase<SecondaryStorageVm
public List<SecondaryStorageVmVO> listByHostId(SecondaryStorageVm.Role role, long hostId) {
SearchCriteria<SecondaryStorageVmVO> sc = HostSearch.create();
sc.setParameters("host", hostId);
if(role != null)
sc.setParameters("role", role);
if(role != null) {
sc.setParameters("role", role);
}
return listBy(sc);
}

@@ -153,9 +156,10 @@ public class SecondaryStorageVmDaoImpl extends GenericDaoBase<SecondaryStorageVm
public List<SecondaryStorageVmVO> listUpByHostId(SecondaryStorageVm.Role role, long hostId) {
SearchCriteria<SecondaryStorageVmVO> sc = HostUpSearch.create();
sc.setParameters("host", hostId);
sc.setParameters("states", new Object[] {State.Destroyed, State.Stopped, State.Expunging});
if(role != null)
sc.setParameters("role", role);
sc.setParameters("states", new Object[] {State.Destroyed, State.Stopped, State.Expunging});
if(role != null) {
sc.setParameters("role", role);
}
return listBy(sc);
}

@@ -166,12 +170,13 @@ public class SecondaryStorageVmDaoImpl extends GenericDaoBase<SecondaryStorageVm
PreparedStatement pstmt = null;
try {
String sql;
if(role == null)
sql = "SELECT s.id FROM secondary_storage_vm s, vm_instance v, host h " +
if(role == null) {
sql = "SELECT s.id FROM secondary_storage_vm s, vm_instance v, host h " +
"WHERE s.id=v.id AND v.state='Running' AND v.host_id=h.id AND h.mgmt_server_id=?";
else
sql = "SELECT s.id FROM secondary_storage_vm s, vm_instance v, host h " +
} else {
sql = "SELECT s.id FROM secondary_storage_vm s, vm_instance v, host h " +
"WHERE s.id=v.id AND v.state='Running' AND s.role=? AND v.host_id=h.id AND h.mgmt_server_id=?";
}

pstmt = txn.prepareAutoCloseStatement(sql);

@@ -209,8 +214,9 @@ public class SecondaryStorageVmDaoImpl extends GenericDaoBase<SecondaryStorageVm
public List<SecondaryStorageVmVO> listByZoneId(SecondaryStorageVm.Role role, long zoneId) {
SearchCriteria<SecondaryStorageVmVO> sc = ZoneSearch.create();
sc.setParameters("zone", zoneId);
if(role != null)
sc.setParameters("role", role);
if(role != null) {
sc.setParameters("role", role);
}
return listBy(sc);
}

@@ -219,8 +225,9 @@ public class SecondaryStorageVmDaoImpl extends GenericDaoBase<SecondaryStorageVm
SearchCriteria<SecondaryStorageVmVO> sc = LastHostSearch.create();
sc.setParameters("lastHost", hostId);
sc.setParameters("state", State.Stopped);
if(role != null)
sc.setParameters("role", role);
if(role != null) {
sc.setParameters("role", role);
}

return listBy(sc);
}
@@ -233,10 +240,11 @@ public class SecondaryStorageVmDaoImpl extends GenericDaoBase<SecondaryStorageVm
PreparedStatement pstmt = null;
try {
String sql;
if(role == null)
sql = "SELECT s.id, count(l.id) as count FROM secondary_storage_vm s INNER JOIN vm_instance v ON s.id=v.id LEFT JOIN cmd_exec_log l ON s.id=l.instance_id WHERE v.state='Running' AND v.data_center_id=? GROUP BY s.id ORDER BY count";
else
sql = "SELECT s.id, count(l.id) as count FROM secondary_storage_vm s INNER JOIN vm_instance v ON s.id=v.id LEFT JOIN cmd_exec_log l ON s.id=l.instance_id WHERE v.state='Running' AND v.data_center_id=? AND s.role=? GROUP BY s.id ORDER BY count";
if(role == null) {
sql = "SELECT s.id, count(l.id) as count FROM secondary_storage_vm s INNER JOIN vm_instance v ON s.id=v.id LEFT JOIN cmd_exec_log l ON s.id=l.instance_id WHERE v.state='Running' AND v.data_center_id=? GROUP BY s.id ORDER BY count";
} else {
sql = "SELECT s.id, count(l.id) as count FROM secondary_storage_vm s INNER JOIN vm_instance v ON s.id=v.id LEFT JOIN cmd_exec_log l ON s.id=l.instance_id WHERE v.state='Running' AND v.data_center_id=? AND s.role=? GROUP BY s.id ORDER BY count";
}

pstmt = txn.prepareAutoCloseStatement(sql);
@@ -27,7 +27,6 @@ import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
@@ -43,6 +42,7 @@ import java.util.List;
import java.util.Map;
import java.util.TimeZone;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

import javax.naming.ConfigurationException;
import javax.persistence.AttributeOverride;
@@ -73,6 +73,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.Ip;
import com.cloud.utils.net.NetUtils;

import edu.emory.mathcs.backport.java.util.Arrays;
import edu.emory.mathcs.backport.java.util.Collections;
/**
@@ -114,7 +115,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene

protected final static TimeZone s_gmtTimeZone = TimeZone.getTimeZone("GMT");

protected final static Map<Class<?>, GenericDao<?, ? extends Serializable>> s_daoMaps = new HashMap<Class<?>, GenericDao<?, ? extends Serializable>>(71);
protected final static Map<Class<?>, GenericDao<?, ? extends Serializable>> s_daoMaps = new ConcurrentHashMap<Class<?>, GenericDao<?, ? extends Serializable>>(71);

protected Class<T> _entityBeanType;
protected String _table;
@@ -135,10 +136,10 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene
protected Pair<String, Attribute> _removed;
protected Pair<String, Attribute[]> _removeSql;
protected List<Pair<String, Attribute[]>> _deleteSqls;
protected Map<String, Attribute[]> _idAttributes;
protected Map<String, TableGenerator> _tgs;
protected final Map<String, Attribute[]> _idAttributes;
protected final Map<String, TableGenerator> _tgs;
protected final Map<String, Attribute> _allAttributes;
protected List<Attribute> _ecAttributes;
protected final List<Attribute> _ecAttributes;
protected final Map<Pair<String, String>, Attribute> _allColumns;
protected Enhancer _enhancer;
protected Factory _factory;
@@ -153,21 +154,6 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene

protected static final SequenceFetcher s_seqFetcher = SequenceFetcher.getInstance();

protected static PreparedStatement s_initStmt;
static {
Connection conn = Transaction.getStandaloneConnection();
try {
s_initStmt = conn.prepareStatement("SELECT 1");
} catch (final SQLException e) {
} finally {
try {
conn.close();
} catch (SQLException e) {
}
}

}

protected String _name;

public static <J> GenericDao<? extends J, ? extends Serializable> getDao(Class<J> entityType) {
@@ -238,7 +224,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene
_tgs.put(tg.name(), tg);
}

Callback[] callbacks = new Callback[] { NoOp.INSTANCE, new UpdateBuilder(_allAttributes) };
Callback[] callbacks = new Callback[] { NoOp.INSTANCE, new UpdateBuilder(this) };

_enhancer = new Enhancer();
_enhancer.setSuperclass(_entityBeanType);
@@ -248,7 +234,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene

_searchEnhancer = new Enhancer();
_searchEnhancer.setSuperclass(_entityBeanType);
_searchEnhancer.setCallback(new UpdateBuilder(_allAttributes));
_searchEnhancer.setCallback(new UpdateBuilder(this));

if (s_logger.isTraceEnabled()) {
s_logger.trace("Select SQL: " + _partialSelectSql.first().toString());
@@ -277,7 +263,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene
@Override @DB(txn=false)
@SuppressWarnings("unchecked")
public T createForUpdate(final ID id) {
final T entity = (T)_factory.newInstance(new Callback[] {NoOp.INSTANCE, new UpdateBuilder(_allAttributes)});
final T entity = (T)_factory.newInstance(new Callback[] {NoOp.INSTANCE, new UpdateBuilder(this)});
if (id != null) {
try {
_idField.set(entity, id);
@@ -355,7 +341,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene

final String sql = str.toString();

PreparedStatement pstmt = s_initStmt;
PreparedStatement pstmt = null;
final List<T> result = new ArrayList<T>();
try {
pstmt = txn.prepareAutoCloseStatement(sql);
@@ -385,9 +371,9 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene
}
return result;
} catch (final SQLException e) {
throw new CloudRuntimeException("DB Exception on: " + pstmt.toString(), e);
throw new CloudRuntimeException("DB Exception on: " + pstmt, e);
} catch (final Throwable e) {
throw new CloudRuntimeException("Caught: " + pstmt.toString(), e);
throw new CloudRuntimeException("Caught: " + pstmt, e);
}
}
@@ -417,7 +403,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene
final String sql = str.toString();

final Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = s_initStmt;
PreparedStatement pstmt = null;
try {
pstmt = txn.prepareAutoCloseStatement(sql);
int i = 0;
@@ -457,9 +443,9 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene

return results;
} catch (final SQLException e) {
throw new CloudRuntimeException("DB Exception on: " + pstmt.toString(), e);
throw new CloudRuntimeException("DB Exception on: " + pstmt, e);
} catch (final Throwable e) {
throw new CloudRuntimeException("Caught: " + pstmt.toString(), e);
throw new CloudRuntimeException("Caught: " + pstmt, e);
}
}

@@ -718,26 +704,38 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene
}

if (s_logger.isTraceEnabled()) {
s_logger.trace("join search statement is " + pstmt.toString());
s_logger.trace("join search statement is " + pstmt);
}
return count;
}

@DB(txn=false)
protected int update(final ID id, final UpdateBuilder ub) {
SearchCriteria<T> sc = createSearchCriteria();
sc.addAnd(_idAttributes.get(_table)[0], SearchCriteria.Op.EQ, id);
int rowsUpdated = update(ub, sc, null);
protected int update(ID id, UpdateBuilder ub, T entity) {
if (_cache != null) {
_cache.remove(id);
}
SearchCriteria<T> sc = createSearchCriteria();
sc.addAnd(_idAttributes.get(_table)[0], SearchCriteria.Op.EQ, id);
Transaction txn = Transaction.currentTxn();
txn.start();

int rowsUpdated = update(ub, sc, null);

try {
if (ub.getCollectionChanges() != null) {
insertElementCollection(entity, _idAttributes.get(_table)[0], id, ub.getCollectionChanges());
}
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to persist element collection", e);
}
txn.commit();

return rowsUpdated;
}
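The replacement update(id, ub, entity) above evicts the cache entry, runs the row update, and persists any element-collection changes from the UpdateBuilder inside one transaction, which is why the entity itself is now passed in. The DAO hunks earlier in this commit (ConsoleProxyDaoImpl, DomainRouterDaoImpl, SecondaryStorageVmDaoImpl) switch to the three-argument form; a minimal sketch of that calling pattern, with findById assumed as the usual GenericDao lookup:

```java
// Sketch of the new calling convention in a DAO subclass.
ConsoleProxyVO proxy = findById(id);          // assumed GenericDao lookup
UpdateBuilder ub = getUpdateBuilder(proxy);
ub.set(proxy, "state", State.Destroyed);
ub.set(proxy, "privateIpAddress", null);
update(id, ub, proxy);                        // was update(id, ub) before this change;
                                              // the entity lets collection changes be written too
```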
// @Override
public int update(final UpdateBuilder ub, final SearchCriteria<?> sc, Integer rows) {
public int update(UpdateBuilder ub, final SearchCriteria<?> sc, Integer rows) {
StringBuilder sql = null;
PreparedStatement pstmt = s_initStmt;
PreparedStatement pstmt = null;
final Transaction txn = Transaction.currentTxn();
try {
final String searchClause = sc.getWhereClause();
@@ -775,8 +773,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene
if (e.getSQLState().equals("23000") && e.getErrorCode() == 1062) {
throw new EntityExistsException("Entity already exists ", e);
}
final String sqlStr = pstmt.toString();
throw new CloudRuntimeException("DB Exception on: " + sqlStr, e);
throw new CloudRuntimeException("DB Exception on: " + pstmt, e);
}
}

@@ -891,7 +888,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene
sql.append(lock ? FOR_UPDATE_CLAUSE : SHARE_MODE_CLAUSE);
}
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = s_initStmt;
PreparedStatement pstmt = null;
try {
pstmt = txn.prepareAutoCloseStatement(sql.toString());

@@ -996,7 +993,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene

protected List<T> executeList(final String sql, final Object... params) {
final Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = s_initStmt;
PreparedStatement pstmt = null;
final List<T> result = new ArrayList<T>();
try {
pstmt = txn.prepareAutoCloseStatement(sql);
@@ -1011,9 +1008,9 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene
}
return result;
} catch (final SQLException e) {
throw new CloudRuntimeException("DB Exception on: " + pstmt.toString(), e);
throw new CloudRuntimeException("DB Exception on: " + pstmt, e);
} catch (final Throwable e) {
throw new CloudRuntimeException("Caught: " + pstmt.toString(), e);
throw new CloudRuntimeException("Caught: " + pstmt, e);
}
}

@@ -1038,7 +1035,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene
@Override
public boolean expunge(final ID id) {
final Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = s_initStmt;
PreparedStatement pstmt = null;
String sql = null;
try {
txn.start();
@@ -1060,8 +1057,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene
}
return true;
} catch (final SQLException e) {
final String sqlStr = pstmt.toString();
throw new CloudRuntimeException("DB Exception on: " + sqlStr, e);
throw new CloudRuntimeException("DB Exception on: " + pstmt, e);
}
}

@@ -1079,7 +1075,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene
final String sql = str.toString();

final Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = s_initStmt;
PreparedStatement pstmt = null;
try {
pstmt = txn.prepareAutoCloseStatement(sql);
int i = 0;
@@ -1088,9 +1084,9 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene
}
return pstmt.executeUpdate();
} catch (final SQLException e) {
throw new CloudRuntimeException("DB Exception on: " + pstmt.toString(), e);
throw new CloudRuntimeException("DB Exception on: " + pstmt, e);
} catch (final Throwable e) {
throw new CloudRuntimeException("Caught: " + pstmt.toString(), e);
throw new CloudRuntimeException("Caught: " + pstmt, e);
}
}
@ -1149,11 +1145,11 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene
|
||||
}
|
||||
|
||||
@Override @DB(txn=false)
|
||||
public boolean update(final ID id, final T entity) {
|
||||
public boolean update(ID id, T entity) {
|
||||
assert Enhancer.isEnhanced(entity.getClass()) : "Entity is not generated by this dao";
|
||||
|
||||
final UpdateBuilder ub = getUpdateBuilder(entity);
|
||||
final boolean result = update(id, ub) != 0;
|
||||
UpdateBuilder ub = getUpdateBuilder(entity);
|
||||
boolean result = update(id, ub, entity) != 0;
|
||||
return result;
|
||||
}
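In effect, passing the entity through to update(id, ub, entity) lets collection-typed fields recorded by the UpdateBuilder be rewritten alongside the scalar columns. A hedged sketch of a caller inside a hypothetical DAO; the VO and its tags field are illustrative, not part of this commit.

// Hypothetical VO with an element-collection style field; names are illustrative only.
SampleVO vo = findById(sampleId);
vo.setTags(Arrays.asList("tag1", "tag2"));   // recorded by the UpdateBuilder as a collection change
update(sampleId, vo);                        // scalar columns updated; tag rows cleared and re-inserted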

@ -1189,7 +1185,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene

ID id = null;
final Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = s_initStmt;
PreparedStatement pstmt = null;
String sql = null;
try {
txn.start();
@ -1222,43 +1218,63 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene
}
}
}
HashMap<Attribute, Object> ecAttributes = new HashMap<Attribute, Object>();
for (Attribute attr : _ecAttributes) {
EcInfo ec = (EcInfo)attr.attache;
Object obj;
try {
obj = attr.field.get(entity);
if (ec.rawClass != null) {
Enumeration en = Collections.enumeration((Collection)obj);
while (en.hasMoreElements()) {
pstmt = txn.prepareAutoCloseStatement(ec.insertSql);
if (ec.targetClass == Date.class) {
pstmt.setString(1, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), (Date)en.nextElement()));
} else {
pstmt.setObject(1, en.nextElement());
}
prepareAttribute(2, pstmt, _idAttributes.get(attr.table)[0], _idField.get(entity));
pstmt.executeUpdate();
}
}
} catch (IllegalArgumentException e) {
throw new CloudRuntimeException("Yikes! ", e);
} catch (IllegalAccessException e) {
throw new CloudRuntimeException("Yikes! ", e);
Object ec = attr.field.get(entity);
if (ec != null) {
ecAttributes.put(attr, ec);
}
}

insertElementCollection(entity, _idAttributes.get(_table)[0], id, ecAttributes);
txn.commit();
} catch (final SQLException e) {
if (e.getSQLState().equals("23000") && e.getErrorCode() == 1062) {
throw new EntityExistsException("Entity already exists: ", e);
} else {
final String sqlStr = pstmt.toString();
throw new CloudRuntimeException("DB Exception on: " + sqlStr, e);
throw new CloudRuntimeException("DB Exception on: " + pstmt, e);
}
} catch (IllegalArgumentException e) {
throw new CloudRuntimeException("Problem with getting the ec attribute ", e);
} catch (IllegalAccessException e) {
throw new CloudRuntimeException("Problem with getting the ec attribute ", e);
}

return _idField != null ? findByIdIncludingRemoved(id) : null;
}

protected void insertElementCollection(T entity, Attribute idAttribute, ID id, Map<Attribute, Object> ecAttributes) throws SQLException {
Transaction txn = Transaction.currentTxn();
txn.start();
for (Map.Entry<Attribute, Object> entry : ecAttributes.entrySet()) {
Attribute attr = entry.getKey();
Object obj = entry.getValue();

EcInfo ec = (EcInfo)attr.attache;
Enumeration en = null;
if (ec.rawClass == null) {
en = Collections.enumeration(Arrays.asList((Object[])obj));
} else {
en = Collections.enumeration((Collection)obj);
}
PreparedStatement pstmt = txn.prepareAutoCloseStatement(ec.clearSql);
prepareAttribute(1, pstmt, idAttribute, id);
pstmt.executeUpdate();

while (en.hasMoreElements()) {
pstmt = txn.prepareAutoCloseStatement(ec.insertSql);
if (ec.targetClass == Date.class) {
pstmt.setString(1, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), (Date)en.nextElement()));
} else {
pstmt.setObject(1, en.nextElement());
}
prepareAttribute(2, pstmt, idAttribute, id);
pstmt.executeUpdate();
}
}
txn.commit();
}
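To make the clear-then-reinsert loop above easier to follow: ec.clearSql and ec.insertSql are presumably generated from the element-collection mapping and have roughly this shape; the table and column names below are illustrative only, not taken from this commit.

// Illustrative statement shapes; the real strings are built from the mapping metadata.
String clearSql  = "DELETE FROM sample_tags WHERE sample_id = ?";             // ec.clearSql: wipe the owner's existing rows
String insertSql = "INSERT INTO sample_tags (tag, sample_id) VALUES (?, ?)";  // ec.insertSql: executed once per element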

@DB(txn=false)
protected Object generateValue(final Attribute attr) {
if (attr.is(Attribute.Flag.Created) || attr.is(Attribute.Flag.Removed)) {
@ -1392,7 +1408,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene

@SuppressWarnings("unchecked") @DB(txn=false)
protected T toEntityBean(final ResultSet result, final boolean cache) throws SQLException {
final T entity = (T)_factory.newInstance(new Callback[] {NoOp.INSTANCE, new UpdateBuilder(_allAttributes)});
final T entity = (T)_factory.newInstance(new Callback[] {NoOp.INSTANCE, new UpdateBuilder(this)});

toEntityBean(result, entity);

@ -1496,7 +1512,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene
final StringBuilder sql = new StringBuilder("DELETE FROM ");
sql.append(_table).append(" WHERE ").append(_removed.first()).append(" IS NOT NULL");
final Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = s_initStmt;
PreparedStatement pstmt = null;
try {
txn.start();
pstmt = txn.prepareAutoCloseStatement(sql.toString());
@ -1504,8 +1520,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene
pstmt.executeUpdate();
txn.commit();
} catch (final SQLException e) {
final String sqlStr = pstmt.toString();
throw new CloudRuntimeException("DB Exception on " + sqlStr, e);
throw new CloudRuntimeException("DB Exception on " + pstmt, e);
}
}

@ -1523,7 +1538,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene
}

final Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = s_initStmt;
PreparedStatement pstmt = null;
try {

txn.start();
@ -1541,8 +1556,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> implements Gene
}
return result > 0;
} catch (final SQLException e) {
final String sqlStr = pstmt.toString();
throw new CloudRuntimeException("DB Exception on: " + sqlStr, e);
throw new CloudRuntimeException("DB Exception on: " + pstmt, e);
}
}


@ -29,11 +29,12 @@ import com.cloud.utils.Ternary;
import com.cloud.utils.exception.CloudRuntimeException;

public class UpdateBuilder implements MethodInterceptor {
protected final Map<String, Attribute> _attrs;
protected Map<String, Ternary<Attribute, Boolean, Object>> _changes;
protected HashMap<Attribute, Object> _collectionChanges;
protected GenericDaoBase<?, ?> _dao;

protected UpdateBuilder(Map<String, Attribute> attrs) {
_attrs = attrs;
protected UpdateBuilder(GenericDaoBase<?, ?> dao) {
_dao = dao;
_changes = new HashMap<String, Ternary<Attribute, Boolean, Object>>();
}

@ -47,7 +48,7 @@ public class UpdateBuilder implements MethodInterceptor {
makeIncrChange(name, args);
} else if (name.startsWith("decr")) {
makeDecrChange(name, args);
}
}
return methodProxy.invokeSuper(object, args);
}

@ -58,25 +59,32 @@ public class UpdateBuilder implements MethodInterceptor {
}

protected Attribute makeChange(String field, Object value) {
Attribute attr = _attrs.get(field);
Attribute attr = _dao._allAttributes.get(field);

assert (attr == null || attr.isUpdatable()) : "Updating an attribute that's not updatable: " + field;
if (attr != null) {
_changes.put(field, new Ternary<Attribute, Boolean, Object>(attr, null, value));
if (attr.attache == null) {
_changes.put(field, new Ternary<Attribute, Boolean, Object>(attr, null, value));
} else {
if (_collectionChanges == null) {
_collectionChanges = new HashMap<Attribute, Object>();
}
_collectionChanges.put(attr, value);
}
}
return attr;
}
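A compressed trace of how a collection-typed setter presumably reaches the branch above through the CGLIB interceptor; the VO and field names are hypothetical.

// vo is an Enhancer-generated proxy whose callback is this UpdateBuilder.
vo.setTags(tags);                  // intercept(...) sees the "set" prefix
// -> makeChange("tags", tags)     // attr.attache != null for an element collection,
//                                 //    so the value is parked in _collectionChanges
// -> GenericDaoBase.update(id, ub, entity) later reads getCollectionChanges()
//    and rewrites the rows via insertElementCollection(...)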

protected void makeIncrChange(String method, Object[] args) {
String field = methodToField(method, 4);
Attribute attr = _attrs.get(field);
Attribute attr = _dao._allAttributes.get(field);
assert (attr != null && attr.isUpdatable()) : "Updating an attribute that's not updatable: " + field;
incr(attr, args == null || args.length == 0 ? 1 : args[0]);
}

protected void makeDecrChange(String method, Object[] args) {
String field = methodToField(method, 4);
Attribute attr = _attrs.get(field);
Attribute attr = _dao._allAttributes.get(field);
assert (attr != null && attr.isUpdatable()) : "Updating an attribute that's not updatable: " + field;
decr(attr, args == null || args.length == 0 ? 1 : args[0]);
}
@ -107,19 +115,23 @@ public class UpdateBuilder implements MethodInterceptor {
}

public boolean hasChanges() {
return _changes.size() != 0;
return (_changes.size() + (_collectionChanges != null ? _collectionChanges.size() : 0)) != 0;
}

public boolean has(String name) {
return _changes.containsKey(name);
}

public Object get(String name) {
return _changes.get(name).second();
public Map<Attribute, Object> getCollectionChanges() {
return _collectionChanges;
}

protected void clear() {
_changes.clear();
if (_collectionChanges != null) {
_collectionChanges.clear();
_collectionChanges = null;
}
}

public StringBuilder toSql(String tables) {