Mirror of https://github.com/apache/cloudstack.git (synced 2025-11-02 11:52:28 +01:00)

Commit 6db212d413: Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/cloudstack
@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

sudo: required
language: java
cache:
directories:
@@ -47,7 +47,7 @@ public class CreateVMSnapshotCmd extends BaseAsyncCreateCmd {
@Parameter(name = ApiConstants.VIRTUAL_MACHINE_ID, type = CommandType.UUID, required = true, entityType = UserVmResponse.class, description = "The ID of the vm")
private Long vmId;

@Parameter(name = ApiConstants.VM_SNAPSHOT_DESCRIPTION, type = CommandType.STRING, required = false, description = "The discription of the snapshot")
@Parameter(name = ApiConstants.VM_SNAPSHOT_DESCRIPTION, type = CommandType.STRING, required = false, description = "The description of the snapshot")
private String description;

@Parameter(name = ApiConstants.VM_SNAPSHOT_DISPLAYNAME, type = CommandType.STRING, required = false, description = "The display name of the snapshot")
@@ -22,11 +22,14 @@ package com.cloud.agent.api;
import com.cloud.agent.api.to.NicTO;
import com.cloud.vm.VirtualMachine;

import java.util.Map;

public class PlugNicCommand extends Command {

NicTO nic;
String instanceName;
VirtualMachine.Type vmType;
Map<String, String> details;

public NicTO getNic() {
return nic;

@@ -46,6 +49,13 @@ public class PlugNicCommand extends Command {
this.vmType = vmtype;
}

public PlugNicCommand(NicTO nic, String instanceName, VirtualMachine.Type vmtype, Map<String, String> details) {
this.nic = nic;
this.instanceName = instanceName;
this.vmType = vmtype;
this.details = details;
}

public String getVmName() {
return instanceName;
}

@@ -53,4 +63,8 @@ public class PlugNicCommand extends Command {
public VirtualMachine.Type getVMType() {
return vmType;
}

public Map<String, String> getDetails() {
return this.details;
}
}
@@ -25,6 +25,7 @@ import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.URL;
import java.net.URLConnection;
import java.nio.charset.Charset;

import org.apache.log4j.Logger;

@@ -70,7 +71,7 @@ public abstract class CommandWrapper<T extends Command, A extends Answer, R exte
conn.setReadTimeout(5000);

final InputStream is = conn.getInputStream();
final BufferedReader reader = new BufferedReader(new InputStreamReader(is));
final BufferedReader reader = new BufferedReader(new InputStreamReader(is, Charset.defaultCharset()));
final StringBuilder sb2 = new StringBuilder();
String line = null;
try {
@@ -498,7 +498,9 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
} catch (UnknownHostException e) {
throw new CloudRuntimeException("Unable to resolve " + ip);
}
try (SocketChannel ch1 = SocketChannel.open(new InetSocketAddress(addr, Port.value()));){
SocketChannel ch1 = null;
try {
ch1 = SocketChannel.open(new InetSocketAddress(addr, Port.value()));
ch1.configureBlocking(true); // make sure we are working at blocking mode
ch1.socket().setKeepAlive(true);
ch1.socket().setSoTimeout(60 * 1000);

@@ -511,14 +513,21 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
Link.doHandshake(ch1, sslEngine, true);
s_logger.info("SSL: Handshake done");
} catch (Exception e) {
ch1.close();
throw new IOException("SSL: Fail to init SSL! " + e);
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Connection to peer opened: " + peerName + ", ip: " + ip);
}
_peers.put(peerName, ch);
_peers.put(peerName, ch1);
_sslEngines.put(peerName, sslEngine);
return ch1;
} catch (IOException e) {
try {
ch1.close();
} catch (IOException ex) {
s_logger.error("failed to close failed peer socket: " + ex);
}
s_logger.warn("Unable to connect to peer management server: " + peerName + ", ip: " + ip + " due to " + e.getMessage(), e);
return null;
}
@@ -3422,8 +3422,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
final VMInstanceVO router = _vmDao.findById(vm.getId());
if (router.getState() == State.Running) {
try {
final PlugNicCommand plugNicCmd = new PlugNicCommand(nic, vm.getName(), vm.getType());

final PlugNicCommand plugNicCmd = new PlugNicCommand(nic, vm.getName(), vm.getType(), vm.getDetails());
final Commands cmds = new Commands(Command.OnError.Stop);
cmds.addCommand("plugnic", plugNicCmd);
_agentMgr.send(dest.getHost().getId(), cmds);
@@ -22,6 +22,7 @@ import com.cloud.utils.db.TransactionLegacy;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
import org.apache.log4j.Logger;
import org.jasypt.exceptions.EncryptionOperationNotPossibleException;

import java.io.File;
import java.io.UnsupportedEncodingException;

@@ -74,6 +75,8 @@ public class Upgrade450to451 implements DbUpgrade {
encryptKeyInKeyStore(conn);
encryptIpSecPresharedKeysOfRemoteAccessVpn(conn);
encryptStoragePoolUserInfo(conn);
updateUserVmDetailsWithNicAdapterType(conn);
upgradeVMWareLocalStorage(conn);
}

private void encryptKeyInKeyStore(Connection conn) {

@@ -84,9 +87,11 @@ public class Upgrade450to451 implements DbUpgrade {
selectStatement = conn.prepareStatement("SELECT ks.id, ks.key FROM cloud.keystore ks WHERE ks.key IS NOT null");
selectResultSet = selectStatement.executeQuery();
while (selectResultSet.next()) {
Long keyId = selectResultSet.getLong(1);
String preSharedKey = selectResultSet.getString(2);
updateStatement = conn.prepareStatement("UPDATE cloud.keystore ks SET ks.key = ? WHERE ks.id = ?");
updateStatement.setString(1, DBEncryptionUtil.encrypt(selectResultSet.getString(2)));
updateStatement.setLong(2, selectResultSet.getLong(1));
updateStatement.setString(1, DBEncryptionUtil.encrypt(preSharedKey));
updateStatement.setLong(2, keyId);
updateStatement.executeUpdate();
updateStatement.close();
}

@@ -120,10 +125,16 @@ public class Upgrade450to451 implements DbUpgrade {
selectStatement = conn.prepareStatement("SELECT id, ipsec_psk FROM `cloud`.`remote_access_vpn`");
resultSet = selectStatement.executeQuery();
while (resultSet.next()) {
Long rowId = resultSet.getLong(1);
String preSharedKey = resultSet.getString(2);
try {
preSharedKey = DBEncryptionUtil.decrypt(preSharedKey);
} catch (EncryptionOperationNotPossibleException ignored) {
s_logger.debug("The ipsec_psk preshared key id=" + rowId + "in remote_access_vpn is not encrypted, encrypting it.");
}
updateStatement = conn.prepareStatement("UPDATE `cloud`.`remote_access_vpn` SET ipsec_psk=? WHERE id=?");
updateStatement.setString(1, DBEncryptionUtil.encrypt(preSharedKey));
updateStatement.setLong(2, resultSet.getLong(1));
updateStatement.setLong(2, rowId);
updateStatement.executeUpdate();
updateStatement.close();
}

@@ -175,4 +186,38 @@ public class Upgrade450to451 implements DbUpgrade {
}
s_logger.debug("Done encrypting storage_pool's user_info column");
}

private void updateUserVmDetailsWithNicAdapterType(Connection conn) {
PreparedStatement insertPstmt = null;
try {
insertPstmt = conn.prepareStatement("INSERT INTO `cloud`.`user_vm_details`(vm_id,name,value,display) select v.id as vm_id, details.name, details.value, details.display from `cloud`.`vm_instance` as v, `cloud`.`vm_template_details` as details where v.removed is null and v.vm_template_id=details.template_id and details.name='nicAdapter' and details.template_id in (select id from `cloud`.`vm_template` where hypervisor_type = 'vmware') and v.id not in (select vm_id from `cloud`.`user_vm_details` where name='nicAdapter');");
insertPstmt.executeUpdate();
} catch (SQLException e) {
throw new CloudRuntimeException("Failed to update user_vm_details table with nicAdapter entries by copying from vm_template_detail table", e);
} finally {
try {
if (insertPstmt != null)
insertPstmt.close();
} catch (SQLException e) {
}
}
s_logger.debug("Done. Updated user_vm_details table with nicAdapter entries by copying from vm_template_detail table. This affects only VM/templates with hypervisor_type as VMware.");
}

private void upgradeVMWareLocalStorage(Connection conn) {
PreparedStatement updatePstmt = null;
try {
updatePstmt = conn.prepareStatement("UPDATE storage_pool SET pool_type='VMFS',host_address=@newaddress WHERE (@newaddress:=concat('VMFS datastore: ', path)) IS NOT NULL AND scope = 'HOST' AND pool_type = 'LVM' AND id IN (SELECT * FROM (SELECT storage_pool.id FROM storage_pool,cluster WHERE storage_pool.cluster_id = cluster.id AND cluster.hypervisor_type='VMware') AS t);");
updatePstmt.executeUpdate();
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to upgrade VMWare local storage pool type", e);
} finally {
try {
if (updatePstmt != null)
updatePstmt.close();
} catch (SQLException e) {
}
s_logger.debug("Done, upgraded VMWare local storage pool type to VMFS and host_address to the VMFS format");
}
}
}
@@ -18,7 +18,6 @@
*/
package org.apache.cloudstack.storage.cache.allocator;

import java.util.Collections;
import java.util.List;

import javax.inject.Inject;

@@ -33,7 +32,9 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager;
import org.apache.cloudstack.storage.image.datastore.ImageStoreProviderManager;

import com.cloud.server.StatsCollector;
import com.cloud.storage.ScopeType;

@Component

@@ -43,6 +44,10 @@ public class StorageCacheRandomAllocator implements StorageCacheAllocator {
DataStoreManager dataStoreMgr;
@Inject
ObjectInDataStoreManager objectInStoreMgr;
@Inject
ImageStoreProviderManager imageStoreMgr;
@Inject
StatsCollector statsCollector;

@Override
public DataStore getCacheStore(Scope scope) {

@@ -57,8 +62,7 @@ public class StorageCacheRandomAllocator implements StorageCacheAllocator {
return null;
}

Collections.shuffle(cacheStores);
return cacheStores.get(0);
return imageStoreMgr.getImageStore(cacheStores);
}

@Override

@@ -78,18 +82,12 @@ public class StorageCacheRandomAllocator implements StorageCacheAllocator {
if (cacheStores.size() > 1) {
for (DataStore store : cacheStores) {
DataObjectInStore obj = objectInStoreMgr.findObject(data, store);
if (obj != null && obj.getState() == ObjectInDataStoreStateMachine.State.Ready) {
if (obj != null && obj.getState() == ObjectInDataStoreStateMachine.State.Ready && statsCollector.imageStoreHasEnoughCapacity(store)) {
s_logger.debug("pick the cache store " + store.getId() + " where data is already there");
return store;
}
}

// otherwise, just random pick one
Collections.shuffle(cacheStores);
}
return cacheStores.get(0);

return imageStoreMgr.getImageStore(cacheStores);
}

}
@@ -19,7 +19,9 @@
package org.apache.cloudstack.storage.image.manager;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

@@ -41,6 +43,7 @@ import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
import org.apache.cloudstack.storage.image.datastore.ImageStoreProviderManager;
import org.apache.cloudstack.storage.image.store.ImageStoreImpl;

import com.cloud.server.StatsCollector;
import com.cloud.storage.ScopeType;
import com.cloud.storage.dao.VMTemplateDao;

@@ -53,6 +56,8 @@ public class ImageStoreProviderManagerImpl implements ImageStoreProviderManager
VMTemplateDao imageDataDao;
@Inject
DataStoreProviderManager providerManager;
@Inject
StatsCollector _statsCollector;
Map<String, ImageStoreDriver> driverMaps;

@PostConstruct

@@ -137,4 +142,21 @@ public class ImageStoreProviderManagerImpl implements ImageStoreProviderManager
}
return imageStores;
}

@Override
public DataStore getImageStore(List<DataStore> imageStores) {
if (imageStores.size() > 1) {
Collections.shuffle(imageStores); // Randomize image store list.
Iterator<DataStore> i = imageStores.iterator();
DataStore imageStore = null;
while(i.hasNext()) {
imageStore = i.next();
// Return image store if used percentage is less then threshold value i.e. 90%.
if (_statsCollector.imageStoreHasEnoughCapacity(imageStore)) {
return imageStore;
}
}
}
return imageStores.get(0);
}
}
@@ -18,7 +18,6 @@
*/
package org.apache.cloudstack.storage.datastore;

import java.util.Collections;
import java.util.List;

import javax.inject.Inject;

@@ -79,8 +78,7 @@ public class DataStoreManagerImpl implements DataStoreManager {
if (stores == null || stores.size() == 0) {
return null;
}
Collections.shuffle(stores);
return stores.get(0);
return imageDataStoreMgr.getImageStore(stores);
}

@Override

@@ -112,8 +110,7 @@ public class DataStoreManagerImpl implements DataStoreManager {
if (stores == null || stores.size() == 0) {
return null;
}
Collections.shuffle(stores);
return stores.get(0);
return imageDataStoreMgr.getImageStore(stores);
}

@Override
@@ -41,4 +41,6 @@ public interface ImageStoreProviderManager {
List<DataStore> listImageCacheStores(Scope scope);

boolean registerDriver(String uuid, ImageStoreDriver driver);

DataStore getImageStore(List<DataStore> imageStores);
}
@@ -28,7 +28,7 @@
</repository>
<repository>
<id>ceph-com</id>
<url>http://ceph.com/maven</url>
<url>http://eu.ceph.com/maven</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
@@ -294,7 +294,7 @@ public class BridgeVifDriver extends VifDriverBase {
}
}
if (!foundLinkLocalBr) {
Script.runSimpleBashScript("ifconfig " + linkLocalBr + " 169.254.0.1;" + "ip route add " + NetUtils.getLinkLocalCIDR() + " dev " + linkLocalBr + " src " +
Script.runSimpleBashScript("ip address add 169.254.0.1/16 dev " + linkLocalBr + ";" + "ip route add " + NetUtils.getLinkLocalCIDR() + " dev " + linkLocalBr + " src " +
NetUtils.getLinkLocalGateway());
}
}

@@ -302,7 +302,7 @@ public class BridgeVifDriver extends VifDriverBase {
private void createControlNetwork(String privBrName) {
deleteExitingLinkLocalRouteTable(privBrName);
if (!isBridgeExists(privBrName)) {
Script.runSimpleBashScript("brctl addbr " + privBrName + "; ifconfig " + privBrName + " up; ifconfig " + privBrName + " 169.254.0.1", _timeout);
Script.runSimpleBashScript("brctl addbr " + privBrName + "; ip link set " + privBrName + " up; ip address add 169.254.0.1/16 dev " + privBrName, _timeout);
}

}
@@ -263,7 +263,7 @@ public class IvsVifDriver extends VifDriverBase {
}
}
if (!foundLinkLocalBr) {
Script.runSimpleBashScript("ifconfig " + linkLocalBr + " 169.254.0.1;" + "ip route add " + NetUtils.getLinkLocalCIDR() + " dev " + linkLocalBr + " src " +
Script.runSimpleBashScript("ip address add 169.254.0.1/16 dev " + linkLocalBr + ";" + "ip route add " + NetUtils.getLinkLocalCIDR() + " dev " + linkLocalBr + " src " +
NetUtils.getLinkLocalGateway());
}
}

@@ -271,7 +271,7 @@ public class IvsVifDriver extends VifDriverBase {
private void createControlNetwork(String privBrName) {
deleteExitingLinkLocalRouteTable(privBrName);
if (!isBridgeExists(privBrName)) {
Script.runSimpleBashScript("brctl addbr " + privBrName + "; ifconfig " + privBrName + " up; ifconfig " + privBrName + " 169.254.0.1", _timeout);
Script.runSimpleBashScript("brctl addbr " + privBrName + "; ip link set " + privBrName + " up; ip address add 169.254.0.1/16 dev " + privBrName, _timeout);
}
}
@@ -16,6 +16,8 @@
// under the License.
package com.cloud.hypervisor.kvm.resource;

import org.apache.commons.lang.StringEscapeUtils;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

@@ -1168,7 +1170,7 @@ public class LibvirtVMDef {
_port = port;
_autoPort = autoPort;
_listenAddr = listenAddr;
_passwd = passwd;
_passwd = StringEscapeUtils.escapeXml(passwd);
_keyMap = keyMap;
}
@@ -151,7 +151,7 @@ public class OvsVifDriver extends VifDriverBase {
}
}
if (!foundLinkLocalBr) {
Script.runSimpleBashScript("ifconfig " + linkLocalBr + " 169.254.0.1;" + "ip route add " + NetUtils.getLinkLocalCIDR() + " dev " + linkLocalBr + " src " +
Script.runSimpleBashScript("ip address add 169.254.0.1/16 dev " + linkLocalBr + ";" + "ip route add " + NetUtils.getLinkLocalCIDR() + " dev " + linkLocalBr + " src " +
NetUtils.getLinkLocalGateway());
}
}

@@ -159,7 +159,7 @@ public class OvsVifDriver extends VifDriverBase {
private void createControlNetwork(String privBrName) {
deleteExitingLinkLocalRouteTable(privBrName);
if (!isBridgeExists(privBrName)) {
Script.runSimpleBashScript("ovs-vsctl add-br " + privBrName + "; ifconfig " + privBrName + " up; ifconfig " + privBrName + " 169.254.0.1", _timeout);
Script.runSimpleBashScript("ovs-vsctl add-br " + privBrName + "; ip link set " + privBrName + " up; ip address add 169.254.0.1/16 dev " + privBrName, _timeout);
}
}
@@ -378,12 +378,22 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
//NOTE: the hostid can be a hypervisor host, or a ssvm agent. For copycommand, if it's for volume upload, the hypervisor
//type is empty, so we need to check the format of volume at first.
if (cmd instanceof CopyCommand) {
CopyCommand cpyCommand = (CopyCommand)cmd;
CopyCommand cpyCommand = (CopyCommand) cmd;
DataTO srcData = cpyCommand.getSrcTO();
DataStoreTO srcStoreTO = srcData.getDataStore();
DataTO destData = cpyCommand.getDestTO();
DataStoreTO destStoreTO = destData.getDataStore();

boolean inSeq = true;
if ((srcData.getObjectType() == DataObjectType.SNAPSHOT) || (destData.getObjectType() == DataObjectType.SNAPSHOT)) {
inSeq = false;
} else if ((destStoreTO.getRole() == DataStoreRole.Image) || (destStoreTO.getRole() == DataStoreRole.ImageCache)) {
inSeq = false;
} else if (!VmwareFullClone.value()) {
inSeq = false;
}
cpyCommand.setExecuteInSequence(inSeq);

if (srcData.getObjectType() == DataObjectType.VOLUME) {
VolumeObjectTO volumeObjectTO = (VolumeObjectTO)srcData;
if (Storage.ImageFormat.OVA == volumeObjectTO.getFormat()) {
@@ -765,7 +765,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw

// Change permissions for the mountpoint
script = new Script(true, "chmod", _timeout, s_logger);
script.add("777", mountPoint);
script.add("1777", mountPoint);
result = script.execute();
if (result != null) {
s_logger.warn("Unable to set permissions for " + mountPoint + " due to " + result);
@@ -916,8 +916,12 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
return new PlugNicAnswer(cmd, false, "Unable to execute PlugNicCommand due to " + errMsg);
}
*/
// TODO need a way to specify the control of NIC device type
// Fallback to E1000 if no specific nicAdapter is passed
VirtualEthernetCardType nicDeviceType = VirtualEthernetCardType.E1000;
Map details = cmd.getDetails();
if (details != null) {
nicDeviceType = VirtualEthernetCardType.valueOf((String) details.get("nicAdapter"));
}

// find a usable device number in VMware environment
VirtualDevice[] nicDevices = vmMo.getNicDevices();

@@ -1818,7 +1822,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa

int getReservedCpuMHZ(VirtualMachineTO vmSpec) {
if (vmSpec.getDetails().get(VMwareGuru.VmwareReserveCpu.key()).equalsIgnoreCase("true")) {
return vmSpec.getMinSpeed();
return vmSpec.getMinSpeed() * vmSpec.getCpus();
}
return 0;
}
@@ -2468,7 +2472,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
private Pair<ManagedObjectReference, String> prepareNetworkFromNicInfo(HostMO hostMo, NicTO nicTo, boolean configureVServiceInNexus, VirtualMachine.Type vmType) throws Exception {

Ternary<String, String, String> switchDetails = getTargetSwitch(nicTo);
nicTo.getType();
VirtualSwitchType switchType = VirtualSwitchType.getType(switchDetails.second());
String switchName = switchDetails.first();
String vlanToken = switchDetails.third();

@@ -4242,7 +4245,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
DatastoreSummary dsSummary = dsMo.getSummary();
String address = hostMo.getHostName();
StoragePoolInfo pInfo =
new StoragePoolInfo(poolUuid, address, dsMo.getMor().getValue(), "", StoragePoolType.LVM, dsSummary.getCapacity(), dsSummary.getFreeSpace());
new StoragePoolInfo(poolUuid, address, dsMo.getMor().getValue(), "", StoragePoolType.VMFS, dsSummary.getCapacity(), dsSummary.getFreeSpace());
StartupStorageCommand cmd = new StartupStorageCommand();
cmd.setName(poolUuid);
cmd.setPoolInfo(pInfo);
@@ -46,9 +46,6 @@ import javax.naming.ConfigurationException;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;

import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.log4j.Logger;
import org.apache.xmlrpc.XmlRpcException;
import org.w3c.dom.Document;

@@ -57,6 +54,35 @@ import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;

import com.trilead.ssh2.SCPClient;
import com.xensource.xenapi.Bond;
import com.xensource.xenapi.Connection;
import com.xensource.xenapi.Console;
import com.xensource.xenapi.Host;
import com.xensource.xenapi.HostCpu;
import com.xensource.xenapi.HostMetrics;
import com.xensource.xenapi.Network;
import com.xensource.xenapi.PBD;
import com.xensource.xenapi.PIF;
import com.xensource.xenapi.Pool;
import com.xensource.xenapi.SR;
import com.xensource.xenapi.Session;
import com.xensource.xenapi.Task;
import com.xensource.xenapi.Types;
import com.xensource.xenapi.Types.BadServerResponse;
import com.xensource.xenapi.Types.VmPowerState;
import com.xensource.xenapi.Types.XenAPIException;
import com.xensource.xenapi.VBD;
import com.xensource.xenapi.VDI;
import com.xensource.xenapi.VIF;
import com.xensource.xenapi.VLAN;
import com.xensource.xenapi.VM;
import com.xensource.xenapi.XenAPIObject;

import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;

import com.cloud.agent.IAgentControl;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;

@@ -123,30 +149,6 @@ import com.cloud.utils.net.NetUtils;
import com.cloud.utils.ssh.SSHCmdHelper;
import com.cloud.utils.ssh.SshHelper;
import com.cloud.vm.VirtualMachine.PowerState;
import com.trilead.ssh2.SCPClient;
import com.xensource.xenapi.Bond;
import com.xensource.xenapi.Connection;
import com.xensource.xenapi.Console;
import com.xensource.xenapi.Host;
import com.xensource.xenapi.HostCpu;
import com.xensource.xenapi.HostMetrics;
import com.xensource.xenapi.Network;
import com.xensource.xenapi.PBD;
import com.xensource.xenapi.PIF;
import com.xensource.xenapi.Pool;
import com.xensource.xenapi.SR;
import com.xensource.xenapi.Session;
import com.xensource.xenapi.Task;
import com.xensource.xenapi.Types;
import com.xensource.xenapi.Types.BadServerResponse;
import com.xensource.xenapi.Types.VmPowerState;
import com.xensource.xenapi.Types.XenAPIException;
import com.xensource.xenapi.VBD;
import com.xensource.xenapi.VDI;
import com.xensource.xenapi.VIF;
import com.xensource.xenapi.VLAN;
import com.xensource.xenapi.VM;
import com.xensource.xenapi.XenAPIObject;

/**
* CitrixResourceBase encapsulates the calls to the XenServer Xapi process to
@@ -4862,7 +4864,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
dest.reconfigureIp(conn, spr.ipConfigurationMode, spr.IP, spr.netmask, spr.gateway, spr.DNS);
Host.managementReconfigure(conn, dest);
String hostUuid = null;
final int count = 0;
int count = 0;
while (count < 10) {
try {
Thread.sleep(10000);

@@ -4870,6 +4872,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
if (hostUuid != null) {
break;
}
++count;
} catch (final XmlRpcException e) {
s_logger.debug("Waiting for host to come back: " + e.getMessage());
} catch (final XenAPIException e) {
@@ -23,20 +23,19 @@ import java.util.List;
import javax.ejb.Local;

import org.apache.log4j.Logger;
import org.apache.xmlrpc.XmlRpcException;

import com.xensource.xenapi.Connection;
import com.xensource.xenapi.Host;
import com.xensource.xenapi.Network;
import com.xensource.xenapi.PIF;
import com.xensource.xenapi.Types.XenAPIException;
import com.xensource.xenapi.VLAN;

import com.cloud.agent.api.StartupCommand;
import com.cloud.resource.ServerResource;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
import com.cloud.utils.ssh.SSHCmdHelper;
import com.xensource.xenapi.Connection;
import com.xensource.xenapi.Host;
import com.xensource.xenapi.Network;
import com.xensource.xenapi.PIF;
import com.xensource.xenapi.Types.IpConfigurationMode;
import com.xensource.xenapi.Types.XenAPIException;
import com.xensource.xenapi.VLAN;

@Local(value = ServerResource.class)
public class XenServer56Resource extends CitrixResourceBase {

@@ -142,37 +141,6 @@ public class XenServer56Resource extends CitrixResourceBase {
}
}

@Override
public boolean transferManagementNetwork(final Connection conn, final Host host, final PIF src, final PIF.Record spr, final PIF dest) throws XmlRpcException, XenAPIException {
dest.reconfigureIp(conn, spr.ipConfigurationMode, spr.IP, spr.netmask, spr.gateway, spr.DNS);
Host.managementReconfigure(conn, dest);
String hostUuid = null;
final int count = 0;
while (count < 10) {
try {
Thread.sleep(10000);
hostUuid = host.getUuid(conn);
if (hostUuid != null) {
break;
}
} catch (final XmlRpcException e) {
s_logger.debug("Waiting for host to come back: " + e.getMessage());
} catch (final XenAPIException e) {
s_logger.debug("Waiting for host to come back: " + e.getMessage());
} catch (final InterruptedException e) {
s_logger.debug("Gotta run");
return false;
}
}
if (hostUuid == null) {
s_logger.warn("Unable to transfer the management network from " + spr.uuid);
return false;
}

src.reconfigureIp(conn, IpConfigurationMode.NONE, null, null, null, null);
return true;
}

@Override
public StartupCommand[] initialize() {
pingXAPI();
@@ -21,13 +21,14 @@ package com.cloud.hypervisor.xenserver.resource.wrapper;

import org.apache.log4j.Logger;

import com.xensource.xenapi.Connection;

import com.cloud.agent.api.Answer;
import com.cloud.agent.api.NetworkUsageAnswer;
import com.cloud.agent.api.NetworkUsageCommand;
import com.cloud.hypervisor.xenserver.resource.XenServer56Resource;
import com.cloud.resource.CommandWrapper;
import com.cloud.utils.ExecutionResult;
import com.xensource.xenapi.Connection;

public final class XenServer56NetworkUsageCommandWrapper extends CommandWrapper<NetworkUsageCommand, Answer, XenServer56Resource> {

@@ -87,8 +88,8 @@ public final class XenServer56NetworkUsageCommandWrapper extends CommandWrapper<
final String[] splitResult = detail.split(":");
int i = 0;
while (i < splitResult.length - 1) {
stats[0] += new Long(splitResult[i++]).longValue();
stats[1] += new Long(splitResult[i++]).longValue();
stats[0] += Long.parseLong(splitResult[i++]);
stats[1] += Long.parseLong(splitResult[i++]);
}
return new NetworkUsageAnswer(command, "success", stats[0], stats[1]);
}
@@ -22,17 +22,17 @@ package com.cloud.agent.api;
public class BcfAnswer extends Answer{
private final String hash;

public BcfAnswer(Command command, boolean success, String details) {
public BcfAnswer(final Command command, final boolean success, final String details) {
super(command, success, details);
this.hash = "";
}

public BcfAnswer(Command command, boolean success, String details, String hash) {
public BcfAnswer(final Command command, final boolean success, final String details, final String hash) {
super(command, success, details);
this.hash = hash;
}

public BcfAnswer(Command command, Exception e) {
public BcfAnswer(final Command command, final Exception e) {
super(command, e);
this.hash = "";
}
@@ -22,8 +22,8 @@ package com.cloud.agent.api;
import com.cloud.network.bigswitch.TopologyData;

public class BcfCommand extends Command {
private TopologyData topology = null;
private boolean _topologySyncRequested = false;
private TopologyData topology;
private boolean topologySyncRequested;

@Override
public boolean executeInSequence() {

@@ -34,15 +34,15 @@ public class BcfCommand extends Command {
return topology;
}

public void setTopology(TopologyData topology) {
public void setTopology(final TopologyData topology) {
this.topology = topology;
}

public boolean is_topologySyncRequested() {
return _topologySyncRequested;
public boolean isTopologySyncRequested() {
return topologySyncRequested;
}

public void set_topologySyncRequested(boolean requested) {
this._topologySyncRequested = requested;
public void setTopologySyncRequested(final boolean requested) {
this.topologySyncRequested = requested;
}
}
@@ -22,10 +22,10 @@ package com.cloud.agent.api;
import com.cloud.network.bigswitch.TopologyData;

public class CacheBcfTopologyCommand extends Command{
private final TopologyData topology;
private final TopologyData _topology;

public CacheBcfTopologyCommand(TopologyData topology){
this.topology = topology;
public CacheBcfTopologyCommand(final TopologyData topology){
this._topology = topology;
}

@Override

@@ -34,6 +34,6 @@ public class CacheBcfTopologyCommand extends Command{
}

public TopologyData getTopology() {
return topology;
return _topology;
}
}
@@ -20,18 +20,18 @@
package com.cloud.agent.api;

public class CreateBcfAttachmentCommand extends BcfCommand {
private String _tenantId;
private String _tenantName;
private String _networkId;
private String _portId;
private String _nicId;
private Integer _vlan;
private String _ipv4;
private String _mac;
private final String _tenantId;
private final String _tenantName;
private final String _networkId;
private final String _portId;
private final String _nicId;
private final Integer _vlan;
private final String _ipv4;
private final String _mac;

public CreateBcfAttachmentCommand(String tenantId, String tenantName,
String networkId, String portId, String nicId,
Integer vlan, String ipv4, String mac) {
public CreateBcfAttachmentCommand(final String tenantId, final String tenantName,
final String networkId, final String portId, final String nicId,
final Integer vlan, final String ipv4, final String mac) {
this._tenantId = tenantId;
this._tenantName = tenantName;
this._networkId = networkId;
@@ -22,11 +22,11 @@ package com.cloud.agent.api;
public class CreateBcfRouterCommand extends BcfCommand {
private final String _tenantId;

public CreateBcfRouterCommand(String tenantId){
public CreateBcfRouterCommand(final String tenantId){
this._tenantId = tenantId;
}

public String get_tenantId() {
public String getTenantId() {
return _tenantId;
}
}
@@ -26,8 +26,8 @@ public class CreateBcfRouterInterfaceCommand extends BcfCommand{
private final String _gateway;
private final String _networkName;

public CreateBcfRouterInterfaceCommand(String tenantId, String networkId, String cidr,
String gateway, String networkName){
public CreateBcfRouterInterfaceCommand(final String tenantId, final String networkId,
final String cidr, final String gateway, final String networkName){
this._tenantId = tenantId;
this._networkId = networkId;
this._networkName = networkName;

@@ -35,23 +35,23 @@ public class CreateBcfRouterInterfaceCommand extends BcfCommand{
this._gateway = gateway;
}

public String get_tenantId() {
public String getTenantId() {
return _tenantId;
}

public String get_networkId() {
public String getNetworkId() {
return _networkId;
}

public String get_networkName() {
public String getNetworkName() {
return _networkName;
}

public String get_cidr() {
public String getCidr() {
return _cidr;
}

public String get_gateway() {
public String getGateway() {
return _gateway;
}
}
@@ -20,14 +20,14 @@
package com.cloud.agent.api;

public class CreateBcfSegmentCommand extends BcfCommand {
private String _tenantId;
private String _tenantName;
private String _networkId;
private String _networkName;
private Integer _vlan;
private final String _tenantId;
private final String _tenantName;
private final String _networkId;
private final String _networkName;
private final Integer _vlan;

public CreateBcfSegmentCommand(String tenantId, String tenantName,
String networkId, String networkName, Integer vlan) {
public CreateBcfSegmentCommand(final String tenantId, final String tenantName,
final String networkId, final String networkName, final Integer vlan) {
this._tenantId = tenantId;
this._tenantName = tenantName;
this._networkId = networkId;
@@ -26,8 +26,8 @@ public class CreateBcfStaticNatCommand extends BcfCommand {
private final String _publicIp;
private final String _mac;

public CreateBcfStaticNatCommand(String tenantId, String networkId,
String privateIp, String publicIp, String mac){
public CreateBcfStaticNatCommand(final String tenantId, final String networkId,
final String privateIp, final String publicIp, final String mac){
this._tenantId = tenantId;
this._networkId = networkId;
this._privateIp = privateIp;

@@ -35,23 +35,23 @@ public class CreateBcfStaticNatCommand extends BcfCommand {
this._mac = mac;
}

public String get_tenantId() {
public String getTenantId() {
return _tenantId;
}

public String get_networkId() {
public String getNetworkId() {
return _networkId;
}

public String get_privateIp() {
public String getPrivateIp() {
return _privateIp;
}

public String get_publicIp() {
public String getPublicIp() {
return _publicIp;
}

public String get_mac() {
public String getMac() {
return _mac;
}
}
@@ -20,12 +20,12 @@
package com.cloud.agent.api;

public class DeleteBcfAttachmentCommand extends BcfCommand {
private String _tenantId;
private String _networkId;
private String _attachmentId;
private final String _tenantId;
private final String _networkId;
private final String _attachmentId;

public DeleteBcfAttachmentCommand(String tenantId,
String networkId, String attachmentId) {
public DeleteBcfAttachmentCommand(final String tenantId,
final String networkId, final String attachmentId) {
this._tenantId = tenantId;
this._networkId = networkId;
this._attachmentId = attachmentId;
@@ -21,19 +21,19 @@ package com.cloud.agent.api;

public class DeleteBcfSegmentCommand extends BcfCommand {

private String _tenantUuid;
private String _networkUuid;
private final String _tenantUuid;
private final String _networkUuid;

public DeleteBcfSegmentCommand(String tenantUuid, String networkUuid) {
public DeleteBcfSegmentCommand(final String tenantUuid, final String networkUuid) {
this._tenantUuid = tenantUuid;
this._networkUuid = networkUuid;
}

public String get_tenantUuid() {
public String getTenantUuid() {
return _tenantUuid;
}

public String getNetworkUuid() {
return _networkUuid;
}
}
}
@@ -24,21 +24,21 @@ public class DeleteBcfStaticNatCommand extends BcfCommand {
private final String _publicIp;
private final String _floatingIpId;

public DeleteBcfStaticNatCommand(String tenantId, String publicIp){
public DeleteBcfStaticNatCommand(final String tenantId, final String publicIp){
this._tenantId = tenantId;
this._publicIp = publicIp;
this._floatingIpId = publicIp.replace(".", "-");
}

public String get_tenantId() {
public String getTenantId() {
return _tenantId;
}

public String get_publicIp() {
public String getPublicIp() {
return _publicIp;
}

public String get_floatingIpId() {
public String getFloatingIpId() {
return _floatingIpId;
}
}
@@ -20,26 +20,27 @@
package com.cloud.agent.api;

public class GetControllerDataAnswer extends Answer {
private final String ipAddress;
private final boolean isMaster;
private final String _ipAddress;
private final boolean _isMaster;

public GetControllerDataAnswer(GetControllerDataCommand cmd,
String ipAddress, boolean isMaster){
this.ipAddress = ipAddress;
this.isMaster = isMaster;
public GetControllerDataAnswer(final GetControllerDataCommand cmd,
final String ipAddress, final boolean isMaster){
super(cmd);
this._ipAddress = ipAddress;
this._isMaster = isMaster;
}

public GetControllerDataAnswer(Command command, Exception e) {
public GetControllerDataAnswer(final Command command, final Exception e) {
super(command, e);
this.ipAddress = null;
this.isMaster = false;
this._ipAddress = null;
this._isMaster = false;
}

public String getIpAddress() {
return ipAddress;
return _ipAddress;
}

public boolean isMaster() {
return isMaster;
return _isMaster;
}
}
}
@@ -20,6 +20,4 @@
package com.cloud.agent.api;

public class GetControllerDataCommand extends BcfCommand {
public GetControllerDataCommand() {
}
}
@@ -28,13 +28,13 @@ public class GetControllerHostsAnswer {
public HostVO getMaster() {
return master;
}
public void setMaster(HostVO master) {
public void setMaster(final HostVO master) {
this.master = master;
}
public HostVO getSlave() {
return slave;
}
public void setSlave(HostVO slave) {
public void setSlave(final HostVO slave) {
this.slave = slave;
}
}
@@ -20,6 +20,4 @@
package com.cloud.agent.api;

public class GetControllerHostsCommand extends BcfCommand {
public GetControllerHostsCommand() {
}
}
}
@@ -20,19 +20,19 @@
package com.cloud.agent.api;

public class SyncBcfTopologyCommand extends BcfCommand {
private final boolean networkIncluded;
private final boolean routerIncluded;
private final boolean _networkIncluded;
private final boolean _routerIncluded;

public SyncBcfTopologyCommand(boolean networkIncluded, boolean routerIncluded) {
this.networkIncluded = networkIncluded;
this.routerIncluded = routerIncluded;
public SyncBcfTopologyCommand(final boolean networkIncluded, final boolean routerIncluded) {
this._networkIncluded = networkIncluded;
this._routerIncluded = routerIncluded;
}

public boolean isNetworkIncluded() {
return networkIncluded;
return _networkIncluded;
}

public boolean isRouterIncluded() {
return routerIncluded;
return _routerIncluded;
}
}
@@ -20,12 +20,13 @@
package com.cloud.agent.api;

public class UpdateBcfAttachmentCommand extends BcfCommand {
private String _networkId;
private String _attachmentId;
private String _tenantId;
private String _attachmentName;
private final String _networkId;
private final String _attachmentId;
private final String _tenantId;
private final String _attachmentName;

public UpdateBcfAttachmentCommand(String networkId, String attachmentId, String tenantId, String attachmentName) {
public UpdateBcfAttachmentCommand(final String networkId, final String attachmentId,
final String tenantId, final String attachmentName) {
this._networkId = networkId;
this._attachmentId = attachmentId;
this._tenantId = tenantId;
@@ -27,9 +27,9 @@ import com.cloud.network.bigswitch.AclData;
public class UpdateBcfRouterCommand extends BcfCommand {
private String tenantId;
private String publicIp;
private List<AclData> acls;
private final List<AclData> acls;

public UpdateBcfRouterCommand(String tenantId){
public UpdateBcfRouterCommand(final String tenantId){
this.tenantId = tenantId;
this.publicIp = null;
this.acls = new ArrayList<AclData>();

@@ -39,7 +39,7 @@ public class UpdateBcfRouterCommand extends BcfCommand {
return tenantId;
}

public void setTenantId(String tenantId) {
public void setTenantId(final String tenantId) {
this.tenantId = tenantId;
}

@@ -47,7 +47,7 @@ public class UpdateBcfRouterCommand extends BcfCommand {
return publicIp;
}

public void setPublicIp(String publicIp) {
public void setPublicIp(final String publicIp) {
this.publicIp = publicIp;
}

@@ -55,7 +55,7 @@ public class UpdateBcfRouterCommand extends BcfCommand {
return acls;
}

public void addAcl(AclData acl){
public void addAcl(final AclData acl){
this.acls.add(acl);
}
}
@@ -43,9 +43,9 @@ import com.cloud.utils.exception.CloudRuntimeException;
@APICommand(name = "addBigSwitchBcfDevice", responseObject = BigSwitchBcfDeviceResponse.class, description = "Adds a BigSwitch BCF Controller device", since = "4.6.0",
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class AddBigSwitchBcfDeviceCmd extends BaseAsyncCmd {
private static final String s_name = "addbigswitchbcfdeviceresponse";
private static final String S_NAME = "addbigswitchbcfdeviceresponse";
@Inject
BigSwitchBcfElementService _bigswitchBcfElementService;
private BigSwitchBcfElementService bcfElementService;

/////////////////////////////////////////////////////
//////////////// API parameters /////////////////////

@@ -105,25 +105,24 @@ public class AddBigSwitchBcfDeviceCmd extends BaseAsyncCmd {
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException,
ResourceAllocationException {
try {
BigSwitchBcfDeviceVO bigswitchBcfDeviceVO = _bigswitchBcfElementService.addBigSwitchBcfDevice(this);
if (bigswitchBcfDeviceVO != null) {
BigSwitchBcfDeviceResponse response = _bigswitchBcfElementService.createBigSwitchBcfDeviceResponse(bigswitchBcfDeviceVO);
response.setObjectName("bigswitchbcfdevice");
response.setResponseName(getCommandName());
this.setResponseObject(response);
} else {
final BigSwitchBcfDeviceVO bigswitchBcfDeviceVO = bcfElementService.addBigSwitchBcfDevice(this);
if (bigswitchBcfDeviceVO == null) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add BigSwitch BCF Controller device due to internal error.");
}
final BigSwitchBcfDeviceResponse response = bcfElementService.createBigSwitchBcfDeviceResponse(bigswitchBcfDeviceVO);
response.setObjectName("bigswitchbcfdevice");
response.setResponseName(getCommandName());
this.setResponseObject(response);
} catch (InvalidParameterValueException invalidParamExcp) {
throw new ServerApiException(ApiErrorCode.PARAM_ERROR, invalidParamExcp.getMessage());
throw new ServerApiException(ApiErrorCode.PARAM_ERROR, invalidParamExcp.getMessage(), invalidParamExcp);
} catch (CloudRuntimeException runtimeExcp) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, runtimeExcp.getMessage());
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, runtimeExcp.getMessage(), runtimeExcp);
}
}

@Override
public String getCommandName() {
return s_name;
return S_NAME;
}

@Override

@@ -133,7 +132,7 @@ public class AddBigSwitchBcfDeviceCmd extends BaseAsyncCmd {

@Override
public String getEventType() {
return BcfConstants.EVENT_EXTERNAL_BCF_CONTROLLER_ADD;
return BcfConstants.EVENT_BCF_CONTROLLER_ADD;
}

@Override
@@ -27,11 +27,11 @@ public class BcfConstants {
public static final String BIGSWITCH_BCF_DEVICE_ID = "bcfdeviceid";
public static final String BIGSWITCH_BCF_DEVICE_NAME = "bigswitchdevicename";
public static final String BIGSWITCH_BCF_DEVICE_NAT = "nat";
public static final String EVENT_EXTERNAL_BCF_CONTROLLER_ADD = "PHYSICAL.BCFCONTROLLER.ADD";
public static final String EVENT_EXTERNAL_BCF_CONTROLLER_DELETE = "PHYSICAL.BCFCONTROLLER.DELETE";
public static final String EVENT_BCF_CONTROLLER_ADD = "PHYSICAL.BCFCONTROLLER.ADD";
public static final String EVENT_BCF_CONTROLLER_DELETE = "PHYSICAL.BCFCONTROLLER.DELETE";

public static final Provider BigSwitchBcf = new Provider("BigSwitchBcf", true);
public static final Provider BIG_SWITCH_BCF = new Provider("BigSwitchBcf", true);

public static final NetworkDevice BigSwitchBcfDevice = new NetworkDevice("BigSwitchBcf", BigSwitchBcf.getName());
public static final NetworkDevice BIG_SWITCH_BCF_DEVICE = new NetworkDevice("BigSwitchBcf", BIG_SWITCH_BCF.getName());

}
@@ -41,9 +41,9 @@ import com.cloud.utils.exception.CloudRuntimeException;
@APICommand(name = "deleteBigSwitchBcfDevice", responseObject = SuccessResponse.class, description = " delete a BigSwitch BCF Controller device", since = "4.6.0",
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class DeleteBigSwitchBcfDeviceCmd extends BaseAsyncCmd {
private static final String s_name = "deletebigswitchbcfdeviceresponse";
private static final String S_NAME = "deletebigswitchbcfdeviceresponse";
@Inject
BigSwitchBcfElementService _bigswitchBcfElementService;
private BigSwitchBcfElementService bigswitchBcfElementService;

/////////////////////////////////////////////////////
//////////////// API parameters /////////////////////

@@ -72,24 +72,24 @@ public class DeleteBigSwitchBcfDeviceCmd extends BaseAsyncCmd {
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException,
ResourceAllocationException {
try {
boolean result = _bigswitchBcfElementService.deleteBigSwitchBcfDevice(this);
final boolean result = bigswitchBcfElementService.deleteBigSwitchBcfDevice(this);
if (result) {
SuccessResponse response = new SuccessResponse(getCommandName());
final SuccessResponse response = new SuccessResponse(getCommandName());
response.setResponseName(getCommandName());
this.setResponseObject(response);
} else {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete BigSwitch device.");
}
} catch (InvalidParameterValueException invalidParamExcp) {
throw new ServerApiException(ApiErrorCode.PARAM_ERROR, invalidParamExcp.getMessage());
throw new ServerApiException(ApiErrorCode.PARAM_ERROR, invalidParamExcp.getMessage(), invalidParamExcp);
} catch (CloudRuntimeException runtimeExcp) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, runtimeExcp.getMessage());
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, runtimeExcp.getMessage(), runtimeExcp);
}
}

@Override
public String getCommandName() {
return s_name;
return S_NAME;
}

@Override

@@ -99,7 +99,7 @@ public class DeleteBigSwitchBcfDeviceCmd extends BaseAsyncCmd {

@Override
public String getEventType() {
return BcfConstants.EVENT_EXTERNAL_BCF_CONTROLLER_DELETE;
return BcfConstants.EVENT_BCF_CONTROLLER_DELETE;
}

@Override
@@ -48,10 +48,10 @@ import com.cloud.utils.exception.CloudRuntimeException;
@APICommand(name = "listBigSwitchBcfDevices", responseObject = BigSwitchBcfDeviceResponse.class, description = "Lists BigSwitch BCF Controller devices", since = "4.6.0",
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class ListBigSwitchBcfDevicesCmd extends BaseListCmd {
public static final Logger s_logger = Logger.getLogger(ListBigSwitchBcfDevicesCmd.class.getName());
private static final String s_name = "listbigswitchbcfdeviceresponse";
public static final Logger S_LOGGER = Logger.getLogger(ListBigSwitchBcfDevicesCmd.class.getName());
private static final String S_NAME = "listbigswitchbcfdeviceresponse";
@Inject
BigSwitchBcfElementService _bigswitchBcfElementService;
private BigSwitchBcfElementService bigswitchBcfElementService;

/////////////////////////////////////////////////////
//////////////// API parameters /////////////////////

@@ -86,13 +86,13 @@ public class ListBigSwitchBcfDevicesCmd extends BaseListCmd {
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException,
ResourceAllocationException {
try {
List<BigSwitchBcfDeviceVO> bigswitchDevices = _bigswitchBcfElementService.listBigSwitchBcfDevices(this);
ListResponse<BigSwitchBcfDeviceResponse> response = new ListResponse<BigSwitchBcfDeviceResponse>();
List<BigSwitchBcfDeviceResponse> bigswitchDevicesResponse = new ArrayList<BigSwitchBcfDeviceResponse>();
final List<BigSwitchBcfDeviceVO> bigswitchDevices = bigswitchBcfElementService.listBigSwitchBcfDevices(this);
final ListResponse<BigSwitchBcfDeviceResponse> response = new ListResponse<BigSwitchBcfDeviceResponse>();
final List<BigSwitchBcfDeviceResponse> bigswitchDevicesResponse = new ArrayList<BigSwitchBcfDeviceResponse>();

if (bigswitchDevices != null && !bigswitchDevices.isEmpty()) {
for (BigSwitchBcfDeviceVO bigswitchDeviceVO : bigswitchDevices) {
BigSwitchBcfDeviceResponse bigswitchDeviceResponse = _bigswitchBcfElementService.createBigSwitchBcfDeviceResponse(bigswitchDeviceVO);
for (final BigSwitchBcfDeviceVO bigswitchDeviceVO : bigswitchDevices) {
final BigSwitchBcfDeviceResponse bigswitchDeviceResponse = bigswitchBcfElementService.createBigSwitchBcfDeviceResponse(bigswitchDeviceVO);
bigswitchDevicesResponse.add(bigswitchDeviceResponse);
}
}

@@ -101,14 +101,14 @@ public class ListBigSwitchBcfDevicesCmd extends BaseListCmd {
response.setResponseName(getCommandName());
this.setResponseObject(response);
} catch (InvalidParameterValueException invalidParamExcp) {
throw new ServerApiException(ApiErrorCode.PARAM_ERROR, invalidParamExcp.getMessage());
throw new ServerApiException(ApiErrorCode.PARAM_ERROR, invalidParamExcp.getMessage(), invalidParamExcp);
} catch (CloudRuntimeException runtimeExcp) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, runtimeExcp.getMessage());
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, runtimeExcp.getMessage(), runtimeExcp);
}
}

@Override
public String getCommandName() {
return s_name;
return S_NAME;
}
}
@ -65,35 +65,35 @@ public class BigSwitchBcfDeviceResponse extends BaseResponse {
return this.id;
}

public void setId(String bcfDeviceId) {
public void setId(final String bcfDeviceId) {
this.id = bcfDeviceId;
}

public void setPhysicalNetworkId(String physicalNetworkId) {
public void setPhysicalNetworkId(final String physicalNetworkId) {
this.physicalNetworkId = physicalNetworkId;
}

public void setProviderName(String providerName) {
public void setProviderName(final String providerName) {
this.providerName = providerName;
}

public void setDeviceName(String deviceName) {
public void setDeviceName(final String deviceName) {
this.deviceName = deviceName;
}

public void setHostName(String hostName) {
public void setHostName(final String hostName) {
this.hostName = hostName;
}

public void setUserName(String username) {
public void setUserName(final String username) {
this.username = username;
}

public void setPassword(String password) {
public void setPassword(final String password) {
this.password = password;
}

public void setNat(Boolean nat) {
public void setNat(final Boolean nat) {
this.nat = nat;
}
}

@ -73,8 +73,10 @@ public class BigSwitchBcfDeviceVO implements InternalIdentity {
this.uuid = UUID.randomUUID().toString();
}

public BigSwitchBcfDeviceVO(long hostId, long physicalNetworkId, String providerName, String deviceName,
String hostName, String username, String password, Boolean nat, String hash) {
public BigSwitchBcfDeviceVO(final long hostId, final long physicalNetworkId,
final String providerName, final String deviceName,final String hostName,
final String username, final String password, final Boolean nat,
final String hash) {
super();
this.hostId = hostId;
this.physicalNetworkId = physicalNetworkId;
@ -97,7 +99,7 @@ public class BigSwitchBcfDeviceVO implements InternalIdentity {
return uuid;
}

public void setUuid(String uuid) {
public void setUuid(final String uuid) {
this.uuid = uuid;
}

@ -121,7 +123,7 @@ public class BigSwitchBcfDeviceVO implements InternalIdentity {
return hash;
}

public void setHash(String h) {
public void setHash(final String h) {
hash = h;
}

@ -19,6 +19,8 @@

package com.cloud.network.bigswitch;

import java.util.Locale;

import com.cloud.network.vpc.NetworkACLItem;
import com.google.gson.annotations.SerializedName;

@ -41,19 +43,19 @@ public class AclData {
this.action = null;
this.ipProto = null;
this.source = new AclNetwork();
this.destination = new AclNetwork();;
this.destination = new AclNetwork();
}

public class AclNetwork{
@SerializedName("cidr") private String cidr;
@SerializedName("port") private Integer port;
@SerializedName("cidr") final private String cidr;
@SerializedName("port") final private Integer port;

public AclNetwork(){
this.cidr = null;
this.port = null;
}

public AclNetwork(String cidr, Integer port){
public AclNetwork(final String cidr, final Integer port){
this.cidr = cidr;
this.port = port;
}
@ -62,19 +64,19 @@ public class AclData {
public String getId() {
return id;
}
public void setId(String id) {
public void setId(final String id) {
this.id = id;
}
public int getPriority() {
return priority;
}
public void setPriority(int priority) {
public void setPriority(final int priority) {
this.priority = priority;
}
public String getAction() {
return action;
}
public void setAction(String action) {
public void setAction(final String action) {
if(action.equalsIgnoreCase(NetworkACLItem.Action.Allow.name())){
this.action = "permit";
} else {
@ -84,9 +86,9 @@ public class AclData {
public String getIpProto() {
return ipProto;
}
public void setIpProto(String ipProto) {
if (!ipProto.equalsIgnoreCase("all")){
switch(ipProto.toLowerCase()){
public void setIpProto(final String ipProto) {
if (ipProto != null && !ipProto.equalsIgnoreCase("all")){
switch(ipProto.toLowerCase(Locale.ENGLISH)){
case "tcp":
this.ipProto = "6";
break;
@ -96,19 +98,21 @@ public class AclData {
case "icmp":
this.ipProto = "1";
break;
default:
throw new IllegalArgumentException("Protocol in ACL rule not supported");
}
}
}
public AclNetwork getSource() {
return source;
}
public void setSource(AclNetwork source) {
public void setSource(final AclNetwork source) {
this.source = source;
}
public AclNetwork getDestination() {
return destination;
}
public void setDestination(AclNetwork destination) {
public void setDestination(final AclNetwork destination) {
this.destination = destination;
}
}

@ -20,6 +20,7 @@

package com.cloud.network.bigswitch;

import java.util.ArrayList;
import java.util.List;

import com.google.gson.annotations.SerializedName;

@ -28,7 +29,7 @@ import com.google.gson.annotations.SerializedName;
* in CreateBcfAttachmentCommand
*/
public class AttachmentData {
@SerializedName("port") private Attachment attachment;
@SerializedName("port") final private Attachment attachment;

public Attachment getAttachment() {
return this.attachment;
@ -42,9 +43,9 @@ public class AttachmentData {
@SerializedName("id") private String id;
@SerializedName("tenant_name") private String tenantName;
@SerializedName("vlan") private Integer vlan;
@SerializedName("fixed_ips") private ArrayList<IpAddress> fixedIps;
@SerializedName("fixed_ips") final private List<IpAddress> fixedIps;
@SerializedName("mac_address") private String macAddress;
@SerializedName("bound_segment") private BoundSegment boundSegment;
@SerializedName("bound_segment") final private BoundSegment boundSegment;
@SerializedName("binding:host_id") private String hostId;

public Attachment(){
@ -65,14 +66,14 @@ public class AttachmentData {
}

public class IpAddress {
@SerializedName("ip_address") private String ipAddress;
@SerializedName("ip_address") private String address;

public IpAddress(String ipAddr) {
this.ipAddress = ipAddr;
public IpAddress(final String ipAddr) {
this.address = ipAddr;
}

public String getIpAddress(){
return ipAddress;
return address;
}
}

@ -82,7 +83,7 @@ public class AttachmentData {
return tenantName;
}

public void setTenantName(String tenantName) {
public void setTenantName(final String tenantName) {
this.tenantName = tenantName;
}

@ -90,7 +91,7 @@ public class AttachmentData {
return id;
}

public void setId(String id) {
public void setId(final String id) {
this.id = id;
}

@ -98,7 +99,7 @@ public class AttachmentData {
return hostId;
}

public void setHostId(String hostId) {
public void setHostId(final String hostId) {
this.hostId = hostId;
}

@ -106,16 +107,16 @@ public class AttachmentData {
return vlan;
}

public void setVlan(Integer vlan) {
public void setVlan(final Integer vlan) {
this.vlan = vlan;
this.boundSegment.setSegmentationId(vlan);
}

public ArrayList<IpAddress> getIpv4List() {
public List<IpAddress> getIpv4List() {
return fixedIps;
}

public void addIpv4(String ipv4) {
public void addIpv4(final String ipv4) {
this.fixedIps.add(new IpAddress(ipv4));
}

@ -123,7 +124,7 @@ public class AttachmentData {
return macAddress;
}

public void setMac(String mac) {
public void setMac(final String mac) {
this.macAddress = mac;
}

@ -135,7 +136,7 @@ public class AttachmentData {
return state;
}

public void setState(String state) {
public void setState(final String state) {
this.state = state;
}
}

@ -53,25 +53,25 @@ import com.google.gson.Gson;
|
||||
import com.google.gson.reflect.TypeToken;
|
||||
|
||||
public class BigSwitchBcfApi {
|
||||
private static final Logger s_logger = Logger.getLogger(BigSwitchBcfApi.class);
|
||||
private final static String s_protocol = "https";
|
||||
private final static String s_nsBaseUri = "/networkService/v1.1";
|
||||
private static final Logger S_LOGGER = Logger.getLogger(BigSwitchBcfApi.class);
|
||||
private final static String S_PROTOCOL = "https";
|
||||
private final static String S_NS_BASE_URL = "/networkService/v1.1";
|
||||
private final static String CONTENT_TYPE = "Content-type";
|
||||
private final static String ACCEPT = "Accept";
|
||||
private final static String CONTENT_JSON = "application/json";
|
||||
private final static String HTTP_HEADER_INSTANCE_ID = "Instance-ID";
|
||||
private final static String CLOUDSTACK_INSTANCE_ID = "cloudstack";
|
||||
private final static String HASH_MATCH = "X-BSN-BVS-HASH-MATCH";
|
||||
private final static MultiThreadedHttpConnectionManager s_httpClientManager = new MultiThreadedHttpConnectionManager();
|
||||
private final static MultiThreadedHttpConnectionManager S_HTTP_CLIENT_MANAGER = new MultiThreadedHttpConnectionManager();
|
||||
|
||||
private String _host;
|
||||
private String _username;
|
||||
private String _password;
|
||||
private String _hash;
|
||||
private String _zoneId;
|
||||
private Boolean _nat;
|
||||
private String host;
|
||||
private String username;
|
||||
private String password;
|
||||
private String hash;
|
||||
private String zoneId;
|
||||
private Boolean nat;
|
||||
|
||||
private boolean _isMaster = false;
|
||||
private boolean isMaster;
|
||||
|
||||
private int _port = 8000;
|
||||
|
||||
@ -85,15 +85,15 @@ public class BigSwitchBcfApi {
|
||||
* in the unittests.
|
||||
*/
|
||||
protected HttpClient createHttpClient() {
|
||||
return new HttpClient(s_httpClientManager);
|
||||
return new HttpClient(S_HTTP_CLIENT_MANAGER);
|
||||
}
|
||||
|
||||
protected HttpMethod createMethod(String type, String uri, int port) throws BigSwitchBcfApiException {
|
||||
protected HttpMethod createMethod(final String type, final String uri, final int port) throws BigSwitchBcfApiException {
|
||||
String url;
|
||||
try {
|
||||
url = new URL(s_protocol, _host, port, uri).toString();
|
||||
url = new URL(S_PROTOCOL, host, port, uri).toString();
|
||||
} catch (MalformedURLException e) {
|
||||
s_logger.error("Unable to build Big Switch API URL", e);
|
||||
S_LOGGER.error("Unable to build Big Switch API URL", e);
|
||||
throw new BigSwitchBcfApiException("Unable to build Big Switch API URL", e);
|
||||
}
|
||||
|
||||
@ -118,7 +118,7 @@ public class BigSwitchBcfApi {
|
||||
// Cast to ProtocolSocketFactory to avoid the deprecated constructor with the SecureProtocolSocketFactory parameter
|
||||
Protocol.registerProtocol("https", new Protocol("https", (ProtocolSocketFactory) new TrustingProtocolSocketFactory(), _port));
|
||||
} catch (IOException e) {
|
||||
s_logger.warn("Failed to register the TrustingProtocolSocketFactory, falling back to default SSLSocketFactory", e);
|
||||
S_LOGGER.warn("Failed to register the TrustingProtocolSocketFactory, falling back to default SSLSocketFactory", e);
|
||||
}
|
||||
}
|
||||
|
||||
@ -126,99 +126,99 @@ public class BigSwitchBcfApi {
|
||||
* Setter used by UI to set BSN controller address
|
||||
* @param address
|
||||
*/
|
||||
public void setControllerAddress(String address) {
|
||||
this._host = address;
|
||||
public void setControllerAddress(final String address) {
|
||||
this.host = address;
|
||||
}
|
||||
|
||||
/**
|
||||
* Setter used by UI to set BSN controller user name
|
||||
* @param username
|
||||
*/
|
||||
public void setControllerUsername(String username) {
|
||||
this._username = username;
|
||||
public void setControllerUsername(final String username) {
|
||||
this.username = username;
|
||||
}
|
||||
|
||||
/**
|
||||
* Setter used by UI to set BSN controller password
|
||||
* @param password
|
||||
*/
|
||||
public void setControllerPassword(String password) {
|
||||
this._password = password;
|
||||
public void setControllerPassword(final String password) {
|
||||
this.password = password;
|
||||
}
|
||||
|
||||
/**
|
||||
* Setter used by UI to set BSN controller NAT mode
|
||||
* @param nat
|
||||
*/
|
||||
public void setControllerNat(Boolean nat) {
|
||||
this._nat = nat;
|
||||
public void setControllerNat(final Boolean nat) {
|
||||
this.nat = nat;
|
||||
}
|
||||
|
||||
public boolean isNatEnabled() {
|
||||
return this._nat;
|
||||
return this.nat;
|
||||
}
|
||||
|
||||
/**
|
||||
* Setter used by UI to set BSN controller password
|
||||
* @param password
|
||||
*/
|
||||
public void setZoneId(String zoneId) {
|
||||
this._zoneId = zoneId;
|
||||
public void setZoneId(final String zoneId) {
|
||||
this.zoneId = zoneId;
|
||||
}
|
||||
|
||||
public String createNetwork(NetworkData network) throws BigSwitchBcfApiException {
|
||||
String uri = s_nsBaseUri + "/tenants/" + network.getNetwork().getTenantId() + "/networks";
|
||||
public String createNetwork(final NetworkData network) throws BigSwitchBcfApiException {
|
||||
String uri = S_NS_BASE_URL + "/tenants/" + network.getNetwork().getTenantId() + "/networks";
|
||||
return executeCreateObject(network, uri, Collections.<String, String> emptyMap());
|
||||
}
|
||||
|
||||
public String deleteNetwork(String tenantId, String networkId) throws BigSwitchBcfApiException {
|
||||
String uri = s_nsBaseUri + "/tenants/" + tenantId + "/networks/" + networkId;
|
||||
public String deleteNetwork(final String tenantId, final String networkId) throws BigSwitchBcfApiException {
|
||||
String uri = S_NS_BASE_URL + "/tenants/" + tenantId + "/networks/" + networkId;
|
||||
return executeDeleteObject(uri);
|
||||
}
|
||||
|
||||
public String createAttachment(String tenantId, String networkId, AttachmentData attachment) throws BigSwitchBcfApiException {
|
||||
String uri = s_nsBaseUri + "/tenants/" + tenantId + "/networks/" + networkId + "/ports/" + attachment.getAttachment().getId() + "/attachment";
|
||||
public String createAttachment(final String tenantId, final String networkId,
|
||||
final AttachmentData attachment) throws BigSwitchBcfApiException {
|
||||
String uri = S_NS_BASE_URL + "/tenants/" + tenantId + "/networks/" + networkId + "/ports/" + attachment.getAttachment().getId() + "/attachment";
|
||||
return executeCreateObject(attachment, uri, Collections.<String, String> emptyMap());
|
||||
}
|
||||
|
||||
public String modifyAttachment(String tenantId, String networkId, AttachmentData attachment) throws BigSwitchBcfApiException {
|
||||
String uri = s_nsBaseUri + "/tenants/" + tenantId + "/networks/" + networkId + "/ports/" + attachment.getAttachment().getId() + "/attachment";
|
||||
public String modifyAttachment(final String tenantId, final String networkId,
|
||||
final AttachmentData attachment) throws BigSwitchBcfApiException {
|
||||
String uri = S_NS_BASE_URL + "/tenants/" + tenantId + "/networks/" + networkId + "/ports/" + attachment.getAttachment().getId() + "/attachment";
|
||||
return executeUpdateObject(attachment, uri, Collections.<String, String> emptyMap());
|
||||
}
|
||||
|
||||
public String deleteAttachment(String tenantId, String networkId, String attachmentId) throws BigSwitchBcfApiException {
|
||||
String uri = s_nsBaseUri + "/tenants/" + tenantId + "/networks/" + networkId + "/ports/" + attachmentId + "/attachment";
|
||||
public String deleteAttachment(final String tenantId, final String networkId,
|
||||
final String attachmentId) throws BigSwitchBcfApiException {
|
||||
String uri = S_NS_BASE_URL + "/tenants/" + tenantId + "/networks/" + networkId + "/ports/" + attachmentId + "/attachment";
|
||||
return executeDeleteObject(uri);
|
||||
}
|
||||
|
||||
public String createRouter(String tenantId, RouterData router) throws BigSwitchBcfApiException {
|
||||
String uri = s_nsBaseUri + "/tenants/" + tenantId + "/routers";
|
||||
public String createRouter(final String tenantId, final RouterData router) throws BigSwitchBcfApiException {
|
||||
String uri = S_NS_BASE_URL + "/tenants/" + tenantId + "/routers";
|
||||
return executeCreateObject(router, uri, Collections.<String, String> emptyMap());
|
||||
}
|
||||
|
||||
public String modifyRouter(String tenantId, RouterData router) throws BigSwitchBcfApiException,
|
||||
public String modifyRouter(final String tenantId, final RouterData router) throws BigSwitchBcfApiException,
|
||||
IllegalArgumentException{
|
||||
String uri = s_nsBaseUri + "/tenants/" + tenantId + "/routers";
|
||||
String uri = S_NS_BASE_URL + "/tenants/" + tenantId + "/routers";
|
||||
return executeCreateObject(router, uri, Collections.<String, String> emptyMap());
|
||||
}
|
||||
|
||||
public String createRouterInterface(String tenantId, String routerId, RouterInterfaceData routerInterface) throws BigSwitchBcfApiException {
|
||||
String uri = s_nsBaseUri + "/tenants/" + tenantId + "/routers/" + routerId + "/interfaces";
|
||||
public String createRouterInterface(final String tenantId, final String routerId,
|
||||
final RouterInterfaceData routerInterface) throws BigSwitchBcfApiException {
|
||||
String uri = S_NS_BASE_URL + "/tenants/" + tenantId + "/routers/" + routerId + "/interfaces";
|
||||
return executeCreateObject(routerInterface, uri, Collections.<String, String> emptyMap());
|
||||
}
|
||||
|
||||
public String createFloatingIp(String tenantId, FloatingIpData fip) throws BigSwitchBcfApiException {
|
||||
String uri = s_nsBaseUri + "/tenants/" + tenantId + "/floatingips";
|
||||
public String createFloatingIp(final String tenantId, final FloatingIpData fip) throws BigSwitchBcfApiException {
|
||||
String uri = S_NS_BASE_URL + "/tenants/" + tenantId + "/floatingips";
|
||||
return executeCreateObject(fip, uri, Collections.<String, String> emptyMap());
|
||||
}
|
||||
|
||||
public String deleteFloatingIp(String tenantId, String fipId) throws BigSwitchBcfApiException {
|
||||
String uri = s_nsBaseUri + "/tenants/" + tenantId + "/floatingips/" + fipId;
|
||||
public String deleteFloatingIp(final String tenantId, final String fipId) throws BigSwitchBcfApiException {
|
||||
String uri = S_NS_BASE_URL + "/tenants/" + tenantId + "/floatingips/" + fipId;
|
||||
return executeDeleteObject(uri);
|
||||
}
|
||||
|
||||
public ControlClusterStatus getControlClusterStatus() throws BigSwitchBcfApiException {
|
||||
String uri = s_nsBaseUri + "/health";
|
||||
String uri = S_NS_BASE_URL + "/health";
|
||||
ControlClusterStatus ccs = executeRetrieveObject(new TypeToken<ControlClusterStatus>() {
|
||||
}.getType(), uri, null);
|
||||
ccs.setStatus(true);
|
||||
@ -226,7 +226,7 @@ public class BigSwitchBcfApi {
|
||||
}
|
||||
|
||||
public Capabilities getCapabilities() throws BigSwitchBcfApiException {
|
||||
String uri = s_nsBaseUri + "/capabilities";
|
||||
String uri = S_NS_BASE_URL + "/capabilities";
|
||||
List<String> capslist = executeRetrieveObject(new TypeToken<List<String>>() {
|
||||
}.getType(), uri, null);
|
||||
Capabilities caps = new Capabilities();
|
||||
@ -234,28 +234,28 @@ public class BigSwitchBcfApi {
|
||||
return caps;
|
||||
}
|
||||
|
||||
public String syncTopology(TopologyData topo) throws BigSwitchBcfApiException {
|
||||
String uri = s_nsBaseUri + "/topology";
|
||||
public String syncTopology(final TopologyData topo) throws BigSwitchBcfApiException {
|
||||
String uri = S_NS_BASE_URL + "/topology";
|
||||
return executeCreateObject(topo, uri, Collections.<String, String> emptyMap());
|
||||
}
|
||||
|
||||
public ControllerData getControllerData() {
|
||||
return new ControllerData(_host, _isMaster);
|
||||
return new ControllerData(host, isMaster);
|
||||
}
|
||||
|
||||
private void checkInvariants() throws BigSwitchBcfApiException{
|
||||
if (_host == null || _host.isEmpty()) {
|
||||
if (host == null || host.isEmpty()) {
|
||||
throw new BigSwitchBcfApiException("Hostname is null or empty");
|
||||
}
|
||||
if (_username == null || _username.isEmpty()){
|
||||
if (username == null || username.isEmpty()){
|
||||
throw new BigSwitchBcfApiException("Username is null or empty");
|
||||
}
|
||||
if (_password == null || _password.isEmpty()){
|
||||
if (password == null || password.isEmpty()){
|
||||
throw new BigSwitchBcfApiException("Password is null or empty");
|
||||
}
|
||||
}
|
||||
|
||||
private String checkResponse(HttpMethodBase m, String errorMessageBase) throws BigSwitchBcfApiException,
|
||||
private String checkResponse(final HttpMethodBase m, final String errorMessageBase) throws BigSwitchBcfApiException,
|
||||
IllegalArgumentException{
|
||||
String customErrorMsg = null;
|
||||
if (m.getStatusCode() == HttpStatus.SC_OK) {
|
||||
@ -273,7 +273,7 @@ public class BigSwitchBcfApi {
|
||||
throw new BigSwitchBcfApiException("BCF topology sync required", true);
|
||||
}
|
||||
if (m.getStatusCode() == HttpStatus.SC_SEE_OTHER) {
|
||||
_isMaster = false;
|
||||
isMaster = false;
|
||||
set_hash(HASH_IGNORE);
|
||||
return HASH_IGNORE;
|
||||
}
|
||||
@ -288,24 +288,25 @@ public class BigSwitchBcfApi {
|
||||
}
|
||||
String errorMessage = responseToErrorMessage(m);
|
||||
m.releaseConnection();
|
||||
s_logger.error(errorMessageBase + errorMessage);
|
||||
S_LOGGER.error(errorMessageBase + errorMessage);
|
||||
throw new BigSwitchBcfApiException(errorMessageBase + errorMessage + customErrorMsg);
|
||||
}
|
||||
|
||||
private void setHttpHeader(HttpMethodBase m) {
|
||||
private void setHttpHeader(final HttpMethodBase m) {
|
||||
m.setRequestHeader(CONTENT_TYPE, CONTENT_JSON);
|
||||
m.setRequestHeader(ACCEPT, CONTENT_JSON);
|
||||
m.setRequestHeader(HTTP_HEADER_INSTANCE_ID, CLOUDSTACK_INSTANCE_ID + "-" + _zoneId);
|
||||
if (_hash != "" ) {
|
||||
m.setRequestHeader(HASH_MATCH, _hash);
|
||||
m.setRequestHeader(HTTP_HEADER_INSTANCE_ID, CLOUDSTACK_INSTANCE_ID + "-" + zoneId);
|
||||
if (hash != "" ) {
|
||||
m.setRequestHeader(HASH_MATCH, hash);
|
||||
}
|
||||
|
||||
String authString = _username + ":" + _password;
|
||||
String authString = username + ":" + password;
|
||||
String encodedAuthString = "Basic " + Base64.encodeBase64String(authString.getBytes(Charset.forName("UTF-8")));
|
||||
m.setRequestHeader("Authorization", encodedAuthString);
|
||||
}
|
||||
|
||||
protected <T> String executeUpdateObject(T newObject, String uri, Map<String, String> parameters) throws BigSwitchBcfApiException,
|
||||
protected <T> String executeUpdateObject(final T newObject, final String uri,
|
||||
final Map<String, String> parameters) throws BigSwitchBcfApiException,
|
||||
IllegalArgumentException{
|
||||
checkInvariants();
|
||||
|
||||
@ -328,7 +329,8 @@ public class BigSwitchBcfApi {
|
||||
return hash;
|
||||
}
|
||||
|
||||
protected <T> String executeCreateObject(T newObject, String uri, Map<String, String> parameters) throws BigSwitchBcfApiException {
|
||||
protected <T> String executeCreateObject(final T newObject, final String uri,
|
||||
final Map<String, String> parameters) throws BigSwitchBcfApiException {
|
||||
checkInvariants();
|
||||
|
||||
PostMethod pm = (PostMethod)createMethod("post", uri, _port);
|
||||
@ -350,7 +352,7 @@ public class BigSwitchBcfApi {
|
||||
return hash;
|
||||
}
|
||||
|
||||
protected String executeDeleteObject(String uri) throws BigSwitchBcfApiException {
|
||||
protected String executeDeleteObject(final String uri) throws BigSwitchBcfApiException {
|
||||
checkInvariants();
|
||||
|
||||
DeleteMethod dm = (DeleteMethod)createMethod("delete", uri, _port);
|
||||
@ -367,8 +369,8 @@ public class BigSwitchBcfApi {
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
protected <T> T executeRetrieveObject(Type returnObjectType,
|
||||
String uri, Map<String, String> parameters) throws BigSwitchBcfApiException {
|
||||
protected <T> T executeRetrieveObject(final Type returnObjectType,
|
||||
final String uri, final Map<String, String> parameters) throws BigSwitchBcfApiException {
|
||||
checkInvariants();
|
||||
|
||||
GetMethod gm = (GetMethod)createMethod("get", uri, _port);
|
||||
@ -392,41 +394,41 @@ public class BigSwitchBcfApi {
|
||||
// CAUTIOUS: Safety margin of 2048 characters - extend if needed.
|
||||
returnValue = (T)gson.fromJson(gm.getResponseBodyAsString(2048), returnObjectType);
|
||||
} catch (IOException e) {
|
||||
s_logger.error("IOException while retrieving response body", e);
|
||||
S_LOGGER.error("IOException while retrieving response body", e);
|
||||
throw new BigSwitchBcfApiException(e);
|
||||
} finally {
|
||||
gm.releaseConnection();
|
||||
}
|
||||
if(returnValue instanceof ControlClusterStatus) {
|
||||
if(hash == HASH_CONFLICT) {
|
||||
_isMaster = true;
|
||||
isMaster = true;
|
||||
((ControlClusterStatus) returnValue).setTopologySyncRequested(true);
|
||||
} else if (hash != HASH_IGNORE && !_isMaster) {
|
||||
_isMaster = true;
|
||||
} else if (hash != HASH_IGNORE && !isMaster) {
|
||||
isMaster = true;
|
||||
((ControlClusterStatus) returnValue).setTopologySyncRequested(true);
|
||||
}
|
||||
}
|
||||
return returnValue;
|
||||
}
|
||||
|
||||
protected void executeMethod(HttpMethodBase method) throws BigSwitchBcfApiException {
|
||||
protected void executeMethod(final HttpMethodBase method) throws BigSwitchBcfApiException {
|
||||
try {
|
||||
_client.executeMethod(method);
|
||||
if (method.getStatusCode() == HttpStatus.SC_UNAUTHORIZED) {
|
||||
method.releaseConnection();
|
||||
}
|
||||
} catch (HttpException e) {
|
||||
s_logger.error("HttpException caught while trying to connect to the BigSwitch Controller", e);
|
||||
S_LOGGER.error("HttpException caught while trying to connect to the BigSwitch Controller", e);
|
||||
method.releaseConnection();
|
||||
throw new BigSwitchBcfApiException("API call to BigSwitch Controller Failed", e);
|
||||
} catch (IOException e) {
|
||||
s_logger.error("IOException caught while trying to connect to the BigSwitch Controller", e);
|
||||
S_LOGGER.error("IOException caught while trying to connect to the BigSwitch Controller", e);
|
||||
method.releaseConnection();
|
||||
throw new BigSwitchBcfApiException("API call to BigSwitch Controller Failed", e);
|
||||
}
|
||||
}
|
||||
|
||||
private String responseToErrorMessage(HttpMethodBase method) {
|
||||
private String responseToErrorMessage(final HttpMethodBase method) {
|
||||
assert method.isRequestSent() : "no use getting an error message unless the request is sent";
|
||||
|
||||
if ("text/html".equals(method.getResponseHeader(CONTENT_TYPE).getValue())) {
|
||||
@ -436,7 +438,7 @@ public class BigSwitchBcfApi {
|
||||
try {
|
||||
return method.getResponseBodyAsString(2048);
|
||||
} catch (IOException e) {
|
||||
s_logger.debug("Error while loading response body", e);
|
||||
S_LOGGER.debug("Error while loading response body", e);
|
||||
}
|
||||
}
|
||||
|
||||
@ -449,11 +451,11 @@ public class BigSwitchBcfApi {
|
||||
}
|
||||
|
||||
public String get_hash() {
|
||||
return _hash;
|
||||
return hash;
|
||||
}
|
||||
|
||||
public void set_hash(String hash) {
|
||||
this._hash = hash;
|
||||
public void set_hash(final String hash) {
|
||||
this.hash = hash;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@ -177,7 +177,7 @@ public class BigSwitchBcfUtils {
|
||||
|
||||
// handle external network first, only if NAT service is enabled
|
||||
if(networks != null) {
|
||||
if(!(networks.isEmpty()) && isNatEnabled()!=null && isNatEnabled()){
|
||||
if(!(networks.isEmpty()) && isNatEnabled()){
|
||||
// get public net info - needed to set up source nat gateway
|
||||
NetworkVO pubNet = getPublicNetwork(physicalNetworkId);
|
||||
|
||||
@ -380,7 +380,7 @@ public class BigSwitchBcfUtils {
|
||||
Integer port = rule.getSourcePortStart();
|
||||
fwCidrList = _fwCidrsDao.listByFirewallRuleId(rule.getId());
|
||||
if(fwCidrList != null){
|
||||
if(fwCidrList.size()>1 || rule.getSourcePortEnd()!=port){
|
||||
if(fwCidrList.size()>1 || !rule.getSourcePortEnd().equals(port)){
|
||||
continue;
|
||||
} else {
|
||||
cidr = fwCidrList.get(0).getCidr();
|
||||
@ -414,7 +414,7 @@ public class BigSwitchBcfUtils {
|
||||
Integer port = item.getSourcePortStart(); // currently BCF supports single port policy
|
||||
aclCidrList = _aclItemCidrsDao.listByNetworkACLItemId(item.getId());
|
||||
if(aclCidrList != null){
|
||||
if(aclCidrList.size()>1 || item.getSourcePortEnd()!=port){
|
||||
if(aclCidrList.size()>1 || !item.getSourcePortEnd().equals(port)){
|
||||
continue;
|
||||
} else {
|
||||
cidr = aclCidrList.get(0).getCidr();
|
||||
@ -440,7 +440,7 @@ public class BigSwitchBcfUtils {
|
||||
|
||||
public String syncTopologyToBcfHost(HostVO bigswitchBcfHost){
|
||||
SyncBcfTopologyCommand syncCmd;
|
||||
if(isNatEnabled()!=null && isNatEnabled()){
|
||||
if(isNatEnabled()){
|
||||
syncCmd = new SyncBcfTopologyCommand(true, true);
|
||||
} else {
|
||||
syncCmd = new SyncBcfTopologyCommand(true, false);
|
||||
@ -486,7 +486,7 @@ public class BigSwitchBcfUtils {
|
||||
}
|
||||
|
||||
String newHash = answer.getHash();
|
||||
if (cmd.is_topologySyncRequested()) {
|
||||
if (cmd.isTopologySyncRequested()) {
|
||||
newHash = syncTopologyToBcfHost(cluster.getMaster());
|
||||
}
|
||||
if(newHash != null){
|
||||
@ -524,7 +524,7 @@ public class BigSwitchBcfUtils {
|
||||
if(devices != null && !devices.isEmpty()){
|
||||
return devices.get(0).getNat();
|
||||
} else {
|
||||
return null;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@ -532,7 +532,7 @@ public class BigSwitchBcfUtils {
|
||||
if(!IPAddress.isValidIPv4(maskString)){
|
||||
return null;
|
||||
}
|
||||
String[] octets = maskString.split(".");
|
||||
String[] octets = maskString.split("\\.");
|
||||
Integer bits = 0;
|
||||
for (String o: octets){
|
||||
switch(o){
|
||||
|
||||
@ -208,7 +208,7 @@ NetworkACLServiceProvider, FirewallServiceProvider, ResourceStateAdapter {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), service, BcfConstants.BigSwitchBcf)) {
|
||||
if (!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), service, BcfConstants.BIG_SWITCH_BCF)) {
|
||||
s_logger.debug("BigSwitchBcfElement can't provide the " + service.getName() + " service on network " + network.getDisplayText());
|
||||
return false;
|
||||
}
|
||||
@ -410,7 +410,7 @@ NetworkACLServiceProvider, FirewallServiceProvider, ResourceStateAdapter {
|
||||
|
||||
ServerResource resource = new BigSwitchBcfResource();
|
||||
|
||||
final String deviceName = BcfConstants.BigSwitchBcf.getName();
|
||||
final String deviceName = BcfConstants.BIG_SWITCH_BCF.getName();
|
||||
NetworkDevice networkDevice = NetworkDevice.getNetworkDevice(deviceName);
|
||||
final Long physicalNetworkId = cmd.getPhysicalNetworkId();
|
||||
final String hostname = cmd.getHost();
|
||||
@ -452,7 +452,6 @@ NetworkACLServiceProvider, FirewallServiceProvider, ResourceStateAdapter {
|
||||
}
|
||||
|
||||
Boolean natNow = _bcfUtils.isNatEnabled();
|
||||
if( natNow != null)
|
||||
if (!nat && natNow){
|
||||
throw new CloudRuntimeException("NAT is enabled in existing controller. Enable NAT for new controller or remove existing controller first.");
|
||||
} else if (nat && !natNow){
|
||||
@ -684,7 +683,7 @@ NetworkACLServiceProvider, FirewallServiceProvider, ResourceStateAdapter {
|
||||
}
|
||||
cidrList = r.getSourceCidrList();
|
||||
if(cidrList != null){
|
||||
if(cidrList.size()>1 || r.getSourcePortEnd()!=r.getSourcePortStart()){
|
||||
if(cidrList.size()>1 || !r.getSourcePortEnd().equals(r.getSourcePortStart())){
|
||||
throw new ResourceUnavailableException("One CIDR and one port only please.",
|
||||
Network.class, network.getId());
|
||||
} else {
|
||||
@ -718,7 +717,7 @@ NetworkACLServiceProvider, FirewallServiceProvider, ResourceStateAdapter {
|
||||
}
|
||||
cidrList = r.getSourceCidrList();
|
||||
if(cidrList != null){
|
||||
if(cidrList.size()>1 || r.getSourcePortEnd()!=r.getSourcePortStart()){
|
||||
if(cidrList.size()>1 || !r.getSourcePortEnd().equals(r.getSourcePortStart())){
|
||||
throw new ResourceUnavailableException("One CIDR and one port only please.",
|
||||
Network.class, network.getId());
|
||||
} else {
|
||||
|
||||
@ -312,7 +312,7 @@ public class BigSwitchBcfResource extends ManagerBase implements ServerResource
|
||||
return new BcfAnswer(cmd, true, "Segment " + network.getNetwork().getId() + " created", hash);
|
||||
} catch (BigSwitchBcfApiException e) {
|
||||
if (e.is_topologySyncRequested()) {
|
||||
cmd.set_topologySyncRequested(true);
|
||||
cmd.setTopologySyncRequested(true);
|
||||
return new BcfAnswer(cmd, true, "Segment " + network.getNetwork().getId() + " created; topology sync required.");
|
||||
} else {
|
||||
if (numRetries > 0) {
|
||||
@ -327,11 +327,11 @@ public class BigSwitchBcfResource extends ManagerBase implements ServerResource
|
||||
|
||||
private Answer executeRequest(DeleteBcfSegmentCommand cmd, int numRetries) {
|
||||
try {
|
||||
String hash = _bigswitchBcfApi.deleteNetwork(cmd.get_tenantUuid(), cmd.getNetworkUuid());
|
||||
String hash = _bigswitchBcfApi.deleteNetwork(cmd.getTenantUuid(), cmd.getNetworkUuid());
|
||||
return new BcfAnswer(cmd, true, "Segment " + cmd.getNetworkUuid() + " deleted", hash);
|
||||
} catch (BigSwitchBcfApiException e) {
|
||||
if (e.is_topologySyncRequested()) {
|
||||
cmd.set_topologySyncRequested(true);
|
||||
cmd.setTopologySyncRequested(true);
|
||||
return new BcfAnswer(cmd, true, "Segment " + cmd.getNetworkUuid() + " deleted; topology sync required.");
|
||||
} else {
|
||||
if (numRetries > 0) {
|
||||
@ -357,7 +357,7 @@ public class BigSwitchBcfResource extends ManagerBase implements ServerResource
|
||||
return new BcfAnswer(cmd, true, "network attachment " + cmd.getPortId() + " created", hash);
|
||||
} catch (BigSwitchBcfApiException e) {
|
||||
if (e.is_topologySyncRequested()) {
|
||||
cmd.set_topologySyncRequested(true);
|
||||
cmd.setTopologySyncRequested(true);
|
||||
return new BcfAnswer(cmd, true, "network attachment " + cmd.getPortId() + " created; topology sync required.");
|
||||
} else {
|
||||
if (numRetries > 0) {
|
||||
@ -376,7 +376,7 @@ public class BigSwitchBcfResource extends ManagerBase implements ServerResource
|
||||
return new BcfAnswer(cmd, true, "network attachment " + nicName + " deleted", hash);
|
||||
} catch (BigSwitchBcfApiException e) {
|
||||
if (e.is_topologySyncRequested()) {
|
||||
cmd.set_topologySyncRequested(true);
|
||||
cmd.setTopologySyncRequested(true);
|
||||
return new BcfAnswer(cmd, true, "network attachment " + nicName + " deleted; topology sync required.");
|
||||
} else {
|
||||
if (numRetries > 0) {
|
||||
@ -398,7 +398,7 @@ public class BigSwitchBcfResource extends ManagerBase implements ServerResource
|
||||
return new BcfAnswer(cmd, true, "Network attachment " + cmd.getAttachmentId() + " updated", hash);
|
||||
} catch (BigSwitchBcfApiException e) {
|
||||
if (e.is_topologySyncRequested()) {
|
||||
cmd.set_topologySyncRequested(true);
|
||||
cmd.setTopologySyncRequested(true);
|
||||
return new BcfAnswer(cmd, true, "Network attachment " + cmd.getAttachmentId() + " updated; topology sync required.");
|
||||
} else {
|
||||
if (numRetries > 0) {
|
||||
@ -412,21 +412,21 @@ public class BigSwitchBcfResource extends ManagerBase implements ServerResource
|
||||
|
||||
private Answer executeRequest(CreateBcfStaticNatCommand cmd, int numRetries) {
|
||||
FloatingIpData fip = new FloatingIpData();
|
||||
fip.setTenantId(cmd.get_tenantId());
|
||||
fip.setNetworkId(cmd.get_networkId());
|
||||
fip.setFixedIp(cmd.get_privateIp());
|
||||
fip.setFloatingIpAndId(cmd.get_publicIp());
|
||||
fip.setMac(cmd.get_mac());
|
||||
fip.setTenantId(cmd.getTenantId());
|
||||
fip.setNetworkId(cmd.getNetworkId());
|
||||
fip.setFixedIp(cmd.getPrivateIp());
|
||||
fip.setFloatingIpAndId(cmd.getPublicIp());
|
||||
fip.setMac(cmd.getMac());
|
||||
|
||||
try {
|
||||
String hash = _bigswitchBcfApi.createFloatingIp(cmd.get_tenantId(), fip);
|
||||
return new BcfAnswer(cmd, true, "floating ip " + cmd.get_publicIp() + ":" +
|
||||
cmd.get_privateIp() + " created", hash);
|
||||
String hash = _bigswitchBcfApi.createFloatingIp(cmd.getTenantId(), fip);
|
||||
return new BcfAnswer(cmd, true, "floating ip " + cmd.getPublicIp() + ":" +
|
||||
cmd.getPrivateIp() + " created", hash);
|
||||
} catch (BigSwitchBcfApiException e) {
|
||||
if (e.is_topologySyncRequested()) {
|
||||
cmd.set_topologySyncRequested(true);
|
||||
return new BcfAnswer(cmd, true, "floating ip " + cmd.get_publicIp() + ":" +
|
||||
cmd.get_privateIp() + " created; topology sync required.");
|
||||
cmd.setTopologySyncRequested(true);
|
||||
return new BcfAnswer(cmd, true, "floating ip " + cmd.getPublicIp() + ":" +
|
||||
cmd.getPrivateIp() + " created; topology sync required.");
|
||||
} else {
|
||||
if (numRetries > 0) {
|
||||
return retry(cmd, --numRetries);
|
||||
@ -439,12 +439,12 @@ public class BigSwitchBcfResource extends ManagerBase implements ServerResource
|
||||
|
||||
private Answer executeRequest(DeleteBcfStaticNatCommand cmd, int numRetries) {
|
||||
try {
|
||||
String hash = _bigswitchBcfApi.deleteFloatingIp(cmd.get_tenantId(), cmd.get_floatingIpId());
|
||||
return new BcfAnswer(cmd, true, "floating ip " + cmd.get_publicIp() + " deleted", hash);
|
||||
String hash = _bigswitchBcfApi.deleteFloatingIp(cmd.getTenantId(), cmd.getFloatingIpId());
|
||||
return new BcfAnswer(cmd, true, "floating ip " + cmd.getPublicIp() + " deleted", hash);
|
||||
} catch (BigSwitchBcfApiException e) {
|
||||
if (e.is_topologySyncRequested()) {
|
||||
cmd.set_topologySyncRequested(true);
|
||||
return new BcfAnswer(cmd, true, "floating ip " + cmd.get_publicIp() + " deleted; topology sync required.");
|
||||
cmd.setTopologySyncRequested(true);
|
||||
return new BcfAnswer(cmd, true, "floating ip " + cmd.getPublicIp() + " deleted; topology sync required.");
|
||||
} else {
|
||||
if (numRetries > 0) {
|
||||
return retry(cmd, --numRetries);
|
||||
@ -456,16 +456,16 @@ public class BigSwitchBcfResource extends ManagerBase implements ServerResource
|
||||
}
|
||||
|
||||
private Answer executeRequest(CreateBcfRouterCommand cmd, int numRetries) {
|
||||
RouterData router = new RouterData(cmd.get_tenantId());
|
||||
RouterData router = new RouterData(cmd.getTenantId());
|
||||
try {
|
||||
String hash;
|
||||
hash = _bigswitchBcfApi.createRouter(cmd.get_tenantId(), router);
|
||||
hash = _bigswitchBcfApi.createRouter(cmd.getTenantId(), router);
|
||||
|
||||
return new BcfAnswer(cmd, true, "router " + cmd.get_tenantId() +
|
||||
return new BcfAnswer(cmd, true, "router " + cmd.getTenantId() +
|
||||
" created.", hash);
|
||||
} catch (BigSwitchBcfApiException e) {
|
||||
if (e.is_topologySyncRequested()) {
|
||||
cmd.set_topologySyncRequested(true);
|
||||
cmd.setTopologySyncRequested(true);
|
||||
return new BcfAnswer(cmd, true, " created; topology sync required.");
|
||||
} else {
|
||||
if (numRetries > 0) {
|
||||
@ -478,18 +478,18 @@ public class BigSwitchBcfResource extends ManagerBase implements ServerResource
|
||||
}
|
||||
|
||||
private Answer executeRequest(CreateBcfRouterInterfaceCommand cmd, int numRetries) {
|
||||
RouterInterfaceData routerInterface = new RouterInterfaceData(cmd.get_tenantId(),
|
||||
cmd.get_gateway(), cmd.get_cidr(), cmd.get_networkId(), cmd.get_networkName());
|
||||
RouterInterfaceData routerInterface = new RouterInterfaceData(cmd.getTenantId(),
|
||||
cmd.getGateway(), cmd.getCidr(), cmd.getNetworkId(), cmd.getNetworkName());
|
||||
try {
|
||||
String hash;
|
||||
hash = _bigswitchBcfApi.createRouterInterface(cmd.get_tenantId(),
|
||||
cmd.get_tenantId(), routerInterface);
|
||||
hash = _bigswitchBcfApi.createRouterInterface(cmd.getTenantId(),
|
||||
cmd.getTenantId(), routerInterface);
|
||||
|
||||
return new BcfAnswer(cmd, true, "router " + cmd.get_tenantId() +
|
||||
return new BcfAnswer(cmd, true, "router " + cmd.getTenantId() +
|
||||
" created.", hash);
|
||||
} catch (BigSwitchBcfApiException e) {
|
||||
if (e.is_topologySyncRequested()) {
|
||||
cmd.set_topologySyncRequested(true);
|
||||
cmd.setTopologySyncRequested(true);
|
||||
return new BcfAnswer(cmd, true, " created; topology sync required.");
|
||||
} else {
|
||||
if (numRetries > 0) {
|
||||
@ -515,7 +515,7 @@ public class BigSwitchBcfResource extends ManagerBase implements ServerResource
|
||||
return new BcfAnswer(cmd, true, "tenant " + cmd.getTenantId() + " router updated", hash);
|
||||
} catch (BigSwitchBcfApiException e) {
|
||||
if (e.is_topologySyncRequested()) {
|
||||
cmd.set_topologySyncRequested(true);
|
||||
cmd.setTopologySyncRequested(true);
|
||||
return new BcfAnswer(cmd, true, "tenant " + cmd.getTenantId() + " router updated but topology sync required.");
|
||||
} else {
|
||||
if (numRetries > 0) {
|
||||
|
||||
@ -0,0 +1,93 @@
|
||||
//
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
//
|
||||
|
||||
package com.cloud.network.bigswitch;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.MockitoAnnotations;
|
||||
|
||||
import com.cloud.agent.AgentManager;
|
||||
import com.cloud.dc.dao.VlanDao;
|
||||
import com.cloud.host.dao.HostDao;
|
||||
import com.cloud.network.NetworkModel;
|
||||
import com.cloud.network.dao.BigSwitchBcfDao;
|
||||
import com.cloud.network.dao.FirewallRulesCidrsDao;
|
||||
import com.cloud.network.dao.FirewallRulesDao;
|
||||
import com.cloud.network.dao.IPAddressDao;
|
||||
import com.cloud.network.dao.NetworkDao;
|
||||
import com.cloud.network.vpc.NetworkACLItemCidrsDao;
|
||||
import com.cloud.network.vpc.NetworkACLItemDao;
|
||||
import com.cloud.network.vpc.dao.VpcDao;
|
||||
import com.cloud.vm.dao.NicDao;
|
||||
import com.cloud.vm.dao.VMInstanceDao;
|
||||
|
||||
public class BigSwitchBcfUtilsTest {
|
||||
|
||||
@Mock
|
||||
NetworkDao networkDao;
|
||||
@Mock
|
||||
NicDao nicDao;
|
||||
@Mock
|
||||
VMInstanceDao vmDao;
|
||||
@Mock
|
||||
HostDao hostDao;
|
||||
@Mock
|
||||
VpcDao vpcDao;
|
||||
@Mock
|
||||
BigSwitchBcfDao bigswitchBcfDao;
|
||||
@Mock
|
||||
AgentManager agentMgr;
|
||||
@Mock
|
||||
VlanDao vlanDao;
|
||||
@Mock
|
||||
IPAddressDao ipAddressDao;
|
||||
@Mock
|
||||
FirewallRulesDao fwRulesDao;
|
||||
@Mock
|
||||
FirewallRulesCidrsDao fwCidrsDao;
|
||||
@Mock
|
||||
NetworkACLItemDao aclItemDao;
|
||||
@Mock
|
||||
NetworkACLItemCidrsDao aclItemCidrsDao;
|
||||
@Mock
|
||||
NetworkModel networkModel;
|
||||
@Mock
|
||||
BigSwitchBcfUtils bsUtil;
|
||||
|
||||
@Before
|
||||
public void setUp() {
|
||||
MockitoAnnotations.initMocks(this);
|
||||
bsUtil = new BigSwitchBcfUtils(networkDao, nicDao, vmDao, hostDao,
|
||||
vpcDao, bigswitchBcfDao, agentMgr, vlanDao, ipAddressDao,
|
||||
fwRulesDao, fwCidrsDao, aclItemDao, aclItemCidrsDao,
|
||||
networkModel);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getSubnetMaskLengthTest() {
|
||||
Integer rc = bsUtil.getSubnetMaskLength("255.255.255.254");
|
||||
assertEquals("failed", new Integer(31), rc);
|
||||
rc = bsUtil.getSubnetMaskLength("128.255.255.254");
|
||||
assertEquals("failed", new Integer(1), rc);
|
||||
}
|
||||
}
|
||||
@ -1,13 +1,22 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor
|
||||
license agreements. See the NOTICE file distributed with this work for additional
|
||||
information regarding copyright ownership. The ASF licenses this file to you under
|
||||
the Apache License, Version 2.0 (the "License"); you may not use this file except
|
||||
in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under
|
||||
the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
|
||||
OF ANY KIND, either express or implied. See the License for the specific language
|
||||
governing permissions and limitations under the License. -->
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
-->
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>

pom.xml
@ -843,6 +843,7 @@
<exclude>tools/ngui/static/js/lib/*</exclude>
<exclude>**/.checkstyle</exclude>
<exclude>scripts/installer/windows/acs_license.rtf</exclude>
<exclude>**/*.md</exclude>
</excludes>
</configuration>
</plugin>
||||
|
||||
@ -6,9 +6,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@ -16,5 +16,5 @@
# specific language governing permissions and limitations
# under the License.

/sbin/ifconfig $1 0.0.0.0 up
/usr/sbin/ivs-ctl add-port $1
/sbin/ifconfig $1 0.0.0.0 up
/usr/sbin/ivs-ctl add-port $1

@ -156,7 +156,8 @@ then
elif [ "$cflag" == "1" ]
then
/usr/bin/logger -t heartbeat "kvmheartbeat.sh rebooted system because it was unable to write the heartbeat to the storage."
sync
sync &
sleep 5
echo b > /proc/sysrq-trigger
exit $?
else

@ -90,7 +90,7 @@ public class ParamGenericValidationWorker implements DispatchWorker {
break;
}
}
if (!matchedCurrentParam) {
if (!matchedCurrentParam && !((String)actualParamName).equalsIgnoreCase("expires") && !((String)actualParamName).equalsIgnoreCase("signatureversion")) {
errorMsg.append(" ").append(actualParamName);
foundUnknownParam= true;
}

@ -1215,7 +1215,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {
// volume is ready and the pool should be reused.
// In this case, also check if rest of the volumes are ready and can
// be reused.
if (plan.getPoolId() != null) {
if (plan.getPoolId() != null || (toBeCreated.getVolumeType() == Volume.Type.DATADISK && toBeCreated.getPoolId() != null && toBeCreated.getState() == Volume.State.Ready)) {
s_logger.debug("Volume has pool already allocated, checking if pool can be reused, poolId: " + toBeCreated.getPoolId());
List<StoragePool> suitablePools = new ArrayList<StoragePool>();
StoragePool pool = null;

@ -638,6 +638,9 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai
_haDao.update(work.getId(), work);

VMInstanceVO vm = _instanceDao.findById(vmId);
if (vm == null) {
return null;
}
// First try starting the vm with its original planner, if it doesn't succeed send HAPlanner as its an emergency.
_itMgr.migrateAway(vm.getUuid(), srcHostId);
return null;
@ -757,7 +760,10 @@
List<HaWorkVO> works = _haDao.findTakenWorkItems(WorkType.Migration);
List<VMInstanceVO> vms = new ArrayList<VMInstanceVO>(works.size());
for (HaWorkVO work : works) {
vms.add(_instanceDao.findById(work.getInstanceId()));
VMInstanceVO vm = _instanceDao.findById(work.getInstanceId());
if (vm != null) {
vms.add(vm);
}
}
return vms;
}
@ -917,6 +923,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai
} else {
s_logger.info("Rescheduling " + work + " to try again at " + new Date(nextTime << 10));
work.setTimeToTry(nextTime);
work.setTimesTried(work.getTimesTried() + 1);
work.setServerId(null);
work.setDateTaken(null);
}
@ -927,6 +934,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai

s_logger.info("Rescheduling " + work + " to try again at " + new Date(nextTime << 10));
work.setTimeToTry(nextTime);
work.setTimesTried(work.getTimesTried() + 1);
work.setServerId(null);
work.setDateTaken(null);

@ -935,6 +943,10 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai
VMInstanceVO vm = _instanceDao.findById(work.getInstanceId());
work.setUpdateTime(vm.getUpdated());
work.setPreviousState(vm.getState());
if (!Step.Done.equals(work.getStep()) && work.getTimesTried() >= _maxRetries) {
s_logger.warn("Giving up, retries max times for work: " + work);
work.setStep(Step.Done);
}
}
_haDao.update(work.getId(), work);
} catch (final Throwable th) {

@ -473,7 +473,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql);
stmt.executeUpdate();
} catch (SQLException ex) {
s_logger.debug("Caught exception when inserting system account: " + ex.getMessage());
s_logger.debug("Looks like system account already exists");
}
// insert system user
insertSql = "INSERT INTO `cloud`.`user` (id, uuid, username, password, account_id, firstname, lastname, created, user.default)"
@ -483,7 +483,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql);
stmt.executeUpdate();
} catch (SQLException ex) {
s_logger.debug("Caught SQLException when inserting system user: " + ex.getMessage());
s_logger.debug("Looks like system user already exists");
}

// insert admin user, but leave the account disabled until we set a
@ -500,7 +500,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql);
stmt.executeUpdate();
} catch (SQLException ex) {
s_logger.debug("Caught SQLException when creating admin account: " + ex.getMessage());
s_logger.debug("Looks like admin account already exists");
}

// now insert the user
@ -511,7 +511,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql);
stmt.executeUpdate();
} catch (SQLException ex) {
s_logger.debug("Caught SQLException when inserting admin user: " + ex.getMessage());
s_logger.debug("Looks like admin user already exists");
}

try {
@ -522,8 +522,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
stmt.executeQuery();
tableName = "network_group";
} catch (Exception ex) {
// if network_groups table exists, create the default security group there
s_logger.debug("Caught (SQL?)Exception: no network_group " + ex.getLocalizedMessage());
// Ignore in case of exception, table must not exist
}

insertSql = "SELECT * FROM " + tableName + " where account_id=2 and name='default'";

@ -212,6 +212,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
long autoScaleStatsInterval = -1L;
int vmDiskStatsInterval = 0;
List<Long> hostIds = null;
private double _imageStoreCapacityThreshold = 0.90;

String externalStatsPrefix = "";
String externalStatsHost = null;
@ -1045,6 +1046,14 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
}
}

public boolean imageStoreHasEnoughCapacity(DataStore imageStore) {
StorageStats imageStoreStats = _storageStats.get(imageStore.getId());
if (imageStoreStats != null && (imageStoreStats.getByteUsed()/(imageStoreStats.getCapacityBytes()*1.0)) <= _imageStoreCapacityThreshold) {
return true;
}
return false;
}

public StorageStats getStorageStats(long id) {
return _storageStats.get(id);
}

@ -19,7 +19,6 @@ package com.cloud.servlet;
import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;

import javax.crypto.BadPaddingException;
import javax.crypto.Cipher;
@ -159,33 +158,4 @@ public class ConsoleProxyPasswordBasedEncryptor {
}
}

public static void main(String[] args) {
SecureRandom random;
try {
random = SecureRandom.getInstance("SHA1PRNG");
byte[] keyBytes = new byte[16];
random.nextBytes(keyBytes);

byte[] ivBytes = new byte[16];
random.nextBytes(ivBytes);

KeyIVPair keyIvPair = new KeyIVPair("8x/xUBgX0Up+3UEo39dSeG277JhVj31+ElHkN5+EC0Q=", "Y2SUiIN6JXTdKNK/ZMDyVtLB7gAM9MCCiyrP1xd3bSQ=");
//keyIvPair.setKeyBytes(keyBytes);
//keyIvPair.setIvBytes(ivBytes);

Gson gson = new GsonBuilder().create();
ConsoleProxyPasswordBasedEncryptor encryptor = new ConsoleProxyPasswordBasedEncryptor(gson.toJson(keyIvPair));

String encrypted = encryptor.encryptText("Hello, world");

System.out.println("Encrypted result: " + encrypted);

String decrypted = encryptor.decryptText(encrypted);

System.out.println("Decrypted result: " + decrypted);

} catch (NoSuchAlgorithmException e) {
e.printStackTrace();
}
}
}

@ -41,6 +41,7 @@ import javax.ejb.Local;
|
||||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
@ -545,7 +546,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
||||
}
|
||||
DataStore store;
|
||||
try {
|
||||
StoragePoolVO pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), pInfo.getHost(), pInfo.getHostPath(), pInfo.getUuid());
|
||||
String hostAddress = pInfo.getHost();
|
||||
if (host.getHypervisorType() == Hypervisor.HypervisorType.VMware) {
|
||||
hostAddress = "VMFS datastore: " + pInfo.getHostPath();
|
||||
}
|
||||
StoragePoolVO pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), hostAddress, pInfo.getHostPath(), pInfo.getUuid());
|
||||
if (pool == null && host.getHypervisorType() == HypervisorType.VMware) {
|
||||
// perform run-time upgrade. In versions prior to 2.2.12, there
|
||||
// is a bug that we don't save local datastore info (host path
|
||||
@ -554,12 +559,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
||||
// available on the host, to support smooth migration, we
|
||||
// need to perform runtime upgrade here
|
||||
if (pInfo.getHostPath().length() > 0) {
|
||||
pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), pInfo.getHost(), "", pInfo.getUuid());
|
||||
pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), hostAddress, "", pInfo.getUuid());
|
||||
}
|
||||
}
|
||||
if (pool == null) {
|
||||
//the path can be different, but if they have the same uuid, assume they are the same storage
|
||||
pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), pInfo.getHost(), null,
|
||||
pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), hostAddress, null,
|
||||
pInfo.getUuid());
|
||||
if (pool != null) {
|
||||
s_logger.debug("Found a storage pool: " + pInfo.getUuid() + ", but with different hostpath " + pInfo.getHostPath() + ", still treat it as the same pool");
|
||||
@ -1600,7 +1605,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
||||
}
|
||||
|
||||
long totalOverProvCapacity;
|
||||
if (pool.getPoolType() == StoragePoolType.NetworkFilesystem || pool.getPoolType() == StoragePoolType.VMFS) {
|
||||
if (pool.getPoolType() == StoragePoolType.NetworkFilesystem || pool.getPoolType() == StoragePoolType.VMFS || pool.getPoolType() == StoragePoolType.Filesystem) {
|
||||
BigDecimal overProvFactor = getStorageOverProvisioningFactor(pool.getId());
|
||||
totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(pool.getCapacityBytes())).longValue();
|
||||
s_logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString() + " with overprovisioning factor "
|
||||
|
||||
@ -1650,12 +1650,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
||||
}
|
||||
|
||||
HostVO host = null;
|
||||
StoragePoolVO volumePool = _storagePoolDao.findById(volume.getPoolId());
|
||||
StoragePoolVO volumePool = _storagePoolDao.findByIdIncludingRemoved(volume.getPoolId());
|
||||
|
||||
if (hostId != null) {
|
||||
host = _hostDao.findById(hostId);
|
||||
|
||||
if (host != null && host.getHypervisorType() == HypervisorType.XenServer && volumePool.isManaged()) {
|
||||
if (host != null && host.getHypervisorType() == HypervisorType.XenServer && volumePool != null && volumePool.isManaged()) {
|
||||
sendCommand = true;
|
||||
}
|
||||
}
|
||||
|
||||
@ -60,6 +60,7 @@ import com.cloud.event.UsageEventUtils;
|
||||
import com.cloud.exception.InvalidParameterValueException;
|
||||
import com.cloud.exception.ResourceAllocationException;
|
||||
import com.cloud.org.Grouping;
|
||||
import com.cloud.server.StatsCollector;
|
||||
import com.cloud.storage.ScopeType;
|
||||
import com.cloud.storage.Storage.ImageFormat;
|
||||
import com.cloud.storage.Storage.TemplateType;
|
||||
@ -83,8 +84,10 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
|
||||
DownloadMonitor _downloadMonitor;
|
||||
@Inject
|
||||
AgentManager _agentMgr;
|
||||
|
||||
@Inject TemplateDataStoreDao templateDataStoreDao;
|
||||
@Inject
|
||||
StatsCollector _statsCollector;
|
||||
@Inject
|
||||
TemplateDataStoreDao templateDataStoreDao;
|
||||
@Inject
|
||||
DataStoreManager storeMgr;
|
||||
@Inject
|
||||
@ -164,13 +167,17 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check if image store has enough capacity for template
|
||||
if (!_statsCollector.imageStoreHasEnoughCapacity(imageStore)) {
|
||||
s_logger.info("Image store doesn't has enough capacity, so skip downloading template to this image store " + imageStore.getId());
|
||||
continue;
|
||||
}
|
||||
// We want to download private template to one of the image store in a zone
|
||||
if(isPrivateTemplate(template) && zoneSet.contains(zoneId)){
|
||||
continue;
|
||||
}else {
|
||||
zoneSet.add(zoneId);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
TemplateInfo tmpl = imageFactory.getTemplate(template.getId(), imageStore);
|
||||
|
||||
@ -1016,6 +1016,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
||||
|
||||
NicProfile profile = new NicProfile(null, null);
|
||||
if (ipAddress != null) {
|
||||
if (!(NetUtils.isValidIp(ipAddress) || NetUtils.isValidIpv6(ipAddress))) {
|
||||
throw new InvalidParameterValueException("Invalid format for IP address parameter: " + ipAddress);
|
||||
}
|
||||
profile = new NicProfile(ipAddress, null);
|
||||
}
|
||||
|
||||
@ -2892,6 +2895,19 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
||||
}
|
||||
|
||||
profile.setDefaultNic(true);
|
||||
if (!_networkModel.areServicesSupportedInNetwork(network.getId(), new Service[]{Service.UserData})) {
|
||||
if ((userData != null) && (!userData.isEmpty())) {
|
||||
throw new InvalidParameterValueException("Unable to deploy VM as UserData is provided while deploying the VM, but there is no support for " + Network.Service.UserData.getName() + " service in the default network " + network.getId());
|
||||
}
|
||||
|
||||
if ((sshPublicKey != null) && (!sshPublicKey.isEmpty())) {
|
||||
throw new InvalidParameterValueException("Unable to deploy VM as SSH keypair is provided while deploying the VM, but there is no support for " + Network.Service.UserData.getName() + " service in the default network " + network.getId());
|
||||
}
|
||||
|
||||
if (template.getEnablePassword()) {
|
||||
throw new InvalidParameterValueException("Unable to deploy VM as template " + template.getId() + " is password enabled, but there is no support for " + Network.Service.UserData.getName() + " service in the default network " + network.getId());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
networks.add(new Pair<NetworkVO, NicProfile>(network, profile));
|
||||
|
||||
@ -19,7 +19,6 @@ package com.cloud.consoleproxy;
|
||||
import java.security.InvalidAlgorithmParameterException;
|
||||
import java.security.InvalidKeyException;
|
||||
import java.security.NoSuchAlgorithmException;
|
||||
import java.security.SecureRandom;
|
||||
|
||||
import javax.crypto.BadPaddingException;
|
||||
import javax.crypto.Cipher;
|
||||
@ -165,33 +164,4 @@ public class ConsoleProxyPasswordBasedEncryptor {
|
||||
}
|
||||
}
|
||||
|
||||
public static void main(String[] args) {
|
||||
SecureRandom random;
|
||||
try {
|
||||
random = SecureRandom.getInstance("SHA1PRNG");
|
||||
byte[] keyBytes = new byte[16];
|
||||
random.nextBytes(keyBytes);
|
||||
|
||||
byte[] ivBytes = new byte[16];
|
||||
random.nextBytes(ivBytes);
|
||||
|
||||
KeyIVPair keyIvPair = new KeyIVPair("8x/xUBgX0Up+3UEo39dSeG277JhVj31+ElHkN5+EC0Q=", "Y2SUiIN6JXTdKNK/ZMDyVtLB7gAM9MCCiyrP1xd3bSQ=");
|
||||
//keyIvPair.setKeyBytes(keyBytes);
|
||||
//keyIvPair.setIvBytes(ivBytes);
|
||||
|
||||
Gson gson = new GsonBuilder().create();
|
||||
ConsoleProxyPasswordBasedEncryptor encryptor = new ConsoleProxyPasswordBasedEncryptor(gson.toJson(keyIvPair));
|
||||
|
||||
String encrypted = encryptor.encryptText("Hello, world");
|
||||
|
||||
System.out.println("Encrypted result: " + encrypted);
|
||||
|
||||
String decrypted = encryptor.decryptText(encrypted);
|
||||
|
||||
System.out.println("Decrypted result: " + decrypted);
|
||||
|
||||
} catch (NoSuchAlgorithmException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -37,6 +37,7 @@ ALTER TABLE `cloud`.`volumes` ADD COLUMN `provisioning_type` VARCHAR(32) NOT NUL
|
||||
ALTER TABLE `cloud`.`disk_offering` ADD COLUMN `provisioning_type` VARCHAR(32) NOT NULL DEFAULT 'thin' COMMENT 'pre allocation setting of the volume';
|
||||
|
||||
-- Have primary keys of following table AUTO_INCREMENT
|
||||
SET foreign_key_checks = 0;
|
||||
ALTER TABLE `cloud`.`region` MODIFY `id` int unsigned AUTO_INCREMENT UNIQUE NOT NULL;
|
||||
ALTER TABLE `cloud`.`vm_instance` MODIFY `id` bigint unsigned AUTO_INCREMENT UNIQUE NOT NULL;
|
||||
ALTER TABLE `cloud`.`user_vm` MODIFY `id` bigint unsigned AUTO_INCREMENT UNIQUE NOT NULL;
|
||||
@ -44,6 +45,7 @@ ALTER TABLE `cloud`.`domain_router` MODIFY `id` bigint unsigned AUTO_INCREMENT U
|
||||
ALTER TABLE `cloud`.`service_offering` MODIFY `id` bigint unsigned AUTO_INCREMENT NOT NULL;
|
||||
ALTER TABLE `cloud`.`load_balancing_rules` MODIFY `id` bigint unsigned AUTO_INCREMENT NOT NULL;
|
||||
ALTER TABLE `cloud`.`port_forwarding_rules` MODIFY `id` bigint unsigned AUTO_INCREMENT NOT NULL;
|
||||
SET foreign_key_checks = 1;
|
||||
|
||||
DROP VIEW IF EXISTS `cloud`.`disk_offering_view`;
|
||||
CREATE VIEW `cloud`.`disk_offering_view` AS
|
||||
|
||||
@ -55,6 +55,12 @@
|
||||
]
|
||||
}
|
||||
],
|
||||
"vmwaredc": {
|
||||
"username": "",
|
||||
"vcenter": "",
|
||||
"password": "",
|
||||
"name": ""
|
||||
},
|
||||
"ipranges": [
|
||||
{
|
||||
"startip": "192.168.2.2",
|
||||
|
||||
@ -4,6 +4,6 @@
|
||||
rotate 3
|
||||
compress
|
||||
dateext
|
||||
size=+10M
|
||||
size 10M
|
||||
notifempty
|
||||
}
|
||||
|
||||
@ -17,6 +17,7 @@
|
||||
/var/log/cloud.log {
|
||||
rotate 4
|
||||
daily
|
||||
size 10M
|
||||
missingok
|
||||
notifempty
|
||||
compress
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
/var/log/conntrackd-stats.log {
|
||||
daily
|
||||
size 10M
|
||||
rotate 2
|
||||
missingok
|
||||
compress
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
/var/log/dnsmasq.log {
|
||||
daily
|
||||
size 10M
|
||||
missingok
|
||||
rotate 5
|
||||
notifempty
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
/var/log/ppp-connect-errors {
|
||||
daily
|
||||
size 10M
|
||||
rotate 5
|
||||
missingok
|
||||
notifempty
|
||||
|
||||
@ -46,3 +46,16 @@ python /opt/cloud/bin/baremetal-vr.py &
|
||||
|
||||
date > /var/cache/cloud/boot_up_done
|
||||
logger -t cloud "Boot up process done"
|
||||
|
||||
#Restore the persistent iptables nat, rules and filters for IPv4 and IPv6 if they exist
|
||||
ipv4="/etc/iptables/router_rules.v4"
|
||||
if [ -e $ipv4 ]
|
||||
then
|
||||
iptables-restore < $ipv4
|
||||
fi
|
||||
|
||||
ipv6="/etc/iptables/router_rules.v6"
|
||||
if [ -e $ipv6 ]
|
||||
then
|
||||
iptables-restore < $ipv6
|
||||
fi
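
For reference, the rules restored here are written out at the end of each router configuration run by the save_iptables helper added further down in this change. A minimal manual equivalent, assuming the same file locations, would be:

```bash
# Persist the current IPv4 and IPv6 rule sets so the restore above picks them up on the next boot
iptables-save  > /etc/iptables/router_rules.v4
ip6tables-save > /etc/iptables/router_rules.v6
```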
|
||||
@ -676,6 +676,10 @@ def main(argv):
|
||||
|
||||
mon = CsMonitor("monitorservice", config)
|
||||
mon.process()
|
||||
|
||||
|
||||
#Save iptables configuration - will be loaded on reboot by the iptables-restore that is configured on /etc/rc.local
|
||||
CsHelper.save_iptables("iptables-save", "/etc/iptables/router_rules.v4")
|
||||
CsHelper.save_iptables("ip6tables-save", "/etc/iptables/router_rules.v6")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(sys.argv)
|
||||
|
||||
@ -16,7 +16,7 @@
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
""" General helper functions
|
||||
for use in the configuation process
|
||||
for use in the configuration process
|
||||
|
||||
"""
|
||||
import subprocess
|
||||
@ -27,7 +27,6 @@ import shutil
|
||||
from netaddr import *
|
||||
from pprint import pprint
|
||||
|
||||
|
||||
def is_mounted(name):
|
||||
for i in execute("mount"):
|
||||
vals = i.lstrip().split()
|
||||
@ -163,6 +162,19 @@ def execute(command):
|
||||
return result.splitlines()
|
||||
|
||||
|
||||
def save_iptables(command, iptables_file):
|
||||
""" Execute command """
|
||||
logging.debug("Saving iptables for %s" % command)
|
||||
|
||||
result = execute(command)
|
||||
fIptables = open(iptables_file, "w+")
|
||||
|
||||
for line in result:
|
||||
fIptables.write(line)
|
||||
fIptables.write("\n")
|
||||
fIptables.close()
|
||||
|
||||
|
||||
def execute2(command):
|
||||
""" Execute command """
|
||||
logging.debug("Executing %s" % command)
|
||||
|
||||
@ -187,23 +187,24 @@ class TestVRServiceFailureAlerting(cloudstackTestCase):
|
||||
self.debug("apache process status: %s" % res)
|
||||
|
||||
configs = Configurations.list(
|
||||
self.apiclient,
|
||||
name='router.alerts.check.interval'
|
||||
)
|
||||
self.apiclient,
|
||||
name='router.alerts.check.interval'
|
||||
)
|
||||
|
||||
# Set the value for one more minute than
|
||||
# actual range to be on safer side
|
||||
waitingPeriod = (
|
||||
int(configs[0].value) + 600) # in seconds
|
||||
int(configs[0].value) + 60) # in seconds
|
||||
|
||||
time.sleep(waitingPeriod)
|
||||
# wait for (router.alerts.check.interval + 10) minutes meanwhile monitor service on
|
||||
# VR starts the apache service (
|
||||
# wait for (router.alerts.check.interval + 10) minutes meanwhile
|
||||
# monitor service on VR starts the apache service (
|
||||
# router.alerts.check.interval default value is
|
||||
# 30minutes)
|
||||
|
||||
qresultset = self.dbclient.execute(
|
||||
"select id from alert where subject = '%s' ORDER BY id DESC LIMIT 1;" %
|
||||
"select id from alert where subject \
|
||||
= '%s' ORDER BY id DESC LIMIT 1;" %
|
||||
str(alertSubject))
|
||||
self.assertNotEqual(
|
||||
len(qresultset),
|
||||
|
||||
File diff suppressed because it is too large
@ -40,7 +40,8 @@ from marvin.lib.common import (get_domain,
|
||||
get_template,
|
||||
get_free_vlan,
|
||||
wait_for_cleanup,
|
||||
verifyRouterState)
|
||||
verifyRouterState,
|
||||
verifyGuestTrafficPortGroups)
|
||||
from marvin.sshClient import SshClient
|
||||
from marvin.codes import PASS
|
||||
from ddt import ddt, data
|
||||
@ -61,6 +62,7 @@ class TestSharedNetworks(cloudstackTestCase):
|
||||
# Get Zone, Domain and templates
|
||||
cls.domain = get_domain(cls.api_client)
|
||||
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
|
||||
cls.hypervisor = cls.testClient.getHypervisorInfo()
|
||||
cls.template = get_template(
|
||||
cls.api_client,
|
||||
cls.zone.id,
|
||||
@ -3672,3 +3674,78 @@ class TestSharedNetworks(cloudstackTestCase):
|
||||
"Check if disassociated IP Address is no longer available"
|
||||
)
|
||||
return
|
||||
|
||||
@attr(tags=["advanced", "dvs"], required_hardware="true")
|
||||
def test_guest_traffic_port_groups_shared_network(self):
|
||||
""" Verify vcenter port groups are created for shared network
|
||||
|
||||
# Steps,
|
||||
# 1. Create a shared network
|
||||
# 2. Deploy a VM in shared network so that router is
|
||||
# created
|
||||
# 3. Verify that corresponding port groups are created
|
||||
for guest traffic
|
||||
"""
|
||||
|
||||
if self.hypervisor.lower() != "vmware":
|
||||
self.skipTest("This test is intended for only vmware")
|
||||
|
||||
physical_network, shared_vlan = get_free_vlan(
|
||||
self.api_client, self.zone.id)
|
||||
if shared_vlan is None:
|
||||
self.fail("Failed to get free vlan id for shared network")
|
||||
|
||||
self.testdata["shared_network_offering"]["specifyVlan"] = "True"
|
||||
self.testdata["shared_network_offering"]["specifyIpRanges"] = "True"
|
||||
|
||||
# Create Network Offering
|
||||
self.shared_network_offering = NetworkOffering.create(
|
||||
self.api_client,
|
||||
self.testdata["shared_network_offering"],
|
||||
conservemode=False
|
||||
)
|
||||
|
||||
# Update network offering state from disabled to enabled.
|
||||
NetworkOffering.update(
|
||||
self.shared_network_offering,
|
||||
self.api_client,
|
||||
id=self.shared_network_offering.id,
|
||||
state="enabled"
|
||||
)
|
||||
|
||||
# create network using the shared network offering created
|
||||
self.testdata["shared_network"]["acltype"] = "Domain"
|
||||
self.testdata["shared_network"][
|
||||
"networkofferingid"] = self.shared_network_offering.id
|
||||
self.testdata["shared_network"][
|
||||
"physicalnetworkid"] = physical_network.id
|
||||
self.testdata["shared_network"]["vlan"] = shared_vlan
|
||||
|
||||
self.network = Network.create(
|
||||
self.api_client,
|
||||
self.testdata["shared_network"],
|
||||
networkofferingid=self.shared_network_offering.id,
|
||||
zoneid=self.zone.id,
|
||||
)
|
||||
self.cleanup_networks.append(self.network)
|
||||
|
||||
vm = VirtualMachine.create(
|
||||
self.api_client,
|
||||
self.testdata["virtual_machine"],
|
||||
networkids=self.network.id,
|
||||
serviceofferingid=self.service_offering.id
|
||||
)
|
||||
self.cleanup_vms.append(vm)
|
||||
|
||||
routers = Router.list(self.api_client,
|
||||
networkid=self.network.id,
|
||||
listall=True)
|
||||
|
||||
self.assertEqual(validateList(routers)[0], PASS,
|
||||
"No Router associated with the network found")
|
||||
|
||||
response = verifyGuestTrafficPortGroups(self.api_client,
|
||||
self.config,
|
||||
self.zone)
|
||||
self.assertEqual(response[0], PASS, response[1])
|
||||
return
|
||||
|
||||
@ -43,7 +43,8 @@ from marvin.lib.common import (get_domain,
|
||||
wait_for_cleanup,
|
||||
list_virtual_machines,
|
||||
list_hosts,
|
||||
findSuitableHostForMigration)
|
||||
findSuitableHostForMigration,
|
||||
verifyGuestTrafficPortGroups)
|
||||
|
||||
from marvin.codes import PASS, ERROR_NO_HOST_FOR_MIGRATION
|
||||
|
||||
@ -1734,6 +1735,7 @@ class TestVMLifeCycleBothIsolated(cloudstackTestCase):
|
||||
cls.api_client = cls.testClient.getApiClient()
|
||||
|
||||
cls.services = Services().services
|
||||
cls.hypervisor = cls.testClient.getHypervisorInfo()
|
||||
# Get Zone, Domain and templates
|
||||
cls.domain = get_domain(cls.api_client)
|
||||
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
|
||||
@ -2060,6 +2062,17 @@ class TestVMLifeCycleBothIsolated(cloudstackTestCase):
|
||||
)
|
||||
return
|
||||
|
||||
@attr(tags=["advanced", "dvs"], required_hardware="true")
|
||||
def test_guest_traffic_port_groups_vpc_network(self):
|
||||
""" Verify port groups are created for guest traffic
|
||||
used by vpc network """
|
||||
|
||||
if self.hypervisor.lower() == "vmware":
|
||||
response = verifyGuestTrafficPortGroups(self.apiclient,
|
||||
self.config,
|
||||
self.zone)
|
||||
assert response[0] == PASS, response[1]
|
||||
|
||||
class TestVMLifeCycleStoppedVPCVR(cloudstackTestCase):
|
||||
|
||||
@classmethod
|
||||
|
||||
@ -17,7 +17,8 @@
|
||||
""" BVT tests for Network Life Cycle
|
||||
"""
|
||||
# Import Local Modules
|
||||
from marvin.codes import FAILED, STATIC_NAT_RULE, LB_RULE, NAT_RULE
|
||||
from marvin.codes import (FAILED, STATIC_NAT_RULE, LB_RULE,
|
||||
NAT_RULE, PASS)
|
||||
from marvin.cloudstackTestCase import cloudstackTestCase
|
||||
from marvin.cloudstackException import CloudstackAPIException
|
||||
from marvin.cloudstackAPI import rebootRouter
|
||||
@ -43,7 +44,8 @@ from marvin.lib.common import (get_domain,
|
||||
list_routers,
|
||||
list_virtual_machines,
|
||||
list_lb_rules,
|
||||
list_configurations)
|
||||
list_configurations,
|
||||
verifyGuestTrafficPortGroups)
|
||||
from nose.plugins.attrib import attr
|
||||
from ddt import ddt, data
|
||||
# Import System modules
|
||||
@ -247,6 +249,7 @@ class TestPortForwarding(cloudstackTestCase):
|
||||
testClient = super(TestPortForwarding, cls).getClsTestClient()
|
||||
cls.apiclient = testClient.getApiClient()
|
||||
cls.services = testClient.getParsedTestDataConfig()
|
||||
cls.hypervisor = testClient.getHypervisorInfo()
|
||||
# Get Zone, Domain and templates
|
||||
cls.domain = get_domain(cls.apiclient)
|
||||
cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
|
||||
@ -551,6 +554,17 @@ class TestPortForwarding(cloudstackTestCase):
|
||||
)
|
||||
return
|
||||
|
||||
@attr(tags=["advanced", "dvs"], required_hardware="true")
|
||||
def test_guest_traffic_port_groups_isolated_network(self):
|
||||
""" Verify port groups are created for guest traffic
|
||||
used by isolated network """
|
||||
|
||||
if self.hypervisor.lower() == "vmware":
|
||||
response = verifyGuestTrafficPortGroups(self.apiclient,
|
||||
self.config,
|
||||
self.zone)
|
||||
assert response[0] == PASS, response[1]
|
||||
|
||||
|
||||
class TestRebootRouter(cloudstackTestCase):
|
||||
|
||||
|
||||
@ -236,7 +236,7 @@ class TestAdvancedZoneStoppedVM(cloudstackTestCase):
|
||||
raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
return
|
||||
|
||||
@attr(tags=["advanced", "basic"], required_hardware="False")
|
||||
@attr(tags=["advanced", "basic"], required_hardware="True")
|
||||
def test_01_pt_deploy_vm_without_startvm(self):
|
||||
""" Positive test for stopped VM test path - T1
|
||||
|
||||
@ -304,7 +304,7 @@ class TestAdvancedZoneStoppedVM(cloudstackTestCase):
|
||||
self.assertTrue(response[0], response[1])
|
||||
return
|
||||
|
||||
@attr(tags=["advanced", "basic"], required_hardware="False")
|
||||
@attr(tags=["advanced", "basic"], required_hardware="True")
|
||||
def test_02_pt_deploy_vm_with_startvm_true(self):
|
||||
""" Positive test for stopped VM test path - T1 variant
|
||||
|
||||
@ -373,7 +373,7 @@ class TestAdvancedZoneStoppedVM(cloudstackTestCase):
|
||||
self.assertTrue(response[0], response[1])
|
||||
return
|
||||
|
||||
@attr(tags=["advanced", "basic"], required_hardware="False")
|
||||
@attr(tags=["advanced", "basic"], required_hardware="false")
|
||||
def test_03_pt_deploy_vm_with_startvm_false(self):
|
||||
""" Positive test for stopped VM test path - T2
|
||||
|
||||
@ -415,7 +415,7 @@ class TestAdvancedZoneStoppedVM(cloudstackTestCase):
|
||||
self.assertTrue(response[0], response[1])
|
||||
return
|
||||
|
||||
@attr(tags=["advanced", "basic"], required_hardware="False")
|
||||
@attr(tags=["advanced", "basic"], required_hardware="false")
|
||||
def test_04_pt_startvm_false_attach_disk(self):
|
||||
""" Positive test for stopped VM test path - T3 and variant, T9
|
||||
|
||||
@ -554,7 +554,7 @@ class TestAdvancedZoneStoppedVM(cloudstackTestCase):
|
||||
)
|
||||
return
|
||||
|
||||
@attr(tags=["advanced", "basic"], required_hardware="False")
|
||||
@attr(tags=["advanced", "basic"], required_hardware="false")
|
||||
def test_05_pt_startvm_false_attach_disk_change_SO(self):
|
||||
""" Positive test for stopped VM test path - T4
|
||||
|
||||
@ -652,7 +652,7 @@ class TestAdvancedZoneStoppedVM(cloudstackTestCase):
|
||||
)
|
||||
return
|
||||
|
||||
@attr(tags=["advanced", "basic"], required_hardware="False")
|
||||
@attr(tags=["advanced", "basic"], required_hardware="True")
|
||||
def test_06_pt_startvm_false_attach_iso(self):
|
||||
""" Positive test for stopped VM test path - T5
|
||||
|
||||
@ -712,7 +712,7 @@ class TestAdvancedZoneStoppedVM(cloudstackTestCase):
|
||||
)
|
||||
return
|
||||
|
||||
@attr(tags=["advanced", "basic"], required_hardware="False")
|
||||
@attr(tags=["advanced", "basic"], required_hardware="True")
|
||||
def test_07_pt_startvm_false_attach_iso_running_vm(self):
|
||||
""" Positive test for stopped VM test path - T5 variant
|
||||
|
||||
@ -780,7 +780,7 @@ class TestAdvancedZoneStoppedVM(cloudstackTestCase):
|
||||
)
|
||||
return
|
||||
|
||||
@attr(tags=["advanced", "basic"], required_hardware="False")
|
||||
@attr(tags=["advanced", "basic"], required_hardware="True")
|
||||
def test_08_pt_startvm_false_password_enabled_template(self):
|
||||
""" Positive test for stopped VM test path - T10
|
||||
|
||||
@ -888,7 +888,7 @@ class TestAdvancedZoneStoppedVM(cloudstackTestCase):
|
||||
)
|
||||
return
|
||||
|
||||
@attr(tags=["advanced", "basic"], required_hardware="False")
|
||||
@attr(tags=["advanced", "basic"], required_hardware="false")
|
||||
def test_09_pt_destroy_stopped_vm(self):
|
||||
""" Positive test for stopped VM test path - T11
|
||||
|
||||
@ -939,7 +939,7 @@ class TestAdvancedZoneStoppedVM(cloudstackTestCase):
|
||||
self.assertEqual(response[0], PASS, response[1])
|
||||
return
|
||||
|
||||
@attr(tags=["advanced", "basic"], required_hardware="False")
|
||||
@attr(tags=["advanced", "basic"], required_hardware="false")
|
||||
def test_10_max_account_limit(self):
|
||||
""" Positive test for stopped VM test path - T12
|
||||
|
||||
|
||||
@ -2791,7 +2791,8 @@ class TestLiveStorageMigration(cloudstackTestCase):
|
||||
raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
return
|
||||
|
||||
@attr(tags=["advanced", "basic"])
|
||||
@attr(tags=["advanced", "basic"],
|
||||
required_hardware="True")
|
||||
def test_01_migrate_live(self):
|
||||
""" Test migrate Volume (root and data disk)
|
||||
|
||||
|
||||
@ -170,6 +170,7 @@ class TestPathVMLC(cloudstackTestCase):
|
||||
testClient = super(TestPathVMLC, cls).getClsTestClient()
|
||||
cls.apiclient = testClient.getApiClient()
|
||||
cls.testdata = testClient.getParsedTestDataConfig()
|
||||
cls.hypervisor = testClient.getHypervisorInfo()
|
||||
|
||||
# Get Zone, Domain and templates
|
||||
cls.domain = get_domain(cls.apiclient)
|
||||
@ -315,7 +316,7 @@ class TestPathVMLC(cloudstackTestCase):
|
||||
raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
return
|
||||
|
||||
@attr(tags=["advanced"], required_hardware="False")
|
||||
@attr(tags=["advanced"], required_hardware="false")
|
||||
@data(ISOLATED_NETWORK, VPC_NETWORK)
|
||||
def test_01_positive_tests_vm_operations_advanced_zone(self, value):
|
||||
""" Positive tests for VMLC test path - Advanced Zone
|
||||
@ -336,6 +337,8 @@ class TestPathVMLC(cloudstackTestCase):
|
||||
# 13. Find suitable host for VM to migrate and migrate the VM
|
||||
# 14. Verify VM accessibility on new host
|
||||
"""
|
||||
if self.hypervisor.lower() == 'hyperv' and value == VPC_NETWORK:
|
||||
self.skipTest("cann't be run for {} hypervisor".format(self.hypervisor))
|
||||
|
||||
# List created service offering in setUpClass by name
|
||||
listServiceOfferings = ServiceOffering.list(
|
||||
@ -505,7 +508,7 @@ class TestPathVMLC(cloudstackTestCase):
|
||||
self.fail("Exception while SSHing to VM: %s" % e)
|
||||
return
|
||||
|
||||
@attr(tags=["advanced"], required_hardware="False")
|
||||
@attr(tags=["advanced"], required_hardware="false")
|
||||
def test_01_positive_tests_vm_deploy_shared_nw(self):
|
||||
""" Positive tests for VMLC test path - Advanced Zone in Shared Network
|
||||
|
||||
@ -557,7 +560,7 @@ class TestPathVMLC(cloudstackTestCase):
|
||||
)
|
||||
return
|
||||
|
||||
@attr(tags=["basic"], required_hardware="False")
|
||||
@attr(tags=["basic"], required_hardware="false")
|
||||
def test_01_positive_tests_vm_operations_basic_zone(self):
|
||||
""" Positive tests for VMLC test path - Basic Zone
|
||||
|
||||
@ -719,7 +722,7 @@ class TestPathVMLC(cloudstackTestCase):
|
||||
self.fail("Exception while SSHing to VM: %s" % e)
|
||||
return
|
||||
|
||||
@attr(tags=["advanced"], required_hardware="False")
|
||||
@attr(tags=["advanced"], required_hardware="false")
|
||||
@data(ISOLATED_NETWORK, SHARED_NETWORK, VPC_NETWORK)
|
||||
def test_02_negative_tests_destroy_VM_operations_advanced_zone(
|
||||
self,
|
||||
@ -733,6 +736,8 @@ class TestPathVMLC(cloudstackTestCase):
|
||||
# 4. Try to stop the VM in destroyed state, operation should fail
|
||||
# 5. Try to reboot the VM in destroyed state, operation should fail
|
||||
"""
|
||||
if self.hypervisor.lower() == 'hyperv' and value == VPC_NETWORK:
|
||||
self.skipTest("cann't be run for {} hypervisor".format(self.hypervisor))
|
||||
network = CreateNetwork(self, value)
|
||||
networkid = network.id
|
||||
|
||||
@ -769,7 +774,7 @@ class TestPathVMLC(cloudstackTestCase):
|
||||
|
||||
return
|
||||
|
||||
@attr(tags=["basic"], required_hardware="False")
|
||||
@attr(tags=["basic"], required_hardware="false")
|
||||
def test_02_negative_tests_destroy_VM_operations_basic_zone(self):
|
||||
""" Negative tests for VMLC test path - destroy VM
|
||||
|
||||
@ -812,7 +817,7 @@ class TestPathVMLC(cloudstackTestCase):
|
||||
|
||||
return
|
||||
|
||||
@attr(tags=["advanced"], required_hardware="False")
|
||||
@attr(tags=["advanced"], required_hardware="false")
|
||||
@data(ISOLATED_NETWORK, SHARED_NETWORK, VPC_NETWORK)
|
||||
def test_03_negative_tests_expunge_VM_operations_advanced_zone(
|
||||
self,
|
||||
@ -827,6 +832,9 @@ class TestPathVMLC(cloudstackTestCase):
|
||||
# 6. Try to destroy the VM in expunging state, operation should fail
|
||||
# 7. Try to recover the VM in expunging state, operation should fail
|
||||
"""
|
||||
|
||||
if self.hypervisor.lower() == 'hyperv' and value == VPC_NETWORK:
|
||||
self.skipTest("cann't be run for {} hypervisor".format(self.hypervisor))
|
||||
network = CreateNetwork(self, value)
|
||||
networkid = network.id
|
||||
|
||||
@ -867,7 +875,7 @@ class TestPathVMLC(cloudstackTestCase):
|
||||
|
||||
return
|
||||
|
||||
@attr(tags=["basic"], required_hardware="False")
|
||||
@attr(tags=["basic"], required_hardware="false")
|
||||
def test_03_negative_tests_expunge_VM_operations_basic_zone(self):
|
||||
""" Negative tests for VMLC test path - expunge VM
|
||||
|
||||
|
||||
@ -37,7 +37,8 @@ from marvin.lib.utils import cleanup_resources, validateList
|
||||
from marvin.lib.common import (get_zone,
|
||||
get_domain,
|
||||
get_template,
|
||||
list_virtual_machines)
|
||||
list_virtual_machines,
|
||||
find_storage_pool_type)
|
||||
from nose.plugins.attrib import attr
|
||||
import os
|
||||
import urllib
|
||||
@ -235,7 +236,7 @@ class TestPathVolume(cloudstackTestCase):
|
||||
"advancedsg",
|
||||
"basic",
|
||||
],
|
||||
required_hardware="false")
|
||||
required_hardware="True")
|
||||
def test_01_positive_path(self):
|
||||
"""
|
||||
positive test for volume life cycle
|
||||
@ -819,7 +820,7 @@ class TestPathVolume(cloudstackTestCase):
|
||||
"advancedsg",
|
||||
"basic",
|
||||
],
|
||||
required_hardware="false")
|
||||
required_hardware="True")
|
||||
def test_02_negative_path(self):
|
||||
"""
|
||||
negative test for volume life cycle
|
||||
|
||||
@ -70,38 +70,28 @@ function install_packages() {
|
||||
openjdk-7-jre-headless \
|
||||
iptables-persistent \
|
||||
libtcnative-1 libssl-dev libapr1-dev \
|
||||
open-vm-tools \
|
||||
python-flask \
|
||||
haproxy \
|
||||
radvd \
|
||||
sharutils
|
||||
|
||||
${apt_get} -t wheezy-backports install irqbalance
|
||||
${apt_get} -t wheezy-backports install irqbalance open-vm-tools
|
||||
|
||||
# hold on installed openswan version, upgrade rest of the packages (if any)
|
||||
apt-mark hold openswan
|
||||
apt-get update
|
||||
apt-get -y --force-yes upgrade
|
||||
|
||||
# commented out installation of vmware-tools as we are using the open source open-vm-tools:
|
||||
# ${apt_get} install build-essential linux-headers-`uname -r`
|
||||
# df -h
|
||||
# PREVDIR=$PWD
|
||||
# cd /opt
|
||||
# wget http://people.apache.org/~bhaisaab/cloudstack/VMwareTools-9.2.1-818201.tar.gz
|
||||
# tar xzf VMwareTools-9.2.1-818201.tar.gz
|
||||
# rm VMwareTools-*.tar.gz
|
||||
# cd vmware-tools-distrib
|
||||
# ./vmware-install.pl -d
|
||||
# cd $PREV
|
||||
# rm -fr /opt/vmware-tools-distrib
|
||||
# apt-get -q -y --force-yes purge build-essential
|
||||
|
||||
# Hyperv kvp daemon - 64bit only
|
||||
if [ "${arch}" == "amd64" ]; then
|
||||
# Hyperv kvp daemon - 64bit only
|
||||
# Download the hv kvp daemon
|
||||
wget http://people.apache.org/~rajeshbattala/hv-kvp-daemon_3.1_amd64.deb
|
||||
dpkg -i hv-kvp-daemon_3.1_amd64.deb
|
||||
rm -f hv-kvp-daemon_3.1_amd64.deb
|
||||
# XS tools
|
||||
wget https://raw.githubusercontent.com/bhaisaab/cloudstack-nonoss/master/xe-guest-utilities_6.5.0_amd64.deb
|
||||
dpkg -i xe-guest-utilities_6.5.0_amd64.deb
|
||||
rm -f xe-guest-utilities_6.5.0_amd64.deb
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
4
tools/devcloud4/.gitignore
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
tmp
|
||||
cookbooks
|
||||
*.lock
|
||||
.vagrant
|
||||
101
tools/devcloud4/README.md
Normal file
@ -0,0 +1,101 @@
|
||||
# Devcloud 4

## Introduction

The following project aims to simplify getting a full Apache CloudStack environment running on your machine. You can either take the easy ride and run `vagrant up` in one of the 'binary installation' directories, or compile CloudStack yourself. See the instructions in the 'basic' and 'advanced' directories.

The included Vagrantfile will give you:

- Management
- NFS Server
- MySQL Server
- Router
- *CloudStack Management Server* (only included in the binary installation)

- XenServer 6.2

## Getting started

1. Due to the large amount of data to be pulled from the Internet, it's probably not a good idea to do this over WiFi or mobile data.

1. Given the number of virtual machines this brings up, it is recommended you have at least 8 GB of RAM before attempting this.
|
||||
|
||||
1. Ensure your system has `git` installed.
|
||||
|
||||
1. When on Windows, make sure you've set the git option `autocrlf` to `false`:
|
||||
|
||||
```
|
||||
git config --global core.autocrlf false
|
||||
```
|
||||
|
||||
1. Clone the repository:
|
||||
|
||||
```
|
||||
git clone https://github.com/imduffy15/devcloud4.git
|
||||
```
|
||||
|
||||
1. Download and Install [VirtualBox](https://www.virtualbox.org/wiki/Downloads)
|
||||
|
||||
On Windows 7, the XenServer VM crashed immediately after booting with a General Protection Fault.
Installing VirtualBox version 4.3.6r91406 (https://www.virtualbox.org/wiki/Download_Old_Builds_4_3) fixed the problem, but only downgrade if the latest version does not work for you.
|
||||
|
||||
1. Download and install [Vagrant](https://www.vagrantup.com/downloads.html)
|
||||
|
||||
1. Ensure all Vagrant Plugins are installed:
|
||||
|
||||
```bash
|
||||
vagrant plugin install vagrant-berkshelf vagrant-omnibus
|
||||
```
|
||||
|
||||
1. Download and install [ChefDK](https://downloads.chef.io/chef-dk/)
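
Before moving on, it can help to confirm the prerequisites above are actually installed and on the PATH. A minimal sanity check, assuming the standard command-line entry points for each tool:

```bash
# Verify the tools required by devcloud4 are available
git --version
VBoxManage --version
vagrant --version
vagrant plugin list | grep -E 'vagrant-(berkshelf|omnibus)'
chef --version   # installed as part of ChefDK
```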
|
||||
|
||||
### Configure virtualbox

1. Open VirtualBox and navigate to its preferences/settings window.

1. Click onto the network tab and then onto the host only network tab.

1. Configure your adapters as follows:

- On Windows, the adapter names are different, and map as follows:
  - vboxnet0: VirtualBox Host-Only Ethernet Adapter
  - vboxnet1: VirtualBox Host-Only Ethernet Adapter 2
  - vboxnet2: VirtualBox Host-Only Ethernet Adapter 3

#### For Basic Networking you only need:

##### vboxnet0
- IPv4 IP address of 192.168.22.1
- Subnet of 255.255.255.0
- DHCP server disabled

#### For Advanced Networking you will need:

##### vboxnet1
- IPv4 IP address of 192.168.23.1
- Subnet of 255.255.255.0
- DHCP server disabled

##### vboxnet2
- IPv4 IP address of 192.168.24.1
- Subnet of 255.255.255.0
- DHCP server disabled
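
The same host-only networks can also be created from the command line rather than through the GUI. This is only a sketch, assuming VBoxManage names the adapters vboxnet0, vboxnet1 and vboxnet2 in creation order; adjust for your VirtualBox version:

```bash
# Create three host-only adapters and assign the addresses listed above
for i in 0 1 2; do VBoxManage hostonlyif create; done

VBoxManage hostonlyif ipconfig vboxnet0 --ip 192.168.22.1 --netmask 255.255.255.0
VBoxManage hostonlyif ipconfig vboxnet1 --ip 192.168.23.1 --netmask 255.255.255.0
VBoxManage hostonlyif ipconfig vboxnet2 --ip 192.168.24.1 --netmask 255.255.255.0

# Remove any DHCP server VirtualBox attached to these adapters
for n in vboxnet0 vboxnet1 vboxnet2; do
    VBoxManage dhcpserver remove --ifname "$n" 2>/dev/null || true
done
```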
|
||||
|
||||
## Defaults
|
||||
|
||||
### Management Server
|
||||
|
||||
- IP: 192.168.22.5
|
||||
- Username: vagrant or root
|
||||
- Password: vagrant
|
||||
|
||||
### Hypervisor
|
||||
|
||||
- IP: 192.168.22.10
|
||||
- Username: root
|
||||
- Password: password
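
For a quick check that the boxes came up, the defaults above can be used directly over SSH (a hypothetical smoke test, not part of the provisioning):

```bash
# Log in with the default credentials listed above
ssh vagrant@192.168.22.5   # management server, password: vagrant
ssh root@192.168.22.10     # XenServer host, password: password
```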
|
||||
|
||||
27
tools/devcloud4/advanced/Berksfile
Normal file
@ -0,0 +1,27 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
source "https://api.berkshelf.com"
|
||||
|
||||
cookbook 'hostname'
|
||||
cookbook 'selinux'
|
||||
cookbook 'nat-router', git: 'http://github.com/imduffy15/cookbook_nat-router'
|
||||
cookbook 'cloudstack', git: 'https://github.com/imduffy15/cookbook_cloudstack-1'
|
||||
cookbook 'development-installation', path: '../common/development-installation'
|
||||
cookbook 'python', git: 'https://github.com/imduffy15/python.git'
|
||||
95
tools/devcloud4/advanced/README.md
Normal file
@ -0,0 +1,95 @@
|
||||
### Configure virtualbox
|
||||
|
||||
1. Open virtualbox and navigate to its preferences/settings window.
|
||||
|
||||
1. Click onto the network tab and then onto the host only network tab.
|
||||
|
||||
1. Configure your adapters as follows:
|
||||
|
||||
##### vboxnet0
|
||||
- IPv4 IP address of 192.168.22.1
|
||||
- Subnet of 255.255.255.0
|
||||
- DHCP server disabled
|
||||
|
||||
##### vboxnet1
|
||||
- IPv4 IP address of 192.168.23.1
|
||||
- Subnet of 255.255.255.0
|
||||
- DHCP server disabled
|
||||
|
||||
##### vboxnet2
|
||||
- IPv4 IP address of 192.168.24.1
|
||||
- Subnet of 255.255.255.0
|
||||
- DHCP server disabled
|
||||
|
||||
|
||||
### Start the vagrant boxes
|
||||
|
||||
```bash
|
||||
vagrant up
|
||||
```
|
||||
|
||||
**Common issues:**
|
||||
|
||||
- 'Cannot forward the specified ports on this VM': There could be MySQL or some other
|
||||
service running on the host OS causing vagrant to fail setting up local port forwarding.
|
||||
|
||||
|
||||
### Start Cloudstack
|
||||
|
||||
1. Clone the Cloudstack Repository:
|
||||
|
||||
```
|
||||
git clone https://github.com/apache/cloudstack.git
|
||||
```
|
||||
|
||||
**Note:**
|
||||
|
||||
Personally I prefer to use the 4.3 codebase rather than master. If you wish to do the same:
|
||||
|
||||
```
|
||||
git reset --hard 0810029
|
||||
```
|
||||
|
||||
1. Download vhd-util:
|
||||
|
||||
```bash
|
||||
cd /path/to/cloudstack/repo
|
||||
wget http://download.cloud.com.s3.amazonaws.com/tools/vhd-util -P scripts/vm/hypervisor/xenserver/
|
||||
chmod +x scripts/vm/hypervisor/xenserver/vhd-util
|
||||
```
|
||||
|
||||
1. Compile Cloudstack:
|
||||
|
||||
```bash
|
||||
cd /path/to/cloudstack/repo
|
||||
mvn -P developer,systemvm clean install -DskipTests=true
|
||||
```
|
||||
|
||||
1. Deploy Cloudstack Database:
|
||||
|
||||
```bash
|
||||
cd /path/to/cloudstack/repo
|
||||
mvn -P developer -pl developer,tools/devcloud4 -Ddeploydb
|
||||
```
|
||||
|
||||
1. Start Cloudstack:
|
||||
|
||||
```bash
|
||||
cd /path/to/cloudstack/repo
|
||||
mvn -pl :cloud-client-ui jetty:run
|
||||
```
|
||||
|
||||
1. Install Marvin:
|
||||
|
||||
```
|
||||
cd /path/to/cloudstack/repo
|
||||
pip install tools/marvin/dist/Marvin-0.1.0.tar.gz
|
||||
```
|
||||
|
||||
1. Deploy:
|
||||
|
||||
```
|
||||
python -m marvin.deployDataCenter -i marvin.cfg
|
||||
```
|
||||
|
||||
|
||||
115
tools/devcloud4/advanced/Vagrantfile
vendored
Normal file
@ -0,0 +1,115 @@
|
||||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
VAGRANTFILE_API_VERSION = '2'
|
||||
|
||||
Vagrant.require_version '>= 1.5.0'
|
||||
|
||||
unless Vagrant.has_plugin?('vagrant-berkshelf')
|
||||
raise 'vagrant-berkshelf is not installed!'
|
||||
end
|
||||
|
||||
unless Vagrant.has_plugin?('vagrant-omnibus')
|
||||
raise 'vagrant-omnibus is not installed!'
|
||||
end
|
||||
|
||||
xenserver_networking_script = File.join(File.dirname(__FILE__), '../common/', 'configure-network.sh')
|
||||
|
||||
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
|
||||
|
||||
config.vm.define 'xenserver' do |xenserver|
|
||||
xenserver.vm.box = 'duffy/xenserver'
|
||||
|
||||
# Public Network (IP address is ignored.)
|
||||
xenserver.vm.network :private_network, :auto_config => false, :ip => '192.168.23.10'
|
||||
|
||||
# Guest Network (IP address is ignored.)
|
||||
xenserver.vm.network :private_network, :auto_config => false, :ip => '192.168.24.10'
|
||||
|
||||
# Configure Interfaces
|
||||
|
||||
## Configure Management Interface
|
||||
xenserver.vm.provision 'shell' do |s|
|
||||
s.path = xenserver_networking_script
|
||||
s.args = %w(eth1 192.168.22.10 255.255.255.0 MGMT)
|
||||
end
|
||||
|
||||
## Configure Public Interface
|
||||
xenserver.vm.provision 'shell' do |s|
|
||||
s.path = xenserver_networking_script
|
||||
s.args = %w(eth2 na na PUBLIC)
|
||||
end
|
||||
|
||||
## Configure Guest Interface
|
||||
xenserver.vm.provision 'shell' do |s|
|
||||
s.path = xenserver_networking_script
|
||||
s.args = %w(eth3 na na GUEST)
|
||||
end
|
||||
|
||||
## Tweak kernel
|
||||
xenserver.vm.provision "shell", inline: "sed -i -e 's/net.bridge.bridge-nf-call-iptables = 1/net.bridge.bridge-nf-call-iptables = 0/g' -e 's/net.bridge.bridge-nf-call-arptables = 1/net.bridge.bridge-nf-call-arptables = 0/g' /etc/sysctl.conf && /sbin/sysctl -p /etc/sysctl.conf"
|
||||
|
||||
## Map host only networks and the adapters
|
||||
xenserver.vm.provider 'virtualbox' do |v|
|
||||
v.customize ['modifyvm', :id, '--nicpromisc2', 'allow-all']
|
||||
v.customize ['modifyvm', :id, '--nicpromisc3', 'allow-all']
|
||||
v.customize ['modifyvm', :id, '--nicpromisc4', 'allow-all']
|
||||
v.customize ['modifyvm', :id, '--hostonlyadapter2', 'vboxnet0']
|
||||
v.customize ['modifyvm', :id, '--hostonlyadapter3', 'vboxnet1']
|
||||
v.customize ['modifyvm', :id, '--hostonlyadapter4', 'vboxnet2']
|
||||
v.customize ["modifyvm", :id, '--nictype2', 'Am79C973']
|
||||
v.customize ["modifyvm", :id, '--nictype3', 'Am79C973']
|
||||
v.customize ["modifyvm", :id, '--nictype4', 'Am79C973']
|
||||
end
|
||||
end
|
||||
|
||||
config.vm.define 'management' do |management|
|
||||
management.vm.box = 'chef/centos-6.5'
|
||||
|
||||
# Configure management interface
|
||||
management.vm.network :private_network, :auto_config => true, :ip => '192.168.22.5'
|
||||
|
||||
# Configure public interface
|
||||
management.vm.network :private_network, :auto_config => true, :ip => '192.168.23.5'
|
||||
|
||||
# Port forward MySQL
|
||||
management.vm.network 'forwarded_port', guest: 3306, host: 3306
|
||||
|
||||
management.vm.provider 'virtualbox' do |v|
|
||||
v.customize ['modifyvm', :id, '--memory', 512]
|
||||
v.customize ['modifyvm', :id, '--hostonlyadapter2', 'vboxnet0']
|
||||
v.customize ['modifyvm', :id, '--hostonlyadapter3', 'vboxnet1']
|
||||
v.customize ["modifyvm", :id, '--nictype2', 'Am79C973']
|
||||
v.customize ["modifyvm", :id, '--nictype3', 'Am79C973']
|
||||
end
|
||||
|
||||
management.omnibus.chef_version = "11.16.4"
|
||||
management.berkshelf.berksfile_path = File.join(File.dirname(__FILE__), 'Berksfile')
|
||||
management.berkshelf.enabled = true
|
||||
|
||||
CHEF_CONFIGURATION = JSON.parse(Pathname(__FILE__).dirname.join('chef_configuration.json').read)
|
||||
|
||||
management.vm.provision 'chef_solo' do |chef|
|
||||
chef.run_list = CHEF_CONFIGURATION.delete('run_list')
|
||||
chef.json = CHEF_CONFIGURATION
|
||||
end
|
||||
end
|
||||
end
|
||||
24
tools/devcloud4/advanced/chef_configuration.json
Normal file
@ -0,0 +1,24 @@
|
||||
{
|
||||
"run_list": [
|
||||
"recipe[development-installation]",
|
||||
"recipe[nat-router]"
|
||||
],
|
||||
"iptables": {
|
||||
"lans": ["eth1", "eth2"]
|
||||
},
|
||||
"set_fqdn": "*.localdomain",
|
||||
"selinux": {
|
||||
"state": "permissive"
|
||||
},
|
||||
"cloudstack": {
|
||||
"secondary": {
|
||||
"path": "/exports/secondary"
|
||||
},
|
||||
"primary": {
|
||||
"path": "/exports/primary"
|
||||
},
|
||||
"hypervisor_tpl": {
|
||||
"xenserver": "http://jenkins.buildacloud.org/job/build-systemvm64-master/lastSuccessfulBuild/artifact/tools/appliance/dist/systemvm64template-master-4.6.0-xen.vhd.bz2"
|
||||
}
|
||||
}
|
||||
}
|
||||
124
tools/devcloud4/advanced/marvin.cfg
Normal file
@ -0,0 +1,124 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
{
|
||||
"zones": [
|
||||
{
|
||||
"name": "DevCloud-Advanced-01",
|
||||
"guestcidraddress": "10.1.1.0/24",
|
||||
"localstorageenabled": true,
|
||||
"dns1": "8.8.8.8",
|
||||
"physical_networks": [
|
||||
{
|
||||
"broadcastdomainrange": "Zone",
|
||||
"vlan": "100-200",
|
||||
"name": "DevCloud-Network-01",
|
||||
"traffictypes": [
|
||||
{
|
||||
"xen": "GUEST",
|
||||
"typ": "Guest"
|
||||
},
|
||||
{
|
||||
"xen": "MGMT",
|
||||
"typ": "Management"
|
||||
},
|
||||
{
|
||||
"xen": "PUBLIC",
|
||||
"typ": "Public"
|
||||
}
|
||||
],
|
||||
"providers": [
|
||||
{
|
||||
"broadcastdomainrange": "ZONE",
|
||||
"name": "VirtualRouter"
|
||||
},
|
||||
{
|
||||
"broadcastdomainrange": "ZONE",
|
||||
"name": "VpcVirtualRouter"
|
||||
},
|
||||
{
|
||||
"broadcastdomainrange": "ZONE",
|
||||
"name": "InternalLbVm"
|
||||
}
|
||||
],
|
||||
"isolationmethods": [
|
||||
"VLAN"
|
||||
]
|
||||
}
|
||||
],
|
||||
"ipranges": [
|
||||
{
|
||||
"startip": "192.168.23.100",
|
||||
"endip": "192.168.23.120",
|
||||
"netmask": "255.255.255.0",
|
||||
"vlan": "untagged",
|
||||
"gateway": "192.168.23.5"
|
||||
}
|
||||
],
|
||||
"networktype": "Advanced",
|
||||
"pods": [
|
||||
{
|
||||
"startip": "192.168.22.100",
|
||||
"endip": "192.168.22.120",
|
||||
"name": "DevCloud-POD-01",
|
||||
"netmask": "255.255.255.0",
|
||||
"clusters": [
|
||||
{
|
||||
"clustername": "DevCloud-CLUSTER-01",
|
||||
"hypervisor": "XenServer",
|
||||
"hosts": [
|
||||
{
|
||||
"username": "root",
|
||||
"url": "http://192.168.22.10/",
|
||||
"password": "password"
|
||||
}
|
||||
],
|
||||
"clustertype": "CloudManaged"
|
||||
}
|
||||
],
|
||||
"gateway": "192.168.22.5"
|
||||
}
|
||||
],
|
||||
"internaldns1": "8.8.8.8",
|
||||
"secondaryStorages": [
|
||||
{
|
||||
"url": "nfs://192.168.22.5/exports/secondary",
|
||||
"provider": "NFS",
|
||||
"details": [ ]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"logger": {
|
||||
"LogFolderPath": "/tmp/"
|
||||
},
|
||||
"mgtSvr": [
|
||||
{
|
||||
"mgtSvrIp": "192.168.22.1",
|
||||
"port": 8096
|
||||
}
|
||||
],
|
||||
"dbSvr": {
|
||||
"dbSvr": "127.0.0.1",
|
||||
"port": 3306,
|
||||
"user": "cloud",
|
||||
"passwd": "cloud",
|
||||
"db": "cloud"
|
||||
}
|
||||
}
|
||||
27
tools/devcloud4/basic/Berksfile
Normal file
@ -0,0 +1,27 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
source "https://api.berkshelf.com"
|
||||
|
||||
cookbook 'hostname'
|
||||
cookbook 'selinux'
|
||||
cookbook 'nat-router', git: 'http://github.com/imduffy15/cookbook_nat-router'
|
||||
cookbook 'cloudstack', git: 'https://github.com/imduffy15/cookbook_cloudstack-1'
|
||||
cookbook 'development-installation', path: '../common/development-installation'
|
||||
cookbook 'python', git: 'https://github.com/imduffy15/python.git'
|
||||
83
tools/devcloud4/basic/README.md
Normal file
@ -0,0 +1,83 @@
|
||||
### Configure virtualbox
|
||||
|
||||
1. Open virtualbox and navigate to its preferences/settings window.
|
||||
|
||||
1. Click onto the network tab and then onto the host only network tab.
|
||||
|
||||
1. Configure your adapters as follows:
|
||||
|
||||
##### vboxnet0
|
||||
- IPv4 IP address of 192.168.22.1
|
||||
- Subnet of 255.255.255.0
|
||||
- DHCP server disabled
|
||||
|
||||
|
||||
### Start the vagrant boxes
|
||||
|
||||
```bash
|
||||
vagrant up
|
||||
```
|
||||
|
||||
**Common issues:**
|
||||
|
||||
- 'Cannot forward the specified ports on this VM': There could be MySQL or some other
|
||||
service running on the host OS causing vagrant to fail setting up local port forwarding.
|
||||
|
||||
|
||||
### Start Cloudstack
|
||||
|
||||
1. Clone the Cloudstack Repository:
|
||||
|
||||
```
|
||||
git clone https://github.com/apache/cloudstack.git
|
||||
```
|
||||
|
||||
**Note:**
|
||||
|
||||
Personally I prefer to use the 4.3 codebase rather than master. If you wish to do the same:
|
||||
|
||||
```
|
||||
git reset --hard 0810029
|
||||
```
|
||||
|
||||
1. Download vhd-util:
|
||||
|
||||
```bash
|
||||
cd /path/to/cloudstack/repo
|
||||
wget http://download.cloud.com.s3.amazonaws.com/tools/vhd-util -P scripts/vm/hypervisor/xenserver/
|
||||
chmod +x scripts/vm/hypervisor/xenserver/vhd-util
|
||||
```
|
||||
|
||||
1. Compile Cloudstack:
|
||||
|
||||
```bash
|
||||
cd /path/to/cloudstack/repo
|
||||
mvn -P developer,systemvm clean install -DskipTests=true
|
||||
```
|
||||
|
||||
1. Deploy Cloudstack Database:
|
||||
|
||||
```bash
|
||||
cd /path/to/cloudstack/repo
|
||||
mvn -P developer -pl developer,tools/devcloud4 -Ddeploydb
|
||||
```
|
||||
|
||||
1. Start Cloudstack:
|
||||
|
||||
```bash
|
||||
cd /path/to/cloudstack/repo
|
||||
mvn -pl :cloud-client-ui jetty:run
|
||||
```
|
||||
|
||||
1. Install Marvin:
|
||||
|
||||
```
|
||||
cd /path/to/cloudstack/repo
|
||||
pip install tools/marvin/dist/Marvin-4.6.0-SNAPSHOT.tar.gz --allow-external mysql-connector-python
|
||||
```
|
||||
|
||||
1. Deploy:
|
||||
|
||||
```
|
||||
python -m marvin.deployDataCenter -i marvin.cfg
|
||||
```
|
||||
81
tools/devcloud4/basic/Vagrantfile
vendored
Normal file
@ -0,0 +1,81 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

VAGRANTFILE_API_VERSION = '2'

Vagrant.require_version '>= 1.5.0'

unless Vagrant.has_plugin?('vagrant-berkshelf')
  raise 'vagrant-berkshelf is not installed!'
end

unless Vagrant.has_plugin?('vagrant-omnibus')
  raise 'vagrant-omnibus is not installed!'
end

xenserver_networking_script = File.join(File.dirname(__FILE__), '../common/', 'configure-network.sh')

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|

  config.vm.define 'xenserver' do |xenserver|
    xenserver.vm.box = 'duffy/xenserver'

    ## Map host only networks and the adapters
    xenserver.vm.provider 'virtualbox' do |v|
      v.customize ['modifyvm', :id, '--hostonlyadapter2', 'vboxnet0']
      v.customize ['modifyvm', :id, '--nicpromisc2', 'allow-all']
      v.customize ["modifyvm", :id, '--nictype2', 'Am79C973']
    end

    # Configure Interface
    ## Configure Management Interface
    xenserver.vm.provision 'shell' do |s|
      s.path = xenserver_networking_script
      s.args = %w(eth1 192.168.22.10 255.255.255.0 MGMT)
    end

  end

  config.vm.define 'management' do |management|
    management.vm.box = 'chef/centos-6.5'

    management.vm.network :private_network, :auto_config => true, :ip => '192.168.22.5'

    management.vm.network 'forwarded_port', guest: 3306, host: 3306

    management.vm.provider 'virtualbox' do |v|
      v.customize ['modifyvm', :id, '--memory', 512]
      v.customize ['modifyvm', :id, '--hostonlyadapter2', 'vboxnet0']
      v.customize ["modifyvm", :id, '--nictype2', 'Am79C973']
    end

    management.omnibus.chef_version = "11.16.4"
    management.berkshelf.berksfile_path = File.join(File.dirname(__FILE__), 'Berksfile')
    management.berkshelf.enabled = true

    CHEF_CONFIGURATION = JSON.parse(Pathname(__FILE__).dirname.join('chef_configuration.json').read)

    management.vm.provision 'chef_solo' do |chef|
      chef.run_list = CHEF_CONFIGURATION.delete('run_list')
      chef.json = CHEF_CONFIGURATION
    end
  end
end
22
tools/devcloud4/basic/chef_configuration.json
Normal file
@ -0,0 +1,22 @@
{
    "run_list": [
        "recipe[development-installation]",
        "recipe[nat-router]"
    ],
    "set_fqdn": "*.localdomain",
    "selinux": {
        "state": "permissive"
    },
    "cloudstack": {
        "secondary": {
            "path": "/exports/secondary"
        },
        "primary": {
            "path": "/exports/primary"
        },
        "hypervisor_tpl": {
            "xenserver": "http://jenkins.buildacloud.org/job/build-systemvm64-master/lastSuccessfulBuild/artifact/tools/appliance/dist/systemvm64template-master-4.6.0-xen.vhd.bz2"
        }
    }
}
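The Vagrantfile above consumes this file by parsing it, popping `run_list` to drive `chef_solo`, and handing the remaining keys to Chef as node attributes. A rough Python equivalent of that split, included here only to illustrate how the two pieces relate, would be:

```python
# Illustration only: mirror the Vagrantfile's handling of
# chef_configuration.json -- 'run_list' feeds chef_solo's run list,
# everything else becomes node attributes.
import json

with open("chef_configuration.json") as fh:
    chef_configuration = json.load(fh)

run_list = chef_configuration.pop("run_list")   # recipes to converge
attributes = chef_configuration                 # selinux state, storage paths, template URL, ...

print("run_list:", run_list)
print("attribute keys:", sorted(attributes))
```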
110
tools/devcloud4/basic/marvin.cfg
Normal file
@ -0,0 +1,110 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

{
    "zones": [
        {
            "name": "DevCloud-Basic-01",
            "enabled": "True",
            "physical_networks": [
                {
                    "broadcastdomainrange": "Zone",
                    "name": "Devcloud-Network-01",
                    "traffictypes": [
                        {
                            "typ": "Guest"
                        },
                        {
                            "typ": "Management"
                        }
                    ],
                    "providers": [
                        {
                            "broadcastdomainrange": "ZONE",
                            "name": "VirtualRouter"
                        },
                        {
                            "broadcastdomainrange": "Pod",
                            "name": "SecurityGroupProvider"
                        }
                    ]
                }
            ],
            "dns2": "8.8.4.4",
            "dns1": "8.8.8.8",
            "securitygroupenabled": "true",
            "localstorageenabled": "true",
            "networktype": "Basic",
            "pods": [
                {
                    "endip": "192.168.22.220",
                    "name": "DevCloud-POD-01",
                    "startip": "192.168.22.200",
                    "guestIpRanges": [
                        {
                            "startip": "192.168.22.100",
                            "endip": "192.168.22.199",
                            "netmask": "255.255.255.0",
                            "gateway": "192.168.22.5"
                        }
                    ],
                    "netmask": "255.255.255.0",
                    "clusters": [
                        {
                            "clustername": "DevCloud-CLUSTER-01",
                            "hypervisor": "XenServer",
                            "hosts": [
                                {
                                    "username": "root",
                                    "url": "http://192.168.22.10/",
                                    "password": "password"
                                }
                            ],
                            "clustertype": "CloudManaged"
                        }
                    ],
                    "gateway": "192.168.22.5"
                }
            ],
            "internaldns1": "8.8.8.8",
            "secondaryStorages": [
                {
                    "url": "nfs://192.168.22.5/exports/secondary",
                    "provider": "NFS"
                }
            ]
        }
    ],
    "logger": {
        "LogFolderPath": "/tmp/"
    },
    "mgtSvr": [
        {
            "mgtSvrIp": "192.168.22.1",
            "port": 8096
        }
    ],
    "dbSvr": {
        "dbSvr": "127.0.0.1",
        "port": 3306,
        "user": "cloud",
        "passwd": "cloud",
        "db": "cloud"
    }
}
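Note that marvin.cfg keeps the Apache license header as `#` comment lines above a JSON body, so a plain `json.load` on the raw file will fail. The snippet below, purely as an illustration, strips those lines and sanity-checks the parts the deployment depends on; the file name and working directory are assumptions.

```python
# Illustrative pre-flight check for marvin.cfg: drop '#' comment lines,
# parse the remaining JSON, and print the zone and management server
# settings the deployment will use.
import json

def load_marvin_cfg(path):
    with open(path) as fh:
        body = "".join(line for line in fh if not line.lstrip().startswith("#"))
    return json.loads(body)

if __name__ == "__main__":
    cfg = load_marvin_cfg("marvin.cfg")
    zone = cfg["zones"][0]
    mgt = cfg["mgtSvr"][0]
    print("zone:", zone["name"], "network type:", zone["networktype"])
    print("management server:", mgt["mgtSvrIp"], "port:", mgt["port"])
```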