removed code in comments (#11145)

dahn 2025-12-08 16:31:48 +01:00 committed by GitHub
parent b0d74fe00c
commit c81295439f
112 changed files with 52 additions and 901 deletions

View File

@ -1322,7 +1322,6 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
processResponse((Response)request, task.getLink());
} else {
//put the requests from mgt server into another thread pool, as the request may take a longer time to finish. Don't block the NIO main thread pool
//processRequest(request, task.getLink());
requestHandler.submit(new AgentRequestHandler(getType(), getLink(), request));
}
} catch (final ClassNotFoundException e) {

View File

@ -36,5 +36,4 @@ public interface HostStats {
public HostStats getHostStats();
public double getLoadAverage();
// public double getXapiMemoryUsageKBs();
}

View File

@ -108,8 +108,7 @@ public class LbStickinessMethod {
}
public void addParam(String name, Boolean required, String description, Boolean isFlag) {
/* FIXME : UI is breaking if the capability string length is larger , temporarily description is commented out */
// LbStickinessMethodParam param = new LbStickinessMethodParam(name, required, description);
/* is this still a valid comment: FIXME : UI is breaking if the capability string length is larger , temporarily description is commented out */
LbStickinessMethodParam param = new LbStickinessMethodParam(name, required, " ", isFlag);
_paramList.add(param);
return;
@ -133,7 +132,6 @@ public class LbStickinessMethod {
public void setDescription(String description) {
/* FIXME : UI is breaking if the capability string length is larger , temporarily description is commented out */
//this.description = description;
this._description = " ";
}
}

View File

@ -1,4 +1,4 @@
// Licensedname = "listIsoPermissions", to the Apache Software Foundation (ASF) under one
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file

View File

@ -1,4 +1,4 @@
// Licensedname = "listTemplatePermissions", to the Apache Software Foundation (ASF) under one
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file

View File

@ -1,4 +1,4 @@
// Licensedname = "listIsoPermissions", to the Apache Software Foundation (ASF) under one
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file

View File

@ -1,4 +1,4 @@
// Licensedname = "listTemplatePermissions", to the Apache Software Foundation (ASF) under one
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file

View File

@ -27,8 +27,6 @@ import org.apache.cloudstack.api.EntityReference;
import org.apache.cloudstack.network.tls.SslCert;
import com.cloud.serializer.Param;
//import org.apache.cloudstack.api.EntityReference;
@EntityReference(value = SslCert.class)
public class SslCertResponse extends BaseResponse {

View File

@ -78,10 +78,6 @@ public class ScaleVMCmdTest extends TestCase {
scaleVMCmd._responseGenerator = responseGenerator;
UserVmResponse userVmResponse = Mockito.mock(UserVmResponse.class);
//List<UserVmResponse> list = Mockito.mock(UserVmResponse.class);
//list.add(userVmResponse);
//LinkedList<UserVmResponse> mockedList = Mockito.mock(LinkedList.class);
//Mockito.when(mockedList.get(0)).thenReturn(userVmResponse);
List<UserVmResponse> list = new LinkedList<UserVmResponse>();
list.add(userVmResponse);

View File

@ -629,9 +629,6 @@ public class HAProxyConfigurator implements LoadBalancerConfigurator {
}
}
result.addAll(gSection);
// TODO decide under what circumstances these options are needed
// result.add("\tnokqueue");
// result.add("\tnopoll");
result.add(blankLine);
final List<String> dSection = Arrays.asList(defaultsSection);

View File

@ -417,8 +417,6 @@ public class VirtualRoutingResourceTest implements VirtualRouterDeployer {
// FIXME Check the json content
assertEquals(VRScripts.UPDATE_CONFIG, script);
assertEquals(VRScripts.NETWORK_ACL_CONFIG, args);
// assertEquals(args, " -d eth3 -M 01:23:45:67:89:AB -i 192.168.1.1 -m 24 -a Egress:ALL:0:0:192.168.0.1/24-192.168.0.2/24:ACCEPT:," +
// "Ingress:ICMP:0:0:192.168.0.1/24-192.168.0.2/24:DROP:,Ingress:TCP:20:80:192.168.0.1/24-192.168.0.2/24:ACCEPT:,");
break;
case 2:
assertEquals(VRScripts.UPDATE_CONFIG, script);
@ -464,8 +462,6 @@ public class VirtualRoutingResourceTest implements VirtualRouterDeployer {
private void verifyArgs(final SetupGuestNetworkCommand cmd, final String script, final String args) {
// TODO Check the contents of the json file
//assertEquals(script, VRScripts.VPC_GUEST_NETWORK);
//assertEquals(args, " -C -M 01:23:45:67:89:AB -d eth4 -i 10.1.1.2 -g 10.1.1.1 -m 24 -n 10.1.1.0 -s 8.8.8.8,8.8.4.4 -e cloud.test");
}
@Test

View File

@ -61,7 +61,6 @@ public class VmWorkSerializer {
// use java binary serialization instead
//
return JobSerializerHelper.toObjectSerializedString(work);
// return s_gson.toJson(work);
}
public static <T extends VmWork> T deserialize(Class<?> clazz, String workInJsonText) {
@ -69,6 +68,5 @@ public class VmWorkSerializer {
// use java binary serialization instead
//
return (T)JobSerializerHelper.fromObjectSerializedString(workInJsonText);
// return (T)s_gson.fromJson(workInJsonText, clazz);
}
}

View File

@ -1652,7 +1652,6 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
final String reason = shutdown.getReason();
logger.info("Host {} has informed us that it is shutting down with reason {} and detail {}", attache, reason, shutdown.getDetail());
if (reason.equals(ShutdownCommand.Update)) {
// disconnectWithoutInvestigation(attache, Event.UpdateNeeded);
throw new CloudRuntimeException("Agent update not implemented");
} else if (reason.equals(ShutdownCommand.Requested)) {
disconnectWithoutInvestigation(attache, Event.ShutdownRequested);
@ -1753,7 +1752,6 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
} catch (final UnsupportedVersionException e) {
logger.warn(e.getMessage());
// upgradeAgent(task.getLink(), data, e.getReason());
} catch (final ClassNotFoundException e) {
final String message = String.format("Exception occurred when executing tasks! Error '%s'", e.getMessage());
logger.error(message);

View File

@ -965,7 +965,6 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
synchronized (_agentToTransferIds) {
if (!_agentToTransferIds.isEmpty()) {
logger.debug("Found {} agents to transfer", _agentToTransferIds.size());
// for (Long hostId : _agentToTransferIds) {
for (final Iterator<Long> iterator = _agentToTransferIds.iterator(); iterator.hasNext(); ) {
final Long hostId = iterator.next();
final AgentAttache attache = findAttache(hostId);

View File

@ -213,7 +213,6 @@ public class EngineHostDaoImpl extends GenericDaoBase<EngineHostVO, Long> implem
SequenceSearch = createSearchBuilder();
SequenceSearch.and("id", SequenceSearch.entity().getId(), SearchCriteria.Op.EQ);
// SequenceSearch.addRetrieve("sequence", SequenceSearch.entity().getSequence());
SequenceSearch.done();
DirectlyConnectedSearch = createSearchBuilder();

View File

@ -1583,12 +1583,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
vm.addDisk(disk);
}
//if (vm.getType() == VirtualMachine.Type.User && vm.getTemplate().getFormat() == ImageFormat.ISO) {
if (vm.getType() == VirtualMachine.Type.User) {
_tmpltMgr.prepareIsoForVmProfile(vm, dest);
//DataTO dataTO = tmplFactory.getTemplate(vm.getTemplate().getId(), DataStoreRole.Image, vm.getVirtualMachine().getDataCenterId()).getTO();
//DiskTO iso = new DiskTO(dataTO, 3L, null, Volume.Type.ISO);
//vm.addDisk(iso);
}
}

View File

@ -140,20 +140,12 @@ public class ProvisioningServiceImpl implements ProvisioningService {
@Override
public List<PodEntity> listPods() {
/*
* Not in use now, just commented out.
*/
//List<PodEntity> pods = new ArrayList<PodEntity>();
//pods.add(new PodEntityImpl("pod-uuid-1", "pod1"));
//pods.add(new PodEntityImpl("pod-uuid-2", "pod2"));
return null;
}
@Override
public List<ZoneEntity> listZones() {
List<ZoneEntity> zones = new ArrayList<ZoneEntity>();
//zones.add(new ZoneEntityImpl("zone-uuid-1"));
//zones.add(new ZoneEntityImpl("zone-uuid-2"));
return zones;
}

View File

@ -36,7 +36,6 @@ public class ClusterVSMMapDaoImpl extends GenericDaoBase<ClusterVSMMapVO, Long>
final SearchBuilder<ClusterVSMMapVO> VsmSearch;
public ClusterVSMMapDaoImpl() {
//super();
ClusterSearch = createSearchBuilder();
ClusterSearch.and("clusterId", ClusterSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
@ -82,8 +81,6 @@ public class ClusterVSMMapDaoImpl extends GenericDaoBase<ClusterVSMMapVO, Long>
TransactionLegacy txn = TransactionLegacy.currentTxn();
txn.start();
ClusterVSMMapVO cluster = createForUpdate();
//cluster.setClusterId(null);
//cluster.setVsmId(null);
update(id, cluster);

View File

@ -76,7 +76,6 @@ public class VmRulesetLogDaoImpl extends GenericDaoBase<VmRulesetLogVO, Long> im
@Override
public int createOrUpdate(Set<Long> workItems) {
//return createOrUpdateUsingBatch(workItems);
return createOrUpdateUsingMultiInsert(workItems);
}

View File

@ -100,7 +100,6 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
private SearchBuilder<VMTemplateVO> PublicIsoSearch;
private SearchBuilder<VMTemplateVO> UserIsoSearch;
private GenericSearchBuilder<VMTemplateVO, Long> CountTemplatesByAccount;
// private SearchBuilder<VMTemplateVO> updateStateSearch;
private SearchBuilder<VMTemplateVO> AllFieldsSearch;
protected SearchBuilder<VMTemplateVO> ParentTemplateIdSearch;
private SearchBuilder<VMTemplateVO> InactiveUnremovedTmpltSearch;
@ -404,12 +403,6 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
CountTemplatesByAccount.and("state", CountTemplatesByAccount.entity().getState(), SearchCriteria.Op.EQ);
CountTemplatesByAccount.done();
// updateStateSearch = this.createSearchBuilder();
// updateStateSearch.and("id", updateStateSearch.entity().getId(), Op.EQ);
// updateStateSearch.and("state", updateStateSearch.entity().getState(), Op.EQ);
// updateStateSearch.and("updatedCount", updateStateSearch.entity().getUpdatedCount(), Op.EQ);
// updateStateSearch.done();
AllFieldsSearch = createSearchBuilder();
AllFieldsSearch.and("state", AllFieldsSearch.entity().getState(), SearchCriteria.Op.EQ);
AllFieldsSearch.and("accountId", AllFieldsSearch.entity().getAccountId(), SearchCriteria.Op.EQ);

View File

@ -77,8 +77,6 @@ public class Upgrade2214to30 extends Upgrade30xBase {
encryptData(conn);
// drop keys
dropKeysIfExist(conn);
//update template ID for system Vms
//updateSystemVms(conn); This is not required as system template update is handled during 4.2 upgrade
// update domain network ref
updateDomainNetworkRef(conn);
// update networks that use redundant routers to the new network offering

View File

@ -62,7 +62,6 @@ public class Upgrade302to40 extends Upgrade30xBase {
@Override
public void performDataMigration(Connection conn) {
//updateVmWareSystemVms(conn); This is not required as system template update is handled during 4.2 upgrade
correctVRProviders(conn);
correctMultiplePhysicaNetworkSetups(conn);
addHostDetailsUniqueKey(conn);

View File

@ -65,7 +65,6 @@ public class Upgrade304to305 extends Upgrade30xBase {
addVpcProvider(conn);
updateRouterNetworkRef(conn);
fixZoneUsingExternalDevices(conn);
// updateSystemVms(conn);
fixForeignKeys(conn);
encryptClusterDetails(conn);
}
@ -81,54 +80,6 @@ public class Upgrade304to305 extends Upgrade30xBase {
return new InputStream[] {script};
}
private void updateSystemVms(Connection conn) {
PreparedStatement pstmt = null;
ResultSet rs = null;
boolean VMware = false;
try {
pstmt = conn.prepareStatement("select distinct(hypervisor_type) from `cloud`.`cluster` where removed is null");
rs = pstmt.executeQuery();
while (rs.next()) {
if ("VMware".equals(rs.getString(1))) {
VMware = true;
}
}
} catch (SQLException e) {
throw new CloudRuntimeException("Error while iterating through list of hypervisors in use", e);
}
// Just update the VMware system template. Other hypervisor templates are unchanged from previous 3.0.x versions.
logger.debug("Updating VMware System Vms");
try {
//Get 3.0.5 VMware system Vm template Id
pstmt = conn.prepareStatement("select id from `cloud`.`vm_template` where name = 'systemvm-vmware-3.0.5' and removed is null");
rs = pstmt.executeQuery();
if (rs.next()) {
long templateId = rs.getLong(1);
rs.close();
pstmt.close();
// change template type to SYSTEM
pstmt = conn.prepareStatement("update `cloud`.`vm_template` set type='SYSTEM' where id = ?");
pstmt.setLong(1, templateId);
pstmt.executeUpdate();
pstmt.close();
// update template ID of system Vms
pstmt = conn.prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = 'VMware'");
pstmt.setLong(1, templateId);
pstmt.executeUpdate();
pstmt.close();
} else {
if (VMware) {
throw new CloudRuntimeException("3.0.5 VMware SystemVm template not found. Cannot upgrade system Vms");
} else {
logger.warn("3.0.5 VMware SystemVm template not found. VMware hypervisor is not used, so not failing upgrade");
}
}
} catch (SQLException e) {
throw new CloudRuntimeException("Error while updating VMware systemVm template", e);
}
logger.debug("Updating System Vm Template IDs Complete");
}
private void addVpcProvider(Connection conn) {
//Encrypt config params and change category to Hidden
logger.debug("Adding vpc provider to all physical networks in the system");

View File

@ -226,10 +226,6 @@ public class UserAccountVO implements UserAccount, InternalIdentity {
return created;
}
// public void setCreated(Date created) {
// this.created = created;
// }
@Override
public Date getRemoved() {
return removed;

View File

@ -209,10 +209,8 @@ public class VolumeDataStoreVO implements StateObject<ObjectInDataStoreStateMach
public VolumeDataStoreVO(long hostId, long volumeId, Date lastUpdated, int downloadPercent, Status downloadState, String localDownloadPath, String errorString,
String jobId, String installPath, String downloadUrl, String checksum) {
// super();
dataStoreId = hostId;
this.volumeId = volumeId;
// this.zoneId = zoneId;
this.lastUpdated = lastUpdated;
this.downloadPercent = downloadPercent;
this.downloadState = downloadState;

View File

@ -62,7 +62,6 @@ public class StorageCacheReplacementAlgorithmLRU implements StorageCacheReplacem
/* Avoid using configDao at this time, we can't be sure that the database is already upgraded
* and there might be fatal errors when using a dao.
*/
//unusedTimeInterval = NumbersUtil.parseInt(configDao.getValue(Config.StorageCacheReplacementLRUTimeInterval.key()), 30);
}
public void setUnusedTimeInterval(Integer interval) {

View File

@ -87,8 +87,6 @@ import com.cloud.utils.component.ComponentContext;
@ContextConfiguration(locations = {"classpath:/storageContext.xml"})
public class VolumeServiceTest extends CloudStackTestNGBase {
// @Inject
// ImageDataStoreProviderManager imageProviderMgr;
@Inject
TemplateService imageService;
@Inject
@ -232,23 +230,7 @@ public class VolumeServiceTest extends CloudStackTestNGBase {
DataStore store = createImageStore();
VMTemplateVO image = createImageData();
TemplateInfo template = imageDataFactory.getTemplate(image.getId(), store);
// AsyncCallFuture<TemplateApiResult> future =
// imageService.createTemplateAsync(template, store);
// future.get();
template = imageDataFactory.getTemplate(image.getId(), store);
/*
* imageProviderMgr.configure("image Provider", new HashMap<String,
* Object>()); VMTemplateVO image = createImageData();
* ImageDataStoreProvider defaultProvider =
* imageProviderMgr.getProvider("DefaultProvider");
* ImageDataStoreLifeCycle lifeCycle =
* defaultProvider.getLifeCycle(); ImageDataStore store =
* lifeCycle.registerDataStore("defaultHttpStore", new
* HashMap<String, String>());
* imageService.registerTemplate(image.getId(),
* store.getImageDataStoreId()); TemplateEntity te =
* imageService.getTemplateEntity(image.getId()); return te;
*/
return template;
} catch (Exception e) {
Assert.fail("failed", e);
@ -333,30 +315,6 @@ public class VolumeServiceTest extends CloudStackTestNGBase {
ClusterScope scope = new ClusterScope(clusterId, podId, dcId);
lifeCycle.attachCluster(store, scope);
/*
* PrimaryDataStoreProvider provider =
* primaryDataStoreProviderMgr.getDataStoreProvider
* ("sample primary data store provider");
* primaryDataStoreProviderMgr.configure("primary data store mgr",
* new HashMap<String, Object>());
*
* List<PrimaryDataStoreVO> ds =
* primaryStoreDao.findPoolByName(this.primaryName); if (ds.size()
* >= 1) { PrimaryDataStoreVO store = ds.get(0); if
* (store.getRemoved() == null) { return
* provider.getDataStore(store.getId()); } }
*
*
* Map<String, String> params = new HashMap<String, String>();
* params.put("url", this.getPrimaryStorageUrl());
* params.put("dcId", dcId.toString()); params.put("clusterId",
* clusterId.toString()); params.put("name", this.primaryName);
* PrimaryDataStoreInfo primaryDataStoreInfo =
* provider.registerDataStore(params); PrimaryDataStoreLifeCycle lc
* = primaryDataStoreInfo.getLifeCycle(); ClusterScope scope = new
* ClusterScope(clusterId, podId, dcId); lc.attachCluster(scope);
* return primaryDataStoreInfo;
*/
return store;
} catch (Exception e) {
return null;
@ -376,7 +334,6 @@ public class VolumeServiceTest extends CloudStackTestNGBase {
TemplateInfo te = createTemplate();
VolumeVO volume = createVolume(te.getId(), primaryStore.getId());
VolumeInfo vol = volumeFactory.getVolume(volume.getId(), primaryStore);
// ve.createVolumeFromTemplate(primaryStore.getId(), new VHD(), te);
AsyncCallFuture<VolumeApiResult> future = volumeService.createVolumeFromTemplateAsync(vol, primaryStore.getId(), te);
try {
future.get();

View File

@ -237,7 +237,6 @@ public class VMSnapshotStrategyKVMTest extends TestCase{
when(vol.getDataStore()).thenReturn(dataStore);
when(snapshotVO.getId()).thenReturn(1L);
when(_snapshotService.revertSnapshot(snapshotVO.getId())).thenReturn(snap);
// testFindSnapshotByName(name);
vmStrategy.revertDiskSnapshot(vmSnapshot);
}

View File

@ -722,7 +722,6 @@ public class VolumeServiceImpl implements VolumeService {
VolumeApiResult res = new VolumeApiResult(volumeInfo);
if (result.isSuccess()) {
// volumeInfo.processEvent(Event.OperationSuccessed, result.getAnswer());
VolumeVO volume = volDao.findById(volumeInfo.getId());
CopyCmdAnswer answer = (CopyCmdAnswer)result.getAnswer();

View File

@ -27,22 +27,23 @@ import org.apache.logging.log4j.LogManager;
import com.cloud.utils.Profiler;
//
// Wrapper class for global database lock to reduce contention for database connections from within process
//
// Example of using dynamic named locks
//
// GlobalLock lock = GlobalLock.getInternLock("some table name" + rowId);
//
// if(lock.lock()) {
// try {
// do something
// } finally {
// lock.unlock();
// }
// }
// lock.releaseRef();
//
/**
* Wrapper class for global database lock to reduce contention for database connections from within process
* This class is used to acquire a global lock for a specific operation, identified by a unique name.
* Example of using dynamic named locks
* <p>
* GlobalLock lock = GlobalLock.getInternLock("some table name" + rowId);
*
* if(lock.lock()) {
* try {
* do something
* } finally {
* lock.unlock();
* }
* }
* lock.releaseRef();
* </p>
*/
public class GlobalLock {
protected Logger logger = LogManager.getLogger(getClass());

View File

@ -41,9 +41,6 @@ public class TransactionContextBuilderTest {
@Test
public void test() {
// _derived.DbAnnotatedMethod();
// _base.MethodWithClassDbAnnotated();
// test @DB injection on dynamically constructed objects
DbAnnotatedBase base = ComponentContext.inject(new DbAnnotatedBase());
base.MethodWithClassDbAnnotated();

View File

@ -20,6 +20,8 @@ package org.apache.cloudstack.framework.codestyle;
import java.util.concurrent.ExecutionException;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@ -36,6 +38,7 @@ import org.apache.cloudstack.framework.async.AsyncRpcContext;
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(locations = "classpath:/SampleManagementServerAppContext.xml")
public class AsyncSampleEventDrivenStyleCaller {
protected Logger logger = LogManager.getLogger(getClass());
private AsyncSampleCallee _ds;
AsyncCallbackDriver _callbackDriver;
@ -53,12 +56,8 @@ public class AsyncSampleEventDrivenStyleCaller {
try {
String result = future.get();
Assert.assertEquals(result, vol);
} catch (InterruptedException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (ExecutionException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (InterruptedException | ExecutionException e) {
logger.info(e);
}
}
@ -87,10 +86,8 @@ public class AsyncSampleEventDrivenStyleCaller {
if (!this.finished) {
try {
this.wait();
} catch (InterruptedException e) {
// TODO Auto-generated catch block
e.printStackTrace();
logger.info(e);
}
}
return this.result;

View File

@ -44,8 +44,6 @@ public class AsyncJobJoinMapDaoImpl extends GenericDaoBase<AsyncJobJoinMapVO, Lo
private final SearchBuilder<AsyncJobJoinMapVO> CompleteJoinSearch;
private final SearchBuilder<AsyncJobJoinMapVO> WakeupSearch;
// private final GenericSearchBuilder<AsyncJobJoinMapVO, Long> JoinJobSearch;
protected AsyncJobJoinMapDaoImpl() {
RecordSearch = createSearchBuilder();
RecordSearch.and("jobId", RecordSearch.entity().getJobId(), Op.EQ);
@ -65,10 +63,6 @@ public class AsyncJobJoinMapDaoImpl extends GenericDaoBase<AsyncJobJoinMapVO, Lo
WakeupSearch.and("expiration", WakeupSearch.entity().getExpiration(), Op.GT);
WakeupSearch.and("joinStatus", WakeupSearch.entity().getJoinStatus(), Op.EQ);
WakeupSearch.done();
// JoinJobSearch = createSearchBuilder(Long.class);
// JoinJobSearch.and(JoinJobSearch.entity().getJoinJobId(), Op.SC, "joinJobId");
// JoinJobSearch.done();
}
@Override
@ -148,64 +142,6 @@ public class AsyncJobJoinMapDaoImpl extends GenericDaoBase<AsyncJobJoinMapVO, Lo
update(ub, sc, null);
}
// @Override
// public List<Long> wakeupScan() {
// List<Long> standaloneList = new ArrayList<Long>();
//
// Date cutDate = DateUtil.currentGMTTime();
//
// TransactionLegacy txn = TransactionLegacy.currentTxn();
// PreparedStatement pstmt = null;
// try {
// txn.start();
//
// //
// // performance sensitive processing, do it in plain SQL
// //
// String sql = "UPDATE async_job SET job_pending_signals=? WHERE id IN " +
// "(SELECT job_id FROM async_job_join_map WHERE next_wakeup < ? AND expiration > ?)";
// pstmt = txn.prepareStatement(sql);
// pstmt.setInt(1, AsyncJob.Constants.SIGNAL_MASK_WAKEUP);
// pstmt.setString(2, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutDate));
// pstmt.setString(3, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutDate));
// pstmt.executeUpdate();
// pstmt.close();
//
// sql = "UPDATE sync_queue_item SET queue_proc_msid=NULL, queue_proc_number=NULL WHERE content_id IN " +
// "(SELECT job_id FROM async_job_join_map WHERE next_wakeup < ? AND expiration > ?)";
// pstmt = txn.prepareStatement(sql);
// pstmt.setString(1, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutDate));
// pstmt.setString(2, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutDate));
// pstmt.executeUpdate();
// pstmt.close();
//
// sql = "SELECT job_id FROM async_job_join_map WHERE next_wakeup < ? AND expiration > ? AND job_id NOT IN (SELECT content_id FROM sync_queue_item)";
// pstmt = txn.prepareStatement(sql);
// pstmt.setString(1, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutDate));
// pstmt.setString(2, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutDate));
// ResultSet rs = pstmt.executeQuery();
// while(rs.next()) {
// standaloneList.add(rs.getLong(1));
// }
// rs.close();
// pstmt.close();
//
// // update for next wake-up
// sql = "UPDATE async_job_join_map SET next_wakeup=DATE_ADD(next_wakeup, INTERVAL wakeup_interval SECOND) WHERE next_wakeup < ? AND expiration > ?";
// pstmt = txn.prepareStatement(sql);
// pstmt.setString(1, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutDate));
// pstmt.setString(2, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutDate));
// pstmt.executeUpdate();
// pstmt.close();
//
// txn.commit();
// } catch (SQLException e) {
// logger.error("Unexpected exception", e);
// }
//
// return standaloneList;
// }
@Override
public List<Long> findJobsToWake(long joinedJobId) {
// TODO: We should fix this. We shouldn't be crossing daos in a dao code.

View File

@ -193,7 +193,6 @@ public class JobSerializerHelper {
json.add("class", new JsonPrimitive(th.getClass().getName()));
json.add("cause", s_gson.toJsonTree(th.getCause()));
json.add("msg", new JsonPrimitive(th.getMessage()));
// json.add("stack", s_gson.toJsonTree(th.getStackTrace()));
return json;
}

View File

@ -189,7 +189,6 @@ public abstract class APITest {
* @return login response string
*/
protected void login(String username, String password) {
//String md5Psw = createMD5String(password);
// send login request
HashMap<String, String> params = new HashMap<String, String>();
params.put("response", "json");

View File

@ -149,7 +149,6 @@ public class BaremetalDhcpManagerImpl extends ManagerBase implements BaremetalDh
String errMsg =
String.format("Set dhcp entry on external DHCP %1$s failed(ip=%2$s, mac=%3$s, vmname=%4$s)", h.getPrivateIpAddress(), nic.getIPv4Address(),
nic.getMacAddress(), profile.getVirtualMachine().getHostName());
// prepareBareMetalDhcpEntry(nic, dhcpCommand);
try {
Answer ans = _agentMgr.send(h.getId(), dhcpCommand);
if (ans.getResult()) {

View File

@ -23,37 +23,6 @@ import java.util.Map;
public class Test {
public static void main(final String[] args) {
try {
/*Connection c = new Connection("192.168.105.155", "oracle", "password");
Utils util = new UtilsImpl(c);
Storage storage = new StorageImpl(c);
String[] res = util.listDir("/etc", 1);
for (String s : res) {
System.out.println(s);
}
Pool pool = new PoolImpl(c);
//pool.registerServer("192.168.105.155", Pool.ServerType.SITE);
//pool.registerServer("192.168.105.155", Pool.ServerType.UTILITY);
//pool.registerServer("192.168.105.155", Pool.ServerType.XEN);
System.out.println("Is:" + pool.isServerRegistered());
System.out.println(pool.getServerConfig());
System.out.println(pool.getServerXmInfo());
System.out.println(pool.getHostInfo());
System.out.println(pool.getAgentVersion());
String[] srs = storage.listSr();
for (int i=0; i<srs.length; i++) {
System.out.println(srs[i]);
}
String spuuid = storage.createSp(StorageType.OVSSPNFS, "192.168.110.232:/export/frank/nfs");
System.out.println(spuuid);
String sruuid = storage.createSr(spuuid, "hi");
System.out.println(sruuid);
storage.initSr();
Pair<Long, Long> spaceInfo = storage.getSrSpaceInfo("192.168.110.232:/export/frank/nfs");
System.out.println("Total:" + spaceInfo.first());
System.out.println("Free:" + spaceInfo.second());*/
final OvmVm.Details vm = new OvmVm.Details();
vm.cpuNum = 1;
vm.memory = 512;
@ -80,23 +49,7 @@ public class Test {
vm.vifs.add(vif);
vm.vifs.add(vif);
vm.vifs.add(vif);
//System.out.println(vm.toJson());
final Connection c = new Connection("192.168.189.12", "oracle", "password");
//System.out.println(Coder.toJson(OvmHost.getDetails(c)));
/* This is not being used at the moment.
* Coverity issue: 1012179
*/
//OvmHost.Details d = new GsonBuilder().create().fromJson(txt, OvmHost.Details.class);
//OvmHost.Details d = Coder.fromJson(txt, OvmHost.Details.class);
//OvmHost.Details d = OvmHost.getDetails(c);
//System.out.println(Coder.toJson(d));
// OvmStoragePool.Details pool = new OvmStoragePool.Details();
// pool.path = "192.168.110.232:/export/frank/ovs";
// pool.type = OvmStoragePool.NFS;
// pool.uuid = "123";
// System.out.println(pool.toJson());
String cmd = null;
System.out.println(args.length);
@ -119,15 +72,10 @@ public class Test {
System.out.println(d.toJson());
if (cmd.equalsIgnoreCase("create")) {
// String s =
// "{\"cpuNum\":1,\"memory\":512,\"rootDisk\":{\"type\":\"w\",\"path\":\"/var/ovs/mount/60D0985974CA425AAF5D01A1F161CC8B/running_pool/36_systemvm/System.img\"},\"disks\":[],\"vifs\":[{\"mac\":\"00:16:3E:5C:B1:D1\",\"bridge\":\"xenbr0\",\"type\":\"netfront\"}],\"name\":\"MyTest\",\"uuid\":\"1-2-3-4-5\"}";
OvmVm.create(c, d);
// c.call("OvmVm.echo", new Object[]{s});
} else if (cmd.equalsIgnoreCase("reboot")) {
final Map<String, String> res = OvmVm.reboot(c, "MyTest");
System.out.println(res.get("vncPort"));
//OvmVm.stop(c, "MyTest");
//OvmVm.create(c, d);
} else if (cmd.equalsIgnoreCase("stop")) {
OvmVm.stop(c, "MyTest");
} else if (cmd.equalsIgnoreCase("details")) {
@ -166,12 +114,6 @@ public class Test {
l.add("4b4d8951-f0b6-36c5-b4f3-a82ff2611c65");
System.out.println(Coder.toJson(l));
// Map<String, String> res = OvmHost.getPerformanceStats(c, "xenbr0");
// System.out.println(res.toString());
// String stxt = "{\"vifs\": [{\"bridge\": \"xenbr0\", \"mac\": \"00:16:3E:5C:B1:D1\", \"type\": \"netfront\"}], \"powerState\": \"RUNNING\", \"disks\": [], \"cpuNum\": 1, \"memory\": 536870912, \"rootDisk\": {\"path\": \"/var/ovs/mount/60D0985974CA425AAF5D01A1F161CC8B/running_pool/MyTest/System.img\", \"type\": \"w\"}}";
// OvmVm.Details ddd = Coder.fromJson(stxt, OvmVm.Details.class);
// System.out.println(ddd.vifs.size());
// System.out.println(ddd.rootDisk.path);
} catch (final Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();

View File

@ -65,15 +65,6 @@ public class VmwareCleanupMaid {
_vmName = vmName;
}
// @Override
// public int cleanup(CheckPointManager checkPointMgr) {
//
// // save a check-point in case we crash at current run so that we won't lose it
// _checkPoint = checkPointMgr.pushCheckPoint(new VmwareCleanupMaid(_vCenterAddress, _dcMorValue, _vmName));
// addLeftOverVM(this);
// return 0;
// }
public String getCleanupProcedure() {
return null;
}

View File

@ -66,9 +66,7 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase {
@DB
//public CiscoNexusVSMDeviceVO addCiscoNexusVSM(long clusterId, String ipaddress, String username, String password, ServerResource resource, String vsmName) {
public
CiscoNexusVSMDeviceVO addCiscoNexusVSM(long clusterId, String ipaddress, String username, String password, String vCenterIpaddr, String vCenterDcName) {
public CiscoNexusVSMDeviceVO addCiscoNexusVSM(long clusterId, String ipaddress, String username, String password, String vCenterIpaddr, String vCenterDcName) {
// In this function, we associate this VSM with each host
// in the clusterId specified.
@ -154,28 +152,6 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase {
// into each host's resource. Also, we first configure each resource's
// entries in the database to contain this VSM information before the injection.
//for (HostVO host : hosts) {
// Create a host details VO object and write it out for this hostid.
//Long hostid = new Long(vsmId);
//DetailVO vsmDetail = new DetailVO(host.getId(), "vsmId", hostid.toString());
//Transaction tx = Transaction.currentTxn();
//try {
//tx.start();
//_hostDetailDao.persist(vsmDetail);
//tx.commit();
//} catch (Exception e) {
//tx.rollback();
//throw new CloudRuntimeException(e.getMessage());
//}
//}
// Reconfigure the resource.
//Map hostDetails = new HashMap<String, String>();
//hostDetails.put(ApiConstants.ID, vsmId);
//hostDetails.put(ApiConstants.IP_ADDRESS, ipaddress);
//hostDetails.put(ApiConstants.USERNAME, username);
//hostDetails.put(ApiConstants.PASSWORD, password);
//_agentMrg.send(host.getId(), )
return VSMObj;
}

View File

@ -576,37 +576,6 @@ public class CitrixRequestWrapperTest {
fail(e.getMessage());
}
// try {
// PowerMockito.mockStatic(Host.class);
// //BDDMockito.given(Host.getByUuid(conn,
// xsHost.getUuid())).willReturn(host);
// PowerMockito.when(Host.getByUuid(conn,
// xsHost.getUuid())).thenReturn(host);
// PowerMockito.verifyStatic(times(1));
// } catch (final BadServerResponse e) {
// fail(e.getMessage());
// } catch (final XenAPIException e) {
// fail(e.getMessage());
// } catch (final XmlRpcException e) {
// fail(e.getMessage());
// }
//
// PowerMockito.mockStatic(Types.class);
// PowerMockito.when(Types.toHostRecord(spiedMap)).thenReturn(hr);
// PowerMockito.verifyStatic(times(1));
//
// try {
// PowerMockito.mockStatic(Host.Record.class);
// when(host.getRecord(conn)).thenReturn(hr);
// verify(host, times(1)).getRecord(conn);
// } catch (final BadServerResponse e) {
// fail(e.getMessage());
// } catch (final XenAPIException e) {
// fail(e.getMessage());
// } catch (final XmlRpcException e) {
// fail(e.getMessage());
// }
final Answer answer = wrapper.execute(maintainCommand, citrixResourceBase);
assertFalse(answer.getResult());

View File

@ -462,17 +462,6 @@ public class XenServer610WrapperTest {
verify(xenServer610Resource, times(1)).getConnection();
// try {
// verify(xenServer610Resource, times(1)).waitForTask(conn, task, 1000, timeout);
// verify(xenServer610Resource, times(1)).checkForSuccess(conn, task);
// } catch (final XenAPIException e) {
// fail(e.getMessage());
// } catch (final XmlRpcException e) {
// fail(e.getMessage());
// } catch (final TimeoutException e) {
// fail(e.getMessage());
// }
assertFalse(answer.getResult());
}

View File

@ -136,7 +136,6 @@ public class CiscoVnmcConnectionImpl implements CiscoVnmcConnection {
String xml = "";
String line;
while ((line = br.readLine()) != null) {
//xml += line.replaceAll("\n"," ");
xml += line;
}

View File

@ -28,8 +28,6 @@ import com.cloud.utils.component.PluggableService;
public interface CiscoVnmcElementService extends PluggableService {
//public static final Provider CiscoVnmc = new Provider("CiscoVnmc", true);
public CiscoVnmcController addCiscoVnmcResource(AddCiscoVnmcResourceCmd cmd);
public CiscoVnmcResourceResponse createCiscoVnmcResourceResponse(CiscoVnmcController ciscoVnmcResourceVO);

View File

@ -79,10 +79,6 @@ public class ElasticLbVmMapVO implements InternalIdentity {
return elbVmId;
}
// public String getLbName() {
// return lbName;
// }
public long getIpAddressId() {
return ipAddressId;
}

View File

@ -32,6 +32,9 @@ import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.command.admin.vlan.CreateVlanIpRangeCmd;
@ -86,6 +89,7 @@ import com.cloud.vm.VirtualMachineManager;
import com.cloud.vm.dao.UserVmDao;
public class ManagementServerMock {
protected Logger logger = LogManager.getLogger(getClass());
@Inject
private AccountManager _accountMgr;
@ -217,15 +221,7 @@ public class ManagementServerMock {
return null;
}
};
try {
Mockito.when(_agentMgr.send(ArgumentMatchers.anyLong(), ArgumentMatchers.any(Commands.class))).thenAnswer(callback);
} catch (AgentUnavailableException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (OperationTimedoutException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
sendCommands(callback);
long id = _userVmDao.getNextInSequence(Long.class, "id");
UserVmVO vm =
new UserVmVO(id, name, name, tmpl.getId(), HypervisorType.XenServer, tmpl.getGuestOSId(), false, false, _zone.getDomainId(), Account.ACCOUNT_ID_SYSTEM,
@ -239,12 +235,21 @@ public class ManagementServerMock {
try {
_vmMgr.addVmToNetwork(vm, network, profile);
} catch (Exception ex) {
// TODO Auto-generated catch block
//ex.printStackTrace();
// ignored
}
return vm;
}
private void sendCommands(Answer<?> callback) {
try {
Mockito.when(_agentMgr.send(ArgumentMatchers.anyLong(), ArgumentMatchers.any(Commands.class))).thenAnswer(callback);
} catch (AgentUnavailableException e) {
logger.warn("no agent running", e);
} catch (OperationTimedoutException e) {
logger.warn("agent not responding (in time)", e);
}
}
private void deleteHost() {
_hostDao.remove(_hostId);
@ -265,15 +270,7 @@ public class ManagementServerMock {
return null;
}
};
try {
Mockito.when(_agentMgr.send(ArgumentMatchers.anyLong(), ArgumentMatchers.any(Commands.class))).thenAnswer(callback);
} catch (AgentUnavailableException e) {
e.printStackTrace();
} catch (OperationTimedoutException e) {
e.printStackTrace();
}
sendCommands(callback);
_userVmDao.remove(vm.getId());
}

View File

@ -778,60 +778,6 @@ public class TungstenElementTest {
verify(tungstenService, times(1)).deleteManagementNetwork(anyLong());
}
//@Test
//public void processConnectWithoutSecurityGroupTest() throws ConnectionException {
// Host host = mock(Host.class);
// StartupCommand startupCommand = mock(StartupCommand.class);
// TungstenProviderVO tungstenProvider = mock(TungstenProviderVO.class);
// DataCenterVO dataCenterVO = mock(DataCenterVO.class);
// VlanVO vlanVO1 = mock(VlanVO.class);
// VlanVO vlanVO2 = mock(VlanVO.class);
// List<VlanVO> vlanList = Arrays.asList(vlanVO1, vlanVO2);
// Network publicNetwork = mock(Network.class);
// NetworkDetailVO networkDetail = mock(NetworkDetailVO.class);
//
// when(host.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM);
// when(tungstenProviderDao.findByZoneId(anyLong())).thenReturn(tungstenProvider);
// when(host.getPublicIpAddress()).thenReturn("192.168.100.100");
// when(tungstenProvider.getGateway()).thenReturn("192.168.100.100");
// when(dataCenterDao.findById(anyLong())).thenReturn(dataCenterVO);
// when(vlanDao.listByZone(anyLong())).thenReturn(vlanList);
// when(networkModel.getSystemNetworkByZoneAndTrafficType(anyLong(), eq(Networks.TrafficType.Public))).thenReturn(publicNetwork);
// when(networkDetailsDao.findDetail(anyLong(), anyString())).thenReturn(networkDetail);
// when(vlanVO1.getVlanGateway()).thenReturn("192.168.100.1");
// when(vlanVO1.getVlanNetmask()).thenReturn("255.255.255.0");
// when(vlanVO2.getVlanGateway()).thenReturn("192.168.101.1");
// when(vlanVO2.getVlanNetmask()).thenReturn("255.255.255.0");
// when(dataCenterVO.isSecurityGroupEnabled()).thenReturn(false);
//
// tungstenElement.processConnect(host, startupCommand, true);
// verify(agentManager, times(1)).easySend(anyLong(), any(SetupTungstenVRouterCommand.class));
//}
//@Test
//public void processConnectWithSecurityGroupTest() throws ConnectionException {
// Host host = mock(Host.class);
// StartupCommand startupCommand = mock(StartupCommand.class);
// TungstenProviderVO tungstenProvider = mock(TungstenProviderVO.class);
// DataCenterVO dataCenterVO = mock(DataCenterVO.class);
// NetworkVO network = mock(NetworkVO.class);
// NetworkDetailVO networkDetail = mock(NetworkDetailVO.class);
// Network publicNetwork = mock(Network.class);
//
// when(host.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM);
// when(tungstenProviderDao.findByZoneId(anyLong())).thenReturn(tungstenProvider);
// when(host.getPublicIpAddress()).thenReturn("192.168.100.100");
// when(tungstenProvider.getGateway()).thenReturn("192.168.100.100");
// when(dataCenterDao.findById(anyLong())).thenReturn(dataCenterVO);
// when(networkDao.listByZoneSecurityGroup(anyLong())).thenReturn(Arrays.asList(network));
// when(networkDetailsDao.findDetail(anyLong(), anyString())).thenReturn(networkDetail);
// when(networkModel.getSystemNetworkByZoneAndTrafficType(anyLong(), eq(Networks.TrafficType.Public))).thenReturn(publicNetwork);
// when(dataCenterVO.isSecurityGroupEnabled()).thenReturn(true);
//
// tungstenElement.processConnect(host, startupCommand, true);
// verify(agentManager, times(1)).easySend(anyLong(), any(SetupTungstenVRouterCommand.class));
//}
@Test
public void processHostAboutToBeRemovedWithSecurityGroupTest() {
HostVO hostVO = mock(HostVO.class);

View File

@ -175,9 +175,6 @@ public class ElastistorPrimaryDataStoreDriver extends CloudStackPrimaryDataStore
_volumeDao.update(volume.getId(), volume);
// create new volume details for the volume
//updateVolumeDetails(volume, esvolume);
long capacityBytes = storagePool.getCapacityBytes();
long usedBytes = storagePool.getUsedBytes();

View File

@ -341,7 +341,6 @@ public class ElastistorUtil {
String qosgroupid;
String VolumeName = volumeName;
String totaliops = String.valueOf(capacityIops);
//String totalthroughput = String.valueOf(capacityIops * 4);
String totalthroughput = "0";
String quotasize = convertCapacityBytes(capacityBytes);
@ -679,14 +678,6 @@ public class ElastistorUtil {
}
LOGGER.info("tsm id is null");
return false;
/*
* else { LOGGER.error("no volume is present in the tsm"); } } else {
* LOGGER.error(
* "List tsm failed, no tsm present in the eastistor for the given IP "
* ); return false; } return false;
*/
}
public static boolean deleteElastistorVolume(String esvolumeid) throws Throwable {

View File

@ -82,7 +82,6 @@ public class LinstorUtilTest {
mockStoragePool("thinpool", "nodeC", ProviderKind.LVM_THIN)
));
// when(LinstorUtil.getLinstorAPI(LINSTOR_URL_TEST)).thenReturn(api);
}
@Test

View File

@ -187,9 +187,6 @@ public class NexentaPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
NexentaStorAppliance appliance = getNexentaStorAppliance(storagePoolId);
StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);
// _storagePoolDao.update(stoagePoolId);
} else {
errorMessage = String.format(
"Invalid DataObjectType(%s) passed to deleteAsync",

View File

@ -60,7 +60,6 @@ public class NexentaStorApplianceTest {
public void init() {
final String url = "nmsUrl=https://admin:nexenta@10.1.3.182:8457;volume=cloudstack;storageType=iscsi";
NexentaUtil.NexentaPluginParameters parameters = NexentaUtil.parseNexentaPluginUrl(url);
//client = new NexentaNmsClient(parameters.getNmsUrl());
client = mock(NexentaNmsClient.class);
appliance = new NexentaStorAppliance(client, parameters);
}

View File

@ -64,8 +64,6 @@ public class VolumeTest {
DataCenterDao dcDao;
@Inject
PrimaryDataStoreDao primaryStoreDao;
// @Inject
// PrimaryDataStoreProviderManager primaryDataStoreProviderMgr;
@Inject
AgentManager agentMgr;
Long dcId;
@ -109,25 +107,15 @@ public class VolumeTest {
results.add(host);
Mockito.when(hostDao.listAll()).thenReturn(results);
Mockito.when(hostDao.findHypervisorHostInCluster(ArgumentMatchers.anyLong())).thenReturn(results);
// CreateObjectAnswer createVolumeFromImageAnswer = new
// CreateObjectAnswer(null,UUID.randomUUID().toString(), null);
// Mockito.when(primaryStoreDao.findById(Mockito.anyLong())).thenReturn(primaryStore);
}
private PrimaryDataStoreInfo createPrimaryDataStore() {
try {
// primaryDataStoreProviderMgr.configure("primary data store mgr",
// new HashMap<String, Object>());
// PrimaryDataStoreProvider provider =
// primaryDataStoreProviderMgr.getDataStoreProvider("Solidfre Primary Data Store Provider");
Map<String, String> params = new HashMap<String, String>();
params.put("url", "nfs://test/test");
params.put("dcId", dcId.toString());
params.put("clusterId", clusterId.toString());
params.put("name", "my primary data store");
// PrimaryDataStoreInfo primaryDataStoreInfo =
// provider.registerDataStore(params);
return null;
} catch (Exception e) {
return null;

View File

@ -29,7 +29,6 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
//import java.io.File;
import com.cloud.agent.api.storage.StorPoolDownloadVolumeCommand;
import com.cloud.agent.api.to.DataStoreTO;

View File

@ -23,7 +23,6 @@ import com.cloud.dc.dao.ClusterDao;
import com.cloud.storage.dao.SnapshotDetailsDao;
import com.cloud.storage.dao.SnapshotDetailsVO;
import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.concurrency.NamedThreadFactory;
import com.cloud.utils.db.DB;

View File

@ -934,7 +934,6 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
if (resp.getError() != null) {
err = String.format("Could not create Storpool volume for CS template %s. Error: %s", name, resp.getError());
} else {
//updateVolume(dstData.getId());
VolumeObjectTO dstTO = (VolumeObjectTO)dstData.getTO();
dstTO.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)));
dstTO.setSize(size);

View File

@ -178,32 +178,6 @@ public class StorPoolHelper {
return tags;
}
// Initialize custom logger for updated volume and snapshots
// public static void appendLogger(Logger log, String filePath, String kindOfLog) {
// Appender appender = null;
// PatternLayout patternLayout = new PatternLayout();
// patternLayout.setConversionPattern("%d{YYYY-MM-dd HH:mm:ss.SSS} %m%n");
// SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMddHHmmss");
// Timestamp timestamp = new Timestamp(System.currentTimeMillis());
// String path = filePath + "-" + sdf.format(timestamp) + ".log";
// try {
// appender = new RollingFileAppender(patternLayout, path);
// log.setAdditivity(false);
// log.addAppender(appender);
// } catch (IOException e) {
// e.printStackTrace();
// }
// if (kindOfLog.equals("update")) {
// StorPoolUtil.spLog(
// "You can find information about volumes and snapshots, which will be updated in Database with their globalIs in %s log file",
// path);
// } else if (kindOfLog.equals("abandon")) {
// StorPoolUtil.spLog(
// "You can find information about volumes and snapshots, for which CloudStack doesn't have information in %s log file",
// path);
// }
// }
public static void setSpClusterIdIfNeeded(long hostId, String clusterId, ClusterDao clusterDao, HostDao hostDao,
ClusterDetailsDao clusterDetails) {
HostVO host = hostDao.findById(hostId);

View File

@ -272,9 +272,4 @@ echo "volume.size=$volsize" >> /$tmpltfs/template.properties
zfs snapshot -r $tmpltfs@vmops_ss
rollback_if_needed $tmpltfs $? "Failed to snapshot filesystem"
#if [ "$cleanup" == "true" ]
#then
#rm -f $tmpltimg
#fi
exit 0

View File

@ -273,9 +273,4 @@ echo "volume.size=$volsize" >> /$volfs/volume.properties
zfs snapshot -r $volfs@vmops_ss
rollback_if_needed $volfs $? "Failed to snapshot filesystem"
#if [ "$cleanup" == "true" ]
#then
#rm -f $volimg
#fi
exit 0

View File

@ -53,11 +53,6 @@ for i in $(find /$rootdir -name template.properties );
do
d=$(dirname $i)
filename=$(grep "^filename" $i | awk -F"=" '{print $NF}')
# size=$(grep "virtualsize" $i | awk -F"=" '{print $NF}')
# if [ -n "$filename" ] && [ -n "$size" ]
# then
# d=$d/$filename/$size
# fi
echo ${d#/}/$filename #remove leading slash
done

View File

@ -53,11 +53,6 @@ for i in $(find /$rootdir -name volume.properties );
do
d=$(dirname $i)
filename=$(grep "^filename" $i | awk -F"=" '{print $NF}')
# size=$(grep "virtualsize" $i | awk -F"=" '{print $NF}')
# if [ -n "$filename" ] && [ -n "$size" ]
# then
# d=$d/$filename/$size
# fi
echo ${d#/}/$filename #remove leading slash
done

View File

@ -67,10 +67,6 @@ class CloudStack(Agent):
'get_module_version': getModuleVersion,
'get_ovs_version': ovmVersion,
'ping': ping,
# 'patch': ovmCsPatch,
# 'ovs_agent_set_ssl': ovsAgentSetSsl,
# 'ovs_agent_set_port': ovsAgentSetPort,
# 'ovs_restart_agent': ovsRestartAgent,
}
def getName(self):

View File

@ -210,11 +210,6 @@ def get_vm_group_perfmon(args={}):
total_counter = int(args['total_counter'])
now = int(time.time()) / 60
# Get pool's info of this host
#pool = login.xenapi.pool.get_all()[0]
# Get master node's address of pool
#master = login.xenapi.pool.get_master(pool)
#master_address = login.xenapi.host.get_address(master)
session = login._session
max_duration = 0
@ -226,7 +221,6 @@ def get_vm_group_perfmon(args={}):
rrd_updates = RRDUpdates()
rrd_updates.refresh(login.xenapi, now * 60 - max_duration, session, {})
#for uuid in rrd_updates.get_vm_list():
for vm_count in xrange(1, total_vm + 1):
vm_name = args['vmname' + str(vm_count)]
vm_uuid = getuuid(vm_name)

View File

@ -175,20 +175,6 @@ class NFSSR(FileSR.FileSR):
pass
raise exn
#newpath = os.path.join(self.path, sr_uuid)
#if util.ioretry(lambda: util.pathexists(newpath)):
# if len(util.ioretry(lambda: util.listdir(newpath))) != 0:
# self.detach(sr_uuid)
# raise xs_errors.XenError('SRExists')
#else:
# try:
# util.ioretry(lambda: util.makedirs(newpath))
# except util.CommandException, inst:
# if inst.code != errno.EEXIST:
# self.detach(sr_uuid)
# raise xs_errors.XenError('NFSCreate',
# opterr='remote directory creation error is %d'
# % inst.code)
self.detach(sr_uuid)
def delete(self, sr_uuid):

View File

@ -106,7 +106,6 @@ class NFSSR(FileSR.FileSR):
def attach(self, sr_uuid):
self.validate_remotepath(False)
#self.remotepath = os.path.join(self.dconf['serverpath'], sr_uuid)
self.remotepath = self.dconf['serverpath']
util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')
self.mount_remotepath(sr_uuid)
@ -175,20 +174,6 @@ class NFSSR(FileSR.FileSR):
pass
raise exn
#newpath = os.path.join(self.path, sr_uuid)
#if util.ioretry(lambda: util.pathexists(newpath)):
# if len(util.ioretry(lambda: util.listdir(newpath))) != 0:
# self.detach(sr_uuid)
# raise xs_errors.XenError('SRExists')
#else:
# try:
# util.ioretry(lambda: util.makedirs(newpath))
# except util.CommandException, inst:
# if inst.code != errno.EEXIST:
# self.detach(sr_uuid)
# raise xs_errors.XenError('NFSCreate',
# opterr='remote directory creation error is %d'
# % inst.code)
self.detach(sr_uuid)
def delete(self, sr_uuid):

View File

@ -178,20 +178,6 @@ class NFSSR(FileSR.FileSR):
pass
raise exn
#newpath = os.path.join(self.path, sr_uuid)
#if util.ioretry(lambda: util.pathexists(newpath)):
# if len(util.ioretry(lambda: util.listdir(newpath))) != 0:
# self.detach(sr_uuid)
# raise xs_errors.XenError('SRExists')
#else:
# try:
# util.ioretry(lambda: util.makedirs(newpath))
# except util.CommandException, inst:
# if inst.code != errno.EEXIST:
# self.detach(sr_uuid)
# raise xs_errors.XenError('NFSCreate',
# opterr='remote directory creation error is %d'
# % inst.code)
self.detach(sr_uuid)
@FileSR.locking("SRUnavailable")

View File

@ -107,7 +107,6 @@ class NFSSR(FileSR.FileSR):
def attach(self, sr_uuid):
self.validate_remotepath(False)
#self.remotepath = os.path.join(self.dconf['serverpath'], sr_uuid)
self.remotepath = self.dconf['serverpath']
util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')
self.mount_remotepath(sr_uuid)
@ -176,20 +175,6 @@ class NFSSR(FileSR.FileSR):
pass
raise exn
#newpath = os.path.join(self.path, sr_uuid)
#if util.ioretry(lambda: util.pathexists(newpath)):
# if len(util.ioretry(lambda: util.listdir(newpath))) != 0:
# self.detach(sr_uuid)
# raise xs_errors.XenError('SRExists')
#else:
# try:
# util.ioretry(lambda: util.makedirs(newpath))
# except util.CommandException, inst:
# if inst.code != errno.EEXIST:
# self.detach(sr_uuid)
# raise xs_errors.XenError('NFSCreate',
# opterr='remote directory creation error is %d'
# % inst.code)
self.detach(sr_uuid)
def delete(self, sr_uuid):

View File

@ -181,20 +181,6 @@ class NFSSR(FileSR.FileSR):
pass
raise exn
#newpath = os.path.join(self.path, sr_uuid)
#if util.ioretry(lambda: util.pathexists(newpath)):
# if len(util.ioretry(lambda: util.listdir(newpath))) != 0:
# self.detach(sr_uuid)
# raise xs_errors.XenError('SRExists')
#else:
# try:
# util.ioretry(lambda: util.makedirs(newpath))
# except util.CommandException, inst:
# if inst.code != errno.EEXIST:
# self.detach(sr_uuid)
# raise xs_errors.XenError('NFSCreate',
# opterr='remote directory creation error is %d'
# % inst.code)
self.detach(sr_uuid)
def delete(self, sr_uuid):

View File

@ -1382,15 +1382,6 @@ def verify_network_rules(vm_name, vm_id, vm_ip, vm_ip6, vm_mac, vif, brname, sec
print("Cannot find vif")
sys.exit(1)
#vm_name = "i-2-55-VM"
#vm_id = 55
#vm_ip = "10.11.118.128"
#vm_ip6 = "fe80::1c00:b4ff:fe00:5"
#vm_mac = "1e:00:b4:00:00:05"
#vif = "vnet11"
#brname = "cloudbr0"
#sec_ips = "10.11.118.133;10.11.118.135;10.11.118.138;" # end with ";" and separated by ";"
vm_ips = []
if sec_ips is not None:
vm_ips = sec_ips.split(';')

View File

@ -49,17 +49,14 @@ def setup_ovs_bridge(bridge, key, cs_host_id):
logging.debug("Bridge has been manually created:%s" % res)
if res:
# result = "FAILURE:%s" % res
result = 'false'
else:
# Verify the bridge actually exists, with the gre_key properly set
res = lib.do_cmd([lib.VSCTL_PATH, "get", "bridge",
bridge, "other_config:gre_key"])
if key in str(res):
# result = "SUCCESS:%s" % bridge
result = 'true'
else:
# result = "FAILURE:%s" % res
result = 'false'
lib.do_cmd([lib.VSCTL_PATH, "set", "bridge", bridge, "other_config:is-ovs-tun-network=True"])
@ -134,10 +131,8 @@ def destroy_ovs_bridge(bridge):
res = lib.do_cmd([lib.VSCTL_PATH, "del-br", bridge])
logging.debug("Bridge has been manually removed:%s" % res)
if res:
# result = "FAILURE:%s" % res
result = 'false'
else:
# result = "SUCCESS:%s" % bridge
result = 'true'
logging.debug("Destroy_ovs_bridge completed with result:%s" % result)
@ -150,7 +145,6 @@ def create_tunnel(bridge, remote_ip, key, src_host, dst_host):
res = lib.check_switch()
if res != "SUCCESS":
logging.debug("Openvswitch running: NO")
# return "FAILURE:%s" % res
return 'false'
# We need to keep the name below 14 characters
@ -189,7 +183,6 @@ def create_tunnel(bridge, remote_ip, key, src_host, dst_host):
if len(iface_list) != 1:
logging.debug("WARNING: Unexpected output while verifying " +
"port %s on bridge %s" % (name, bridge))
# return "FAILURE:VERIFY_PORT_FAILED"
return 'false'
# verify interface
@ -205,7 +198,6 @@ def create_tunnel(bridge, remote_ip, key, src_host, dst_host):
if key not in str(key_validation) or remote_ip not in str(ip_validation):
logging.debug("WARNING: Unexpected output while verifying " +
"interface %s on bridge %s" % (name, bridge))
# return "FAILURE:VERIFY_INTERFACE_FAILED"
return 'false'
logging.debug("Tunnel interface validated:%s" % verify_interface_ip)
@ -268,7 +260,6 @@ def destroy_tunnel(bridge, iface_name):
ofport = get_field_of_interface(iface_name, "ofport")
lib.del_flows(bridge, in_port=ofport)
lib.del_port(bridge, iface_name)
# return "SUCCESS"
return 'true'
def get_field_of_interface(iface_name, field):

View File

@ -297,8 +297,6 @@ public class UserConcentratedAllocator extends AdapterBase implements PodAllocat
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
Map<String, String> configs = _configDao.getConfiguration("management-server", params);
String stoppedValue = configs.get("vm.resource.release.interval");
// String destroyedValue =
// configs.get("capacity.skipcounting.destroyed.hours");
String destroyedValue = null;
_secondsToSkipStoppedVMs = NumbersUtil.parseInt(stoppedValue, 86400);
_secondsToSkipDestroyedVMs = NumbersUtil.parseInt(destroyedValue, 0);

View File

@ -77,13 +77,11 @@ public class ApiXmlDocWriter {
List<String> asyncResponses = new ArrayList<String>();
asyncResponses.add(TemplateResponse.class.getName());
asyncResponses.add(VolumeResponse.class.getName());
//asyncResponses.add(LoadBalancerResponse.class.getName());
asyncResponses.add(HostResponse.class.getName());
asyncResponses.add(IPAddressResponse.class.getName());
asyncResponses.add(StoragePoolResponse.class.getName());
asyncResponses.add(UserVmResponse.class.getName());
asyncResponses.add(SecurityGroupResponse.class.getName());
//asyncResponses.add(ExternalLoadBalancerResponse.class.getName());
asyncResponses.add(SnapshotResponse.class.getName());
return asyncResponses;

View File

@ -139,20 +139,6 @@ public class ExternalNetworkDeviceManagerImpl extends ManagerBase implements Ext
}
private List<Host> listNetworkDevice(Long zoneId, Long physicalNetworkId, Long podId, Host.Type type) {
// List<Host> res = new ArrayList<Host>();
// if (podId != null) {
// List<HostVO> devs = _hostDao.listBy(type, null, podId, zoneId);
// if (devs.size() == 1) {
// res.add(devs.get(0));
// } else {
// logger.debug("List " + type + ": " + devs.size() + " found");
// }
// } else {
// List<HostVO> devs = _hostDao.listBy(type, zoneId);
// res.addAll(devs);
// }
// return res;
return null;
}

View File

@ -69,7 +69,6 @@ public class PrivateGatewayRules extends RuleApplier {
// setup source nat
if (_nicProfile != null) {
_isAddOperation = true;
// result = setupVpcPrivateNetwork(router, true, guestNic);
result = visitor.visit(this);
}
} catch (final Exception ex) {
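The result = visitor.visit(this) call above is a visitor-style dispatch: the rule object hands itself to a visitor, which decides how to apply it. A toy sketch of that shape, with every name invented for illustration rather than taken from CloudStack:
// Toy visitor dispatch; all names here are made up for illustration.
interface RuleVisitor {
    boolean visit(Rule rule);
}

abstract class Rule {
    abstract boolean accept(RuleVisitor visitor);
}

class PrivateGatewayRuleSketch extends Rule {
    @Override
    boolean accept(RuleVisitor visitor) {
        return visitor.visit(this); // same shape as result = visitor.visit(this) above
    }
}

public class VisitorSketch {
    public static void main(String[] args) {
        RuleVisitor applier = rule -> {
            System.out.println("applying " + rule.getClass().getSimpleName());
            return true; // pretend the rule was applied successfully
        };
        System.out.println(new PrivateGatewayRuleSketch().accept(applier));
    }
}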

View File

@@ -564,9 +564,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules
_accountMgr.checkAccess(vmOwner, SecurityChecker.AccessType.UseEntry, false, network);
//is static nat is for vm secondary ip
//dstIp = guestNic.getIp4Address();
if (vmGuestIp != null) {
//dstIp = guestNic.getIp4Address();
if (!dstIp.equals(vmGuestIp)) {
//check whether the secondary ip set to the vm or not

View File

@@ -212,7 +212,6 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro
try {
cleanupFinishedWork();
cleanupUnfinishedWork();
//processScheduledWork();
} catch (Throwable th) {
logger.error("Problem with SG Cleanup", th);
}

View File

@@ -328,8 +328,6 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
// setup XenServer default PV driver version
initiateXenServerPVDriverVersion();
// We should not update seed data UUID column here since this will be invoked in upgrade case as well.
//updateUuids();
// Set init to true
_configDao.update("init", "Hidden", "true");

View File

@@ -77,7 +77,6 @@ public class IPRangeConfig {
}
String pod = args[2];
String zone = args[3];
;
String startIP = args[4];
String endIP = null;
if (args.length == 6) {
@@ -99,31 +98,6 @@ public class IPRangeConfig {
}
}
public List<String> changePublicIPRangeGUI(String op, String zone, String startIP, String endIP, long physicalNetworkId) {
String result = checkErrors("public", op, null, zone, startIP, endIP);
if (!result.equals("success")) {
return DatabaseConfig.genReturnList("false", result);
}
long zoneId = PodZoneConfig.getZoneId(zone);
result = changeRange(op, "public", -1, zoneId, startIP, endIP, null, physicalNetworkId);
return DatabaseConfig.genReturnList("true", result);
}
public List<String> changePrivateIPRangeGUI(String op, String pod, String zone, String startIP, String endIP) {
String result = checkErrors("private", op, pod, zone, startIP, endIP);
if (!result.equals("success")) {
return DatabaseConfig.genReturnList("false", result);
}
long podId = PodZoneConfig.getPodId(pod, zone);
long zoneId = PodZoneConfig.getZoneId(zone);
result = changeRange(op, "private", podId, zoneId, startIP, endIP, null, -1);
return DatabaseConfig.genReturnList("true", result);
}
private String checkErrors(String type, String op, String pod, String zone, String startIP, String endIP) {
if (!op.equals("add") && !op.equals("delete")) {
return usage();
@@ -153,15 +127,7 @@ public class IPRangeConfig {
}
// Check that the IPs that are being added are compatible with either the zone's public netmask, or the pod's CIDR
if (type.equals("public")) {
// String publicNetmask = getPublicNetmask(zone);
// String publicGateway = getPublicGateway(zone);
// if (publicNetmask == null) return "Please ensure that your zone's public net mask is specified";
// if (!sameSubnet(startIP, endIP, publicNetmask)) return "Please ensure that your start IP and end IP are in the same subnet, as per the zone's netmask.";
// if (!sameSubnet(startIP, publicGateway, publicNetmask)) return "Please ensure that your start IP is in the same subnet as your zone's gateway, as per the zone's netmask.";
// if (!sameSubnet(endIP, publicGateway, publicNetmask)) return "Please ensure that your end IP is in the same subnet as your zone's gateway, as per the zone's netmask.";
} else if (type.equals("private")) {
if (type.equals("private")) {
String cidrAddress = getCidrAddress(pod, zone);
long cidrSize = getCidrSize(pod, zone);
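The removed comments a few lines up referred to validating that a public range fits the zone's netmask, while private ranges are checked against the pod's CIDR. A self-contained sketch of a same-subnet test of that kind; the class and method names below are illustrative, not the helpers IPRangeConfig actually calls:
import java.net.InetAddress;
import java.net.UnknownHostException;

// Illustrative same-subnet check; not the actual IPRangeConfig helpers.
public class SubnetCheckSketch {
    static long toLong(String ip) throws UnknownHostException {
        byte[] octets = InetAddress.getByName(ip).getAddress();
        long value = 0;
        for (byte octet : octets) {
            value = (value << 8) | (octet & 0xFF);
        }
        return value;
    }

    static boolean sameSubnet(String ip1, String ip2, String netmask) throws UnknownHostException {
        long mask = toLong(netmask);
        return (toLong(ip1) & mask) == (toLong(ip2) & mask);
    }

    public static void main(String[] args) throws UnknownHostException {
        System.out.println(sameSubnet("10.1.1.5", "10.1.1.200", "255.255.255.0")); // true
        System.out.println(sameSubnet("10.1.1.5", "10.1.2.5", "255.255.255.0"));   // false
    }
}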

View File

@@ -75,11 +75,6 @@ public class PodZoneConfig {
}
private String checkPodCidrSubnets(long dcId, HashMap<Long, Vector<Object>> currentPodCidrSubnets) {
// DataCenterDao _dcDao = null;
// final ComponentLocator locator = ComponentLocator.getLocator("management-server");
// _dcDao = locator.getDao(DataCenterDao.class);
// For each pod, return an error if any of the following is true:
// 1. The pod's CIDR subnet conflicts with the guest network subnet
// 2. The pod's CIDR subnet conflicts with the CIDR subnet of any other pod
@@ -87,7 +82,6 @@ public class PodZoneConfig {
String zoneName = PodZoneConfig.getZoneName(dcId);
//get the guest network cidr and guest netmask from the zone
// DataCenterVO dcVo = _dcDao.findById(dcId);
String guestNetworkCidr = IPRangeConfig.getGuestNetworkCidr(dcId);

View File

@@ -97,7 +97,6 @@ public class NfsMountManagerImpl implements NfsMountManager {
if (nfsVersion != null){
command.add("-o", "vers=" + nfsVersion);
}
// command.add("-o", "soft,timeo=133,retrans=2147483647,tcp,acdirmax=0,acdirmin=0");
if ("Mac OS X".equalsIgnoreCase(System.getProperty("os.name"))) {
command.add("-o", "resvport");
}
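For reference, a self-contained sketch of the conditional option handling shown above, where vers= is only added when an NFS version is configured and resvport only on Mac OS X. This uses a plain argument list rather than CloudStack's command helper, and the server path and mount point are made up:
import java.util.ArrayList;
import java.util.List;

// Standalone illustration of conditional mount options; not the NfsMountManagerImpl code.
public class MountOptionsSketch {
    public static void main(String[] args) {
        String nfsVersion = "3"; // hypothetical configured NFS version; may be null
        List<String> command = new ArrayList<>(List.of("mount", "-t", "nfs"));
        if (nfsVersion != null) {
            command.add("-o");
            command.add("vers=" + nfsVersion);
        }
        if ("Mac OS X".equalsIgnoreCase(System.getProperty("os.name"))) {
            command.add("-o");
            command.add("resvport"); // mirrors the Mac OS X special case above
        }
        command.add("nfs.example.org:/export/secondary"); // made-up source
        command.add("/mnt/secondary");                    // made-up mount point
        System.out.println(String.join(" ", command));
    }
}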

View File

@@ -187,7 +187,6 @@ public abstract class APITest {
* @return login response string
*/
protected void login(String username, String password) {
//String md5Psw = createMD5String(password);
// send login request
HashMap<String, String> params = new HashMap<String, String>();
params.put("response", "json");

View File

@@ -1,75 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.vpc;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(locations = "classpath:/VpcTestContext.xml")
public class Site2SiteVpnTest {
// private static void addDaos(MockComponentLocator locator) {
// locator.addDao("AccountDao", AccountDaoImpl.class);
// locator.addDao("Site2SiteCustomerGatewayDao", Site2SiteCustomerGatewayDaoImpl.class);
// locator.addDao("Site2SiteVpnGatewayDao", Site2SiteVpnGatewayDaoImpl.class);
// locator.addDao("Site2SiteVpnConnectionDao", Site2SiteVpnConnectionDaoImpl.class);
//
// locator.addDao("IPAddressDao", IPAddressDaoImpl.class);
// locator.addDao("VpcDao", VpcDaoImpl.class);
// locator.addDao("ConfiguratioDao", MockConfigurationDaoImpl.class);
//
// }
//
// private static void addManagers(MockComponentLocator locator) {
// locator.addManager("AccountManager", MockAccountManagerImpl.class);
// locator.addManager("VpcManager", MockVpcManagerImpl.class);
// }
@Before
public void setUp() {
// locator = new MockComponentLocator("management-server");
// addDaos(locator);
// addManagers(locator);
// logger.info("Finished setUp");
}
@After
public void tearDown() throws Exception {
}
@Test
public void testInjected() throws Exception {
// List<Pair<String, Class<? extends Site2SiteVpnServiceProvider>>> list =
// new ArrayList<Pair<String, Class<? extends Site2SiteVpnServiceProvider>>>();
// list.add(new Pair<String, Class<? extends Site2SiteVpnServiceProvider>>("Site2SiteVpnServiceProvider", MockSite2SiteVpnServiceProvider.class));
// locator.addAdapterChain(Site2SiteVpnServiceProvider.class, list);
// logger.info("Finished add adapter");
// locator.makeActive(new DefaultInterceptorLibrary());
// logger.info("Finished make active");
// Site2SiteVpnManagerImpl vpnMgr = ComponentLocator.inject(Site2SiteVpnManagerImpl.class);
// logger.info("Finished inject");
// Assert.assertTrue(vpnMgr.configure("Site2SiteVpnMgr",new HashMap<String, Object>()) );
// Assert.assertTrue(vpnMgr.start());
}
}

View File

@@ -176,11 +176,6 @@ public class VpcTestConfiguration {
return Mockito.mock(RemoteAccessVpnService.class);
}
// @Bean
// public VpcDao vpcDao() {
// return Mockito.mock(VpcDao.class);
// }
@Bean
public NetworkDao networkDao() {
return Mockito.mock(NetworkDao.class);

View File

@@ -225,7 +225,6 @@ public class CreateNetworkOfferingTest extends TestCase {
NetworkOfferingVO off =
configMgr.createNetworkOffering("isolated", "isolated", TrafficType.Guest, null, true, Availability.Optional, 200, serviceProviderMap, false,
Network.GuestType.Isolated, false, null, false, null, false, false, null, false, null, true, true, false, false, false,null, null, null, false, null, null, false);
// System.out.println("Creating Vpc Network Offering");
assertNotNull("Vpc Isolated network offering with Vpc provider ", off);
}
@@ -245,7 +244,6 @@ public class CreateNetworkOfferingTest extends TestCase {
NetworkOfferingVO off =
configMgr.createNetworkOffering("isolated", "isolated", TrafficType.Guest, null, true, Availability.Optional, 200, serviceProviderMap, false,
Network.GuestType.Isolated, false, null, false, null, false, false, null, false, null, true, true, false, false, false,null, null, null, false, null, null, false);
// System.out.println("Creating Vpc Network Offering");
assertNotNull("Vpc Isolated network offering with Vpc and Netscaler provider ", off);
}
}

View File

@@ -70,7 +70,6 @@ public class AwtCanvasAdapter extends BaseElement {
default:
throw new RuntimeException("Order is not implemented: " + buf + ".");
// break;
}
buf.unref();
@@ -93,8 +92,6 @@ public class AwtCanvasAdapter extends BaseElement {
Graphics2D g = (Graphics2D)image.getGraphics();
for (BitmapRectangle rectangle : order.rectangles) {
// *DEBUG*/System.out.println("["+this+"] DEBUG: Rectangle: " +
// rectangle.toString());
int x = rectangle.x;
int y = rectangle.y;
@@ -148,9 +145,6 @@ public class AwtCanvasAdapter extends BaseElement {
* Example.
*/
public static void main(String args[]) {
// System.setProperty("streamer.Link.debug", "true");
// System.setProperty("streamer.Element.debug", "true");
// System.setProperty("streamer.Pipeline.debug", "true");
ByteBuffer packet = new ByteBuffer(new byte[] {0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x10, 0x00, 0x01, 0x00, 0x10, 0x00,
0x01, 0x04, 0x0a, 0x00, 0x0c, (byte)0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00});

View File

@@ -96,15 +96,6 @@ public class RdpClient extends PipelineImpl {
assembleRDPPipeline(serverHostName, domain, userName, password, pcb, screen, canvas, sslState);
}
// /* DEBUG */
// @Override
// protected HashMap<String, streamer.Element> initElementMap(String id) {
// HashMap<String, streamer.Element> map = new HashMap<String, streamer.Element>();
// map.put("IN", new ServerPacketSniffer("server <"));
// map.put("OUT", new ClientPacketSniffer("> client"));
// return map;
// }
/**
* Assemble connection sequence and main pipeline.
*

View File

@@ -40,11 +40,6 @@ public class ClipboardDataFormat {
// Names
HTML_FORMAT,
// RTF_AS_TEXT,
// RICH_TEXT_FORMAT_WITHOUT_OBJECTS,
// RICH_TEXT_FORMAT,
};
public final int id;
@@ -115,15 +110,6 @@ public class ClipboardDataFormat {
if (HTML_FORMAT.equals(name))
return buf.readVariableString(RdpConstants.CHARSET_8); // TODO: verify
// if (RTF_AS_TEXT.equals(name))
// return buf.readVariableString(RdpConstants.CHARSET_8); // TODO: verify
//
// if (RICH_TEXT_FORMAT_WITHOUT_OBJECTS.equals(name))
// return buf.readVariableString(RdpConstants.CHARSET_8); // TODO: verify
//
// if (RICH_TEXT_FORMAT.equals(name))
// return buf.readVariableString(RdpConstants.CHARSET_8); // TODO: verify
return null;
}

View File

@@ -180,14 +180,8 @@ public class TSRequest extends Sequence {
TSRequest request = new TSRequest("TSRequest");
// Read request from buffer
// System.out.println("Request BER tree before parsing: " + request);
ByteBuffer toReadBuf = new ByteBuffer(packet);
request.readTag(toReadBuf);
// System.out.println("Request BER tree after parsing: " + request);
// System.out.println("version value: " + request.version.value);
// System.out.println("negoToken value: " + ((NegoItem)
// request.negoTokens.tags[0]).negoToken.value);
// Write request to buffer and compare with original
ByteBuffer toWriteBuf = new ByteBuffer(packet.length + 100, true);

View File

@@ -27,7 +27,7 @@ import streamer.debug.MockSource;
import common.ScreenDescription;
/**
* @see http://msdn.microsoft.com/en-us/library/cc240488.aspx
* @see <a href="http://msdn.microsoft.com/en-us/library/cc240488.aspx">microsoft msdn explanation</a>
*/
public class ClientConfirmActivePDU extends BaseElement {

View File

@@ -60,9 +60,7 @@ public class ClientMCSAttachUserRequest extends OneTimeSwitch {
* Example.
*/
public static void main(String args[]) {
// System.setProperty("streamer.Link.debug", "true");
System.setProperty("streamer.Element.debug", "true");
// System.setProperty("streamer.Pipeline.debug", "true");
/* @formatter:off */
byte[] packet = new byte[] {

View File

@@ -57,7 +57,6 @@ public class ClientMCSChannelJoinRequestServerMCSChannelConfirmPDUs extends OneT
// Parse channel confirm response
int typeAndFlags = buf.readUnsignedByte();
int type = typeAndFlags >> 2;
// int flags = typeAndFlags & 0x3;
if (type != MCS_CHANNEL_CONFIRM_PDU)
throw new RuntimeException("[" + this + "] ERROR: Incorrect type of MCS AttachUserConfirm PDU. Expected value: 15, actual value: " + type + ", data: " + buf + ".");
@@ -74,11 +73,9 @@ public class ClientMCSChannelJoinRequestServerMCSChannelConfirmPDUs extends OneT
// Channel Join Request PDU the connection SHOULD be dropped.
// Initiator: 1007 (6+1001)
// int initiator=buf.readUnsignedShort();
buf.skipBytes(2);
// Requested channel
// int requestedChannel=buf.readUnsignedShort();
buf.skipBytes(2);
// Actual channel
@@ -123,9 +120,7 @@ public class ClientMCSChannelJoinRequestServerMCSChannelConfirmPDUs extends OneT
* @see http://msdn.microsoft.com/en-us/library/cc240834.aspx
*/
public static void main(String args[]) {
// System.setProperty("streamer.Link.debug", "true");
System.setProperty("streamer.Element.debug", "true");
// System.setProperty("streamer.Pipeline.debug", "true");
/* @formatter:off */
byte[] clientRequestPacket = new byte[] {

View File

@@ -105,9 +105,7 @@ public class ClientSynchronizePDU extends OneTimeSwitch {
* @see http://msdn.microsoft.com/en-us/library/cc240841.aspx
*/
public static void main(String args[]) {
// System.setProperty("streamer.Link.debug", "true");
System.setProperty("streamer.Element.debug", "true");
// System.setProperty("streamer.Pipeline.debug", "true");
/* @formatter:off */
byte[] packet = new byte[] {

View File

@@ -142,7 +142,6 @@ public class ServerBitmapUpdate extends BaseElement {
// flag is not.
// Note: Even when compression header is enabled, server sends nothing.
// rectangle.compressedBitmapHeader = buf.readBytes(8);
}
// (variable): A variable-length array of bytes describing a bitmap image.

View File

@@ -28,8 +28,8 @@ import streamer.debug.MockSource;
import common.ScreenDescription;
/**
* @see http://msdn.microsoft.com/en-us/library/cc240669.aspx
* @see http://msdn.microsoft.com/en-us/library/cc240484.aspx
* @see <a href="http://msdn.microsoft.com/en-us/library/cc240669.aspx">msdn cc240669</a>
* @see <a href="http://msdn.microsoft.com/en-us/library/cc240484.aspx">msdn cc240484</a>
*/
public class ServerDemandActivePDU extends BaseElement {
@@ -83,7 +83,6 @@ public class ServerDemandActivePDU extends BaseElement {
// (variable): A variable-length array of bytes containing a source
// descriptor,
// ByteBuffer sourceDescriptor = buf.readBytes(lengthSourceDescriptor);
buf.skipBytes(lengthSourceDescriptor);
// (variable): An array of Capability Set (section 2.2.1.13.1.1.1)
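The hunk above skips the variable-length source descriptor instead of decoding it. The same read-length-then-skip pattern, sketched with java.nio and assuming a 16-bit little-endian length field; the byte values are invented, and this uses java.nio.ByteBuffer rather than the streamer ByteBuffer from this codebase:
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Read a 16-bit length, then skip that many bytes; the input bytes are made up.
public class SkipVariableFieldSketch {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.wrap(new byte[] {0x04, 0x00, 'R', 'D', 'P', 0x00, 0x7f})
                .order(ByteOrder.LITTLE_ENDIAN);
        int lengthSourceDescriptor = buf.getShort() & 0xFFFF;  // unsigned 16-bit length (4 here)
        buf.position(buf.position() + lengthSourceDescriptor); // skip the descriptor bytes
        System.out.println("next byte: " + buf.get());         // 127 (0x7f)
    }
}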
@@ -216,7 +215,7 @@ public class ServerDemandActivePDU extends BaseElement {
public static final int CAPSSETTYPE_FRAME_ACKNOWLEDGE = 0x001E;
/**
* @see http://msdn.microsoft.com/en-us/library/cc240486.aspx
* @see <a href="http://msdn.microsoft.com/en-us/library/cc240486.aspx">msdn cc240486</a>
*/
protected void handleCapabiltySets(ByteBuffer buf) {
// (2 bytes): A 16-bit, unsigned integer. The number of capability sets
@@ -312,7 +311,7 @@ public class ServerDemandActivePDU extends BaseElement {
}
/**
* @see http://msdn.microsoft.com/en-us/library/cc240554.aspx
* @see <a href="http://msdn.microsoft.com/en-us/library/cc240554.aspx">msdn cc240554</a>
*/
protected void handleBitmapCapabilities(ByteBuffer buf) {
@@ -388,9 +387,7 @@ public class ServerDemandActivePDU extends BaseElement {
*
*/
public static void main(String args[]) {
// System.setProperty("streamer.Link.debug", "true");
System.setProperty("streamer.Element.debug", "true");
// System.setProperty("streamer.Pipeline.debug", "true");
/* @formatter:off */
byte[] packet = new byte[] {

View File

@@ -73,7 +73,6 @@ public class ServerIOChannelRouter extends BaseElement {
{
// It is ServerErrorAlert-ValidClient
// Ignore it
//throw new RuntimeException("[" + this + "] ERROR: Incorrect PDU length: " + length + ", data: " + buf + ".");
}
int type = buf.readUnsignedShortLE() & 0xf;
@@ -88,14 +87,12 @@ public class ServerIOChannelRouter extends BaseElement {
case PDUTYPE_CONFIRMACTIVEPDU:
throw new RuntimeException("Unexpected client CONFIRM ACTIVE PDU. Data: " + buf + ".");
case PDUTYPE_DEACTIVATEALLPDU:
// pushDataToPad("deactivate_all", buf);
/* ignore */buf.unref();
break;
case PDUTYPE_DATAPDU:
handleDataPdu(buf);
break;
case PDUTYPE_SERVER_REDIR_PKT:
// pushDataToPad("server_redir", buf);
/* ignore */buf.unref();
break;
default:
@@ -253,7 +250,6 @@ public class ServerIOChannelRouter extends BaseElement {
long shareId = buf.readUnsignedIntLE();
if (shareId != state.serverShareId)
throw new RuntimeException("Unexpected share ID: " + shareId + ".");
// buf.skipBytes(4);
// Padding.
buf.skipBytes(1);
@@ -461,9 +457,7 @@ public class ServerIOChannelRouter extends BaseElement {
*
*/
public static void main(String args[]) {
// System.setProperty("streamer.Link.debug", "true");
System.setProperty("streamer.Element.debug", "true");
// System.setProperty("streamer.Pipeline.debug", "true");
byte[] packet = new byte[] {
// TPKT

View File

@@ -42,7 +42,6 @@ public class ServerMCSPDU extends BaseElement {
switch (type) {
// Expected type: send data indication: 26 (0x1a, top 6 bits, or 0x68)
case 0x1a: {
// int userId = buf.readUnsignedShort() + 1001; // User ID: 1002 (1001+1)
buf.skipBytes(2); // Ignore user ID
int channelId = buf.readUnsignedShort(); // Channel ID: 1003
@@ -78,9 +77,6 @@ public class ServerMCSPDU extends BaseElement {
*
*/
public static void main(String args[]) {
// System.setProperty("streamer.Link.debug", "true");
// System.setProperty("streamer.Element.debug", "true");
// System.setProperty("streamer.Pipeline.debug", "true");
byte[] packet = new byte[] {
// TPKT

View File

@@ -187,7 +187,6 @@ public class BaseElement implements Element {
if (buf == null)
throw new NullPointerException();
//return;
if (outputPads.size() == 0)
throw new RuntimeException("Number of outgoing connection is zero. Cannot send data to output. Data: " + buf + ".");

View File

@@ -307,9 +307,6 @@ public class PipelineImpl implements Pipeline {
* Example.
*/
public static void main(String args[]) {
// System.setProperty("streamer.Link.debug", "true");
// System.setProperty("streamer.Element.debug", "true");
// System.setProperty("streamer.Pipeline.debug", "true");
Pipeline pipeline = new PipelineImpl("main");

View File

@@ -101,7 +101,6 @@ public class Queue extends BaseElement {
* Example.
*/
public static void main(String args[]) {
// System.setProperty("streamer.Link.debug", "true");
System.setProperty("streamer.Element.debug", "true");
Element source1 = new FakeSource("source1") {

View File

@@ -75,7 +75,6 @@ public class MockSource extends FakeSource {
new ByteBuffer(new byte[] {3, 1, 2, 3}), new ByteBuffer(new byte[] {4, 1, 2}), new ByteBuffer(new byte[] {5, 1})};
verbose = true;
delay = 100;
// this.numBuffers = this.bufs.length;
}
};

View File

@@ -256,9 +256,7 @@ public class Vnc33Authentication extends OneTimeSwitch {
* Example.
*/
public static void main(String args[]) {
// System.setProperty("streamer.Link.debug", "true");
System.setProperty("streamer.Element.debug", "true");
// System.setProperty("streamer.Pipeline.debug", "true");
final String password = "test";

Some files were not shown because too many files have changed in this diff.