Mirror of https://github.com/apache/cloudstack.git (synced 2025-10-26 08:42:29 +01:00)
add backend code
This commit is contained in: parent f7e75d3aaf, commit b4988e86ab
@@ -22,6 +22,7 @@ import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;

public interface EndPoint {
public long getId();
public Answer sendMessage(Command cmd);
public void sendMessageAsync(Command cmd, AsyncCompletionCallback<Answer> callback);
}

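For orientation, here is a minimal, hedged sketch of how a caller might drive this interface; the selector, the command object, and the AsyncCompletionCallback.complete(Answer) hook are assumptions drawn from the surrounding code, not part of this commit:

    // Hypothetical caller: pick an endpoint, then send a command synchronously or asynchronously.
    EndPoint ep = selector.select(volumeObject);        // selector: assumed EndPointSelector instance
    Answer sync = ep.sendMessage(cmd);                  // blocking round trip to the agent

    ep.sendMessageAsync(cmd, new AsyncCompletionCallback<Answer>() {
        @Override
        public void complete(Answer answer) {           // assumed callback method name
            // inspect answer.getResult() / answer.getDetails() here
        }
    });
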
@@ -51,7 +51,7 @@ public class MockHostEndpointRpcServerDirectCallResource implements HostEndpoint
MockRpcCallBack run = ComponentContext.inject(MockRpcCallBack.class);
run.setCallback(callback);
run.setCmd(command);
run.setHostId(host.getHostId());
run.setHostId(host.getId());
executor.schedule(run, 10, TimeUnit.SECONDS);
}

@@ -59,7 +59,7 @@ public class MockHostEndpointRpcServerDirectCallResource implements HostEndpoint
public Answer sendCommand(HypervisorHostEndPoint host, Command command) {
Answer answer;
try {
answer = agentMgr.send(host.getHostId(), command);
answer = agentMgr.send(host.getId(), command);
return answer;
} catch (AgentUnavailableException e) {
return null;

@@ -19,6 +19,7 @@
package org.apache.cloudstack.storage.test;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.UUID;

@@ -31,6 +32,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.type.RootDisk;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO;
import org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManager;
import org.apache.cloudstack.storage.image.ImageService;
import org.apache.cloudstack.storage.image.db.ImageDataDao;
import org.apache.cloudstack.storage.image.db.ImageDataVO;
@@ -80,8 +82,8 @@ public class volumeServiceTest extends CloudStackTestNGBase {
DataCenterDao dcDao;
@Inject
PrimaryDataStoreDao primaryStoreDao;
//@Inject
//PrimaryDataStoreProviderManager primaryDataStoreProviderMgr;
@Inject
DataStoreProviderManager dataStoreProviderMgr;
@Inject
AgentManager agentMgr;
Long dcId;
@@ -135,6 +137,12 @@ public class volumeServiceTest extends CloudStackTestNGBase {
host.setClusterId(cluster.getId());

host = hostDao.persist(host);
try {
dataStoreProviderMgr.configure(null, new HashMap<String, Object>());
} catch (ConfigurationException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
primaryStore = createPrimaryDataStore();

//CreateVolumeAnswer createVolumeFromImageAnswer = new CreateVolumeAnswer(UUID.randomUUID().toString());
@@ -209,6 +217,7 @@ public class volumeServiceTest extends CloudStackTestNGBase {

private PrimaryDataStoreInfo createPrimaryDataStore() {
try {

/*
PrimaryDataStoreProvider provider = primaryDataStoreProviderMgr.getDataStoreProvider("default primary data store provider");
primaryDataStoreProviderMgr.configure("primary data store mgr", new HashMap<String, Object>());

@@ -48,7 +48,7 @@ public class HypervisorHostEndPoint implements EndPoint {
return this.hostAddress;
}

public long getHostId() {
public long getId() {
return this.hostId;
}

@@ -16,18 +16,15 @@
// under the License.
package org.apache.cloudstack.storage.command;

import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;

import com.cloud.agent.api.Command;

public class CreatePrimaryDataStoreCmd extends Command implements StorageSubSystemCommand {
private final PrimaryDataStoreTO dataStore;
public CreatePrimaryDataStoreCmd(PrimaryDataStoreTO dataStore) {
this.dataStore = dataStore;
private final String dataStore;
public CreatePrimaryDataStoreCmd(String uri) {
this.dataStore = uri;
}

public PrimaryDataStoreTO getDataStore() {
public String getDataStore() {
return this.dataStore;
}

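After this change the command carries a plain URI string instead of a serialized PrimaryDataStoreTO. A hedged sketch of the intended construction, mirroring the lifecycle code later in this commit (the store variable and the URI value are illustrative):

    // Build the command from the data store's URI, e.g.
    // "nfs://192.168.1.10/export/primary/?role=Primary&storeUuid=<uuid>" (illustrative value)
    CreatePrimaryDataStoreCmd createCmd = new CreatePrimaryDataStoreCmd(store.getUri());
    String rawUri = createCmd.getDataStore();   // now returns the raw URI string
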
@@ -21,6 +21,8 @@ package org.apache.cloudstack.storage.endpoint;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

import javax.inject.Inject;

@@ -36,9 +38,13 @@ import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;

import com.cloud.host.HostVO;
import com.cloud.host.Status;
import com.cloud.host.dao.HostDao;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.SearchCriteria2;
import com.cloud.utils.db.SearchCriteriaService;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.exception.CloudRuntimeException;

@Component
@@ -164,4 +170,27 @@ public class DefaultEndPointSelector implements EndPointSelector {
}

}

@Override
public List<EndPoint> selectAll(DataStore store) {
List<EndPoint> endPoints = new ArrayList<EndPoint>();
if (store.getScope().getScopeType() == ScopeType.HOST) {
HostVO host = hostDao.findById(store.getScope().getScopeId());
endPoints.add(new HypervisorHostEndPoint(host.getId(),
host.getPrivateIpAddress()));
} else if (store.getScope().getScopeType() == ScopeType.CLUSTER) {
SearchCriteriaService<HostVO, HostVO> sc = SearchCriteria2.create(HostVO.class);
sc.addAnd(sc.getEntity().getClusterId(), Op.EQ, store.getScope().getScopeId());
sc.addAnd(sc.getEntity().getStatus(), Op.EQ, Status.Up);
List<HostVO> hosts = sc.find();
for (HostVO host : hosts) {
endPoints.add(new HypervisorHostEndPoint(host.getId(),
host.getPrivateIpAddress()));
}

} else {
throw new CloudRuntimeException("shouldn't use it for other scope");
}
return endPoints;
}
}

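A short usage sketch for the new selectAll(): the lifecycle code later in this commit uses it to fan a command out to every up host in the store's scope (the variable names here are illustrative):

    // Fan an attach command out to every endpoint the selector returns for this store.
    List<EndPoint> endPoints = selector.selectAll(store);
    AttachPrimaryDataStoreCmd attachCmd = new AttachPrimaryDataStoreCmd(store.getUri());
    for (EndPoint ep : endPoints) {
        Answer answer = ep.sendMessage(attachCmd);
        // per-host failures could be collected from answer.getDetails() (not done in this commit)
    }
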
@@ -18,7 +18,10 @@
*/
package org.apache.cloudstack.storage.endpoint;

import java.util.List;

import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;

public interface EndPointSelector {
@@ -29,4 +32,9 @@ public interface EndPointSelector {
* @return
*/
EndPoint select(DataObject object);
/**
* @param store
* @return
*/
List<EndPoint> selectAll(DataStore store);
}

@@ -46,6 +46,7 @@ import org.apache.cloudstack.storage.volume.db.VolumeVO;
import org.apache.log4j.Logger;

import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.component.ComponentContext;

public class DefaultPrimaryDataStore implements PrimaryDataStore {
@@ -120,7 +121,8 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore {
return this.pdsv.getPoolType() + File.separator
+ this.pdsv.getHostAddress() + File.separator
+ this.pdsv.getPath() + File.separator
+ "?role=" + this.getRole();
+ "?role=" + this.getRole()
+ "&storeUuid=" + this.pdsv.getUuid();
}

@Override

@@ -38,13 +38,16 @@ import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver;
import org.apache.log4j.Logger;

import com.cloud.agent.api.Answer;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.dao.StoragePoolHostDao;


public class DefaultPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver {
private static final Logger s_logger = Logger.getLogger(DefaultPrimaryDataStoreDriverImpl.class);
@Inject
EndPointSelector selector;

@Inject
StoragePoolHostDao storeHostDao;
public DefaultPrimaryDataStoreDriverImpl() {

}
@@ -65,13 +68,13 @@ public class DefaultPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver

}

public Void createAsyncCallback(AsyncCallbackDispatcher<DefaultPrimaryDataStoreDriverImpl, Answer> callback, CreateVolumeContext<CommandResult> context) {
CommandResult result = new CommandResult();
public Void createAsyncCallback(AsyncCallbackDispatcher<DefaultPrimaryDataStoreDriverImpl, Answer> callback, CreateVolumeContext<CreateCmdResult> context) {
CreateCmdResult result = null;
CreateVolumeAnswer volAnswer = (CreateVolumeAnswer) callback.getResult();
if (volAnswer.getResult()) {
DataObject volume = context.getVolume();
//volume.setPath(volAnswer.getVolumeUuid());
result = new CreateCmdResult(volAnswer.getVolumeUuid());
} else {
result = new CreateCmdResult("");
result.setResult(volAnswer.getDetails());
}

@@ -155,7 +158,7 @@ public class DefaultPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
EndPoint ep = selector.select(vol);
CreateObjectCommand createCmd = new CreateObjectCommand(vol.getUri());

CreateVolumeContext<CommandResult> context = null;
CreateVolumeContext<CreateCmdResult> context = null;
AsyncCallbackDispatcher<DefaultPrimaryDataStoreDriverImpl, Answer> caller = AsyncCallbackDispatcher.create(this);
caller.setContext(context)
.setCallback(caller.getTarget().createAsyncCallback(null, null));
@@ -165,7 +168,8 @@ public class DefaultPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver

@Override
public String grantAccess(DataObject object, EndPoint ep) {
return object.getUri();
StoragePoolHostVO poolHost = storeHostDao.findByPoolHost(object.getDataStore().getId(), ep.getId());
return object.getUri() + "&storagePath=" + poolHost.getLocalPath();
}

@Override

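The reworked grantAccess() folds the pool's host-local mount path into the URI that is later handed to the hypervisor resource. A hedged sketch of that round trip, with illustrative objects; the XenServer resource further down recovers the value from the query string:

    // Driver side: append the host's local mount point for this pool.
    StoragePoolHostVO poolHost = storeHostDao.findByPoolHost(store.getId(), ep.getId());
    String uri = volume.getUri() + "&storagePath=" + poolHost.getLocalPath();

    // Resource side: the XenServer code below reads it back out of the query string.
    String storagePath = getParameters(new URI(uri)).get("storagePath");
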
@@ -1,34 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.datastore.lifecycle;

import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;

public class DefaultKvmPrimaryDataStoreLifeCycle extends DefaultPrimaryDataStoreLifeCycleImpl {

/**
* @param dataStoreDao
* @param dataStore
*/
public DefaultKvmPrimaryDataStoreLifeCycle(PrimaryDataStoreDao dataStoreDao) {
super(dataStoreDao);
// TODO Auto-generated constructor stub
}

}

@@ -18,24 +18,37 @@
*/
package org.apache.cloudstack.storage.datastore.lifecycle;

import java.util.List;
import java.util.Map;

import javax.inject.Inject;

import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd;
import org.apache.cloudstack.storage.command.CreatePrimaryDataStoreCmd;
import org.apache.cloudstack.storage.datastore.DataStoreStatus;
import org.apache.cloudstack.storage.datastore.PrimaryDataStore;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO;
import org.apache.cloudstack.storage.endpoint.EndPointSelector;

import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;

public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
protected PrimaryDataStore dataStore;
protected PrimaryDataStoreDao dataStoreDao;
public DefaultPrimaryDataStoreLifeCycleImpl(PrimaryDataStoreDao dataStoreDao) {
this.dataStoreDao = dataStoreDao;
@Inject
EndPointSelector selecotr;
@Inject
PrimaryDataStoreDao dataStoreDao;
@Inject
HostDao hostDao;
public DefaultPrimaryDataStoreLifeCycleImpl() {
}

@Override
@@ -44,26 +57,36 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif
return true;
}

protected void attachCluster() {
protected void attachCluster(DataStore store) {
//send down AttachPrimaryDataStoreCmd command to all the hosts in the cluster
AttachPrimaryDataStoreCmd cmd = new AttachPrimaryDataStoreCmd(this.dataStore.getUri());
/*for (EndPoint ep : dataStore.getEndPoints()) {
ep.sendMessage(cmd);
} */
List<EndPoint> endPoints = selecotr.selectAll(dataStore);
CreatePrimaryDataStoreCmd createCmd = new CreatePrimaryDataStoreCmd(store.getUri());
EndPoint ep = endPoints.get(0);
HostVO host = hostDao.findById(ep.getId());
if (host.getHypervisorType() == HypervisorType.XenServer) {
ep.sendMessage(createCmd);
}

endPoints.get(0).sendMessage(createCmd);
AttachPrimaryDataStoreCmd cmd = new AttachPrimaryDataStoreCmd(dataStore.getUri());
for (EndPoint endp : endPoints) {
endp.sendMessage(cmd);
}
}

@Override
public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
PrimaryDataStoreVO dataStoreVO = dataStoreDao.findById(this.dataStore.getId());
PrimaryDataStoreVO dataStoreVO = dataStoreDao.findById(dataStore.getId());
dataStoreVO.setDataCenterId(scope.getZoneId());
dataStoreVO.setPodId(scope.getPodId());
dataStoreVO.setClusterId(scope.getScopeId());
dataStoreVO.setStatus(DataStoreStatus.Attaching);
dataStoreVO.setScope(scope.getScopeType());
dataStoreDao.update(dataStoreVO.getId(), dataStoreVO);

attachCluster();
attachCluster(dataStore);

dataStoreVO = dataStoreDao.findById(this.dataStore.getId());
dataStoreVO = dataStoreDao.findById(dataStore.getId());
dataStoreVO.setStatus(DataStoreStatus.Up);
dataStoreDao.update(dataStoreVO.getId(), dataStoreVO);

@@ -1,35 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.datastore.lifecycle;

import org.apache.cloudstack.storage.datastore.PrimaryDataStore;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;

public class DefaultVmwarePrimaryDataStoreLifeCycle extends DefaultPrimaryDataStoreLifeCycleImpl {

/**
* @param dataStoreDao
* @param dataStore
*/
public DefaultVmwarePrimaryDataStoreLifeCycle(PrimaryDataStoreDao dataStoreDao) {
super(dataStoreDao);
// TODO Auto-generated constructor stub
}

}

@@ -1,59 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.datastore.lifecycle;

import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd;
import org.apache.cloudstack.storage.command.CreatePrimaryDataStoreCmd;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;

import com.cloud.agent.api.Answer;
import com.cloud.utils.exception.CloudRuntimeException;

public class DefaultXenPrimaryDataStoreLifeCycle extends DefaultPrimaryDataStoreLifeCycleImpl {

/**
* @param dataStoreDao
* @param dataStore
*/
public DefaultXenPrimaryDataStoreLifeCycle(PrimaryDataStoreDao dataStoreDao) {
super(dataStoreDao);
// TODO Auto-generated constructor stub
}

@Override
public void attachCluster() {
String result = null;
//send one time is enough, as xenserver is clustered
/*CreatePrimaryDataStoreCmd cmd = new CreatePrimaryDataStoreCmd(this.dataStore.getDataStoreTO());
String result = null;
for (EndPoint ep : dataStore.getEndPoints()) {
Answer answer = ep.sendMessage(cmd);
if (answer.getResult()) {
return;
}
result = answer.getDetails();
}*/

if (result != null)
throw new CloudRuntimeException("AttachPrimaryDataStoreCmd failed: " + result);

super.attachCluster();
}
}

@@ -121,7 +121,7 @@ public class VolumeObject implements VolumeInfo {
}
ObjectInDataStoreVO obj = ojbectInStoreMgr.findObject(this.volumeVO.getId(), DataObjectType.VOLUME, this.dataStore.getId(), this.dataStore.getRole());
if (obj.getState() != ObjectInDataStoreStateMachine.State.Ready) {
return this.dataStore.getUri() + File.separator + "&objType=" + DataObjectType.VOLUME + "&size=" + this.volumeVO.getSize();
return this.dataStore.getUri() + File.separator + "&objType=" + DataObjectType.VOLUME + "&size=" + this.volumeVO.getSize() + "&name=" + this.volumeVO.getName();
} else {
return this.dataStore.getUri() + File.separator + "&objType=" + DataObjectType.VOLUME + "&path=" + obj.getInstallPath();
}

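The volume URI now also carries the volume's name, which the XenServer resource below uses as the VDI label. An illustrative shape of what getUri() produces after this change (host, path, UUID, size and name are made-up values):

    // Store part comes from the data store's getUri(); the volume parameters are appended here:
    // NetworkFilesystem/192.168.1.10/export/primary/?role=Primary&storeUuid=<uuid>/&objType=VOLUME&size=5368709120&name=vol-1234
    String volumeUri = volumeObject.getUri();
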
@@ -22,7 +22,10 @@ import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
@@ -31,9 +34,9 @@ import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreAnswer;
import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd;
import org.apache.cloudstack.storage.command.CopyCmd;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.command.CreateObjectCommand;
import org.apache.cloudstack.storage.command.CreatePrimaryDataStoreCmd;
import org.apache.cloudstack.storage.command.CreateVolumeAnswer;
import org.apache.cloudstack.storage.command.CreateObjectCommand;
import org.apache.cloudstack.storage.command.CreateVolumeFromBaseImageCommand;
import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
import org.apache.cloudstack.storage.datastore.protocol.DataStoreProtocol;
@@ -65,6 +68,8 @@ import com.xensource.xenapi.Types.BadServerResponse;
import com.xensource.xenapi.Types.XenAPIException;
import com.xensource.xenapi.VDI;

import edu.emory.mathcs.backport.java.util.Arrays;

public class XenServerStorageResource {
private static final Logger s_logger = Logger.getLogger(XenServerStorageResource.class);
protected CitrixResourceBase hypervisorResource;
@@ -114,16 +119,39 @@ public class XenServerStorageResource {
vdi.destroy(conn);
}

private Map<String, String> getParameters(URI uri) {
String parameters = uri.getQuery();
Map<String, String> params = new HashMap<String, String>();
List<String> paraLists = Arrays.asList(parameters.split("&"));
for (String para : paraLists) {
String[] pair = para.split("=");
params.put(pair[0], pair[1]);
}
return params;
}

protected CreateVolumeAnswer execute(CreateObjectCommand cmd) {
VolumeTO volume = null;
PrimaryDataStoreTO primaryDataStore = volume.getDataStore();
String uriString = cmd.getObjectUri();
Map<String, String> params = null;

try {
URI uri = new URI(uriString);
params = getParameters(uri);
} catch (URISyntaxException e1) {
s_logger.debug("uri exception", e1);
return new CreateVolumeAnswer(cmd, false, e1.toString());
}

long size = Long.parseLong(params.get("size"));
String name = params.get("name");
String storeUuid = params.get("storagePath");
Connection conn = hypervisorResource.getConnection();
VDI vdi = null;
boolean result = false;
String errorMsg = null;
try {
SR primaryDataStoreSR = getSRByNameLabel(conn, primaryDataStore.getUuid());
vdi = createVdi(conn, volume.getName(), primaryDataStoreSR, volume.getSize());
SR primaryDataStoreSR = getSRByNameLabel(conn, storeUuid);
vdi = createVdi(conn, name, primaryDataStoreSR, size);
VDI.Record record = vdi.getRecord(conn);
result = true;
return new CreateVolumeAnswer(cmd, record.uuid);
@@ -190,11 +218,14 @@ public class XenServerStorageResource {
}
}

protected SR getNfsSR(Connection conn, NfsPrimaryDataStoreTO pool) {
protected SR getNfsSR(Connection conn, URI uri) {
Map<String, String> deviceConfig = new HashMap<String, String>();
Map<String, String> params = getParameters(uri);
String uuid = params.get("storeUuid");
try {
String server = pool.getServer();
String serverpath = pool.getPath();
String server = uri.getHost();
String serverpath = uri.getPath();

serverpath = serverpath.replace("//", "/");
Set<SR> srs = SR.getAll(conn);
for (SR sr : srs) {
@@ -225,21 +256,21 @@ public class XenServerStorageResource {

if (server.equals(dc.get("server")) && serverpath.equals(dc.get("serverpath"))) {
throw new CloudRuntimeException("There is a SR using the same configuration server:" + dc.get("server") + ", serverpath:"
+ dc.get("serverpath") + " for pool " + pool.getUuid() + "on host:" + hypervisorResource.getHost().uuid);
+ dc.get("serverpath") + " for pool " + uuid + "on host:" + hypervisorResource.getHost().uuid);
}

}
deviceConfig.put("server", server);
deviceConfig.put("serverpath", serverpath);
Host host = Host.getByUuid(conn, hypervisorResource.getHost().uuid);
SR sr = SR.create(conn, host, deviceConfig, new Long(0), pool.getUuid(), Long.toString(pool.getId()), SRType.NFS.toString(), "user", true,
SR sr = SR.create(conn, host, deviceConfig, new Long(0), uuid, uuid, SRType.NFS.toString(), "user", true,
new HashMap<String, String>());
sr.scan(conn);
return sr;
} catch (XenAPIException e) {
throw new CloudRuntimeException("Unable to create NFS SR " + pool.toString(), e);
throw new CloudRuntimeException("Unable to create NFS SR " + uuid, e);
} catch (XmlRpcException e) {
throw new CloudRuntimeException("Unable to create NFS SR " + pool.toString(), e);
throw new CloudRuntimeException("Unable to create NFS SR " + uuid, e);
}
}
/*
@@ -369,15 +400,26 @@ public class XenServerStorageResource {

protected Answer execute(CreatePrimaryDataStoreCmd cmd) {
Connection conn = hypervisorResource.getConnection();
PrimaryDataStoreTO dataStore = cmd.getDataStore();
String storeUrl = cmd.getDataStore();
String scheme = null;
String type = null;
URI storeUri = null;
try {
if (DataStoreProtocol.NFS.toString().equalsIgnoreCase(dataStore.getType())) {
getNfsSR(conn, (NfsPrimaryDataStoreTO)dataStore);
} else if (DataStoreProtocol.NFS.toString().equalsIgnoreCase(dataStore.getType())) {
storeUri = new URI(storeUrl);
} catch(URISyntaxException e) {
return new Answer(cmd, false, e.toString());
}

scheme = storeUri.getScheme();

try {
if (scheme.equalsIgnoreCase("nfs")) {
SR sr = getNfsSR(conn, storeUri);
} else if (scheme.equalsIgnoreCase("iscsi")) {
//getIscsiSR(conn, dataStore);
} else if (dataStore.getType() == StoragePoolType.PreSetup.toString()) {
} else if (scheme.equalsIgnoreCase("presetup")) {
} else {
//return new Answer(cmd, false, "The pool type: " + pool.getType().name() + " is not supported.");
return new Answer(cmd, false, "The pool type: " + scheme + " is not supported.");
}
return new Answer(cmd, true, "success");
} catch (Exception e) {

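Taken together, the driver side now encodes everything the XenServer side needs in the object URI, and getParameters() above splits the query string back into a map. A self-contained, hedged sketch of that round trip; the URI value is illustrative, and this standalone parser mirrors, rather than reuses, the private getParameters() method:

    import java.net.URI;
    import java.net.URISyntaxException;
    import java.util.HashMap;
    import java.util.Map;

    public class VolumeUriRoundTrip {
        public static void main(String[] args) throws URISyntaxException {
            // Illustrative URI carrying the query parameters used in this commit.
            URI uri = new URI("nfs://192.168.1.10/export/primary/?role=Primary&storeUuid=abc-123"
                    + "&objType=VOLUME&size=5368709120&name=vol-1234&storagePath=/mnt/abc-123");

            // Same splitting strategy as getParameters(): break the query on '&', then on '='.
            Map<String, String> params = new HashMap<String, String>();
            for (String pair : uri.getQuery().split("&")) {
                String[] kv = pair.split("=");
                params.put(kv[0], kv[1]);
            }

            long size = Long.parseLong(params.get("size"));      // 5368709120
            String name = params.get("name");                    // vol-1234
            String storagePath = params.get("storagePath");      // /mnt/abc-123
            System.out.println(name + " " + size + " " + storagePath);
        }
    }
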