add backend code

Edison Su 2013-01-18 19:21:10 -08:00 committed by Edison Su
parent f7e75d3aaf
commit b4988e86ab
15 changed files with 165 additions and 178 deletions

EndPoint.java

@@ -22,6 +22,7 @@ import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.Command;
 public interface EndPoint {
+    public long getId();
     public Answer sendMessage(Command cmd);
     public void sendMessageAsync(Command cmd, AsyncCompletionCallback<Answer> callback);
 }
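EndPoint now exposes the host id alongside the two send paths, and the mock RPC server and HypervisorHostEndPoint below are renamed to match (getHostId() becomes getId()). For orientation, here is a rough, self-contained sketch of how the contract is exercised; the Command, Answer, Callback and LoopbackEndPoint types are simplified stand-ins invented for this example, not the CloudStack classes:

public class EndPointUsageSketch {

    // Simplified stand-ins for the CloudStack interfaces, just to show the
    // two send paths an endpoint offers: blocking and callback-based.
    interface Command { }
    interface Answer { boolean isOk(); }
    interface Callback<T> { void complete(T result); }

    interface EndPoint {
        long getId();
        Answer sendMessage(Command cmd);
        void sendMessageAsync(Command cmd, Callback<Answer> callback);
    }

    static class LoopbackEndPoint implements EndPoint {
        private final long hostId;
        LoopbackEndPoint(long hostId) { this.hostId = hostId; }
        public long getId() { return hostId; }
        public Answer sendMessage(Command cmd) {
            // A real endpoint would ship the command to the agent on this host
            // and block for the answer; here we answer locally.
            return new Answer() { public boolean isOk() { return true; } };
        }
        public void sendMessageAsync(Command cmd, Callback<Answer> callback) {
            // A real endpoint would hand the command off and invoke the
            // callback when the agent replies; here we complete inline.
            callback.complete(sendMessage(cmd));
        }
    }

    public static void main(String[] args) {
        final EndPoint ep = new LoopbackEndPoint(42L);
        Command cmd = new Command() { };
        System.out.println("sync answer ok: " + ep.sendMessage(cmd).isOk());
        ep.sendMessageAsync(cmd, new Callback<Answer>() {
            public void complete(Answer answer) {
                System.out.println("async answer from host " + ep.getId() + ": " + answer.isOk());
            }
        });
    }
}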

MockHostEndpointRpcServerDirectCallResource.java

@@ -51,7 +51,7 @@ public class MockHostEndpointRpcServerDirectCallResource implements HostEndpoint
         MockRpcCallBack run = ComponentContext.inject(MockRpcCallBack.class);
         run.setCallback(callback);
         run.setCmd(command);
-        run.setHostId(host.getHostId());
+        run.setHostId(host.getId());
         executor.schedule(run, 10, TimeUnit.SECONDS);
     }
@@ -59,7 +59,7 @@ public class MockHostEndpointRpcServerDirectCallResource implements HostEndpoint
     public Answer sendCommand(HypervisorHostEndPoint host, Command command) {
         Answer answer;
         try {
-            answer = agentMgr.send(host.getHostId(), command);
+            answer = agentMgr.send(host.getId(), command);
             return answer;
         } catch (AgentUnavailableException e) {
             return null;

volumeServiceTest.java

@@ -19,6 +19,7 @@
 package org.apache.cloudstack.storage.test;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.UUID;
@@ -31,6 +32,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.type.RootDisk;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO;
+import org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManager;
 import org.apache.cloudstack.storage.image.ImageService;
 import org.apache.cloudstack.storage.image.db.ImageDataDao;
 import org.apache.cloudstack.storage.image.db.ImageDataVO;
@@ -80,8 +82,8 @@ public class volumeServiceTest extends CloudStackTestNGBase {
     DataCenterDao dcDao;
     @Inject
     PrimaryDataStoreDao primaryStoreDao;
-    //@Inject
-    //PrimaryDataStoreProviderManager primaryDataStoreProviderMgr;
+    @Inject
+    DataStoreProviderManager dataStoreProviderMgr;
     @Inject
     AgentManager agentMgr;
     Long dcId;
@@ -135,6 +137,12 @@ public class volumeServiceTest extends CloudStackTestNGBase {
         host.setClusterId(cluster.getId());
         host = hostDao.persist(host);
+        try {
+            dataStoreProviderMgr.configure(null, new HashMap<String, Object>());
+        } catch (ConfigurationException e) {
+            // TODO Auto-generated catch block
+            e.printStackTrace();
+        }
         primaryStore = createPrimaryDataStore();
         //CreateVolumeAnswer createVolumeFromImageAnswer = new CreateVolumeAnswer(UUID.randomUUID().toString());
@@ -209,6 +217,7 @@ public class volumeServiceTest extends CloudStackTestNGBase {
     private PrimaryDataStoreInfo createPrimaryDataStore() {
         try {
             /*
            PrimaryDataStoreProvider provider = primaryDataStoreProviderMgr.getDataStoreProvider("default primary data store provider");
            primaryDataStoreProviderMgr.configure("primary data store mgr", new HashMap<String, Object>());

HypervisorHostEndPoint.java

@@ -48,7 +48,7 @@ public class HypervisorHostEndPoint implements EndPoint {
         return this.hostAddress;
     }
-    public long getHostId() {
+    public long getId() {
         return this.hostId;
     }

CreatePrimaryDataStoreCmd.java

@@ -16,18 +16,15 @@
 // under the License.
 package org.apache.cloudstack.storage.command;
-import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
-import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
 import com.cloud.agent.api.Command;
 public class CreatePrimaryDataStoreCmd extends Command implements StorageSubSystemCommand {
-    private final PrimaryDataStoreTO dataStore;
-    public CreatePrimaryDataStoreCmd(PrimaryDataStoreTO dataStore) {
-        this.dataStore = dataStore;
+    private final String dataStore;
+    public CreatePrimaryDataStoreCmd(String uri) {
+        this.dataStore = uri;
     }
-    public PrimaryDataStoreTO getDataStore() {
+    public String getDataStore() {
         return this.dataStore;
     }

DefaultEndPointSelector.java

@@ -21,6 +21,8 @@ package org.apache.cloudstack.storage.endpoint;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
 import javax.inject.Inject;
@@ -36,9 +38,13 @@ import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 import com.cloud.host.HostVO;
+import com.cloud.host.Status;
 import com.cloud.host.dao.HostDao;
 import com.cloud.utils.db.DB;
+import com.cloud.utils.db.SearchCriteria2;
+import com.cloud.utils.db.SearchCriteriaService;
 import com.cloud.utils.db.Transaction;
+import com.cloud.utils.db.SearchCriteria.Op;
 import com.cloud.utils.exception.CloudRuntimeException;
 @Component
@@ -164,4 +170,27 @@ public class DefaultEndPointSelector implements EndPointSelector {
         }
     }
+    @Override
+    public List<EndPoint> selectAll(DataStore store) {
+        List<EndPoint> endPoints = new ArrayList<EndPoint>();
+        if (store.getScope().getScopeType() == ScopeType.HOST) {
+            HostVO host = hostDao.findById(store.getScope().getScopeId());
+            endPoints.add(new HypervisorHostEndPoint(host.getId(),
+                    host.getPrivateIpAddress()));
+        } else if (store.getScope().getScopeType() == ScopeType.CLUSTER) {
+            SearchCriteriaService<HostVO, HostVO> sc = SearchCriteria2.create(HostVO.class);
+            sc.addAnd(sc.getEntity().getClusterId(), Op.EQ, store.getScope().getScopeId());
+            sc.addAnd(sc.getEntity().getStatus(), Op.EQ, Status.Up);
+            List<HostVO> hosts = sc.find();
+            for (HostVO host : hosts) {
+                endPoints.add(new HypervisorHostEndPoint(host.getId(),
+                        host.getPrivateIpAddress()));
+            }
+        } else {
+            throw new CloudRuntimeException("shouldn't use it for other scope");
+        }
+        return endPoints;
+    }
 }
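The new selectAll() turns a store's scope into concrete endpoints: the single owning host for a host-scoped store, or every host in Up state for a cluster-scoped one. Below is a stripped-down sketch of that branching over plain collections, with an invented Host class and plain strings standing in for the CloudStack types and the SearchCriteria2 query builder:

import java.util.ArrayList;
import java.util.List;

public class SelectAllSketch {

    // Simplified stand-in for HostVO: just the fields the selection rule needs.
    static class Host {
        long id;
        long clusterId;
        boolean up;
        String privateIp;
        Host(long id, long clusterId, boolean up, String privateIp) {
            this.id = id; this.clusterId = clusterId; this.up = up; this.privateIp = privateIp;
        }
    }

    // HOST scope: the single owning host. CLUSTER scope: every Up host in the cluster.
    static List<String> selectAll(String scopeType, long scopeId, List<Host> hosts) {
        List<String> endPoints = new ArrayList<String>();
        if ("HOST".equals(scopeType)) {
            for (Host h : hosts) {
                if (h.id == scopeId) {
                    endPoints.add(h.privateIp);
                }
            }
        } else if ("CLUSTER".equals(scopeType)) {
            for (Host h : hosts) {
                if (h.clusterId == scopeId && h.up) {
                    endPoints.add(h.privateIp);
                }
            }
        } else {
            throw new IllegalArgumentException("unsupported scope: " + scopeType);
        }
        return endPoints;
    }

    public static void main(String[] args) {
        List<Host> hosts = new ArrayList<Host>();
        hosts.add(new Host(1, 10, true, "10.0.0.1"));
        hosts.add(new Host(2, 10, false, "10.0.0.2"));
        hosts.add(new Host(3, 11, true, "10.0.0.3"));
        // Cluster 10 has two hosts but only host 1 is Up, so one endpoint comes back.
        System.out.println(selectAll("CLUSTER", 10, hosts)); // [10.0.0.1]
    }
}

In the selector above the cluster branch is expressed as a SearchCriteria2 query against the host table instead of an in-memory filter, but the selection rule is the same.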

EndPointSelector.java

@@ -18,7 +18,10 @@
  */
 package org.apache.cloudstack.storage.endpoint;
+import java.util.List;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
 public interface EndPointSelector {
@@ -29,4 +32,9 @@ public interface EndPointSelector {
      * @return
      */
     EndPoint select(DataObject object);
+    /**
+     * @param store
+     * @return
+     */
+    List<EndPoint> selectAll(DataStore store);
 }

DefaultPrimaryDataStore.java

@@ -46,6 +46,7 @@ import org.apache.cloudstack.storage.volume.db.VolumeVO;
 import org.apache.log4j.Logger;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.storage.dao.StoragePoolHostDao;
 import com.cloud.utils.component.ComponentContext;
 public class DefaultPrimaryDataStore implements PrimaryDataStore {
@@ -120,7 +121,8 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore {
         return this.pdsv.getPoolType() + File.separator
                 + this.pdsv.getHostAddress() + File.separator
                 + this.pdsv.getPath() + File.separator
-                + "?role=" + this.getRole();
+                + "?role=" + this.getRole()
+                + "&storeUuid=" + this.pdsv.getUuid();
     }
     @Override
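getUri() now also carries the pool UUID in the query string. As a rough illustration, assuming the composed value reaches the agent in the scheme://host/path?query shape that the XenServer resource parses further down (the host, path and UUID values here are invented):

import java.net.URI;
import java.net.URISyntaxException;

// Illustrative only: an example of the kind of store URI the management side
// hands to the hypervisor resource, and the pieces read back from it.
public class StoreUriExample {
    public static void main(String[] args) throws URISyntaxException {
        // Invented values standing in for pdsv.getHostAddress(), getPath(),
        // getRole() and getUuid().
        String storeUri = "nfs://192.168.10.2/export/primary"
                + "?role=Primary"
                + "&storeUuid=6fb32b6b-96ed-4a3f-9f64-a2f0c2b9f2d1";

        URI uri = new URI(storeUri);
        System.out.println(uri.getScheme()); // nfs -> selects the NFS SR path
        System.out.println(uri.getHost());   // 192.168.10.2 -> NFS server
        System.out.println(uri.getPath());   // /export/primary -> server path
        System.out.println(uri.getQuery());  // role=Primary&storeUuid=...
    }
}

CreatePrimaryDataStoreCmd carries exactly this kind of string now, and the XenServer resource dispatches on the scheme (nfs, iscsi, presetup) when it creates the SR.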

DefaultPrimaryDataStoreDriverImpl.java

@@ -38,13 +38,16 @@ import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver;
 import org.apache.log4j.Logger;
 import com.cloud.agent.api.Answer;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
 public class DefaultPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver {
     private static final Logger s_logger = Logger.getLogger(DefaultPrimaryDataStoreDriverImpl.class);
     @Inject
     EndPointSelector selector;
+    @Inject
+    StoragePoolHostDao storeHostDao;
     public DefaultPrimaryDataStoreDriverImpl() {
     }
@@ -65,13 +68,13 @@ public class DefaultPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
     }
-    public Void createAsyncCallback(AsyncCallbackDispatcher<DefaultPrimaryDataStoreDriverImpl, Answer> callback, CreateVolumeContext<CommandResult> context) {
-        CommandResult result = new CommandResult();
+    public Void createAsyncCallback(AsyncCallbackDispatcher<DefaultPrimaryDataStoreDriverImpl, Answer> callback, CreateVolumeContext<CreateCmdResult> context) {
+        CreateCmdResult result = null;
         CreateVolumeAnswer volAnswer = (CreateVolumeAnswer) callback.getResult();
         if (volAnswer.getResult()) {
-            DataObject volume = context.getVolume();
-            //volume.setPath(volAnswer.getVolumeUuid());
+            result = new CreateCmdResult(volAnswer.getVolumeUuid());
         } else {
+            result = new CreateCmdResult("");
             result.setResult(volAnswer.getDetails());
         }
@@ -155,7 +158,7 @@ public class DefaultPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
         EndPoint ep = selector.select(vol);
         CreateObjectCommand createCmd = new CreateObjectCommand(vol.getUri());
-        CreateVolumeContext<CommandResult> context = null;
+        CreateVolumeContext<CreateCmdResult> context = null;
         AsyncCallbackDispatcher<DefaultPrimaryDataStoreDriverImpl, Answer> caller = AsyncCallbackDispatcher.create(this);
         caller.setContext(context)
                 .setCallback(caller.getTarget().createAsyncCallback(null, null));
@@ -165,7 +168,8 @@ public class DefaultPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
     @Override
     public String grantAccess(DataObject object, EndPoint ep) {
-        return object.getUri();
+        StoragePoolHostVO poolHost = storeHostDao.findByPoolHost(object.getDataStore().getId(), ep.getId());
+        return object.getUri() + "&storagePath=" + poolHost.getLocalPath();
     }
     @Override

DefaultKvmPrimaryDataStoreLifeCycle.java (deleted)

@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.cloudstack.storage.datastore.lifecycle;
-import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
-public class DefaultKvmPrimaryDataStoreLifeCycle extends DefaultPrimaryDataStoreLifeCycleImpl {
-    /**
-     * @param dataStoreDao
-     * @param dataStore
-     */
-    public DefaultKvmPrimaryDataStoreLifeCycle(PrimaryDataStoreDao dataStoreDao) {
-        super(dataStoreDao);
-        // TODO Auto-generated constructor stub
-    }
-}

DefaultPrimaryDataStoreLifeCycleImpl.java

@@ -18,24 +18,37 @@
  */
 package org.apache.cloudstack.storage.datastore.lifecycle;
+import java.util.List;
 import java.util.Map;
+import javax.inject.Inject;
 import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
 import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
 import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
 import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd;
+import org.apache.cloudstack.storage.command.CreatePrimaryDataStoreCmd;
 import org.apache.cloudstack.storage.datastore.DataStoreStatus;
 import org.apache.cloudstack.storage.datastore.PrimaryDataStore;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO;
+import org.apache.cloudstack.storage.endpoint.EndPointSelector;
+import com.cloud.host.HostVO;
+import com.cloud.host.dao.HostDao;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
 public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
     protected PrimaryDataStore dataStore;
-    protected PrimaryDataStoreDao dataStoreDao;
-    public DefaultPrimaryDataStoreLifeCycleImpl(PrimaryDataStoreDao dataStoreDao) {
-        this.dataStoreDao = dataStoreDao;
+    @Inject
+    EndPointSelector selector;
+    @Inject
+    PrimaryDataStoreDao dataStoreDao;
+    @Inject
+    HostDao hostDao;
+    public DefaultPrimaryDataStoreLifeCycleImpl() {
     }
@@ -44,26 +57,36 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
         return true;
     }
-    protected void attachCluster() {
+    protected void attachCluster(DataStore store) {
         //send down AttachPrimaryDataStoreCmd command to all the hosts in the cluster
-        AttachPrimaryDataStoreCmd cmd = new AttachPrimaryDataStoreCmd(this.dataStore.getUri());
-        /*for (EndPoint ep : dataStore.getEndPoints()) {
-            ep.sendMessage(cmd);
-        } */
+        List<EndPoint> endPoints = selector.selectAll(dataStore);
+        CreatePrimaryDataStoreCmd createCmd = new CreatePrimaryDataStoreCmd(store.getUri());
+        EndPoint ep = endPoints.get(0);
+        HostVO host = hostDao.findById(ep.getId());
+        if (host.getHypervisorType() == HypervisorType.XenServer) {
+            ep.sendMessage(createCmd);
+        }
+        endPoints.get(0).sendMessage(createCmd);
+        AttachPrimaryDataStoreCmd cmd = new AttachPrimaryDataStoreCmd(dataStore.getUri());
+        for (EndPoint endp : endPoints) {
+            endp.sendMessage(cmd);
+        }
     }
     @Override
     public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
-        PrimaryDataStoreVO dataStoreVO = dataStoreDao.findById(this.dataStore.getId());
+        PrimaryDataStoreVO dataStoreVO = dataStoreDao.findById(dataStore.getId());
         dataStoreVO.setDataCenterId(scope.getZoneId());
         dataStoreVO.setPodId(scope.getPodId());
         dataStoreVO.setClusterId(scope.getScopeId());
         dataStoreVO.setStatus(DataStoreStatus.Attaching);
+        dataStoreVO.setScope(scope.getScopeType());
         dataStoreDao.update(dataStoreVO.getId(), dataStoreVO);
-        attachCluster();
+        attachCluster(dataStore);
-        dataStoreVO = dataStoreDao.findById(this.dataStore.getId());
+        dataStoreVO = dataStoreDao.findById(dataStore.getId());
         dataStoreVO.setStatus(DataStoreStatus.Up);
         dataStoreDao.update(dataStoreVO.getId(), dataStoreVO);

DefaultVmwarePrimaryDataStoreLifeCycle.java (deleted)

@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.cloudstack.storage.datastore.lifecycle;
-import org.apache.cloudstack.storage.datastore.PrimaryDataStore;
-import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
-public class DefaultVmwarePrimaryDataStoreLifeCycle extends DefaultPrimaryDataStoreLifeCycleImpl {
-    /**
-     * @param dataStoreDao
-     * @param dataStore
-     */
-    public DefaultVmwarePrimaryDataStoreLifeCycle(PrimaryDataStoreDao dataStoreDao) {
-        super(dataStoreDao);
-        // TODO Auto-generated constructor stub
-    }
-}

DefaultXenPrimaryDataStoreLifeCycle.java (deleted)

@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.cloudstack.storage.datastore.lifecycle;
-import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
-import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd;
-import org.apache.cloudstack.storage.command.CreatePrimaryDataStoreCmd;
-import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
-import com.cloud.agent.api.Answer;
-import com.cloud.utils.exception.CloudRuntimeException;
-public class DefaultXenPrimaryDataStoreLifeCycle extends DefaultPrimaryDataStoreLifeCycleImpl {
-    /**
-     * @param dataStoreDao
-     * @param dataStore
-     */
-    public DefaultXenPrimaryDataStoreLifeCycle(PrimaryDataStoreDao dataStoreDao) {
-        super(dataStoreDao);
-        // TODO Auto-generated constructor stub
-    }
-    @Override
-    public void attachCluster() {
-        String result = null;
-        //send one time is enough, as xenserver is clustered
-        /*CreatePrimaryDataStoreCmd cmd = new CreatePrimaryDataStoreCmd(this.dataStore.getDataStoreTO());
-        String result = null;
-        for (EndPoint ep : dataStore.getEndPoints()) {
-            Answer answer = ep.sendMessage(cmd);
-            if (answer.getResult()) {
-                return;
-            }
-            result = answer.getDetails();
-        }*/
-        if (result != null)
-            throw new CloudRuntimeException("AttachPrimaryDataStoreCmd failed: " + result);
-        super.attachCluster();
-    }
-}

VolumeObject.java

@@ -121,7 +121,7 @@ public class VolumeObject implements VolumeInfo {
         }
         ObjectInDataStoreVO obj = ojbectInStoreMgr.findObject(this.volumeVO.getId(), DataObjectType.VOLUME, this.dataStore.getId(), this.dataStore.getRole());
         if (obj.getState() != ObjectInDataStoreStateMachine.State.Ready) {
-            return this.dataStore.getUri() + File.separator + "&objType=" + DataObjectType.VOLUME + "&size=" + this.volumeVO.getSize();
+            return this.dataStore.getUri() + File.separator + "&objType=" + DataObjectType.VOLUME + "&size=" + this.volumeVO.getSize() + "&name=" + this.volumeVO.getName();
         } else {
             return this.dataStore.getUri() + File.separator + "&objType=" + DataObjectType.VOLUME + "&path=" + obj.getInstallPath();
         }
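Combined with the driver change above, a volume that is not yet Ready is described entirely by query parameters: the store URI contributes role and storeUuid, getUri() here adds objType, size and now name, and grantAccess() appends the storagePath recorded for the pool/host pair. Below is a self-contained sketch of composing such a string and splitting the query back into a map, in the spirit of the getParameters() helper added to XenServerStorageResource further down (all concrete values are invented):

import java.net.URI;
import java.net.URISyntaxException;
import java.util.HashMap;
import java.util.Map;

// Illustrative round trip: build a volume URI the way the new code strings
// parameters together, then split the query back into key/value pairs.
public class VolumeUriExample {

    // Same idea as the getParameters(URI) helper: split the query on '&',
    // then each pair on '='.
    static Map<String, String> parseQuery(URI uri) {
        Map<String, String> params = new HashMap<String, String>();
        for (String pair : uri.getQuery().split("&")) {
            String[] kv = pair.split("=");
            params.put(kv[0], kv[1]);
        }
        return params;
    }

    public static void main(String[] args) throws URISyntaxException {
        // Invented store URI; the real one comes from dataStore.getUri().
        String storeUri = "nfs://192.168.10.2/export/primary?role=Primary&storeUuid=6fb32b6b-96ed-4a3f-9f64-a2f0c2b9f2d1";

        // The volume's getUri() appends objType, size and (now) name for a
        // volume that has not been created yet...
        String volumeUri = storeUri + "&objType=VOLUME&size=5368709120&name=testvolume";

        // ...and grantAccess() tacks on the local mount path recorded for
        // this pool/host pair in the storage pool/host table.
        String accessUri = volumeUri + "&storagePath=/mnt/6fb32b6b";

        Map<String, String> params = parseQuery(new URI(accessUri));
        System.out.println(params.get("objType"));     // VOLUME
        System.out.println(params.get("size"));        // 5368709120
        System.out.println(params.get("name"));        // testvolume
        System.out.println(params.get("storagePath")); // /mnt/6fb32b6b
    }
}

On the XenServer side, execute(CreateObjectCommand) reads size and name back out of this map before creating the VDI on the SR.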

XenServerStorageResource.java

@@ -22,7 +22,10 @@ import java.io.BufferedOutputStream;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
@@ -31,9 +34,9 @@ import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreAnswer;
 import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd;
 import org.apache.cloudstack.storage.command.CopyCmd;
 import org.apache.cloudstack.storage.command.CopyCmdAnswer;
+import org.apache.cloudstack.storage.command.CreateObjectCommand;
 import org.apache.cloudstack.storage.command.CreatePrimaryDataStoreCmd;
 import org.apache.cloudstack.storage.command.CreateVolumeAnswer;
-import org.apache.cloudstack.storage.command.CreateObjectCommand;
 import org.apache.cloudstack.storage.command.CreateVolumeFromBaseImageCommand;
 import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
 import org.apache.cloudstack.storage.datastore.protocol.DataStoreProtocol;
@@ -65,6 +68,8 @@ import com.xensource.xenapi.Types.BadServerResponse;
 import com.xensource.xenapi.Types.XenAPIException;
 import com.xensource.xenapi.VDI;
+import edu.emory.mathcs.backport.java.util.Arrays;
 public class XenServerStorageResource {
     private static final Logger s_logger = Logger.getLogger(XenServerStorageResource.class);
     protected CitrixResourceBase hypervisorResource;
@@ -114,16 +119,39 @@ public class XenServerStorageResource {
         vdi.destroy(conn);
     }
+    private Map<String, String> getParameters(URI uri) {
+        String parameters = uri.getQuery();
+        Map<String, String> params = new HashMap<String, String>();
+        List<String> paraLists = Arrays.asList(parameters.split("&"));
+        for (String para : paraLists) {
+            String[] pair = para.split("=");
+            params.put(pair[0], pair[1]);
+        }
+        return params;
+    }
     protected CreateVolumeAnswer execute(CreateObjectCommand cmd) {
-        VolumeTO volume = null;
-        PrimaryDataStoreTO primaryDataStore = volume.getDataStore();
+        String uriString = cmd.getObjectUri();
+        Map<String, String> params = null;
+        try {
+            URI uri = new URI(uriString);
+            params = getParameters(uri);
+        } catch (URISyntaxException e1) {
+            s_logger.debug("uri exception", e1);
+            return new CreateVolumeAnswer(cmd, false, e1.toString());
+        }
+        long size = Long.parseLong(params.get("size"));
+        String name = params.get("name");
+        String storeUuid = params.get("storagePath");
         Connection conn = hypervisorResource.getConnection();
         VDI vdi = null;
         boolean result = false;
         String errorMsg = null;
         try {
-            SR primaryDataStoreSR = getSRByNameLabel(conn, primaryDataStore.getUuid());
-            vdi = createVdi(conn, volume.getName(), primaryDataStoreSR, volume.getSize());
+            SR primaryDataStoreSR = getSRByNameLabel(conn, storeUuid);
+            vdi = createVdi(conn, name, primaryDataStoreSR, size);
             VDI.Record record = vdi.getRecord(conn);
             result = true;
             return new CreateVolumeAnswer(cmd, record.uuid);
@@ -190,11 +218,14 @@ public class XenServerStorageResource {
         }
     }
-    protected SR getNfsSR(Connection conn, NfsPrimaryDataStoreTO pool) {
+    protected SR getNfsSR(Connection conn, URI uri) {
         Map<String, String> deviceConfig = new HashMap<String, String>();
+        Map<String, String> params = getParameters(uri);
+        String uuid = params.get("storeUuid");
         try {
-            String server = pool.getServer();
-            String serverpath = pool.getPath();
+            String server = uri.getHost();
+            String serverpath = uri.getPath();
             serverpath = serverpath.replace("//", "/");
             Set<SR> srs = SR.getAll(conn);
             for (SR sr : srs) {
@@ -225,21 +256,21 @@ public class XenServerStorageResource {
                 if (server.equals(dc.get("server")) && serverpath.equals(dc.get("serverpath"))) {
                     throw new CloudRuntimeException("There is a SR using the same configuration server:" + dc.get("server") + ", serverpath:"
-                            + dc.get("serverpath") + " for pool " + pool.getUuid() + "on host:" + hypervisorResource.getHost().uuid);
+                            + dc.get("serverpath") + " for pool " + uuid + "on host:" + hypervisorResource.getHost().uuid);
                 }
             }
             deviceConfig.put("server", server);
             deviceConfig.put("serverpath", serverpath);
             Host host = Host.getByUuid(conn, hypervisorResource.getHost().uuid);
-            SR sr = SR.create(conn, host, deviceConfig, new Long(0), pool.getUuid(), Long.toString(pool.getId()), SRType.NFS.toString(), "user", true,
+            SR sr = SR.create(conn, host, deviceConfig, new Long(0), uuid, uuid, SRType.NFS.toString(), "user", true,
                     new HashMap<String, String>());
             sr.scan(conn);
             return sr;
         } catch (XenAPIException e) {
-            throw new CloudRuntimeException("Unable to create NFS SR " + pool.toString(), e);
+            throw new CloudRuntimeException("Unable to create NFS SR " + uuid, e);
         } catch (XmlRpcException e) {
-            throw new CloudRuntimeException("Unable to create NFS SR " + pool.toString(), e);
+            throw new CloudRuntimeException("Unable to create NFS SR " + uuid, e);
         }
     }
     /*
@@ -369,15 +400,26 @@ public class XenServerStorageResource {
     protected Answer execute(CreatePrimaryDataStoreCmd cmd) {
         Connection conn = hypervisorResource.getConnection();
-        PrimaryDataStoreTO dataStore = cmd.getDataStore();
+        String storeUrl = cmd.getDataStore();
+        String scheme = null;
+        String type = null;
+        URI storeUri = null;
         try {
-            if (DataStoreProtocol.NFS.toString().equalsIgnoreCase(dataStore.getType())) {
-                getNfsSR(conn, (NfsPrimaryDataStoreTO)dataStore);
-            } else if (DataStoreProtocol.NFS.toString().equalsIgnoreCase(dataStore.getType())) {
+            storeUri = new URI(storeUrl);
+        } catch(URISyntaxException e) {
+            return new Answer(cmd, false, e.toString());
+        }
+        scheme = storeUri.getScheme();
+        try {
+            if (scheme.equalsIgnoreCase("nfs")) {
+                SR sr = getNfsSR(conn, storeUri);
+            } else if (scheme.equalsIgnoreCase("iscsi")) {
                 //getIscsiSR(conn, dataStore);
-            } else if (dataStore.getType() == StoragePoolType.PreSetup.toString()) {
+            } else if (scheme.equalsIgnoreCase("presetup")) {
             } else {
-                //return new Answer(cmd, false, "The pool type: " + pool.getType().name() + " is not supported.");
+                return new Answer(cmd, false, "The pool type: " + scheme + " is not supported.");
            }
             return new Answer(cmd, true, "success");
         } catch (Exception e) {