move default primary storage plugin into its own pom

Edison Su 2013-03-20 17:20:17 -07:00
parent 9270b4335c
commit 3ed6200ef8
52 changed files with 1931 additions and 1269 deletions

View File

@@ -0,0 +1,28 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.cloud.storage;
import java.util.List;
import org.apache.cloudstack.api.response.StorageProviderResponse;
public interface DataStoreProviderApiService {
public List<StorageProviderResponse> getDataStoreProviders(String type);
}
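The rest of this commit wires this interface into the API layer: BaseCmd injects it and the new ListStorageProvidersCmd calls it. A minimal sketch of another caller, assuming the service is injected the same way (the class name here is hypothetical, not part of the commit):

import java.util.List;
import javax.inject.Inject;
import org.apache.cloudstack.api.response.StorageProviderResponse;
import com.cloud.storage.DataStoreProviderApiService;
public class ProviderListingExample {
    @Inject
    DataStoreProviderApiService dataStoreProviderApiService;
    public void printPrimaryProviders() {
        // type is "primary" or "image", mirroring DataStoreProviderType
        List<StorageProviderResponse> providers = dataStoreProviderApiService.getDataStoreProviders("primary");
        for (StorageProviderResponse p : providers) {
            System.out.println(p.getName() + " (" + p.getType() + ")");
        }
    }
}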

View File

@@ -99,7 +99,7 @@ public interface StoragePool extends Identity, InternalIdentity {
/**
* @return
*/
Long getStorageProviderId();
String getStorageProviderName();
boolean isInMaintenance();
}

View File

@@ -61,6 +61,7 @@ import com.cloud.projects.ProjectService;
import com.cloud.resource.ResourceService;
import com.cloud.server.ManagementService;
import com.cloud.server.TaggedResourceService;
import com.cloud.storage.DataStoreProviderApiService;
import com.cloud.storage.StorageService;
import com.cloud.storage.VolumeApiService;
import com.cloud.storage.snapshot.SnapshotService;
@@ -131,6 +132,7 @@ public abstract class BaseCmd {
@Inject public UsageService _usageService;
@Inject public NetworkUsageService _networkUsageService;
@Inject public VMSnapshotService _vmSnapshotService;
@Inject public DataStoreProviderApiService dataStoreProviderApiService;
public abstract void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException;

View File

@@ -73,8 +73,8 @@ public class CreateStoragePoolCmd extends BaseCmd {
private Long zoneId;
@Parameter(name=ApiConstants.PROVIDER, type=CommandType.STRING,
required=false, description="the storage provider uuid")
private String storageProviderUuid;
required=false, description="the storage provider name")
private String storageProviderName;
@Parameter(name=ApiConstants.SCOPE, type=CommandType.STRING,
required=false, description="the scope of the storage: cluster or zone")
@@ -112,8 +112,8 @@ public class CreateStoragePoolCmd extends BaseCmd {
return zoneId;
}
public String getStorageProviderUuid() {
return this.storageProviderUuid;
public String getStorageProviderName() {
return this.storageProviderName;
}
public String getScope() {

View File

@@ -0,0 +1,72 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.api.command.admin.storage;
import java.util.List;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseListCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.StorageProviderResponse;
import org.apache.log4j.Logger;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.NetworkRuleConflictException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException;
@APICommand(name = "listStorageProviders", description="Lists storage providers.", responseObject=StorageProviderResponse.class)
public class ListStorageProvidersCmd extends BaseListCmd {
public static final Logger s_logger = Logger.getLogger(ListStorageProvidersCmd.class.getName());
private static final String s_name = "liststorageprovidersresponse";
@Parameter(name=ApiConstants.TYPE, type=CommandType.STRING, description="the type of storage provider: either primary or image", required = true)
private String type;
@Override
public String getCommandName() {
return s_name;
}
public String getType() {
return this.type;
}
@Override
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException,
NetworkRuleConflictException {
if (getType() == null) {
throw new ServerApiException(ApiErrorCode.MALFORMED_PARAMETER_ERROR, "need to specify type: either primary or image");
}
List<StorageProviderResponse> providers = this.dataStoreProviderApiService.getDataStoreProviders(getType());
ListResponse<StorageProviderResponse> responses = new ListResponse<StorageProviderResponse>();
for (StorageProviderResponse provider : providers) {
provider.setObjectName("dataStoreProvider");
}
responses.setResponses(providers);
responses.setResponseName(this.getCommandName());
this.setResponseObject(responses);
}
}
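For reference, a hypothetical invocation of the new API (parameter values are illustrative): command=listStorageProviders&type=primary&response=json. Each entry in the resulting liststorageprovidersresponse is tagged dataStoreProvider (set in execute() above) and carries the name and type fields defined by StorageProviderResponse below.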

View File

@@ -0,0 +1,62 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.api.response;
import org.apache.cloudstack.api.BaseResponse;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
public class StorageProviderResponse extends BaseResponse {
@SerializedName("name") @Param(description="the name of the storage provider")
private String name;
@SerializedName("type") @Param(description="the type of the storage provider: primary or image provider")
private String type;
/**
* @return the type
*/
public String getType() {
return type;
}
/**
* @param type the type to set
*/
public void setType(String type) {
this.type = type;
}
/**
* @return the name
*/
public String getName() {
return name;
}
/**
* @param name the name to set
*/
public void setName(String name) {
this.name = name;
}
}

View File

@@ -133,7 +133,7 @@ public class BackupSnapshotCommandTest {
}
@Override
public Long getStorageProviderId() {
public String getStorageProviderName() {
// TODO Auto-generated method stub
return null;
}

View File

@@ -115,7 +115,7 @@ public class SnapshotCommandTest {
}
@Override
public Long getStorageProviderId() {
public String getStorageProviderName() {
// TODO Auto-generated method stub
return null;
}

View File

@@ -134,7 +134,7 @@ public class ResizeVolumeCommandTest {
}
@Override
public Long getStorageProviderId() {
public String getStorageProviderName() {
// TODO Auto-generated method stub
return null;
}

View File

@@ -224,6 +224,11 @@
<artifactId>cloud-plugin-hypervisor-simulator</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-volume-default</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<build>
<defaultGoal>install</defaultGoal>

View File

@@ -278,6 +278,7 @@ listAsyncJobs=15
#### storage pools commands
listStoragePools=3
listStorageProviders=3
createStoragePool=1
updateStoragePool=1
deleteStoragePool=1

View File

@@ -329,5 +329,8 @@
<bean id="BaremetalGuru" class="com.cloud.baremetal.manager.BareMetalGuru">
<property name="name" value="BaremetalGuru"/>
</bean>
<bean id="ClassicalPrimaryDataStoreProvider" class="org.apache.cloudstack.storage.datastore.provider.CloudStackPrimaryDataStoreProviderImpl">
</bean>
</beans>

View File

@@ -21,4 +21,5 @@ package org.apache.cloudstack.engine.datacenter.entity.api;
import com.cloud.storage.StoragePool;
public interface StorageEntity extends DataCenterResourceEntity, StoragePool {
}

View File

@@ -34,9 +34,9 @@ public interface DataStoreLifeCycle {
public boolean unmanaged();
public boolean maintain(long storeId);
public boolean maintain(DataStore store);
public boolean cancelMaintain(long storeId);
public boolean cancelMaintain(DataStore store);
public boolean deleteDataStore(long storeId);
public boolean deleteDataStore(DataStore store);
}

View File

@@ -19,12 +19,19 @@
package org.apache.cloudstack.engine.subsystem.api.storage;
import java.util.Map;
import java.util.Set;
public interface DataStoreProvider {
public DataStoreLifeCycle getLifeCycle();
public static enum DataStoreProviderType {
PRIMARY,
IMAGE
}
public DataStoreLifeCycle getDataStoreLifeCycle();
public DataStoreDriver getDataStoreDriver();
public HypervisorHostListener getHostListener();
public String getName();
public String getUuid();
public long getId();
public boolean configure(Map<String, Object> params);
public Set<DataStoreProviderType> getTypes();
}
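Under the revised contract a provider is identified purely by its unique name and exposes its life cycle, driver, host listener, and supported types directly; the uuid/id accessors are gone. A minimal sketch of an implementation (class and names are illustrative, not from this commit; it is assumed to live alongside the interface's package so the storage types resolve):

import java.util.Collections;
import java.util.Map;
import java.util.Set;
public class ExampleDataStoreProvider implements DataStoreProvider {
    private DataStoreLifeCycle lifeCycle;
    private DataStoreDriver driver;
    @Override
    public String getName() {
        return "example data store provider"; // must be unique; used as the registry key
    }
    @Override
    public boolean configure(Map<String, Object> params) {
        // build lifeCycle and driver here; returning false aborts registration
        return true;
    }
    @Override
    public DataStoreLifeCycle getDataStoreLifeCycle() {
        return lifeCycle;
    }
    @Override
    public DataStoreDriver getDataStoreDriver() {
        return driver;
    }
    @Override
    public HypervisorHostListener getHostListener() {
        return null; // only needed by primary storage providers
    }
    @Override
    public Set<DataStoreProviderType> getTypes() {
        return Collections.singleton(DataStoreProviderType.PRIMARY);
    }
}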

View File

@@ -20,12 +20,12 @@ package org.apache.cloudstack.engine.subsystem.api.storage;
import java.util.List;
import com.cloud.storage.DataStoreProviderApiService;
import com.cloud.utils.component.Manager;
public interface DataStoreProviderManager extends Manager {
public DataStoreProvider getDataStoreProviderByUuid(String uuid);
public DataStoreProvider getDataStoreProviderById(long id);
public interface DataStoreProviderManager extends Manager, DataStoreProviderApiService {
public DataStoreProvider getDataStoreProvider(String name);
public DataStoreProvider getDefaultPrimaryDataStoreProvider();
public List<DataStoreProvider> getDataStoreProviders();
}
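Consumers now resolve providers by name instead of by uuid or database id, falling back to the default primary provider when none is specified. A sketch (injection and class name assumed):

import javax.inject.Inject;
public class ProviderLookupExample {
    @Inject
    DataStoreProviderManager providerMgr;
    public DataStoreProvider resolve(String providerName) {
        // fall back to the built-in default when the caller did not specify a provider
        return providerName != null
                ? providerMgr.getDataStoreProvider(providerName)
                : providerMgr.getDefaultPrimaryDataStoreProvider();
    }
}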

View File

@@ -16,9 +16,8 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.datastore.provider;
package org.apache.cloudstack.engine.subsystem.api.storage;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
public interface ImageDataStoreProvider extends DataStoreProvider {

View File

@@ -0,0 +1,220 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.engine.subsystem.api.storage;
import java.util.Map;
import com.cloud.storage.Storage.StoragePoolType;
public class PrimaryDataStoreParameters {
private Long zoneId;
private Long podId;
private Long clusterId;
private String providerName;
private Map<String, String> details;
private String tags;
private StoragePoolType type;
private String host;
private String path;
private int port;
private String uuid;
private String name;
private String userInfo;
/**
* @return the userInfo
*/
public String getUserInfo() {
return userInfo;
}
/**
* @param userInfo the userInfo to set
*/
public void setUserInfo(String userInfo) {
this.userInfo = userInfo;
}
/**
* @return the name
*/
public String getName() {
return name;
}
/**
* @param name the name to set
*/
public void setName(String name) {
this.name = name;
}
/**
* @return the uuid
*/
public String getUuid() {
return uuid;
}
/**
* @param uuid the uuid to set
*/
public void setUuid(String uuid) {
this.uuid = uuid;
}
/**
* @return the port
*/
public int getPort() {
return port;
}
/**
* @param port the port to set
*/
public void setPort(int port) {
this.port = port;
}
/**
* @return the path
*/
public String getPath() {
return path;
}
/**
* @param path the path to set
*/
public void setPath(String path) {
this.path = path;
}
/**
* @return the host
*/
public String getHost() {
return host;
}
/**
* @param host the host to set
*/
public void setHost(String host) {
this.host = host;
}
/**
* @return the type
*/
public StoragePoolType getType() {
return type;
}
/**
* @param type the type to set
*/
public void setType(StoragePoolType type) {
this.type = type;
}
/**
* @return the tags
*/
public String getTags() {
return tags;
}
/**
* @param tags the tags to set
*/
public void setTags(String tags) {
this.tags = tags;
}
/**
* @return the details
*/
public Map<String, String> getDetails() {
return details;
}
/**
* @param details the details to set
*/
public void setDetails(Map<String, String> details) {
this.details = details;
}
/**
* @return the providerName
*/
public String getProviderName() {
return providerName;
}
/**
* @param providerName the providerName to set
*/
public void setProviderName(String providerName) {
this.providerName = providerName;
}
/**
* @return the clusterId
*/
public Long getClusterId() {
return clusterId;
}
/**
* @param clusterId the clusterId to set
*/
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
/**
* @return the podId
*/
public Long getPodId() {
return podId;
}
/**
* @param podId the podId to set
*/
public void setPodId(Long podId) {
this.podId = podId;
}
/**
* @return the zoneId
*/
public Long getZoneId() {
return zoneId;
}
/**
* @param zoneId the zoneId to set
*/
public void setZoneId(Long zoneId) {
this.zoneId = zoneId;
}
}
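PrimaryDataStoreParameters gives the primary-store life cycle a typed parameter object in place of the loose Map<String, Object> it previously consumed. A sketch of populating it for an NFS pool (hostnames, names, and ids are illustrative; the provider name matches the default registered below):

import java.util.UUID;
import com.cloud.storage.Storage.StoragePoolType;
public class NfsParametersExample {
    public static PrimaryDataStoreParameters buildNfsParameters(long zoneId, Long podId, Long clusterId) {
        PrimaryDataStoreParameters params = new PrimaryDataStoreParameters();
        params.setUuid(UUID.randomUUID().toString());
        params.setName("example-nfs-pool");
        params.setProviderName("ancient primary data store provider"); // the default provider's name
        params.setType(StoragePoolType.NetworkFilesystem);
        params.setHost("nfs.example.org");
        params.setPort(2049);
        params.setPath("/export/primary");
        params.setZoneId(zoneId);
        params.setPodId(podId);
        params.setClusterId(clusterId);
        return params;
    }
}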

View File

@@ -14,3 +14,7 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.engine.subsystem.api.storage;
public interface PrimaryDataStoreProvider extends DataStoreProvider {
}

View File

@@ -80,8 +80,8 @@ public class StoragePoolVO implements StoragePool{
@Enumerated(value = EnumType.STRING)
private StoragePoolStatus status;
@Column(name = "storage_provider_id", updatable = true, nullable = false)
private Long storageProviderId;
@Column(name = "storage_provider_name", updatable = true, nullable = false)
private String storageProviderName;
@Column(name = "host_address")
private String hostAddress;
@@ -180,12 +180,12 @@ public class StoragePoolVO implements StoragePool{
return availableBytes;
}
public Long getStorageProviderId() {
return storageProviderId;
public String getStorageProviderName() {
return storageProviderName;
}
public void setStorageProviderId(Long provider) {
storageProviderId = provider;
public void setStorageProviderName(String providerName) {
storageProviderName = providerName;
}
public long getCapacityBytes() {

View File

@@ -28,7 +28,7 @@ import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
import org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider;
import org.apache.cloudstack.storage.image.ImageDataStoreDriver;
import org.apache.cloudstack.storage.image.datastore.ImageDataStore;
import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager;
@@ -57,21 +57,21 @@ public class ImageDataStoreManagerImpl implements ImageDataStoreManager {
@Override
public ImageDataStore getImageDataStore(long dataStoreId) {
ImageDataStoreVO dataStore = dataStoreDao.findById(dataStoreId);
long providerId = dataStore.getProvider();
ImageDataStoreProvider provider = (ImageDataStoreProvider)providerManager.getDataStoreProviderById(providerId);
String providerName = dataStore.getProviderName();
ImageDataStoreProvider provider = (ImageDataStoreProvider)providerManager.getDataStoreProvider(providerName);
ImageDataStore imgStore = DefaultImageDataStoreImpl.getDataStore(dataStore,
driverMaps.get(provider.getUuid()), provider
driverMaps.get(provider.getName()), provider
);
// TODO Auto-generated method stub
return imgStore;
}
@Override
public boolean registerDriver(String uuid, ImageDataStoreDriver driver) {
if (driverMaps.containsKey(uuid)) {
public boolean registerDriver(String providerName, ImageDataStoreDriver driver) {
if (driverMaps.containsKey(providerName)) {
return false;
}
driverMaps.put(uuid, driver);
driverMaps.put(providerName, driver);
return true;
}

View File

@@ -19,14 +19,18 @@
package org.apache.cloudstack.storage.image.store;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider;
import org.apache.cloudstack.storage.image.ImageDataStoreDriver;
import org.apache.cloudstack.storage.image.datastore.ImageDataStoreHelper;
import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager;
@@ -47,10 +51,9 @@ public class AncientImageDataStoreProvider implements ImageDataStoreProvider {
ImageDataStoreManager storeMgr;
@Inject
ImageDataStoreHelper helper;
long id;
String uuid;
@Override
public DataStoreLifeCycle getLifeCycle() {
public DataStoreLifeCycle getDataStoreLifeCycle() {
return lifeCycle;
}
@@ -59,23 +62,12 @@ public class AncientImageDataStoreProvider implements ImageDataStoreProvider {
return this.name;
}
@Override
public String getUuid() {
return this.uuid;
}
@Override
public long getId() {
return this.id;
}
@Override
public boolean configure(Map<String, Object> params) {
lifeCycle = ComponentContext.inject(DefaultImageDataStoreLifeCycle.class);
driver = ComponentContext.inject(AncientImageDataStoreDriverImpl.class);
uuid = (String)params.get("uuid");
id = (Long)params.get("id");
storeMgr.registerDriver(uuid, driver);
storeMgr.registerDriver(this.getName(), driver);
Map<String, Object> infos = new HashMap<String, Object>();
String dataStoreName = UUID.nameUUIDFromBytes(this.name.getBytes()).toString();
@@ -83,10 +75,27 @@ public class AncientImageDataStoreProvider implements ImageDataStoreProvider {
infos.put("uuid", dataStoreName);
infos.put("protocol", "http");
infos.put("scope", ScopeType.GLOBAL);
infos.put("provider", this.getId());
DataStoreLifeCycle lifeCycle = this.getLifeCycle();
infos.put("providerName", this.getName());
DataStoreLifeCycle lifeCycle = this.getDataStoreLifeCycle();
lifeCycle.initialize(infos);
return true;
}
@Override
public DataStoreDriver getDataStoreDriver() {
return this.driver;
}
@Override
public HypervisorHostListener getHostListener() {
return null;
}
@Override
public Set<DataStoreProviderType> getTypes() {
Set<DataStoreProviderType> types = new HashSet<DataStoreProviderType>();
types.add(DataStoreProviderType.IMAGE);
return types;
}
}

View File

@@ -25,13 +25,13 @@ import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager;
import org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider;
import org.apache.cloudstack.storage.image.ImageDataStoreDriver;
import org.apache.cloudstack.storage.image.datastore.ImageDataStore;
import org.apache.cloudstack.storage.image.db.ImageDataStoreVO;

View File

@@ -18,12 +18,16 @@
*/
package org.apache.cloudstack.storage.image.store;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
import org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider;
import org.apache.cloudstack.storage.image.ImageDataStoreDriver;
import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager;
import org.apache.cloudstack.storage.image.driver.DefaultImageDataStoreDriverImpl;
@@ -41,7 +45,7 @@ public class DefaultImageDataStoreProvider implements ImageDataStoreProvider {
long id;
String uuid;
@Override
public DataStoreLifeCycle getLifeCycle() {
public DataStoreLifeCycle getDataStoreLifeCycle() {
return lifeCycle;
}
@@ -50,24 +54,29 @@ public class DefaultImageDataStoreProvider implements ImageDataStoreProvider {
return this.name;
}
@Override
public String getUuid() {
return this.uuid;
}
@Override
public long getId() {
return this.id;
}
@Override
public boolean configure(Map<String, Object> params) {
lifeCycle = ComponentContext.inject(DefaultImageDataStoreLifeCycle.class);
driver = ComponentContext.inject(DefaultImageDataStoreDriverImpl.class);
uuid = (String)params.get("uuid");
id = (Long)params.get("id");
storeMgr.registerDriver(uuid, driver);
storeMgr.registerDriver(this.getName(), driver);
return true;
}
@Override
public Set<DataStoreProviderType> getTypes() {
Set<DataStoreProviderType> types = new HashSet<DataStoreProviderType>();
types.add(DataStoreProviderType.IMAGE);
return types;
}
@Override
public DataStoreDriver getDataStoreDriver() {
return this.driver;
}
@Override
public HypervisorHostListener getHostListener() {
return null;
}
}

View File

@@ -86,27 +86,22 @@ public class DefaultImageDataStoreLifeCycle implements ImageDataStoreLifeCycle {
@Override
public boolean maintain(long storeId) {
public boolean maintain(DataStore store) {
// TODO Auto-generated method stub
return false;
}
@Override
public boolean cancelMaintain(long storeId) {
public boolean cancelMaintain(DataStore store) {
// TODO Auto-generated method stub
return false;
}
@Override
public boolean deleteDataStore(long storeId) {
public boolean deleteDataStore(DataStore store) {
// TODO Auto-generated method stub
return false;
}
}

View File

@@ -133,7 +133,7 @@ public class StorageAllocatorTest {
storage.setCapacityBytes(20000);
storage.setHostAddress(UUID.randomUUID().toString());
storage.setPath(UUID.randomUUID().toString());
storage.setStorageProviderId(provider.getId());
storage.setStorageProviderName(provider.getName());
storage = storagePoolDao.persist(storage);
storagePoolId = storage.getId();
@@ -176,7 +176,7 @@ public class StorageAllocatorTest {
storage.setCapacityBytes(20000);
storage.setHostAddress(UUID.randomUUID().toString());
storage.setPath(UUID.randomUUID().toString());
storage.setStorageProviderId(provider.getId());
storage.setStorageProviderName(provider.getName());
StoragePoolVO newStorage = storagePoolDao.persist(storage);
newStorageId = newStorage.getId();

View File

@@ -281,9 +281,9 @@ public class volumeServiceTest extends CloudStackTestNGBase {
params.put("port", "1");
params.put("roles", DataStoreRole.Primary.toString());
params.put("uuid", UUID.nameUUIDFromBytes(this.getPrimaryStorageUrl().getBytes()).toString());
params.put("providerId", String.valueOf(provider.getId()));
params.put("providerName", String.valueOf(provider.getName()));
DataStoreLifeCycle lifeCycle = provider.getLifeCycle();
DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
this.primaryStore = lifeCycle.initialize(params);
ClusterScope scope = new ClusterScope(clusterId, podId, dcId);
lifeCycle.attachCluster(this.primaryStore, scope);
@@ -297,8 +297,8 @@ public class volumeServiceTest extends CloudStackTestNGBase {
params.put("uuid", name);
params.put("protocol", "http");
params.put("scope", ScopeType.GLOBAL.toString());
params.put("provider", Long.toString(provider.getId()));
DataStoreLifeCycle lifeCycle = provider.getLifeCycle();
params.put("providerName", name);
DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
DataStore store = lifeCycle.initialize(params);
return store;
}
@@ -323,9 +323,9 @@ public class volumeServiceTest extends CloudStackTestNGBase {
params.put("port", "1");
params.put("roles", DataStoreRole.Primary.toString());
params.put("uuid", UUID.nameUUIDFromBytes(this.getPrimaryStorageUrl().getBytes()).toString());
params.put("providerId", String.valueOf(provider.getId()));
params.put("providerName", String.valueOf(provider.getName()));
DataStoreLifeCycle lifeCycle = provider.getLifeCycle();
DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
DataStore store = lifeCycle.initialize(params);
ClusterScope scope = new ClusterScope(clusterId, podId, dcId);
lifeCycle.attachCluster(store, scope);

View File

@@ -242,16 +242,16 @@ public class PrimaryDataStoreEntityImpl implements StorageEntity {
}
@Override
public Long getStorageProviderId() {
// TODO Auto-generated method stub
return null;
}
@Override
public boolean isInMaintenance() {
// TODO Auto-generated method stub
return false;
}
@Override
public String getStorageProviderName() {
// TODO Auto-generated method stub
return null;
}
}

View File

@@ -26,6 +26,6 @@ public interface PrimaryDataStoreProviderManager {
public PrimaryDataStore getPrimaryDataStore(long dataStoreId);
public PrimaryDataStore getPrimaryDataStore(String uuid);
boolean registerDriver(String uuid, PrimaryDataStoreDriver driver);
boolean registerHostListener(String uuid, HypervisorHostListener listener);
boolean registerDriver(String providerName, PrimaryDataStoreDriver driver);
boolean registerHostListener(String providerName, HypervisorHostListener listener);
}

View File

@@ -18,21 +18,28 @@
*/
package org.apache.cloudstack.storage.datastore.provider;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.Set;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.api.response.StorageProviderResponse;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider.DataStoreProviderType;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager;
import org.apache.cloudstack.storage.datastore.db.DataStoreProviderDao;
import org.apache.cloudstack.storage.datastore.db.DataStoreProviderVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.utils.component.ManagerBase;
@Component
@@ -44,15 +51,11 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto
@Inject
DataStoreProviderDao providerDao;
protected Map<String, DataStoreProvider> providerMap = new HashMap<String, DataStoreProvider>();
@Override
public DataStoreProvider getDataStoreProviderByUuid(String uuid) {
return providerMap.get(uuid);
}
@Inject
PrimaryDataStoreProviderManager primaryDataStoreProviderMgr;
@Override
public DataStoreProvider getDataStoreProvider(String name) {
DataStoreProviderVO dspv = providerDao.findByName(name);
return providerMap.get(dspv.getUuid());
return providerMap.get(name);
}
@Override
@@ -60,59 +63,86 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto
// TODO Auto-generated method stub
return null;
}
public List<StorageProviderResponse> getPrimayrDataStoreProviders() {
List<StorageProviderResponse> providers = new ArrayList<StorageProviderResponse>();
for (DataStoreProvider provider : providerMap.values()) {
if (provider instanceof PrimaryDataStoreProvider) {
StorageProviderResponse response = new StorageProviderResponse();
response.setName(provider.getName());
response.setType(DataStoreProvider.DataStoreProviderType.PRIMARY.toString());
providers.add(response);
}
}
return providers;
}
public List<StorageProviderResponse> getImageDataStoreProviders() {
List<StorageProviderResponse> providers = new ArrayList<StorageProviderResponse>();
for (DataStoreProvider provider : providerMap.values()) {
if (provider instanceof ImageDataStoreProvider) {
StorageProviderResponse response = new StorageProviderResponse();
response.setName(provider.getName());
response.setType(DataStoreProvider.DataStoreProviderType.IMAGE.toString());
providers.add(response);
}
}
return providers;
}
@Override
public boolean configure(String name, Map<String, Object> params)
throws ConfigurationException {
Map<String, Object> copyParams = new HashMap<String, Object>(params);
//TODO: hold global lock
List<DataStoreProviderVO> providerVos = providerDao.listAll();
for (DataStoreProvider provider : providers) {
boolean existingProvider = false;
DataStoreProviderVO providerVO = null;
for (DataStoreProviderVO prov : providerVos) {
if (prov.getName().equalsIgnoreCase(provider.getName())) {
existingProvider = true;
providerVO = prov;
break;
}
String providerName = provider.getName();
if (providerMap.get(providerName) != null) {
s_logger.debug("Failed to register data store provider, provider name: " + providerName + " is not unique");
return false;
}
String uuid = null;
if (!existingProvider) {
uuid = UUID.nameUUIDFromBytes(provider.getName().getBytes()).toString();
providerVO = new DataStoreProviderVO();
providerVO.setName(provider.getName());
providerVO.setUuid(uuid);
providerVO = providerDao.persist(providerVO);
} else {
uuid = providerVO.getUuid();
}
copyParams.put("uuid", uuid);
copyParams.put("id", providerVO.getId());
providerMap.put(uuid, provider);
s_logger.debug("registering data store provider:" + provider.getName());
providerMap.put(providerName, provider);
try {
boolean registrationResult = provider.configure(copyParams);
if (!registrationResult) {
providerMap.remove(uuid);
providerMap.remove(providerName);
s_logger.debug("Failed to register data store provider: " + providerName);
return false;
}
Set<DataStoreProviderType> types = provider.getTypes();
if (types.contains(DataStoreProviderType.PRIMARY)) {
primaryDataStoreProviderMgr.registerDriver(provider.getName(), (PrimaryDataStoreDriver)provider.getDataStoreDriver());
primaryDataStoreProviderMgr.registerHostListener(provider.getName(), provider.getHostListener());
}
} catch(Exception e) {
s_logger.debug("configure provider failed", e);
providerMap.remove(uuid);
providerMap.remove(providerName);
}
}
return true;
}
@Override
public DataStoreProvider getDataStoreProviderById(long id) {
DataStoreProviderVO provider = providerDao.findById(id);
return providerMap.get(provider.getUuid());
}
@Override
public DataStoreProvider getDefaultPrimaryDataStoreProvider() {
return this.getDataStoreProvider("ancient primary data store provider");
}
@Override
public List<StorageProviderResponse> getDataStoreProviders(String type) {
if (type == null) {
throw new InvalidParameterValueException("Invalid parameter, need to specify type: either primary or image");
}
if (type.equalsIgnoreCase(DataStoreProvider.DataStoreProviderType.PRIMARY.toString())) {
return this.getPrimaryDataStoreProviders();
} else if (type.equalsIgnoreCase(DataStoreProvider.DataStoreProviderType.IMAGE.toString())) {
return this.getImageDataStoreProviders();
} else {
throw new InvalidParameterValueException("Invalid parameter: " + type);
}
}
}

View File

@@ -1,23 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.provider;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
public interface PrimaryDataStoreProvider extends DataStoreProvider {
}

View File

@@ -34,14 +34,14 @@ public class ImageDataStoreHelper {
@Inject
ImageDataStoreDao imageStoreDao;
public ImageDataStoreVO createImageDataStore(Map<String, Object> params) {
ImageDataStoreVO store = imageStoreDao.findByUuid((String)params.get("uuid"));
ImageDataStoreVO store = imageStoreDao.findByName((String)params.get("name"));
if (store != null) {
return store;
}
store = new ImageDataStoreVO();
store.setName((String)params.get("name"));
store.setProtocol((String)params.get("protocol"));
store.setProvider((Long)params.get("provider"));
store.setProviderName((String)params.get("providerName"));
store.setScope((ScopeType)params.get("scope"));
store.setUuid((String)params.get("uuid"));
store = imageStoreDao.persist(store);

View File

@@ -45,8 +45,8 @@ public class ImageDataStoreVO {
@Column(name = "protocol", nullable = false)
private String protocol;
@Column(name = "image_provider_id", nullable = false)
private long provider;
@Column(name = "image_provider_name", nullable = false)
private String providerName;
@Column(name = "data_center_id")
private long dcId;
@@ -64,16 +64,16 @@ public class ImageDataStoreVO {
return this.name;
}
public long getProvider() {
return this.provider;
public String getProviderName() {
return this.providerName;
}
public void setName(String name) {
this.name = name;
}
public void setProvider(long provider) {
this.provider = provider;
public void setProviderName(String provider) {
this.providerName = provider;
}
public void setProtocol(String protocol) {

View File

@@ -18,57 +18,181 @@
*/
package org.apache.cloudstack.storage.volume.datastore;
import java.util.List;
import java.util.Map;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.DeleteStoragePoolCommand;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.alert.AlertManager;
import com.cloud.capacity.Capacity;
import com.cloud.capacity.CapacityVO;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.exception.CloudRuntimeException;
@Component
public class PrimaryDataStoreHelper {
private static final Logger s_logger = Logger
.getLogger(PrimaryDataStoreHelper.class);
@Inject
private PrimaryDataStoreDao dataStoreDao;
public StoragePoolVO createPrimaryDataStore(Map<String, Object> params) {
StoragePoolVO dataStoreVO = dataStoreDao.findPoolByUUID((String)params.get("uuid"));
@Inject
DataStoreManager dataStoreMgr;
@Inject
StorageManager storageMgr;
@Inject
protected CapacityDao _capacityDao;
@Inject
protected StoragePoolHostDao storagePoolHostDao;
public DataStore createPrimaryDataStore(PrimaryDataStoreParameters params) {
StoragePoolVO dataStoreVO = dataStoreDao.findPoolByUUID(params.getUuid());
if (dataStoreVO != null) {
throw new CloudRuntimeException("duplicate uuid: " + params.get("uuid"));
throw new CloudRuntimeException("duplicate uuid: " + params.getUuid());
}
dataStoreVO = new StoragePoolVO();
dataStoreVO.setStorageProviderId(Long.parseLong((String)params.get("providerId")));
dataStoreVO.setHostAddress((String)params.get("server"));
dataStoreVO.setPath((String)params.get("path"));
dataStoreVO.setPoolType((StoragePoolType)params.get("protocol"));
dataStoreVO.setPort(Integer.parseInt((String)params.get("port")));
dataStoreVO.setName((String)params.get("name"));
dataStoreVO.setUuid((String)params.get("uuid"));
dataStoreVO = dataStoreDao.persist(dataStoreVO);
return dataStoreVO;
dataStoreVO.setStorageProviderName(params.getProviderName());
dataStoreVO.setHostAddress(params.getHost());
dataStoreVO.setPath(params.getPath());
dataStoreVO.setPoolType(params.getType());
dataStoreVO.setPort(params.getPort());
dataStoreVO.setName(params.getName());
dataStoreVO.setUuid(params.getUuid());
dataStoreVO.setDataCenterId(params.getZoneId());
dataStoreVO.setPodId(params.getPodId());
dataStoreVO.setClusterId(params.getClusterId());
dataStoreVO.setStatus(StoragePoolStatus.Initialized);
dataStoreVO.setUserInfo(params.getUserInfo());
Map<String, String> details = params.getDetails();
String tags = params.getTags();
if (tags != null) {
String[] tokens = tags.split(",");
for (String tag : tokens) {
tag = tag.trim();
if (tag.length() == 0) {
continue;
}
details.put(tag, "true");
}
}
dataStoreVO = dataStoreDao.persist(dataStoreVO, details);
return dataStoreMgr.getDataStore(dataStoreVO.getId(), DataStoreRole.Primary);
}
public boolean deletePrimaryDataStore(long id) {
StoragePoolVO dataStoreVO = dataStoreDao.findById(id);
if (dataStoreVO == null) {
throw new CloudRuntimeException("can't find store: " + id);
public DataStore attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
StoragePoolHostVO poolHost = storagePoolHostDao.findByPoolHost(store.getId(), scope.getScopeId());
if (poolHost == null) {
poolHost = new StoragePoolHostVO(store.getId(), scope.getScopeId(), existingInfo.getLocalPath());
storagePoolHostDao.persist(poolHost);
}
dataStoreDao.remove(id);
StoragePoolVO pool = this.dataStoreDao.findById(store.getId());
pool.setScope(scope.getScopeType());
pool.setAvailableBytes(existingInfo.getAvailableBytes());
pool.setCapacityBytes(existingInfo.getCapacityBytes());
pool.setStatus(StoragePoolStatus.Up);
this.dataStoreDao.update(pool.getId(), pool);
this.storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, pool.getCapacityBytes() - pool.getAvailableBytes());
return dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
}
public DataStore attachCluster(DataStore store) {
StoragePoolVO pool = this.dataStoreDao.findById(store.getId());
storageMgr.createCapacityEntry(pool.getId());
pool.setScope(ScopeType.CLUSTER);
pool.setStatus(StoragePoolStatus.Up);
this.dataStoreDao.update(pool.getId(), pool);
return dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary);
}
public DataStore attachZone(DataStore store) {
StoragePoolVO pool = this.dataStoreDao.findById(store.getId());
pool.setScope(ScopeType.ZONE);
pool.setStatus(StoragePoolStatus.Up);
this.dataStoreDao.update(pool.getId(), pool);
return dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary);
}
public boolean maintain(DataStore store) {
StoragePoolVO pool = this.dataStoreDao.findById(store.getId());
pool.setStatus(StoragePoolStatus.Maintenance);
this.dataStoreDao.update(pool.getId(), pool);
return true;
}
public void attachCluster(DataStore dataStore) {
//send down AttachPrimaryDataStoreCmd command to all the hosts in the cluster
AttachPrimaryDataStoreCmd cmd = new AttachPrimaryDataStoreCmd(dataStore.getUri());
/*for (EndPoint ep : dataStore.getEndPoints()) {
ep.sendMessage(cmd);
} */
public boolean cancelMaintain(DataStore store) {
StoragePoolVO pool = this.dataStoreDao.findById(store.getId());
pool.setStatus(StoragePoolStatus.Up);
dataStoreDao.update(store.getId(), pool);
return true;
}
protected boolean deletePoolStats(Long poolId) {
CapacityVO capacity1 = _capacityDao.findByHostIdType(poolId,
CapacityVO.CAPACITY_TYPE_STORAGE);
CapacityVO capacity2 = _capacityDao.findByHostIdType(poolId,
CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED);
if (capacity1 != null) {
_capacityDao.remove(capacity1.getId());
}
if (capacity2 != null) {
_capacityDao.remove(capacity2.getId());
}
return true;
}
public boolean deletePrimaryDataStore(DataStore store) {
List<StoragePoolHostVO> hostPoolRecords = this.storagePoolHostDao
.listByPoolId(store.getId());
StoragePoolVO poolVO = this.dataStoreDao.findById(store.getId());
Transaction txn = Transaction.currentTxn();
txn.start();
for (StoragePoolHostVO host : hostPoolRecords) {
storagePoolHostDao.deleteStoragePoolHostDetails(
host.getHostId(), host.getPoolId());
}
poolVO.setUuid(null);
this.dataStoreDao.update(poolVO.getId(), poolVO);
dataStoreDao.remove(poolVO.getId());
deletePoolStats(poolVO.getId());
// Delete op_host_capacity entries
this._capacityDao.removeBy(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED,
null, null, null, poolVO.getId());
txn.commit();
s_logger.debug("Storage pool id=" + poolVO.getId()
+ " is removed successfully");
return true;
}
}
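With the persistence, capacity, and host-association bookkeeping centralized in PrimaryDataStoreHelper, a provider's life cycle implementation can stay thin. A sketch of a life cycle delegating to the helper (wiring and class name are assumptions, not code from this commit):

import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
public class ExampleLifeCycleSketch {
    @Inject
    PrimaryDataStoreHelper dataStoreHelper;
    public DataStore initialize(PrimaryDataStoreParameters params) {
        // persists the pool row in the Initialized state and returns the DataStore facade
        return dataStoreHelper.createPrimaryDataStore(params);
    }
    public boolean maintain(DataStore store) {
        // flips the pool to Maintenance; host-side work happens around this call
        return dataStoreHelper.maintain(store);
    }
    public boolean cancelMaintain(DataStore store) {
        return dataStoreHelper.cancelMaintain(store);
    }
}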

View File

@@ -331,13 +331,13 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore {
return this.pdsv.getPodId();
}
@Override
public Long getStorageProviderId() {
return this.pdsv.getStorageProviderId();
}
@Override
public boolean isInMaintenance() {
return this.getStatus() == StoragePoolStatus.Maintenance;
}
@Override
public String getStorageProviderName() {
return this.pdsv.getStorageProviderName();
}
}

View File

@@ -1,963 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.datastore.lifecycle;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreStatus;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.log4j.Logger;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.CreateStoragePoolCommand;
import com.cloud.agent.api.DeleteStoragePoolCommand;
import com.cloud.agent.api.ModifyStoragePoolCommand;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.alert.AlertManager;
import com.cloud.capacity.Capacity;
import com.cloud.capacity.CapacityVO;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.exception.DiscoveryException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.Status;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.resource.ResourceManager;
import com.cloud.server.ManagementServer;
import com.cloud.storage.OCFS2Manager;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolDiscoverer;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.storage.StoragePoolWorkVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.StoragePoolWorkDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.Account;
import com.cloud.user.User;
import com.cloud.user.UserContext;
import com.cloud.user.dao.UserDao;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.UriUtils;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.exception.ExecutionException;
import com.cloud.vm.ConsoleProxyVO;
import com.cloud.vm.DomainRouterVO;
import com.cloud.vm.SecondaryStorageVmVO;
import com.cloud.vm.UserVmVO;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.State;
import com.cloud.vm.VirtualMachineManager;
import com.cloud.vm.dao.ConsoleProxyDao;
import com.cloud.vm.dao.DomainRouterDao;
import com.cloud.vm.dao.SecondaryStorageVmDao;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.VMInstanceDao;
public class AncientPrimaryDataStoreLifeCycleImpl implements
PrimaryDataStoreLifeCycle {
private static final Logger s_logger = Logger
.getLogger(AncientPrimaryDataStoreLifeCycleImpl.class);
@Inject
protected ResourceManager _resourceMgr;
protected List<StoragePoolDiscoverer> _discoverers;
@Inject
PrimaryDataStoreDao primaryDataStoreDao;
@Inject
protected OCFS2Manager _ocfs2Mgr;
@Inject
DataStoreManager dataStoreMgr;
@Inject
AgentManager agentMgr;
@Inject
StorageManager storageMgr;
@Inject
protected CapacityDao _capacityDao;
@Inject
VolumeDao volumeDao;
@Inject
VMInstanceDao vmDao;
@Inject
ManagementServer server;
@Inject
protected VirtualMachineManager vmMgr;
@Inject
protected SecondaryStorageVmDao _secStrgDao;
@Inject
UserVmDao userVmDao;
@Inject
protected UserDao _userDao;
@Inject
protected DomainRouterDao _domrDao;
@Inject
protected StoragePoolHostDao _storagePoolHostDao;
@Inject
protected AlertManager _alertMgr;
@Inject
protected ConsoleProxyDao _consoleProxyDao;
@Inject
protected StoragePoolWorkDao _storagePoolWorkDao;
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
Long clusterId = (Long) dsInfos.get("clusterId");
Long podId = (Long) dsInfos.get("podId");
Long zoneId = (Long) dsInfos.get("zoneId");
String url = (String) dsInfos.get("url");
Long providerId = (Long)dsInfos.get("providerId");
if (clusterId != null && podId == null) {
throw new InvalidParameterValueException(
"Cluster id requires pod id");
}
URI uri = null;
try {
uri = new URI(UriUtils.encodeURIComponent(url));
if (uri.getScheme() == null) {
throw new InvalidParameterValueException("scheme is null "
+ url + ", add nfs:// as a prefix");
} else if (uri.getScheme().equalsIgnoreCase("nfs")) {
String uriHost = uri.getHost();
String uriPath = uri.getPath();
if (uriHost == null || uriPath == null
|| uriHost.trim().isEmpty() || uriPath.trim().isEmpty()) {
throw new InvalidParameterValueException(
"host or path is null, should be nfs://hostname/path");
}
} else if (uri.getScheme().equalsIgnoreCase("sharedMountPoint")) {
String uriPath = uri.getPath();
if (uriPath == null) {
throw new InvalidParameterValueException(
"host or path is null, should be sharedmountpoint://localhost/path");
}
} else if (uri.getScheme().equalsIgnoreCase("rbd")) {
String uriPath = uri.getPath();
if (uriPath == null) {
throw new InvalidParameterValueException(
"host or path is null, should be rbd://hostname/pool");
}
}
} catch (URISyntaxException e) {
throw new InvalidParameterValueException(url
+ " is not a valid uri");
}
String tags = (String) dsInfos.get("tags");
Map<String, String> details = (Map<String, String>) dsInfos
.get("details");
if (tags != null) {
String[] tokens = tags.split(",");
for (String tag : tokens) {
tag = tag.trim();
if (tag.length() == 0) {
continue;
}
details.put(tag, "true");
}
}
String scheme = uri.getScheme();
String storageHost = uri.getHost();
String hostPath = uri.getPath();
Object localStorage = dsInfos.get("localStorage");
if (localStorage != null) {
hostPath = hostPath.replace("/", "");
}
String userInfo = uri.getUserInfo();
int port = uri.getPort();
StoragePoolVO pool = null;
if (s_logger.isDebugEnabled()) {
s_logger.debug("createPool Params @ scheme - " + scheme
+ " storageHost - " + storageHost + " hostPath - "
+ hostPath + " port - " + port);
}
if (scheme.equalsIgnoreCase("nfs")) {
if (port == -1) {
port = 2049;
}
pool = new StoragePoolVO(StoragePoolType.NetworkFilesystem,
storageHost, port, hostPath);
} else if (scheme.equalsIgnoreCase("file")) {
if (port == -1) {
port = 0;
}
pool = new StoragePoolVO(StoragePoolType.Filesystem,
"localhost", 0, hostPath);
} else if (scheme.equalsIgnoreCase("sharedMountPoint")) {
pool = new StoragePoolVO(StoragePoolType.SharedMountPoint,
storageHost, 0, hostPath);
} else if (scheme.equalsIgnoreCase("clvm")) {
pool = new StoragePoolVO(StoragePoolType.CLVM, storageHost, 0,
hostPath.replaceFirst("/", ""));
} else if (scheme.equalsIgnoreCase("rbd")) {
if (port == -1) {
port = 6789;
}
pool = new StoragePoolVO(StoragePoolType.RBD, storageHost,
port, hostPath.replaceFirst("/", ""));
pool.setUserInfo(userInfo);
} else if (scheme.equalsIgnoreCase("PreSetup")) {
pool = new StoragePoolVO(StoragePoolType.PreSetup,
storageHost, 0, hostPath);
} else if (scheme.equalsIgnoreCase("iscsi")) {
String[] tokens = hostPath.split("/");
int lun = NumbersUtil.parseInt(tokens[tokens.length - 1], -1);
if (port == -1) {
port = 3260;
}
if (lun != -1) {
if (clusterId == null) {
throw new IllegalArgumentException(
"IscsiLUN need to have clusters specified");
}
hostPath.replaceFirst("/", "");
pool = new StoragePoolVO(StoragePoolType.IscsiLUN,
storageHost, port, hostPath);
} else {
for (StoragePoolDiscoverer discoverer : _discoverers) {
Map<StoragePoolVO, Map<String, String>> pools;
try {
pools = discoverer.find(zoneId, podId, uri, details);
} catch (DiscoveryException e) {
throw new IllegalArgumentException(
"Not enough information for discovery " + uri,
e);
}
if (pools != null) {
Map.Entry<StoragePoolVO, Map<String, String>> entry = pools
.entrySet().iterator().next();
pool = entry.getKey();
details = entry.getValue();
break;
}
}
}
} else if (scheme.equalsIgnoreCase("iso")) {
if (port == -1) {
port = 2049;
}
pool = new StoragePoolVO(StoragePoolType.ISO, storageHost,
port, hostPath);
} else if (scheme.equalsIgnoreCase("vmfs")) {
pool = new StoragePoolVO(StoragePoolType.VMFS,
"VMFS datastore: " + hostPath, 0, hostPath);
} else if (scheme.equalsIgnoreCase("ocfs2")) {
port = 7777;
pool = new StoragePoolVO(StoragePoolType.OCFS2, "clustered",
port, hostPath);
} else {
StoragePoolType type = Enum.valueOf(StoragePoolType.class, scheme);
if (type != null) {
pool = new StoragePoolVO(type, storageHost,
0, hostPath);
} else {
s_logger.warn("Unable to figure out the scheme for URI: " + uri);
throw new IllegalArgumentException(
"Unable to figure out the scheme for URI: " + uri);
}
}
if (pool == null) {
s_logger.warn("Unable to figure out the scheme for URI: " + uri);
throw new IllegalArgumentException(
"Unable to figure out the scheme for URI: " + uri);
}
if (localStorage == null) {
List<StoragePoolVO> pools = primaryDataStoreDao
.listPoolByHostPath(storageHost, hostPath);
if (!pools.isEmpty() && !scheme.equalsIgnoreCase("sharedmountpoint")) {
Long oldPodId = pools.get(0).getPodId();
throw new CloudRuntimeException("Storage pool " + uri
+ " already in use by another pod (id=" + oldPodId + ")");
}
}
long poolId = primaryDataStoreDao.getNextInSequence(Long.class, "id");
Object existingUuid = dsInfos.get("uuid");
String uuid = null;
if (existingUuid != null) {
uuid = (String)existingUuid;
} else if (scheme.equalsIgnoreCase("sharedmountpoint")
|| scheme.equalsIgnoreCase("clvm")) {
uuid = UUID.randomUUID().toString();
} else if (scheme.equalsIgnoreCase("PreSetup")) {
uuid = hostPath.replace("/", "");
} else {
uuid = UUID.nameUUIDFromBytes(
new String(storageHost + hostPath).getBytes()).toString();
}
List<StoragePoolVO> spHandles = primaryDataStoreDao
.findIfDuplicatePoolsExistByUUID(uuid);
if ((spHandles != null) && (spHandles.size() > 0)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Another active pool with the same uuid already exists");
}
throw new CloudRuntimeException(
"Another active pool with the same uuid already exists");
}
String poolName = (String) dsInfos.get("name");
if (s_logger.isDebugEnabled()) {
s_logger.debug("In createPool Setting poolId - " + poolId
+ " uuid - " + uuid + " zoneId - " + zoneId + " podId - "
+ podId + " poolName - " + poolName);
}
pool.setId(poolId);
pool.setUuid(uuid);
pool.setDataCenterId(zoneId);
pool.setPodId(podId);
pool.setName(poolName);
pool.setClusterId(clusterId);
pool.setStorageProviderId(providerId);
pool.setStatus(StoragePoolStatus.Initialized);
pool = primaryDataStoreDao.persist(pool, details);
return dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
}
protected boolean createStoragePool(long hostId, StoragePool pool) {
s_logger.debug("creating pool " + pool.getName() + " on host "
+ hostId);
if (pool.getPoolType() != StoragePoolType.NetworkFilesystem
&& pool.getPoolType() != StoragePoolType.Filesystem
&& pool.getPoolType() != StoragePoolType.IscsiLUN
&& pool.getPoolType() != StoragePoolType.Iscsi
&& pool.getPoolType() != StoragePoolType.VMFS
&& pool.getPoolType() != StoragePoolType.SharedMountPoint
&& pool.getPoolType() != StoragePoolType.PreSetup
&& pool.getPoolType() != StoragePoolType.OCFS2
&& pool.getPoolType() != StoragePoolType.RBD
&& pool.getPoolType() != StoragePoolType.CLVM) {
s_logger.warn(" Doesn't support storage pool type "
+ pool.getPoolType());
return false;
}
CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool);
final Answer answer = agentMgr.easySend(hostId, cmd);
if (answer != null && answer.getResult()) {
return true;
} else {
primaryDataStoreDao.expunge(pool.getId());
String msg = "";
if (answer != null) {
msg = "Can not create storage pool through host " + hostId
+ " due to " + answer.getDetails();
s_logger.warn(msg);
} else {
msg = "Can not create storage pool through host " + hostId
+ " due to CreateStoragePoolCommand returns null";
s_logger.warn(msg);
}
throw new CloudRuntimeException(msg);
}
}
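    // Aside (illustrative only, not part of the commit): the chain of !=
    // checks above is equivalent to an EnumSet whitelist, which reads more
    // directly if the supported set ever grows:
    private static final java.util.EnumSet<StoragePoolType> SUPPORTED_POOL_TYPES =
            java.util.EnumSet.of(StoragePoolType.NetworkFilesystem,
                    StoragePoolType.Filesystem, StoragePoolType.IscsiLUN,
                    StoragePoolType.Iscsi, StoragePoolType.VMFS,
                    StoragePoolType.SharedMountPoint, StoragePoolType.PreSetup,
                    StoragePoolType.OCFS2, StoragePoolType.RBD,
                    StoragePoolType.CLVM);

    private static boolean isSupportedPoolType(StoragePoolType type) {
        return SUPPORTED_POOL_TYPES.contains(type);
    }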
@Override
public boolean attachCluster(DataStore store, ClusterScope scope) {
PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store;
        // Check if there is a host up in this cluster
List<HostVO> allHosts = _resourceMgr.listAllUpAndEnabledHosts(
Host.Type.Routing, primarystore.getClusterId(),
primarystore.getPodId(), primarystore.getDataCenterId());
if (allHosts.isEmpty()) {
            throw new CloudRuntimeException(
                    "No host is up in cluster " + primarystore.getClusterId()
                    + " to associate the storage pool with");
}
if (primarystore.getPoolType() == StoragePoolType.OCFS2
&& !_ocfs2Mgr.prepareNodes(allHosts, primarystore)) {
s_logger.warn("Can not create storage pool " + primarystore
+ " on cluster " + primarystore.getClusterId());
primaryDataStoreDao.expunge(primarystore.getId());
return false;
}
boolean success = false;
for (HostVO h : allHosts) {
success = createStoragePool(h.getId(), primarystore);
if (success) {
break;
}
}
s_logger.debug("In createPool Adding the pool to each of the hosts");
List<HostVO> poolHosts = new ArrayList<HostVO>();
for (HostVO h : allHosts) {
try {
this.storageMgr.connectHostToSharedPool(h.getId(),
primarystore.getId());
poolHosts.add(h);
} catch (Exception e) {
s_logger.warn("Unable to establish a connection between " + h
+ " and " + primarystore, e);
}
}
if (poolHosts.isEmpty()) {
s_logger.warn("No host can access storage pool " + primarystore
+ " on cluster " + primarystore.getClusterId());
primaryDataStoreDao.expunge(primarystore.getId());
return false;
} else {
storageMgr.createCapacityEntry(primarystore.getId());
}
StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId());
pool.setScope(ScopeType.CLUSTER);
pool.setStatus(StoragePoolStatus.Up);
this.primaryDataStoreDao.update(pool.getId(), pool);
return true;
}
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope) {
List<HostVO> hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId());
for (HostVO host : hosts) {
try {
this.storageMgr.connectHostToSharedPool(host.getId(),
dataStore.getId());
} catch (Exception e) {
s_logger.warn("Unable to establish a connection between " + host
+ " and " + dataStore, e);
}
}
StoragePoolVO pool = this.primaryDataStoreDao.findById(dataStore.getId());
pool.setScope(ScopeType.ZONE);
pool.setStatus(StoragePoolStatus.Up);
this.primaryDataStoreDao.update(pool.getId(), pool);
return true;
}
@Override
public boolean dettach() {
// TODO Auto-generated method stub
return false;
}
@Override
public boolean unmanaged() {
// TODO Auto-generated method stub
return false;
}
@Override
public boolean maintain(long storeId) {
Long userId = UserContext.current().getCallerUserId();
User user = _userDao.findById(userId);
Account account = UserContext.current().getCaller();
StoragePoolVO pool = this.primaryDataStoreDao.findById(storeId);
try {
StoragePool storagePool = (StoragePool) this.dataStoreMgr
.getDataStore(storeId, DataStoreRole.Primary);
List<HostVO> hosts = _resourceMgr.listHostsInClusterByStatus(
pool.getClusterId(), Status.Up);
if (hosts == null || hosts.size() == 0) {
pool.setStatus(StoragePoolStatus.Maintenance);
primaryDataStoreDao.update(pool.getId(), pool);
return true;
} else {
// set the pool state to prepare for maintenance
pool.setStatus(StoragePoolStatus.PrepareForMaintenance);
primaryDataStoreDao.update(pool.getId(), pool);
}
// remove heartbeat
for (HostVO host : hosts) {
ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(
false, storagePool);
final Answer answer = agentMgr.easySend(host.getId(), cmd);
if (answer == null || !answer.getResult()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("ModifyStoragePool false failed due to "
+ ((answer == null) ? "answer null" : answer
.getDetails()));
}
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("ModifyStoragePool false secceeded");
}
}
}
            // check to see if other primary storage pools exist in the zone;
            // if they do, the system VMs can be migrated over to them,
            // if they don't, just stop all VMs on this one
List<StoragePoolVO> upPools = primaryDataStoreDao
.listByStatusInZone(pool.getDataCenterId(),
StoragePoolStatus.Up);
boolean restart = true;
if (upPools == null || upPools.size() == 0) {
restart = false;
}
// 2. Get a list of all the ROOT volumes within this storage pool
List<VolumeVO> allVolumes = this.volumeDao.findByPoolId(pool
.getId());
// 3. Enqueue to the work queue
for (VolumeVO volume : allVolumes) {
VMInstanceVO vmInstance = vmDao
.findById(volume.getInstanceId());
if (vmInstance == null) {
continue;
}
// enqueue sp work
if (vmInstance.getState().equals(State.Running)
|| vmInstance.getState().equals(State.Starting)
|| vmInstance.getState().equals(State.Stopping)) {
try {
StoragePoolWorkVO work = new StoragePoolWorkVO(
vmInstance.getId(), pool.getId(), false, false,
server.getId());
_storagePoolWorkDao.persist(work);
} catch (Exception e) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Work record already exists, re-using by re-setting values");
}
StoragePoolWorkVO work = _storagePoolWorkDao
.findByPoolIdAndVmId(pool.getId(),
vmInstance.getId());
work.setStartedAfterMaintenance(false);
work.setStoppedForMaintenance(false);
work.setManagementServerId(server.getId());
_storagePoolWorkDao.update(work.getId(), work);
}
}
}
// 4. Process the queue
List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao
.listPendingWorkForPrepareForMaintenanceByPoolId(pool
.getId());
for (StoragePoolWorkVO work : pendingWork) {
// shut down the running vms
VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
if (vmInstance == null) {
continue;
}
// if the instance is of type consoleproxy, call the console
// proxy
if (vmInstance.getType().equals(
VirtualMachine.Type.ConsoleProxy)) {
// call the consoleproxymanager
ConsoleProxyVO consoleProxy = _consoleProxyDao
.findById(vmInstance.getId());
if (!vmMgr.advanceStop(consoleProxy, true, user, account)) {
String errorMsg = "There was an error stopping the console proxy id: "
+ vmInstance.getId()
+ " ,cannot enable storage maintenance";
s_logger.warn(errorMsg);
throw new CloudRuntimeException(errorMsg);
} else {
// update work status
work.setStoppedForMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
if (restart) {
if (this.vmMgr.advanceStart(consoleProxy, null, user,
account) == null) {
String errorMsg = "There was an error starting the console proxy id: "
+ vmInstance.getId()
+ " on another storage pool, cannot enable primary storage maintenance";
s_logger.warn(errorMsg);
} else {
// update work status
work.setStartedAfterMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
}
// if the instance is of type uservm, call the user vm manager
if (vmInstance.getType().equals(VirtualMachine.Type.User)) {
UserVmVO userVm = userVmDao.findById(vmInstance.getId());
if (!vmMgr.advanceStop(userVm, true, user, account)) {
String errorMsg = "There was an error stopping the user vm id: "
+ vmInstance.getId()
+ " ,cannot enable storage maintenance";
s_logger.warn(errorMsg);
throw new CloudRuntimeException(errorMsg);
} else {
// update work status
work.setStoppedForMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
// if the instance is of type secondary storage vm, call the
// secondary storage vm manager
if (vmInstance.getType().equals(
VirtualMachine.Type.SecondaryStorageVm)) {
SecondaryStorageVmVO secStrgVm = _secStrgDao
.findById(vmInstance.getId());
if (!vmMgr.advanceStop(secStrgVm, true, user, account)) {
String errorMsg = "There was an error stopping the ssvm id: "
+ vmInstance.getId()
+ " ,cannot enable storage maintenance";
s_logger.warn(errorMsg);
throw new CloudRuntimeException(errorMsg);
} else {
// update work status
work.setStoppedForMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
if (restart) {
if (vmMgr.advanceStart(secStrgVm, null, user, account) == null) {
String errorMsg = "There was an error starting the ssvm id: "
+ vmInstance.getId()
+ " on another storage pool, cannot enable primary storage maintenance";
s_logger.warn(errorMsg);
} else {
// update work status
work.setStartedAfterMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
}
// if the instance is of type domain router vm, call the network
// manager
if (vmInstance.getType().equals(
VirtualMachine.Type.DomainRouter)) {
DomainRouterVO domR = _domrDao.findById(vmInstance.getId());
if (!vmMgr.advanceStop(domR, true, user, account)) {
String errorMsg = "There was an error stopping the domain router id: "
+ vmInstance.getId()
+ " ,cannot enable primary storage maintenance";
s_logger.warn(errorMsg);
throw new CloudRuntimeException(errorMsg);
} else {
// update work status
work.setStoppedForMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
if (restart) {
if (vmMgr.advanceStart(domR, null, user, account) == null) {
String errorMsg = "There was an error starting the domain router id: "
+ vmInstance.getId()
+ " on another storage pool, cannot enable primary storage maintenance";
s_logger.warn(errorMsg);
} else {
// update work status
work.setStartedAfterMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
}
}
// 5. Update the status
pool.setStatus(StoragePoolStatus.Maintenance);
this.primaryDataStoreDao.update(pool.getId(), pool);
return true;
} catch (Exception e) {
s_logger.error(
"Exception in enabling primary storage maintenance:", e);
setPoolStateToError(pool);
throw new CloudRuntimeException(e.getMessage());
}
}
private void setPoolStateToError(StoragePoolVO primaryStorage) {
primaryStorage.setStatus(StoragePoolStatus.ErrorInMaintenance);
this.primaryDataStoreDao.update(primaryStorage.getId(), primaryStorage);
}
@Override
public boolean cancelMaintain(long storageId) {
// Change the storage state back to up
Long userId = UserContext.current().getCallerUserId();
User user = _userDao.findById(userId);
Account account = UserContext.current().getCaller();
StoragePoolVO poolVO = this.primaryDataStoreDao
.findById(storageId);
StoragePool pool = (StoragePool) this.dataStoreMgr.getDataStore(
storageId, DataStoreRole.Primary);
poolVO.setStatus(StoragePoolStatus.Up);
primaryDataStoreDao.update(storageId, poolVO);
List<HostVO> hosts = _resourceMgr.listHostsInClusterByStatus(
pool.getClusterId(), Status.Up);
if (hosts == null || hosts.size() == 0) {
return true;
}
// add heartbeat
for (HostVO host : hosts) {
ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand(
true, pool);
final Answer answer = agentMgr.easySend(host.getId(), msPoolCmd);
if (answer == null || !answer.getResult()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("ModifyStoragePool add failed due to "
+ ((answer == null) ? "answer null" : answer
.getDetails()));
}
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("ModifyStoragePool add secceeded");
}
}
}
// 2. Get a list of pending work for this queue
List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao
.listPendingWorkForCancelMaintenanceByPoolId(poolVO.getId());
// 3. work through the queue
for (StoragePoolWorkVO work : pendingWork) {
try {
VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
if (vmInstance == null) {
continue;
}
// if the instance is of type consoleproxy, call the console
// proxy
if (vmInstance.getType().equals(
VirtualMachine.Type.ConsoleProxy)) {
ConsoleProxyVO consoleProxy = _consoleProxyDao
.findById(vmInstance.getId());
if (vmMgr.advanceStart(consoleProxy, null, user, account) == null) {
String msg = "There was an error starting the console proxy id: "
+ vmInstance.getId()
+ " on storage pool, cannot complete primary storage maintenance";
s_logger.warn(msg);
throw new ExecutionException(msg);
} else {
// update work queue
work.setStartedAfterMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
// if the instance is of type ssvm, call the ssvm manager
if (vmInstance.getType().equals(
VirtualMachine.Type.SecondaryStorageVm)) {
SecondaryStorageVmVO ssVm = _secStrgDao.findById(vmInstance
.getId());
if (vmMgr.advanceStart(ssVm, null, user, account) == null) {
String msg = "There was an error starting the ssvm id: "
+ vmInstance.getId()
+ " on storage pool, cannot complete primary storage maintenance";
s_logger.warn(msg);
throw new ExecutionException(msg);
} else {
// update work queue
work.setStartedAfterMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
                // if the instance is of type domain router, call the network manager
if (vmInstance.getType().equals(
VirtualMachine.Type.DomainRouter)) {
DomainRouterVO domR = _domrDao.findById(vmInstance.getId());
if (vmMgr.advanceStart(domR, null, user, account) == null) {
String msg = "There was an error starting the domR id: "
+ vmInstance.getId()
+ " on storage pool, cannot complete primary storage maintenance";
s_logger.warn(msg);
throw new ExecutionException(msg);
} else {
// update work queue
work.setStartedAfterMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
// if the instance is of type user vm, call the user vm manager
if (vmInstance.getType().equals(VirtualMachine.Type.User)) {
UserVmVO userVm = userVmDao.findById(vmInstance.getId());
if (vmMgr.advanceStart(userVm, null, user, account) == null) {
String msg = "There was an error starting the user vm id: "
+ vmInstance.getId()
+ " on storage pool, cannot complete primary storage maintenance";
s_logger.warn(msg);
throw new ExecutionException(msg);
} else {
// update work queue
work.setStartedAfterMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
} catch (Exception e) {
s_logger.debug("Failed start vm", e);
throw new CloudRuntimeException(e.toString());
}
}
return true;
}
@DB
@Override
public boolean deleteDataStore(long storeId) {
// for the given pool id, find all records in the storage_pool_host_ref
List<StoragePoolHostVO> hostPoolRecords = this._storagePoolHostDao
.listByPoolId(storeId);
StoragePoolVO poolVO = this.primaryDataStoreDao.findById(storeId);
StoragePool pool = (StoragePool)this.dataStoreMgr.getDataStore(storeId, DataStoreRole.Primary);
boolean deleteFlag = false;
Transaction txn = Transaction.currentTxn();
try {
            // if no records exist, delete the given pool (base case)
if (hostPoolRecords.size() == 0) {
txn.start();
poolVO.setUuid(null);
this.primaryDataStoreDao.update(poolVO.getId(), poolVO);
primaryDataStoreDao.remove(poolVO.getId());
deletePoolStats(poolVO.getId());
txn.commit();
deleteFlag = true;
return true;
} else {
                // Remove the SR associated with the XenServer
for (StoragePoolHostVO host : hostPoolRecords) {
DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand(
pool);
final Answer answer = agentMgr.easySend(host.getHostId(),
deleteCmd);
if (answer != null && answer.getResult()) {
deleteFlag = true;
break;
}
}
}
} finally {
if (deleteFlag) {
// now delete the storage_pool_host_ref and storage_pool records
txn.start();
for (StoragePoolHostVO host : hostPoolRecords) {
_storagePoolHostDao.deleteStoragePoolHostDetails(
host.getHostId(), host.getPoolId());
}
poolVO.setUuid(null);
this.primaryDataStoreDao.update(poolVO.getId(), poolVO);
primaryDataStoreDao.remove(poolVO.getId());
deletePoolStats(poolVO.getId());
// Delete op_host_capacity entries
this._capacityDao.removeBy(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED,
null, null, null, poolVO.getId());
txn.commit();
s_logger.debug("Storage pool id=" + poolVO.getId()
+ " is removed successfully");
return true;
} else {
// alert that the storage cleanup is required
s_logger.warn("Failed to Delete storage pool id: " + poolVO.getId());
_alertMgr
.sendAlert(AlertManager.ALERT_TYPE_STORAGE_DELETE,
poolVO.getDataCenterId(), poolVO.getPodId(),
"Unable to delete storage pool id= " + poolVO.getId(),
"Delete storage pool command failed. Please check logs.");
}
}
return false;
}
@DB
private boolean deletePoolStats(Long poolId) {
CapacityVO capacity1 = _capacityDao.findByHostIdType(poolId,
CapacityVO.CAPACITY_TYPE_STORAGE);
CapacityVO capacity2 = _capacityDao.findByHostIdType(poolId,
CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED);
Transaction txn = Transaction.currentTxn();
txn.start();
if (capacity1 != null) {
_capacityDao.remove(capacity1.getId());
}
if (capacity2 != null) {
_capacityDao.remove(capacity2.getId());
}
txn.commit();
return true;
}
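    // Aside (sketch under the assumption that com.cloud.utils.db.Transaction
    // exposes rollback()): deleteDataStore() and deletePoolStats() both use
    // CloudStack's thread-local transaction idiom; with explicit failure
    // handling the pattern looks like this:
    private void runInTransactionSketch(Runnable daoWork) {
        Transaction txn = Transaction.currentTxn();
        txn.start();
        try {
            daoWork.run(); // the DAO mutations go here
            txn.commit();
        } catch (RuntimeException e) {
            txn.rollback(); // assumed helper; verify against the utils API
            throw e;
        }
    }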
@Override
public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
StoragePoolHostVO poolHost = _storagePoolHostDao.findByPoolHost(store.getId(), scope.getScopeId());
if (poolHost == null) {
poolHost = new StoragePoolHostVO(store.getId(), scope.getScopeId(), existingInfo.getLocalPath());
_storagePoolHostDao.persist(poolHost);
}
StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId());
pool.setScope(scope.getScopeType());
pool.setAvailableBytes(existingInfo.getAvailableBytes());
pool.setCapacityBytes(existingInfo.getCapacityBytes());
pool.setStatus(StoragePoolStatus.Up);
this.primaryDataStoreDao.update(pool.getId(), pool);
this.storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, pool.getCapacityBytes() - pool.getAvailableBytes());
return true;
}
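    // Aside (values illustrative): the capacity entry written above records
    // *used* bytes for local storage, i.e. total capacity minus what is
    // still available:
    static long usedBytesSketch() {
        long capacityBytes = 500L << 30;  // 500 GiB reported by the host
        long availableBytes = 120L << 30; // 120 GiB still free
        return capacityBytes - availableBytes; // 380 GiB recorded as used
    }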
}

View File

@@ -60,8 +60,8 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
StoragePoolVO storeVO = primaryStoreHelper.createPrimaryDataStore(dsInfos);
return providerMgr.getPrimaryDataStore(storeVO.getId());
DataStore store = primaryStoreHelper.createPrimaryDataStore(null);
return providerMgr.getPrimaryDataStore(store.getId());
}
protected void attachCluster(DataStore store) {
@@ -113,26 +113,6 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif
return false;
}
@Override
public boolean maintain(long storeId) {
// TODO Auto-generated method stub
return false;
}
@Override
public boolean cancelMaintain(long storeId) {
// TODO Auto-generated method stub
return false;
}
@Override
public boolean deleteDataStore(long storeId) {
// TODO Auto-generated method stub
return false;
}
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope) {
// TODO Auto-generated method stub
@@ -146,4 +126,22 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif
return false;
}
@Override
public boolean maintain(DataStore store) {
// TODO Auto-generated method stub
return false;
}
@Override
public boolean cancelMaintain(DataStore store) {
// TODO Auto-generated method stub
return false;
}
@Override
public boolean deleteDataStore(DataStore store) {
// TODO Auto-generated method stub
return false;
}
}

View File

@@ -57,18 +57,18 @@ public class DefaultPrimaryDataStoreProviderManagerImpl implements PrimaryDataSt
@Override
public PrimaryDataStore getPrimaryDataStore(long dataStoreId) {
StoragePoolVO dataStoreVO = dataStoreDao.findById(dataStoreId);
long providerId = dataStoreVO.getStorageProviderId();
DataStoreProvider provider = providerManager.getDataStoreProviderById(providerId);
DefaultPrimaryDataStore dataStore = DefaultPrimaryDataStore.createDataStore(dataStoreVO, driverMaps.get(provider.getUuid()), provider);
String providerName = dataStoreVO.getStorageProviderName();
DataStoreProvider provider = providerManager.getDataStoreProvider(providerName);
DefaultPrimaryDataStore dataStore = DefaultPrimaryDataStore.createDataStore(dataStoreVO, driverMaps.get(provider.getName()), provider);
return dataStore;
}
@Override
public boolean registerDriver(String uuid, PrimaryDataStoreDriver driver) {
if (driverMaps.get(uuid) != null) {
public boolean registerDriver(String providerName, PrimaryDataStoreDriver driver) {
if (driverMaps.get(providerName) != null) {
return false;
}
driverMaps.put(uuid, driver);
driverMaps.put(providerName, driver);
return true;
}
@@ -79,7 +79,7 @@ public class DefaultPrimaryDataStoreProviderManagerImpl implements PrimaryDataSt
}
@Override
public boolean registerHostListener(String uuid, HypervisorHostListener listener) {
return storageMgr.registerHostListener(uuid, listener);
public boolean registerHostListener(String providerName, HypervisorHostListener listener) {
return storageMgr.registerHostListener(providerName, listener);
}
}
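// Aside (stand-alone sketch, not part of the commit): both registries above
// are now keyed by provider name instead of uuid; the pattern reduces to a
// guarded map. Names below are illustrative:
class NameKeyedRegistrySketch<D> {
    private final java.util.Map<String, D> entries =
            new java.util.concurrent.ConcurrentHashMap<String, D>();

    boolean register(String providerName, D entry) {
        // mirrors registerDriver(): refuse duplicate registrations
        return entries.putIfAbsent(providerName, entry) == null;
    }

    D lookup(String providerName) {
        return entries.get(providerName);
    }
}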

View File

@@ -16,24 +16,29 @@
// under the License.
package org.apache.cloudstack.storage.datastore.provider;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider.DataStoreProviderType;
import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager;
import org.apache.cloudstack.storage.datastore.driver.DefaultPrimaryDataStoreDriverImpl;
import org.apache.cloudstack.storage.datastore.lifecycle.DefaultPrimaryDataStoreLifeCycleImpl;
import org.springframework.stereotype.Component;
import com.cloud.utils.component.ComponentContext;
@Component
public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProvider {
private final String providerName = "default primary data store provider";
protected PrimaryDataStoreDriver driver;
protected HypervisorHostListener listener;
@Inject
PrimaryDataStoreProviderManager storeMgr;
@@ -46,7 +51,7 @@ public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProv
}
@Override
public DataStoreLifeCycle getLifeCycle() {
public DataStoreLifeCycle getDataStoreLifeCycle() {
return this.lifecyle;
}
@@ -54,22 +59,25 @@ public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProv
public boolean configure(Map<String, Object> params) {
lifecyle = ComponentContext.inject(DefaultPrimaryDataStoreLifeCycleImpl.class);
driver = ComponentContext.inject(DefaultPrimaryDataStoreDriverImpl.class);
HypervisorHostListener listener = ComponentContext.inject(DefaultHostListener.class);
uuid = (String)params.get("uuid");
id = (Long)params.get("id");
storeMgr.registerDriver(uuid, this.driver);
storeMgr.registerHostListener(uuid, listener);
listener = ComponentContext.inject(DefaultHostListener.class);
return true;
}
@Override
public String getUuid() {
return this.uuid;
public PrimaryDataStoreDriver getDataStoreDriver() {
return this.driver;
}
@Override
public long getId() {
return this.id;
public HypervisorHostListener getHostListener() {
return this.listener;
}
@Override
public Set<DataStoreProviderType> getTypes() {
Set<DataStoreProviderType> types = new HashSet<DataStoreProviderType>();
types.add(DataStoreProviderType.PRIMARY);
return types;
}
}

View File

@@ -28,7 +28,7 @@ import java.util.Map;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.storage.datastore.provider.PrimaryDataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;

View File

@@ -57,6 +57,7 @@
<module>network-elements/dns-notifier</module>
<module>storage/image/s3</module>
<module>storage/volume/solidfire</module>
<module>storage/volume/default</module>
<module>alert-handlers/snmp-alerts</module>
</modules>

View File

@@ -0,0 +1,56 @@
<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor
license agreements. See the NOTICE file distributed with this work for additional
information regarding copyright ownership. The ASF licenses this file to
you under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License. -->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-plugin-storage-volume-default</artifactId>
<name>Apache CloudStack Plugin - Storage Volume default provider</name>
<parent>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloudstack-plugins</artifactId>
<version>4.2.0-SNAPSHOT</version>
<relativePath>../../../pom.xml</relativePath>
</parent>
<dependencies>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-storage-volume</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>${cs.mysql.version}</version>
<scope>provided</scope>
</dependency>
</dependencies>
<build>
<defaultGoal>install</defaultGoal>
<sourceDirectory>src</sourceDirectory>
<testSourceDirectory>test</testSourceDirectory>
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<skipTests>true</skipTests>
</configuration>
<executions>
<execution>
<phase>integration-test</phase>
<goals>
<goal>test</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@@ -71,9 +71,9 @@ import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.dao.VMInstanceDao;
public class AncientPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver {
public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver {
private static final Logger s_logger = Logger
.getLogger(AncientPrimaryDataStoreDriverImpl.class);
.getLogger(CloudStackPrimaryDataStoreDriverImpl.class);
@Inject DiskOfferingDao diskOfferingDao;
@Inject VMTemplateDao templateDao;
@Inject VolumeDao volumeDao;

View File

@@ -0,0 +1,542 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.datastore.lifecycle;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.apache.log4j.Logger;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.CreateStoragePoolCommand;
import com.cloud.agent.api.DeleteStoragePoolCommand;
import com.cloud.agent.api.ModifyStoragePoolCommand;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.alert.AlertManager;
import com.cloud.capacity.Capacity;
import com.cloud.capacity.CapacityVO;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.exception.DiscoveryException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.Status;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.resource.ResourceManager;
import com.cloud.server.ManagementServer;
import com.cloud.storage.OCFS2Manager;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolAutomation;
import com.cloud.storage.StoragePoolDiscoverer;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.storage.StoragePoolWorkVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.StoragePoolWorkDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.Account;
import com.cloud.user.User;
import com.cloud.user.UserContext;
import com.cloud.user.dao.UserDao;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.UriUtils;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.exception.ExecutionException;
import com.cloud.vm.ConsoleProxyVO;
import com.cloud.vm.DomainRouterVO;
import com.cloud.vm.SecondaryStorageVmVO;
import com.cloud.vm.UserVmVO;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.State;
import com.cloud.vm.VirtualMachineManager;
import com.cloud.vm.dao.ConsoleProxyDao;
import com.cloud.vm.dao.DomainRouterDao;
import com.cloud.vm.dao.SecondaryStorageVmDao;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.VMInstanceDao;
public class CloudStackPrimaryDataStoreLifeCycleImpl implements
PrimaryDataStoreLifeCycle {
private static final Logger s_logger = Logger
.getLogger(CloudStackPrimaryDataStoreLifeCycleImpl.class);
@Inject
protected ResourceManager _resourceMgr;
protected List<StoragePoolDiscoverer> _discoverers;
@Inject
PrimaryDataStoreDao primaryDataStoreDao;
@Inject
protected OCFS2Manager _ocfs2Mgr;
@Inject
DataStoreManager dataStoreMgr;
@Inject
AgentManager agentMgr;
@Inject
StorageManager storageMgr;
@Inject
VolumeDao volumeDao;
@Inject
VMInstanceDao vmDao;
@Inject
ManagementServer server;
@Inject
protected VirtualMachineManager vmMgr;
@Inject
protected SecondaryStorageVmDao _secStrgDao;
@Inject
UserVmDao userVmDao;
@Inject
protected UserDao _userDao;
@Inject
protected DomainRouterDao _domrDao;
@Inject
protected StoragePoolHostDao _storagePoolHostDao;
@Inject
protected AlertManager _alertMgr;
@Inject
protected ConsoleProxyDao _consoleProxyDao;
@Inject
protected StoragePoolWorkDao _storagePoolWorkDao;
@Inject
PrimaryDataStoreHelper dataStoreHelper;
@Inject
    StoragePoolAutomation storagePoolAutomation;
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
Long clusterId = (Long) dsInfos.get("clusterId");
Long podId = (Long) dsInfos.get("podId");
Long zoneId = (Long) dsInfos.get("zoneId");
String url = (String) dsInfos.get("url");
String providerName = (String)dsInfos.get("providerName");
if (clusterId != null && podId == null) {
throw new InvalidParameterValueException(
"Cluster id requires pod id");
}
PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
URI uri = null;
try {
uri = new URI(UriUtils.encodeURIComponent(url));
if (uri.getScheme() == null) {
throw new InvalidParameterValueException("scheme is null "
+ url + ", add nfs:// as a prefix");
} else if (uri.getScheme().equalsIgnoreCase("nfs")) {
String uriHost = uri.getHost();
String uriPath = uri.getPath();
if (uriHost == null || uriPath == null
|| uriHost.trim().isEmpty() || uriPath.trim().isEmpty()) {
throw new InvalidParameterValueException(
"host or path is null, should be nfs://hostname/path");
}
} else if (uri.getScheme().equalsIgnoreCase("sharedMountPoint")) {
String uriPath = uri.getPath();
if (uriPath == null) {
throw new InvalidParameterValueException(
"host or path is null, should be sharedmountpoint://localhost/path");
}
} else if (uri.getScheme().equalsIgnoreCase("rbd")) {
String uriPath = uri.getPath();
if (uriPath == null) {
throw new InvalidParameterValueException(
"host or path is null, should be rbd://hostname/pool");
}
}
} catch (URISyntaxException e) {
throw new InvalidParameterValueException(url
+ " is not a valid uri");
}
String tags = (String) dsInfos.get("tags");
Map<String, String> details = (Map<String, String>) dsInfos
.get("details");
parameters.setTags(tags);
parameters.setDetails(details);
String scheme = uri.getScheme();
String storageHost = uri.getHost();
String hostPath = uri.getPath();
Object localStorage = dsInfos.get("localStorage");
if (localStorage != null) {
hostPath = hostPath.replace("/", "");
}
String userInfo = uri.getUserInfo();
int port = uri.getPort();
StoragePoolVO pool = null;
if (s_logger.isDebugEnabled()) {
s_logger.debug("createPool Params @ scheme - " + scheme
+ " storageHost - " + storageHost + " hostPath - "
+ hostPath + " port - " + port);
}
if (scheme.equalsIgnoreCase("nfs")) {
if (port == -1) {
port = 2049;
}
parameters.setType(StoragePoolType.NetworkFilesystem);
parameters.setHost(storageHost);
parameters.setPort(port);
parameters.setPath(hostPath);
} else if (scheme.equalsIgnoreCase("file")) {
if (port == -1) {
port = 0;
}
parameters.setType(StoragePoolType.Filesystem);
parameters.setHost("localhost");
parameters.setPort(0);
parameters.setPath(hostPath);
} else if (scheme.equalsIgnoreCase("sharedMountPoint")) {
parameters.setType(StoragePoolType.SharedMountPoint);
parameters.setHost(storageHost);
parameters.setPort(0);
parameters.setPath(hostPath);
} else if (scheme.equalsIgnoreCase("clvm")) {
parameters.setType(StoragePoolType.CLVM);
parameters.setHost(storageHost);
parameters.setPort(0);
parameters.setPath(hostPath.replaceFirst("/", ""));
} else if (scheme.equalsIgnoreCase("rbd")) {
if (port == -1) {
port = 6789;
}
parameters.setType(StoragePoolType.RBD);
parameters.setHost(storageHost);
parameters.setPort(port);
parameters.setPath(hostPath.replaceFirst("/", ""));
parameters.setUserInfo(userInfo);
} else if (scheme.equalsIgnoreCase("PreSetup")) {
parameters.setType(StoragePoolType.PreSetup);
parameters.setHost(storageHost);
parameters.setPort(0);
parameters.setPath(hostPath);
} else if (scheme.equalsIgnoreCase("iscsi")) {
String[] tokens = hostPath.split("/");
int lun = NumbersUtil.parseInt(tokens[tokens.length - 1], -1);
if (port == -1) {
port = 3260;
}
if (lun != -1) {
if (clusterId == null) {
throw new IllegalArgumentException(
"IscsiLUN need to have clusters specified");
}
hostPath.replaceFirst("/", "");
parameters.setType(StoragePoolType.IscsiLUN);
parameters.setHost(storageHost);
parameters.setPort(port);
parameters.setPath(hostPath);
} else {
for (StoragePoolDiscoverer discoverer : _discoverers) {
Map<StoragePoolVO, Map<String, String>> pools;
try {
pools = discoverer.find(zoneId, podId, uri, details);
} catch (DiscoveryException e) {
throw new IllegalArgumentException(
"Not enough information for discovery " + uri,
e);
}
if (pools != null) {
Map.Entry<StoragePoolVO, Map<String, String>> entry = pools
.entrySet().iterator().next();
pool = entry.getKey();
details = entry.getValue();
break;
}
}
}
} else if (scheme.equalsIgnoreCase("iso")) {
if (port == -1) {
port = 2049;
}
parameters.setType(StoragePoolType.ISO);
parameters.setHost(storageHost);
parameters.setPort(port);
parameters.setPath(hostPath);
} else if (scheme.equalsIgnoreCase("vmfs")) {
parameters.setType(StoragePoolType.VMFS);
parameters.setHost("VMFS datastore: " + hostPath);
parameters.setPort(0);
parameters.setPath(hostPath);
} else if (scheme.equalsIgnoreCase("ocfs2")) {
port = 7777;
parameters.setType(StoragePoolType.OCFS2);
parameters.setHost("clustered");
parameters.setPort(port);
parameters.setPath(hostPath);
} else {
            StoragePoolType type = null;
            try {
                type = Enum.valueOf(StoragePoolType.class, scheme);
            } catch (IllegalArgumentException e) {
                // Enum.valueOf throws for unknown schemes instead of returning null
            }
            if (type != null) {
                parameters.setType(type);
                parameters.setHost(storageHost);
                parameters.setPort(0);
                parameters.setPath(hostPath);
            } else {
                s_logger.warn("Unable to figure out the scheme for URI: " + uri);
                throw new IllegalArgumentException(
                        "Unable to figure out the scheme for URI: " + uri);
            }
}
if (localStorage == null) {
List<StoragePoolVO> pools = primaryDataStoreDao
.listPoolByHostPath(storageHost, hostPath);
if (!pools.isEmpty() && !scheme.equalsIgnoreCase("sharedmountpoint")) {
Long oldPodId = pools.get(0).getPodId();
throw new CloudRuntimeException("Storage pool " + uri
+ " already in use by another pod (id=" + oldPodId + ")");
}
}
Object existingUuid = dsInfos.get("uuid");
String uuid = null;
if (existingUuid != null) {
uuid = (String)existingUuid;
} else if (scheme.equalsIgnoreCase("sharedmountpoint")
|| scheme.equalsIgnoreCase("clvm")) {
uuid = UUID.randomUUID().toString();
} else if (scheme.equalsIgnoreCase("PreSetup")) {
uuid = hostPath.replace("/", "");
} else {
            uuid = UUID.nameUUIDFromBytes(
                    (storageHost + hostPath).getBytes()).toString();
}
List<StoragePoolVO> spHandles = primaryDataStoreDao
.findIfDuplicatePoolsExistByUUID(uuid);
if ((spHandles != null) && (spHandles.size() > 0)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Another active pool with the same uuid already exists");
}
throw new CloudRuntimeException(
"Another active pool with the same uuid already exists");
}
String poolName = (String) dsInfos.get("name");
parameters.setUuid(uuid);
parameters.setZoneId(zoneId);
parameters.setPodId(podId);
parameters.setName(poolName);
parameters.setClusterId(clusterId);
parameters.setProviderName(providerName);
return dataStoreHelper.createPrimaryDataStore(parameters);
}
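    // Aside (sketch only; the map keys are the ones initialize() reads above,
    // the values are illustrative): a minimal dsInfos payload for a plain NFS
    // pool, which parses to type=NetworkFilesystem, host=nfs.example.org,
    // port=2049, path=/export/primary:
    static Map<String, Object> nfsDsInfosSketch() {
        Map<String, Object> dsInfos = new java.util.HashMap<String, Object>();
        dsInfos.put("zoneId", 1L);
        dsInfos.put("podId", 1L);
        dsInfos.put("clusterId", 1L);
        dsInfos.put("url", "nfs://nfs.example.org/export/primary");
        dsInfos.put("name", "primary-1");
        dsInfos.put("providerName", "default primary data store provider");
        dsInfos.put("tags", null);
        dsInfos.put("details", null);
        return dsInfos;
    }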
protected boolean createStoragePool(long hostId, StoragePool pool) {
s_logger.debug("creating pool " + pool.getName() + " on host "
+ hostId);
if (pool.getPoolType() != StoragePoolType.NetworkFilesystem
&& pool.getPoolType() != StoragePoolType.Filesystem
&& pool.getPoolType() != StoragePoolType.IscsiLUN
&& pool.getPoolType() != StoragePoolType.Iscsi
&& pool.getPoolType() != StoragePoolType.VMFS
&& pool.getPoolType() != StoragePoolType.SharedMountPoint
&& pool.getPoolType() != StoragePoolType.PreSetup
&& pool.getPoolType() != StoragePoolType.OCFS2
&& pool.getPoolType() != StoragePoolType.RBD
&& pool.getPoolType() != StoragePoolType.CLVM) {
s_logger.warn(" Doesn't support storage pool type "
+ pool.getPoolType());
return false;
}
CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool);
final Answer answer = agentMgr.easySend(hostId, cmd);
if (answer != null && answer.getResult()) {
return true;
} else {
primaryDataStoreDao.expunge(pool.getId());
String msg = "";
if (answer != null) {
msg = "Can not create storage pool through host " + hostId
+ " due to " + answer.getDetails();
s_logger.warn(msg);
} else {
msg = "Can not create storage pool through host " + hostId
+ " due to CreateStoragePoolCommand returns null";
s_logger.warn(msg);
}
throw new CloudRuntimeException(msg);
}
}
@Override
public boolean attachCluster(DataStore store, ClusterScope scope) {
PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store;
        // Check if there is a host up in this cluster
List<HostVO> allHosts = _resourceMgr.listAllUpAndEnabledHosts(
Host.Type.Routing, primarystore.getClusterId(),
primarystore.getPodId(), primarystore.getDataCenterId());
if (allHosts.isEmpty()) {
            throw new CloudRuntimeException(
                    "No host is up in cluster " + primarystore.getClusterId()
                    + " to associate the storage pool with");
}
if (primarystore.getPoolType() == StoragePoolType.OCFS2
&& !_ocfs2Mgr.prepareNodes(allHosts, primarystore)) {
s_logger.warn("Can not create storage pool " + primarystore
+ " on cluster " + primarystore.getClusterId());
primaryDataStoreDao.expunge(primarystore.getId());
return false;
}
boolean success = false;
for (HostVO h : allHosts) {
success = createStoragePool(h.getId(), primarystore);
if (success) {
break;
}
}
s_logger.debug("In createPool Adding the pool to each of the hosts");
List<HostVO> poolHosts = new ArrayList<HostVO>();
for (HostVO h : allHosts) {
try {
this.storageMgr.connectHostToSharedPool(h.getId(),
primarystore.getId());
poolHosts.add(h);
} catch (Exception e) {
s_logger.warn("Unable to establish a connection between " + h
+ " and " + primarystore, e);
}
}
if (poolHosts.isEmpty()) {
s_logger.warn("No host can access storage pool " + primarystore
+ " on cluster " + primarystore.getClusterId());
primaryDataStoreDao.expunge(primarystore.getId());
return false;
}
this.dataStoreHelper.attachCluster(store);
return true;
}
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope) {
List<HostVO> hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId());
for (HostVO host : hosts) {
try {
this.storageMgr.connectHostToSharedPool(host.getId(),
dataStore.getId());
} catch (Exception e) {
s_logger.warn("Unable to establish a connection between " + host
+ " and " + dataStore, e);
}
}
this.dataStoreHelper.attachZone(dataStore);
return true;
}
@Override
public boolean dettach() {
// TODO Auto-generated method stub
return false;
}
@Override
public boolean unmanaged() {
// TODO Auto-generated method stub
return false;
}
@Override
public boolean maintain(DataStore dataStore) {
        storagePoolAutomation.maintain(dataStore);
this.dataStoreHelper.maintain(dataStore);
return true;
}
@Override
public boolean cancelMaintain(DataStore store) {
this.dataStoreHelper.cancelMaintain(store);
        storagePoolAutomation.cancelMaintain(store);
return true;
}
@DB
@Override
public boolean deleteDataStore(DataStore store) {
List<StoragePoolHostVO> hostPoolRecords = this._storagePoolHostDao
.listByPoolId(store.getId());
StoragePool pool = (StoragePool)store;
boolean deleteFlag = false;
        // Remove the SR associated with the XenServer
for (StoragePoolHostVO host : hostPoolRecords) {
DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand(
pool);
final Answer answer = agentMgr.easySend(host.getHostId(),
deleteCmd);
if (answer != null && answer.getResult()) {
deleteFlag = true;
break;
} else {
if (answer != null) {
s_logger.debug("Failed to delete storage pool: " + answer.getResult());
}
}
}
if (!deleteFlag) {
throw new CloudRuntimeException("Failed to delete storage pool on host");
}
this.dataStoreHelper.deletePrimaryDataStore(store);
return false;
}
@Override
public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
this.dataStoreHelper.attachHost(store, scope, existingInfo);
return true;
}
}

View File

@@ -18,61 +18,63 @@
*/
package org.apache.cloudstack.storage.datastore.provider;
import java.util.HashSet;
import java.util.Map;
import javax.inject.Inject;
import java.util.Set;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager;
import org.apache.cloudstack.storage.datastore.driver.AncientPrimaryDataStoreDriverImpl;
import org.apache.cloudstack.storage.datastore.lifecycle.AncientPrimaryDataStoreLifeCycleImpl;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
import org.apache.cloudstack.storage.datastore.driver.CloudStackPrimaryDataStoreDriverImpl;
import org.apache.cloudstack.storage.datastore.lifecycle.CloudStackPrimaryDataStoreLifeCycleImpl;
import com.cloud.utils.component.ComponentContext;
@Component
public class AncientPrimaryDataStoreProviderImpl implements
public class CloudStackPrimaryDataStoreProviderImpl implements
PrimaryDataStoreProvider {
private final String providerName = "ancient primary data store provider";
protected PrimaryDataStoreDriver driver;
@Inject
PrimaryDataStoreProviderManager storeMgr;
protected HypervisorHostListener listener;
protected DataStoreLifeCycle lifecyle;
protected String uuid;
protected long id;
CloudStackPrimaryDataStoreProviderImpl() {
}
@Override
public String getName() {
return providerName;
}
@Override
public DataStoreLifeCycle getLifeCycle() {
public DataStoreLifeCycle getDataStoreLifeCycle() {
return this.lifecyle;
}
@Override
public boolean configure(Map<String, Object> params) {
lifecyle = ComponentContext.inject(AncientPrimaryDataStoreLifeCycleImpl.class);
driver = ComponentContext.inject(AncientPrimaryDataStoreDriverImpl.class);
uuid = (String)params.get("uuid");
id = (Long)params.get("id");
storeMgr.registerDriver(uuid, this.driver);
HypervisorHostListener listener = ComponentContext.inject(DefaultHostListener.class);
storeMgr.registerHostListener(uuid, listener);
lifecyle = ComponentContext.inject(CloudStackPrimaryDataStoreLifeCycleImpl.class);
driver = ComponentContext.inject(CloudStackPrimaryDataStoreDriverImpl.class);
listener = ComponentContext.inject(DefaultHostListener.class);
return true;
}
@Override
public String getUuid() {
return this.uuid;
public PrimaryDataStoreDriver getDataStoreDriver() {
return this.driver;
}
@Override
public long getId() {
return this.id;
public HypervisorHostListener getHostListener() {
return this.listener;
}
@Override
public Set<DataStoreProviderType> getTypes() {
Set<DataStoreProviderType> types = new HashSet<DataStoreProviderType>();
types.add(DataStoreProviderType.PRIMARY);
return types;
}
}
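// Aside (sketch, not part of the commit): configure() no longer registers the
// driver or host listener itself, so a managing component can wire them from
// the new accessors. The helper below is illustrative and assumes a
// PrimaryDataStoreProviderManager reference is available; registerDriver()
// and registerHostListener() are the name-keyed methods from this commit:
class ProviderWiringSketch {
    static void wire(PrimaryDataStoreProviderManager storeMgr,
            PrimaryDataStoreProvider provider) {
        storeMgr.registerDriver(provider.getName(),
                provider.getDataStoreDriver());
        storeMgr.registerHostListener(provider.getName(),
                provider.getHostListener());
    }
}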

View File

@@ -26,10 +26,10 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
import org.apache.cloudstack.storage.command.CreateObjectAnswer;
import org.apache.cloudstack.storage.command.CreateVolumeFromBaseImageCommand;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.provider.PrimaryDataStoreProvider;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;

View File

@@ -2242,6 +2242,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
cmdList.add(DeleteAlertsCmd.class);
cmdList.add(ArchiveEventsCmd.class);
cmdList.add(DeleteEventsCmd.class);
cmdList.add(ListStorageProvidersCmd.class);
return cmdList;
}

View File

@@ -712,7 +712,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
}
DataStoreProvider provider = this.dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider();
DataStoreLifeCycle lifeCycle = provider.getLifeCycle();
DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
if (pool == null) {
Map<String, Object> params = new HashMap<String, Object>();
String name = (host.getName() + " Local Storage");
@@ -724,7 +724,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
params.put("localStorage", true);
params.put("details", pInfo.getDetails());
params.put("uuid", pInfo.getUuid());
params.put("providerId", provider.getId());
params.put("providerName", provider.getName());
store = lifeCycle.initialize(params);
} else {
@@ -748,15 +748,15 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd)
throws ResourceInUseException, IllegalArgumentException,
UnknownHostException, ResourceUnavailableException {
String providerUuid = cmd.getStorageProviderUuid();
String providerName = cmd.getStorageProviderName();
DataStoreProvider storeProvider = dataStoreProviderMgr
.getDataStoreProviderByUuid(providerUuid);
.getDataStoreProvider(providerName);
if (storeProvider == null) {
storeProvider = dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider();
if (storeProvider == null) {
throw new InvalidParameterValueException(
"can't find storage provider: " + providerUuid);
"can't find storage provider: " + providerName);
}
}
@@ -821,9 +821,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
params.put("tags", cmd.getTags());
params.put("name", cmd.getStoragePoolName());
params.put("details", details);
params.put("providerId", storeProvider.getId());
params.put("providerName", storeProvider.getName());
DataStoreLifeCycle lifeCycle = storeProvider.getLifeCycle();
DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle();
DataStore store = null;
try {
store = lifeCycle.initialize(params);
@@ -948,9 +948,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
s_logger.trace("Released lock for storage pool " + id);
DataStoreProvider storeProvider = dataStoreProviderMgr
.getDataStoreProviderById(sPool.getStorageProviderId());
DataStoreLifeCycle lifeCycle = storeProvider.getLifeCycle();
lifeCycle.deleteDataStore(id);
.getDataStoreProvider(sPool.getStorageProviderName());
DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle();
DataStore store = dataStoreMgr.getDataStore(
sPool.getId(), DataStoreRole.Primary);
lifeCycle.deleteDataStore(store);
return false;
}
@@ -963,8 +965,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
s_logger.debug("Adding pool " + pool.getName() + " to host " + hostId);
DataStoreProvider provider = dataStoreProviderMgr
.getDataStoreProviderById(pool.getStorageProviderId());
HypervisorHostListener listener = hostListeners.get(provider.getUuid());
.getDataStoreProvider(pool.getStorageProviderName());
HypervisorHostListener listener = hostListeners.get(provider.getName());
listener.hostConnect(hostId, pool.getId());
}
@@ -1415,19 +1417,16 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
DataStoreProvider provider = dataStoreProviderMgr
.getDataStoreProviderById(primaryStorage.getStorageProviderId());
DataStoreLifeCycle lifeCycle = provider.getLifeCycle();
lifeCycle.maintain(primaryStorage.getId());
.getDataStoreProvider(primaryStorage.getStorageProviderName());
DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
DataStore store = dataStoreMgr.getDataStore(
primaryStorage.getId(), DataStoreRole.Primary);
lifeCycle.maintain(store);
return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore(
primaryStorage.getId(), DataStoreRole.Primary);
}
private void setPoolStateToError(StoragePoolVO primaryStorage) {
primaryStorage.setStatus(StoragePoolStatus.ErrorInMaintenance);
_storagePoolDao.update(primaryStorage.getId(), primaryStorage);
}
@Override
@DB
public PrimaryDataStoreInfo cancelPrimaryStorageForMaintenance(
@@ -1457,29 +1456,16 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
DataStoreProvider provider = dataStoreProviderMgr
.getDataStoreProviderById(primaryStorage.getStorageProviderId());
DataStoreLifeCycle lifeCycle = provider.getLifeCycle();
lifeCycle.cancelMaintain(primaryStorage.getId());
.getDataStoreProvider(primaryStorage.getStorageProviderName());
DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
DataStore store = dataStoreMgr.getDataStore(
primaryStorage.getId(), DataStoreRole.Primary);
lifeCycle.cancelMaintain(store);
return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore(
primaryStorage.getId(), DataStoreRole.Primary);
}
private boolean sendToVmResidesOn(StoragePoolVO PrimaryDataStoreVO,
Command cmd) {
ClusterVO cluster = _clusterDao.findById(PrimaryDataStoreVO
.getClusterId());
if ((cluster.getHypervisorType() == HypervisorType.KVM || cluster
.getHypervisorType() == HypervisorType.VMware)
&& ((cmd instanceof ManageSnapshotCommand) || (cmd instanceof BackupSnapshotCommand))) {
return true;
} else {
return false;
}
}
protected class StorageGarbageCollector implements Runnable {
public StorageGarbageCollector() {
@@ -1845,9 +1831,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@Override
public synchronized boolean registerHostListener(String providerUuid,
public synchronized boolean registerHostListener(String providerName,
HypervisorHostListener listener) {
hostListeners.put(providerUuid, listener);
hostListeners.put(providerName, listener);
return true;
}

View File

@@ -0,0 +1,26 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.cloud.storage;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
public interface StoragePoolAutomation {
public boolean maintain(DataStore store);
public boolean cancelMaintain(DataStore store);
}
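// Aside (illustrative consumer, not part of the commit): the lifecycle
// implementation earlier in this commit drives the interface in a fixed
// order: maintain() runs the automation (stopping/migrating VMs) before the
// helper flips the pool to Maintenance, while cancelMaintain() flips the pool
// back to Up first so restarted VMs land on a usable pool. Wiring below is
// assumed:
class MaintenanceFlowSketch {
    private final StoragePoolAutomation automation;

    MaintenanceFlowSketch(StoragePoolAutomation automation) {
        this.automation = automation;
    }

    boolean enterMaintenance(DataStore store) {
        return automation.maintain(store); // VM evacuation happens in here
    }

    boolean leaveMaintenance(DataStore store) {
        return automation.cancelMaintain(store); // VMs restarted in here
    }
}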

View File

@@ -0,0 +1,456 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.cloud.storage;
import java.util.List;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.ModifyStoragePoolCommand;
import com.cloud.alert.AlertManager;
import com.cloud.host.HostVO;
import com.cloud.host.Status;
import com.cloud.resource.ResourceManager;
import com.cloud.server.ManagementServer;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.StoragePoolWorkDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.Account;
import com.cloud.user.User;
import com.cloud.user.UserContext;
import com.cloud.user.dao.UserDao;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.exception.ExecutionException;
import com.cloud.vm.ConsoleProxyVO;
import com.cloud.vm.DomainRouterVO;
import com.cloud.vm.SecondaryStorageVmVO;
import com.cloud.vm.UserVmVO;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.State;
import com.cloud.vm.VirtualMachineManager;
import com.cloud.vm.dao.ConsoleProxyDao;
import com.cloud.vm.dao.DomainRouterDao;
import com.cloud.vm.dao.SecondaryStorageVmDao;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.VMInstanceDao;
@Component
public class StoragePoolAutomationImpl implements StoragePoolAutomation {
private static final Logger s_logger = Logger
.getLogger(StoragePoolAutomationImpl.class);
@Inject
protected VirtualMachineManager vmMgr;
@Inject
protected SecondaryStorageVmDao _secStrgDao;
@Inject
UserVmDao userVmDao;
@Inject
protected UserDao _userDao;
@Inject
protected DomainRouterDao _domrDao;
@Inject
protected StoragePoolHostDao _storagePoolHostDao;
@Inject
protected AlertManager _alertMgr;
@Inject
protected ConsoleProxyDao _consoleProxyDao;
@Inject
protected StoragePoolWorkDao _storagePoolWorkDao;
@Inject
PrimaryDataStoreDao primaryDataStoreDao;
@Inject
DataStoreManager dataStoreMgr;
@Inject
protected ResourceManager _resourceMgr;
@Inject
AgentManager agentMgr;
@Inject
VolumeDao volumeDao;
@Inject
VMInstanceDao vmDao;
@Inject
ManagementServer server;
@Inject DataStoreProviderManager providerMgr;
@Override
public boolean maintain(DataStore store) {
Long userId = UserContext.current().getCallerUserId();
User user = _userDao.findById(userId);
Account account = UserContext.current().getCaller();
StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId());
try {
StoragePool storagePool = (StoragePool) store;
List<HostVO> hosts = _resourceMgr.listHostsInClusterByStatus(
pool.getClusterId(), Status.Up);
if (hosts == null || hosts.size() == 0) {
pool.setStatus(StoragePoolStatus.Maintenance);
primaryDataStoreDao.update(pool.getId(), pool);
return true;
} else {
// set the pool state to prepare for maintenance
pool.setStatus(StoragePoolStatus.PrepareForMaintenance);
primaryDataStoreDao.update(pool.getId(), pool);
}
// remove heartbeat
for (HostVO host : hosts) {
ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(
false, storagePool);
final Answer answer = agentMgr.easySend(host.getId(), cmd);
if (answer == null || !answer.getResult()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("ModifyStoragePool false failed due to "
+ ((answer == null) ? "answer null" : answer
.getDetails()));
}
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("ModifyStoragePool false secceeded");
}
}
}
            // check to see if other primary storage pools exist in this zone;
            // if they do, we can migrate the system vms over to them,
            // otherwise just stop all vms on this pool
List<StoragePoolVO> upPools = primaryDataStoreDao
.listByStatusInZone(pool.getDataCenterId(),
StoragePoolStatus.Up);
boolean restart = true;
if (upPools == null || upPools.size() == 0) {
restart = false;
}
            // 2. Get a list of all the volumes within this storage pool
List<VolumeVO> allVolumes = this.volumeDao.findByPoolId(pool
.getId());
// 3. Enqueue to the work queue
for (VolumeVO volume : allVolumes) {
VMInstanceVO vmInstance = vmDao
.findById(volume.getInstanceId());
if (vmInstance == null) {
continue;
}
// enqueue sp work
if (vmInstance.getState().equals(State.Running)
|| vmInstance.getState().equals(State.Starting)
|| vmInstance.getState().equals(State.Stopping)) {
try {
StoragePoolWorkVO work = new StoragePoolWorkVO(
vmInstance.getId(), pool.getId(), false, false,
server.getId());
_storagePoolWorkDao.persist(work);
} catch (Exception e) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Work record already exists, re-using by re-setting values");
}
StoragePoolWorkVO work = _storagePoolWorkDao
.findByPoolIdAndVmId(pool.getId(),
vmInstance.getId());
work.setStartedAfterMaintenance(false);
work.setStoppedForMaintenance(false);
work.setManagementServerId(server.getId());
_storagePoolWorkDao.update(work.getId(), work);
}
}
}
// 4. Process the queue
List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao
.listPendingWorkForPrepareForMaintenanceByPoolId(pool
.getId());
for (StoragePoolWorkVO work : pendingWork) {
// shut down the running vms
VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
if (vmInstance == null) {
continue;
}
// if the instance is of type consoleproxy, call the console
// proxy
if (vmInstance.getType().equals(
VirtualMachine.Type.ConsoleProxy)) {
// call the consoleproxymanager
ConsoleProxyVO consoleProxy = _consoleProxyDao
.findById(vmInstance.getId());
if (!vmMgr.advanceStop(consoleProxy, true, user, account)) {
String errorMsg = "There was an error stopping the console proxy id: "
+ vmInstance.getId()
+ " ,cannot enable storage maintenance";
s_logger.warn(errorMsg);
throw new CloudRuntimeException(errorMsg);
} else {
// update work status
work.setStoppedForMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
if (restart) {
if (this.vmMgr.advanceStart(consoleProxy, null, user,
account) == null) {
String errorMsg = "There was an error starting the console proxy id: "
+ vmInstance.getId()
+ " on another storage pool, cannot enable primary storage maintenance";
s_logger.warn(errorMsg);
} else {
// update work status
work.setStartedAfterMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
}
// if the instance is of type uservm, call the user vm manager
if (vmInstance.getType().equals(VirtualMachine.Type.User)) {
UserVmVO userVm = userVmDao.findById(vmInstance.getId());
if (!vmMgr.advanceStop(userVm, true, user, account)) {
String errorMsg = "There was an error stopping the user vm id: "
+ vmInstance.getId()
+ " ,cannot enable storage maintenance";
s_logger.warn(errorMsg);
throw new CloudRuntimeException(errorMsg);
} else {
// update work status
work.setStoppedForMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
// if the instance is of type secondary storage vm, call the
// secondary storage vm manager
if (vmInstance.getType().equals(
VirtualMachine.Type.SecondaryStorageVm)) {
SecondaryStorageVmVO secStrgVm = _secStrgDao
.findById(vmInstance.getId());
if (!vmMgr.advanceStop(secStrgVm, true, user, account)) {
String errorMsg = "There was an error stopping the ssvm id: "
+ vmInstance.getId()
+ " ,cannot enable storage maintenance";
s_logger.warn(errorMsg);
throw new CloudRuntimeException(errorMsg);
} else {
// update work status
work.setStoppedForMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
if (restart) {
if (vmMgr.advanceStart(secStrgVm, null, user, account) == null) {
String errorMsg = "There was an error starting the ssvm id: "
+ vmInstance.getId()
+ " on another storage pool, cannot enable primary storage maintenance";
s_logger.warn(errorMsg);
} else {
// update work status
work.setStartedAfterMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
}
// if the instance is of type domain router vm, call the network
// manager
if (vmInstance.getType().equals(
VirtualMachine.Type.DomainRouter)) {
DomainRouterVO domR = _domrDao.findById(vmInstance.getId());
if (!vmMgr.advanceStop(domR, true, user, account)) {
String errorMsg = "There was an error stopping the domain router id: "
+ vmInstance.getId()
+ " ,cannot enable primary storage maintenance";
s_logger.warn(errorMsg);
throw new CloudRuntimeException(errorMsg);
} else {
// update work status
work.setStoppedForMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
if (restart) {
if (vmMgr.advanceStart(domR, null, user, account) == null) {
String errorMsg = "There was an error starting the domain router id: "
+ vmInstance.getId()
+ " on another storage pool, cannot enable primary storage maintenance";
s_logger.warn(errorMsg);
} else {
// update work status
work.setStartedAfterMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
}
}
} catch(Exception e) {
s_logger.error(
"Exception in enabling primary storage maintenance:", e);
pool.setStatus(StoragePoolStatus.ErrorInMaintenance);
this.primaryDataStoreDao.update(pool.getId(), pool);
throw new CloudRuntimeException(e.getMessage());
}
return true;
}
@Override
public boolean cancelMaintain(DataStore store) {
// Change the storage state back to up
Long userId = UserContext.current().getCallerUserId();
User user = _userDao.findById(userId);
Account account = UserContext.current().getCaller();
StoragePoolVO poolVO = this.primaryDataStoreDao
.findById(store.getId());
StoragePool pool = (StoragePool)store;
List<HostVO> hosts = _resourceMgr.listHostsInClusterByStatus(
pool.getClusterId(), Status.Up);
if (hosts == null || hosts.size() == 0) {
return true;
}
// add heartbeat
for (HostVO host : hosts) {
ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand(
true, pool);
final Answer answer = agentMgr.easySend(host.getId(), msPoolCmd);
if (answer == null || !answer.getResult()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("ModifyStoragePool add failed due to "
+ ((answer == null) ? "answer null" : answer
.getDetails()));
}
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("ModifyStoragePool add secceeded");
}
}
}
// 2. Get a list of pending work for this queue
List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao
.listPendingWorkForCancelMaintenanceByPoolId(poolVO.getId());
// 3. work through the queue
for (StoragePoolWorkVO work : pendingWork) {
try {
VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
if (vmInstance == null) {
continue;
}
// if the instance is of type consoleproxy, call the console
// proxy
if (vmInstance.getType().equals(
VirtualMachine.Type.ConsoleProxy)) {
ConsoleProxyVO consoleProxy = _consoleProxyDao
.findById(vmInstance.getId());
if (vmMgr.advanceStart(consoleProxy, null, user, account) == null) {
String msg = "There was an error starting the console proxy id: "
+ vmInstance.getId()
+ " on storage pool, cannot complete primary storage maintenance";
s_logger.warn(msg);
throw new ExecutionException(msg);
} else {
// update work queue
work.setStartedAfterMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
// if the instance is of type ssvm, call the ssvm manager
if (vmInstance.getType().equals(
VirtualMachine.Type.SecondaryStorageVm)) {
SecondaryStorageVmVO ssVm = _secStrgDao.findById(vmInstance
.getId());
if (vmMgr.advanceStart(ssVm, null, user, account) == null) {
String msg = "There was an error starting the ssvm id: "
+ vmInstance.getId()
+ " on storage pool, cannot complete primary storage maintenance";
s_logger.warn(msg);
throw new ExecutionException(msg);
} else {
// update work queue
work.setStartedAfterMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
                // if the instance is of type domain router, call the network manager
if (vmInstance.getType().equals(
VirtualMachine.Type.DomainRouter)) {
DomainRouterVO domR = _domrDao.findById(vmInstance.getId());
if (vmMgr.advanceStart(domR, null, user, account) == null) {
String msg = "There was an error starting the domR id: "
+ vmInstance.getId()
+ " on storage pool, cannot complete primary storage maintenance";
s_logger.warn(msg);
throw new ExecutionException(msg);
} else {
// update work queue
work.setStartedAfterMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
// if the instance is of type user vm, call the user vm manager
if (vmInstance.getType().equals(VirtualMachine.Type.User)) {
UserVmVO userVm = userVmDao.findById(vmInstance.getId());
if (vmMgr.advanceStart(userVm, null, user, account) == null) {
String msg = "There was an error starting the user vm id: "
+ vmInstance.getId()
+ " on storage pool, cannot complete primary storage maintenance";
s_logger.warn(msg);
throw new ExecutionException(msg);
} else {
// update work queue
work.setStartedAfterMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
            } catch (Exception e) {
                s_logger.debug("Failed to start vm", e);
                throw new CloudRuntimeException(e.toString());
            }
        }
        return true;
}
}
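
Design note: the per-VM-type branches in maintain() repeat one stop, mark-work, optional-restart sequence. A hedged sketch of how that pattern could be factored into a single helper (helper name hypothetical; it relies only on the vmMgr and _storagePoolWorkDao calls already used above, with checked exceptions left to the caller's try block):

    // Hypothetical refactor of the repeated block in maintain(); not part of this commit.
    private void stopAndMaybeRestart(VMInstanceVO vm, StoragePoolWorkVO work,
            boolean restart, User user, Account account) throws Exception {
        if (!vmMgr.advanceStop(vm, true, user, account)) {
            throw new CloudRuntimeException("Error stopping vm " + vm.getId()
                    + ", cannot enable storage maintenance");
        }
        work.setStoppedForMaintenance(true);
        _storagePoolWorkDao.update(work.getId(), work);
        if (restart && vmMgr.advanceStart(vm, null, user, account) != null) {
            work.setStartedAfterMaintenance(true);
            _storagePoolWorkDao.update(work.getId(), work);
        }
    }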

View File

@ -29,6 +29,7 @@ DELETE FROM `cloud`.`configuration` where name='vmware.percluster.host.max';
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'AgentManager', 'xen.nics.max', '7', 'Maximum allowed nics for Vms created on Xen');
ALTER TABLE `cloud`.`load_balancer_vm_map` ADD state VARCHAR(40) NULL COMMENT 'service status updated by LB healthcheck manager';
ALTER TABLE storage_pool CHANGE storage_provider_id storage_provider_name varchar(255);
ALTER TABLE template_host_ref ADD state varchar(255);
ALTER TABLE template_host_ref ADD update_count bigint unsigned;
ALTER TABLE template_host_ref ADD updated datetime;
@ -70,13 +71,12 @@ CREATE TABLE `cloud`.`data_store_provider` (
CREATE TABLE `cloud`.`image_data_store` (
`id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
`name` varchar(255) NOT NULL COMMENT 'name of data store',
`image_provider_id` bigint unsigned NOT NULL COMMENT 'id of image_data_store_provider',
`image_provider_name` varchar(255) NOT NULL COMMENT 'name of image_data_store_provider',
`protocol` varchar(255) NOT NULL COMMENT 'protocol of data store',
`data_center_id` bigint unsigned COMMENT 'datacenter id of data store',
`scope` varchar(255) COMMENT 'scope of data store',
`uuid` varchar(255) COMMENT 'uuid of data store',
PRIMARY KEY(`id`),
CONSTRAINT `fk_tags__image_data_store_provider_id` FOREIGN KEY(`image_provider_id`) REFERENCES `data_store_provider`(`id`)
PRIMARY KEY(`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
ALTER TABLE `cloud`.`vm_template` ADD COLUMN `image_data_store_id` bigint unsigned;
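
These schema changes track the move from provider ids to provider names: storage_pool now stores storage_provider_name, and image_data_store stores image_provider_name with the old foreign key dropped. A hedged sketch of the matching name-based lookup on the Java side (getDataStoreProvider(String) is assumed from DataStoreProviderManager; the error handling is illustrative):

    // Resolve a provider by the name now persisted in storage_pool.storage_provider_name.
    DataStoreProvider provider = providerMgr.getDataStoreProvider(pool.getStorageProviderName());
    if (provider == null) {
        throw new CloudRuntimeException("No storage provider found by name: "
                + pool.getStorageProviderName());
    }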

View File

@ -95,6 +95,7 @@ known_categories = {
'InstanceGroup': 'VM Group',
'StorageMaintenance': 'Storage Pool',
'StoragePool': 'Storage Pool',
'StorageProvider': 'Storage Pool',
'SecurityGroup': 'Security Group',
'SSH': 'SSH',
'register': 'Registration',