mirror of
https://github.com/apache/cloudstack.git
synced 2025-10-26 08:42:29 +01:00
move default primary storage plugin into its own pom
This commit is contained in:
parent
9270b4335c
commit
3ed6200ef8
28
api/src/com/cloud/storage/DataStoreProviderApiService.java
Normal file
28
api/src/com/cloud/storage/DataStoreProviderApiService.java
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
package com.cloud.storage;
|
||||||
|
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
import org.apache.cloudstack.api.response.StorageProviderResponse;
|
||||||
|
|
||||||
|
public interface DataStoreProviderApiService {
|
||||||
|
public List<StorageProviderResponse> getDataStoreProviders(String type);
|
||||||
|
|
||||||
|
}
|
||||||
@ -99,7 +99,7 @@ public interface StoragePool extends Identity, InternalIdentity {
|
|||||||
/**
|
/**
|
||||||
* @return
|
* @return
|
||||||
*/
|
*/
|
||||||
Long getStorageProviderId();
|
String getStorageProviderName();
|
||||||
|
|
||||||
boolean isInMaintenance();
|
boolean isInMaintenance();
|
||||||
}
|
}
|
||||||
|
|||||||
@ -61,6 +61,7 @@ import com.cloud.projects.ProjectService;
|
|||||||
import com.cloud.resource.ResourceService;
|
import com.cloud.resource.ResourceService;
|
||||||
import com.cloud.server.ManagementService;
|
import com.cloud.server.ManagementService;
|
||||||
import com.cloud.server.TaggedResourceService;
|
import com.cloud.server.TaggedResourceService;
|
||||||
|
import com.cloud.storage.DataStoreProviderApiService;
|
||||||
import com.cloud.storage.StorageService;
|
import com.cloud.storage.StorageService;
|
||||||
import com.cloud.storage.VolumeApiService;
|
import com.cloud.storage.VolumeApiService;
|
||||||
import com.cloud.storage.snapshot.SnapshotService;
|
import com.cloud.storage.snapshot.SnapshotService;
|
||||||
@ -131,6 +132,7 @@ public abstract class BaseCmd {
|
|||||||
@Inject public UsageService _usageService;
|
@Inject public UsageService _usageService;
|
||||||
@Inject public NetworkUsageService _networkUsageService;
|
@Inject public NetworkUsageService _networkUsageService;
|
||||||
@Inject public VMSnapshotService _vmSnapshotService;
|
@Inject public VMSnapshotService _vmSnapshotService;
|
||||||
|
@Inject public DataStoreProviderApiService dataStoreProviderApiService;
|
||||||
|
|
||||||
public abstract void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException;
|
public abstract void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException;
|
||||||
|
|
||||||
|
|||||||
@ -73,8 +73,8 @@ public class CreateStoragePoolCmd extends BaseCmd {
|
|||||||
private Long zoneId;
|
private Long zoneId;
|
||||||
|
|
||||||
@Parameter(name=ApiConstants.PROVIDER, type=CommandType.STRING,
|
@Parameter(name=ApiConstants.PROVIDER, type=CommandType.STRING,
|
||||||
required=false, description="the storage provider uuid")
|
required=false, description="the storage provider name")
|
||||||
private String storageProviderUuid;
|
private String storageProviderName;
|
||||||
|
|
||||||
@Parameter(name=ApiConstants.SCOPE, type=CommandType.STRING,
|
@Parameter(name=ApiConstants.SCOPE, type=CommandType.STRING,
|
||||||
required=false, description="the scope of the storage: cluster or zone")
|
required=false, description="the scope of the storage: cluster or zone")
|
||||||
@ -112,8 +112,8 @@ public class CreateStoragePoolCmd extends BaseCmd {
|
|||||||
return zoneId;
|
return zoneId;
|
||||||
}
|
}
|
||||||
|
|
||||||
public String getStorageProviderUuid() {
|
public String getStorageProviderName() {
|
||||||
return this.storageProviderUuid;
|
return this.storageProviderName;
|
||||||
}
|
}
|
||||||
|
|
||||||
public String getScope() {
|
public String getScope() {
|
||||||
|
|||||||
@ -0,0 +1,72 @@
|
|||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.cloudstack.api.command.admin.storage;
|
||||||
|
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
import org.apache.cloudstack.api.APICommand;
|
||||||
|
import org.apache.cloudstack.api.ApiConstants;
|
||||||
|
import org.apache.cloudstack.api.ApiErrorCode;
|
||||||
|
import org.apache.cloudstack.api.BaseListCmd;
|
||||||
|
import org.apache.cloudstack.api.Parameter;
|
||||||
|
import org.apache.cloudstack.api.ServerApiException;
|
||||||
|
import org.apache.cloudstack.api.response.ListResponse;
|
||||||
|
import org.apache.cloudstack.api.response.StorageProviderResponse;
|
||||||
|
import org.apache.log4j.Logger;
|
||||||
|
|
||||||
|
import com.cloud.exception.ConcurrentOperationException;
|
||||||
|
import com.cloud.exception.InsufficientCapacityException;
|
||||||
|
import com.cloud.exception.NetworkRuleConflictException;
|
||||||
|
import com.cloud.exception.ResourceAllocationException;
|
||||||
|
import com.cloud.exception.ResourceUnavailableException;
|
||||||
|
|
||||||
|
@APICommand(name = "listStorageProviders", description="Lists storage providers.", responseObject=StorageProviderResponse.class)
|
||||||
|
public class ListStorageProvidersCmd extends BaseListCmd {
|
||||||
|
public static final Logger s_logger = Logger.getLogger(ListStorageProvidersCmd.class.getName());
|
||||||
|
private static final String s_name = "liststorageprovidersresponse";
|
||||||
|
|
||||||
|
@Parameter(name=ApiConstants.TYPE, type=CommandType.STRING, description="the type of storage provider: either primary or image", required = true)
|
||||||
|
private String type;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getCommandName() {
|
||||||
|
return s_name;
|
||||||
|
}
|
||||||
|
|
||||||
|
public String getType() {
|
||||||
|
return this.type;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException,
|
||||||
|
NetworkRuleConflictException {
|
||||||
|
if (getType() == null) {
|
||||||
|
throw new ServerApiException(ApiErrorCode.MALFORMED_PARAMETER_ERROR, "need to specify type: either primary or image");
|
||||||
|
}
|
||||||
|
|
||||||
|
List<StorageProviderResponse> providers = this.dataStoreProviderApiService.getDataStoreProviders(getType());
|
||||||
|
ListResponse<StorageProviderResponse> responses = new ListResponse<StorageProviderResponse>();
|
||||||
|
for (StorageProviderResponse provider : providers) {
|
||||||
|
provider.setObjectName("dataStoreProvider");
|
||||||
|
}
|
||||||
|
responses.setResponses(providers);
|
||||||
|
responses.setResponseName(this.getCommandName());
|
||||||
|
this.setResponseObject(responses);
|
||||||
|
}
|
||||||
|
}
|
||||||
@ -0,0 +1,62 @@
|
|||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.cloudstack.api.response;
|
||||||
|
|
||||||
|
import org.apache.cloudstack.api.BaseResponse;
|
||||||
|
|
||||||
|
import com.cloud.serializer.Param;
|
||||||
|
import com.google.gson.annotations.SerializedName;
|
||||||
|
|
||||||
|
public class StorageProviderResponse extends BaseResponse {
|
||||||
|
@SerializedName("name") @Param(description="the name of the storage provider")
|
||||||
|
private String name;
|
||||||
|
|
||||||
|
@SerializedName("type") @Param(description="the type of the storage provider: primary or image provider")
|
||||||
|
private String type;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @return the type
|
||||||
|
*/
|
||||||
|
public String getType() {
|
||||||
|
return type;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param type the type to set
|
||||||
|
*/
|
||||||
|
public void setType(String type) {
|
||||||
|
this.type = type;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @return the name
|
||||||
|
*/
|
||||||
|
public String getName() {
|
||||||
|
return name;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param name the name to set
|
||||||
|
*/
|
||||||
|
public void setName(String name) {
|
||||||
|
this.name = name;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
@ -133,7 +133,7 @@ public class BackupSnapshotCommandTest {
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Long getStorageProviderId() {
|
public String getStorageProviderName() {
|
||||||
// TODO Auto-generated method stub
|
// TODO Auto-generated method stub
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|||||||
@ -115,7 +115,7 @@ public class SnapshotCommandTest {
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Long getStorageProviderId() {
|
public String getStorageProviderName() {
|
||||||
// TODO Auto-generated method stub
|
// TODO Auto-generated method stub
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|||||||
@ -134,7 +134,7 @@ public class ResizeVolumeCommandTest {
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Long getStorageProviderId() {
|
public String getStorageProviderName() {
|
||||||
// TODO Auto-generated method stub
|
// TODO Auto-generated method stub
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|||||||
@ -224,6 +224,11 @@
|
|||||||
<artifactId>cloud-plugin-hypervisor-simulator</artifactId>
|
<artifactId>cloud-plugin-hypervisor-simulator</artifactId>
|
||||||
<version>${project.version}</version>
|
<version>${project.version}</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.cloudstack</groupId>
|
||||||
|
<artifactId>cloud-plugin-storage-volume-default</artifactId>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
</dependency>
|
||||||
</dependencies>
|
</dependencies>
|
||||||
<build>
|
<build>
|
||||||
<defaultGoal>install</defaultGoal>
|
<defaultGoal>install</defaultGoal>
|
||||||
|
|||||||
@ -278,6 +278,7 @@ listAsyncJobs=15
|
|||||||
|
|
||||||
#### storage pools commands
|
#### storage pools commands
|
||||||
listStoragePools=3
|
listStoragePools=3
|
||||||
|
listStorageProviders=3
|
||||||
createStoragePool=1
|
createStoragePool=1
|
||||||
updateStoragePool=1
|
updateStoragePool=1
|
||||||
deleteStoragePool=1
|
deleteStoragePool=1
|
||||||
|
|||||||
@ -330,4 +330,7 @@
|
|||||||
<property name="name" value="BaremetalGuru"/>
|
<property name="name" value="BaremetalGuru"/>
|
||||||
</bean>
|
</bean>
|
||||||
|
|
||||||
|
<bean id="ClassicalPrimaryDataStoreProvider" class="org.apache.cloudstack.storage.datastore.provider.CloudStackPrimaryDataStoreProviderImpl">
|
||||||
|
</bean>
|
||||||
|
|
||||||
</beans>
|
</beans>
|
||||||
|
|||||||
@ -21,4 +21,5 @@ package org.apache.cloudstack.engine.datacenter.entity.api;
|
|||||||
import com.cloud.storage.StoragePool;
|
import com.cloud.storage.StoragePool;
|
||||||
|
|
||||||
public interface StorageEntity extends DataCenterResourceEntity, StoragePool {
|
public interface StorageEntity extends DataCenterResourceEntity, StoragePool {
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@ -34,9 +34,9 @@ public interface DataStoreLifeCycle {
|
|||||||
|
|
||||||
public boolean unmanaged();
|
public boolean unmanaged();
|
||||||
|
|
||||||
public boolean maintain(long storeId);
|
public boolean maintain(DataStore store);
|
||||||
|
|
||||||
public boolean cancelMaintain(long storeId);
|
public boolean cancelMaintain(DataStore store);
|
||||||
|
|
||||||
public boolean deleteDataStore(long storeId);
|
public boolean deleteDataStore(DataStore store);
|
||||||
}
|
}
|
||||||
|
|||||||
@ -19,12 +19,19 @@
|
|||||||
package org.apache.cloudstack.engine.subsystem.api.storage;
|
package org.apache.cloudstack.engine.subsystem.api.storage;
|
||||||
|
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
import java.util.Set;
|
||||||
|
|
||||||
|
|
||||||
public interface DataStoreProvider {
|
public interface DataStoreProvider {
|
||||||
public DataStoreLifeCycle getLifeCycle();
|
public static enum DataStoreProviderType {
|
||||||
|
PRIMARY,
|
||||||
|
IMAGE
|
||||||
|
}
|
||||||
|
public DataStoreLifeCycle getDataStoreLifeCycle();
|
||||||
|
public DataStoreDriver getDataStoreDriver();
|
||||||
|
public HypervisorHostListener getHostListener();
|
||||||
public String getName();
|
public String getName();
|
||||||
public String getUuid();
|
|
||||||
public long getId();
|
|
||||||
public boolean configure(Map<String, Object> params);
|
public boolean configure(Map<String, Object> params);
|
||||||
|
public Set<DataStoreProviderType> getTypes();
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@ -20,12 +20,12 @@ package org.apache.cloudstack.engine.subsystem.api.storage;
|
|||||||
|
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
|
||||||
|
import com.cloud.storage.DataStoreProviderApiService;
|
||||||
import com.cloud.utils.component.Manager;
|
import com.cloud.utils.component.Manager;
|
||||||
|
|
||||||
public interface DataStoreProviderManager extends Manager {
|
public interface DataStoreProviderManager extends Manager, DataStoreProviderApiService {
|
||||||
public DataStoreProvider getDataStoreProviderByUuid(String uuid);
|
|
||||||
public DataStoreProvider getDataStoreProviderById(long id);
|
|
||||||
public DataStoreProvider getDataStoreProvider(String name);
|
public DataStoreProvider getDataStoreProvider(String name);
|
||||||
public DataStoreProvider getDefaultPrimaryDataStoreProvider();
|
public DataStoreProvider getDefaultPrimaryDataStoreProvider();
|
||||||
public List<DataStoreProvider> getDataStoreProviders();
|
public List<DataStoreProvider> getDataStoreProviders();
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@ -16,9 +16,8 @@
|
|||||||
* specific language governing permissions and limitations
|
* specific language governing permissions and limitations
|
||||||
* under the License.
|
* under the License.
|
||||||
*/
|
*/
|
||||||
package org.apache.cloudstack.storage.datastore.provider;
|
package org.apache.cloudstack.engine.subsystem.api.storage;
|
||||||
|
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
|
|
||||||
|
|
||||||
public interface ImageDataStoreProvider extends DataStoreProvider {
|
public interface ImageDataStoreProvider extends DataStoreProvider {
|
||||||
|
|
||||||
@ -0,0 +1,220 @@
|
|||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.cloudstack.engine.subsystem.api.storage;
|
||||||
|
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
|
import com.cloud.storage.Storage.StoragePoolType;
|
||||||
|
|
||||||
|
public class PrimaryDataStoreParameters {
|
||||||
|
private Long zoneId;
|
||||||
|
private Long podId;
|
||||||
|
private Long clusterId;
|
||||||
|
private String providerName;
|
||||||
|
private Map<String, String> details;
|
||||||
|
private String tags;
|
||||||
|
private StoragePoolType type;
|
||||||
|
private String host;
|
||||||
|
private String path;
|
||||||
|
private int port;
|
||||||
|
private String uuid;
|
||||||
|
private String name;
|
||||||
|
private String userInfo;
|
||||||
|
/**
|
||||||
|
* @return the userInfo
|
||||||
|
*/
|
||||||
|
public String getUserInfo() {
|
||||||
|
return userInfo;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param userInfo the userInfo to set
|
||||||
|
*/
|
||||||
|
public void setUserInfo(String userInfo) {
|
||||||
|
this.userInfo = userInfo;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @return the name
|
||||||
|
*/
|
||||||
|
public String getName() {
|
||||||
|
return name;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param name the name to set
|
||||||
|
*/
|
||||||
|
public void setName(String name) {
|
||||||
|
this.name = name;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @return the uuid
|
||||||
|
*/
|
||||||
|
public String getUuid() {
|
||||||
|
return uuid;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param uuid the uuid to set
|
||||||
|
*/
|
||||||
|
public void setUuid(String uuid) {
|
||||||
|
this.uuid = uuid;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @return the port
|
||||||
|
*/
|
||||||
|
public int getPort() {
|
||||||
|
return port;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param port the port to set
|
||||||
|
*/
|
||||||
|
public void setPort(int port) {
|
||||||
|
this.port = port;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @return the path
|
||||||
|
*/
|
||||||
|
public String getPath() {
|
||||||
|
return path;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param path the path to set
|
||||||
|
*/
|
||||||
|
public void setPath(String path) {
|
||||||
|
this.path = path;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @return the host
|
||||||
|
*/
|
||||||
|
public String getHost() {
|
||||||
|
return host;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param host the host to set
|
||||||
|
*/
|
||||||
|
public void setHost(String host) {
|
||||||
|
this.host = host;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @return the type
|
||||||
|
*/
|
||||||
|
public StoragePoolType getType() {
|
||||||
|
return type;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param type the type to set
|
||||||
|
*/
|
||||||
|
public void setType(StoragePoolType type) {
|
||||||
|
this.type = type;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @return the tags
|
||||||
|
*/
|
||||||
|
public String getTags() {
|
||||||
|
return tags;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param tags the tags to set
|
||||||
|
*/
|
||||||
|
public void setTags(String tags) {
|
||||||
|
this.tags = tags;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @return the details
|
||||||
|
*/
|
||||||
|
public Map<String, String> getDetails() {
|
||||||
|
return details;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param details the details to set
|
||||||
|
*/
|
||||||
|
public void setDetails(Map<String, String> details) {
|
||||||
|
this.details = details;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @return the providerName
|
||||||
|
*/
|
||||||
|
public String getProviderName() {
|
||||||
|
return providerName;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param providerName the providerName to set
|
||||||
|
*/
|
||||||
|
public void setProviderName(String providerName) {
|
||||||
|
this.providerName = providerName;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @return the clusterId
|
||||||
|
*/
|
||||||
|
public Long getClusterId() {
|
||||||
|
return clusterId;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param clusterId the clusterId to set
|
||||||
|
*/
|
||||||
|
public void setClusterId(Long clusterId) {
|
||||||
|
this.clusterId = clusterId;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @return the podId
|
||||||
|
*/
|
||||||
|
public Long getPodId() {
|
||||||
|
return podId;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param podId the podId to set
|
||||||
|
*/
|
||||||
|
public void setPodId(Long podId) {
|
||||||
|
this.podId = podId;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @return the zoneId
|
||||||
|
*/
|
||||||
|
public Long getZoneId() {
|
||||||
|
return zoneId;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param zoneId the zoneId to set
|
||||||
|
*/
|
||||||
|
public void setZoneId(Long zoneId) {
|
||||||
|
this.zoneId = zoneId;
|
||||||
|
}
|
||||||
|
}
|
||||||
@ -14,3 +14,7 @@
|
|||||||
// KIND, either express or implied. See the License for the
|
// KIND, either express or implied. See the License for the
|
||||||
// specific language governing permissions and limitations
|
// specific language governing permissions and limitations
|
||||||
// under the License.
|
// under the License.
|
||||||
|
package org.apache.cloudstack.engine.subsystem.api.storage;
|
||||||
|
|
||||||
|
public interface PrimaryDataStoreProvider extends DataStoreProvider {
|
||||||
|
}
|
||||||
|
|||||||
@ -80,8 +80,8 @@ public class StoragePoolVO implements StoragePool{
|
|||||||
@Enumerated(value = EnumType.STRING)
|
@Enumerated(value = EnumType.STRING)
|
||||||
private StoragePoolStatus status;
|
private StoragePoolStatus status;
|
||||||
|
|
||||||
@Column(name = "storage_provider_id", updatable = true, nullable = false)
|
@Column(name = "storage_provider_name", updatable = true, nullable = false)
|
||||||
private Long storageProviderId;
|
private String storageProviderName;
|
||||||
|
|
||||||
@Column(name = "host_address")
|
@Column(name = "host_address")
|
||||||
private String hostAddress;
|
private String hostAddress;
|
||||||
@ -180,12 +180,12 @@ public class StoragePoolVO implements StoragePool{
|
|||||||
return availableBytes;
|
return availableBytes;
|
||||||
}
|
}
|
||||||
|
|
||||||
public Long getStorageProviderId() {
|
public String getStorageProviderName() {
|
||||||
return storageProviderId;
|
return storageProviderName;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setStorageProviderId(Long provider) {
|
public void setStorageProviderName(String providerName) {
|
||||||
storageProviderId = provider;
|
storageProviderName = providerName;
|
||||||
}
|
}
|
||||||
|
|
||||||
public long getCapacityBytes() {
|
public long getCapacityBytes() {
|
||||||
|
|||||||
@ -28,7 +28,7 @@ import javax.inject.Inject;
|
|||||||
|
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
|
||||||
import org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider;
|
import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider;
|
||||||
import org.apache.cloudstack.storage.image.ImageDataStoreDriver;
|
import org.apache.cloudstack.storage.image.ImageDataStoreDriver;
|
||||||
import org.apache.cloudstack.storage.image.datastore.ImageDataStore;
|
import org.apache.cloudstack.storage.image.datastore.ImageDataStore;
|
||||||
import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager;
|
import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager;
|
||||||
@ -57,21 +57,21 @@ public class ImageDataStoreManagerImpl implements ImageDataStoreManager {
|
|||||||
@Override
|
@Override
|
||||||
public ImageDataStore getImageDataStore(long dataStoreId) {
|
public ImageDataStore getImageDataStore(long dataStoreId) {
|
||||||
ImageDataStoreVO dataStore = dataStoreDao.findById(dataStoreId);
|
ImageDataStoreVO dataStore = dataStoreDao.findById(dataStoreId);
|
||||||
long providerId = dataStore.getProvider();
|
String providerName = dataStore.getProviderName();
|
||||||
ImageDataStoreProvider provider = (ImageDataStoreProvider)providerManager.getDataStoreProviderById(providerId);
|
ImageDataStoreProvider provider = (ImageDataStoreProvider)providerManager.getDataStoreProvider(providerName);
|
||||||
ImageDataStore imgStore = DefaultImageDataStoreImpl.getDataStore(dataStore,
|
ImageDataStore imgStore = DefaultImageDataStoreImpl.getDataStore(dataStore,
|
||||||
driverMaps.get(provider.getUuid()), provider
|
driverMaps.get(provider.getName()), provider
|
||||||
);
|
);
|
||||||
// TODO Auto-generated method stub
|
// TODO Auto-generated method stub
|
||||||
return imgStore;
|
return imgStore;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean registerDriver(String uuid, ImageDataStoreDriver driver) {
|
public boolean registerDriver(String providerName, ImageDataStoreDriver driver) {
|
||||||
if (driverMaps.containsKey(uuid)) {
|
if (driverMaps.containsKey(providerName)) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
driverMaps.put(uuid, driver);
|
driverMaps.put(providerName, driver);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -19,14 +19,18 @@
|
|||||||
package org.apache.cloudstack.storage.image.store;
|
package org.apache.cloudstack.storage.image.store;
|
||||||
|
|
||||||
import java.util.HashMap;
|
import java.util.HashMap;
|
||||||
|
import java.util.HashSet;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
import java.util.Set;
|
||||||
import java.util.UUID;
|
import java.util.UUID;
|
||||||
|
|
||||||
import javax.inject.Inject;
|
import javax.inject.Inject;
|
||||||
|
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider;
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
|
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
|
||||||
import org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider;
|
|
||||||
import org.apache.cloudstack.storage.image.ImageDataStoreDriver;
|
import org.apache.cloudstack.storage.image.ImageDataStoreDriver;
|
||||||
import org.apache.cloudstack.storage.image.datastore.ImageDataStoreHelper;
|
import org.apache.cloudstack.storage.image.datastore.ImageDataStoreHelper;
|
||||||
import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager;
|
import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager;
|
||||||
@ -47,10 +51,9 @@ public class AncientImageDataStoreProvider implements ImageDataStoreProvider {
|
|||||||
ImageDataStoreManager storeMgr;
|
ImageDataStoreManager storeMgr;
|
||||||
@Inject
|
@Inject
|
||||||
ImageDataStoreHelper helper;
|
ImageDataStoreHelper helper;
|
||||||
long id;
|
|
||||||
String uuid;
|
|
||||||
@Override
|
@Override
|
||||||
public DataStoreLifeCycle getLifeCycle() {
|
public DataStoreLifeCycle getDataStoreLifeCycle() {
|
||||||
return lifeCycle;
|
return lifeCycle;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -59,23 +62,12 @@ public class AncientImageDataStoreProvider implements ImageDataStoreProvider {
|
|||||||
return this.name;
|
return this.name;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public String getUuid() {
|
|
||||||
return this.uuid;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public long getId() {
|
|
||||||
return this.id;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean configure(Map<String, Object> params) {
|
public boolean configure(Map<String, Object> params) {
|
||||||
lifeCycle = ComponentContext.inject(DefaultImageDataStoreLifeCycle.class);
|
lifeCycle = ComponentContext.inject(DefaultImageDataStoreLifeCycle.class);
|
||||||
driver = ComponentContext.inject(AncientImageDataStoreDriverImpl.class);
|
driver = ComponentContext.inject(AncientImageDataStoreDriverImpl.class);
|
||||||
uuid = (String)params.get("uuid");
|
|
||||||
id = (Long)params.get("id");
|
storeMgr.registerDriver(this.getName(), driver);
|
||||||
storeMgr.registerDriver(uuid, driver);
|
|
||||||
|
|
||||||
Map<String, Object> infos = new HashMap<String, Object>();
|
Map<String, Object> infos = new HashMap<String, Object>();
|
||||||
String dataStoreName = UUID.nameUUIDFromBytes(this.name.getBytes()).toString();
|
String dataStoreName = UUID.nameUUIDFromBytes(this.name.getBytes()).toString();
|
||||||
@ -83,10 +75,27 @@ public class AncientImageDataStoreProvider implements ImageDataStoreProvider {
|
|||||||
infos.put("uuid", dataStoreName);
|
infos.put("uuid", dataStoreName);
|
||||||
infos.put("protocol", "http");
|
infos.put("protocol", "http");
|
||||||
infos.put("scope", ScopeType.GLOBAL);
|
infos.put("scope", ScopeType.GLOBAL);
|
||||||
infos.put("provider", this.getId());
|
infos.put("providerName", this.getName());
|
||||||
DataStoreLifeCycle lifeCycle = this.getLifeCycle();
|
DataStoreLifeCycle lifeCycle = this.getDataStoreLifeCycle();
|
||||||
lifeCycle.initialize(infos);
|
lifeCycle.initialize(infos);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public DataStoreDriver getDataStoreDriver() {
|
||||||
|
return this.driver;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public HypervisorHostListener getHostListener() {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Set<DataStoreProviderType> getTypes() {
|
||||||
|
Set<DataStoreProviderType> types = new HashSet<DataStoreProviderType>();
|
||||||
|
types.add(DataStoreProviderType.IMAGE);
|
||||||
|
return types;
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@ -25,13 +25,13 @@ import javax.inject.Inject;
|
|||||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider;
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
|
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
|
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
|
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
|
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
|
||||||
import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager;
|
import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager;
|
||||||
import org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider;
|
|
||||||
import org.apache.cloudstack.storage.image.ImageDataStoreDriver;
|
import org.apache.cloudstack.storage.image.ImageDataStoreDriver;
|
||||||
import org.apache.cloudstack.storage.image.datastore.ImageDataStore;
|
import org.apache.cloudstack.storage.image.datastore.ImageDataStore;
|
||||||
import org.apache.cloudstack.storage.image.db.ImageDataStoreVO;
|
import org.apache.cloudstack.storage.image.db.ImageDataStoreVO;
|
||||||
|
|||||||
@ -18,12 +18,16 @@
|
|||||||
*/
|
*/
|
||||||
package org.apache.cloudstack.storage.image.store;
|
package org.apache.cloudstack.storage.image.store;
|
||||||
|
|
||||||
|
import java.util.HashSet;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
import java.util.Set;
|
||||||
|
|
||||||
import javax.inject.Inject;
|
import javax.inject.Inject;
|
||||||
|
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
|
||||||
import org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider;
|
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider;
|
||||||
import org.apache.cloudstack.storage.image.ImageDataStoreDriver;
|
import org.apache.cloudstack.storage.image.ImageDataStoreDriver;
|
||||||
import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager;
|
import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager;
|
||||||
import org.apache.cloudstack.storage.image.driver.DefaultImageDataStoreDriverImpl;
|
import org.apache.cloudstack.storage.image.driver.DefaultImageDataStoreDriverImpl;
|
||||||
@ -41,7 +45,7 @@ public class DefaultImageDataStoreProvider implements ImageDataStoreProvider {
|
|||||||
long id;
|
long id;
|
||||||
String uuid;
|
String uuid;
|
||||||
@Override
|
@Override
|
||||||
public DataStoreLifeCycle getLifeCycle() {
|
public DataStoreLifeCycle getDataStoreLifeCycle() {
|
||||||
return lifeCycle;
|
return lifeCycle;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -50,24 +54,29 @@ public class DefaultImageDataStoreProvider implements ImageDataStoreProvider {
|
|||||||
return this.name;
|
return this.name;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public String getUuid() {
|
|
||||||
return this.uuid;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public long getId() {
|
|
||||||
return this.id;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean configure(Map<String, Object> params) {
|
public boolean configure(Map<String, Object> params) {
|
||||||
lifeCycle = ComponentContext.inject(DefaultImageDataStoreLifeCycle.class);
|
lifeCycle = ComponentContext.inject(DefaultImageDataStoreLifeCycle.class);
|
||||||
driver = ComponentContext.inject(DefaultImageDataStoreDriverImpl.class);
|
driver = ComponentContext.inject(DefaultImageDataStoreDriverImpl.class);
|
||||||
uuid = (String)params.get("uuid");
|
|
||||||
id = (Long)params.get("id");
|
storeMgr.registerDriver(this.getName(), driver);
|
||||||
storeMgr.registerDriver(uuid, driver);
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Set<DataStoreProviderType> getTypes() {
|
||||||
|
Set<DataStoreProviderType> types = new HashSet<DataStoreProviderType>();
|
||||||
|
types.add(DataStoreProviderType.IMAGE);
|
||||||
|
return types;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public DataStoreDriver getDataStoreDriver() {
|
||||||
|
return this.driver;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public HypervisorHostListener getHostListener() {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -86,27 +86,22 @@ public class DefaultImageDataStoreLifeCycle implements ImageDataStoreLifeCycle {
|
|||||||
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean maintain(long storeId) {
|
public boolean maintain(DataStore store) {
|
||||||
// TODO Auto-generated method stub
|
// TODO Auto-generated method stub
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean cancelMaintain(long storeId) {
|
public boolean cancelMaintain(DataStore store) {
|
||||||
// TODO Auto-generated method stub
|
// TODO Auto-generated method stub
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean deleteDataStore(long storeId) {
|
public boolean deleteDataStore(DataStore store) {
|
||||||
// TODO Auto-generated method stub
|
// TODO Auto-generated method stub
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@ -133,7 +133,7 @@ public class StorageAllocatorTest {
|
|||||||
storage.setCapacityBytes(20000);
|
storage.setCapacityBytes(20000);
|
||||||
storage.setHostAddress(UUID.randomUUID().toString());
|
storage.setHostAddress(UUID.randomUUID().toString());
|
||||||
storage.setPath(UUID.randomUUID().toString());
|
storage.setPath(UUID.randomUUID().toString());
|
||||||
storage.setStorageProviderId(provider.getId());
|
storage.setStorageProviderName(provider.getName());
|
||||||
storage = storagePoolDao.persist(storage);
|
storage = storagePoolDao.persist(storage);
|
||||||
storagePoolId = storage.getId();
|
storagePoolId = storage.getId();
|
||||||
|
|
||||||
@ -176,7 +176,7 @@ public class StorageAllocatorTest {
|
|||||||
storage.setCapacityBytes(20000);
|
storage.setCapacityBytes(20000);
|
||||||
storage.setHostAddress(UUID.randomUUID().toString());
|
storage.setHostAddress(UUID.randomUUID().toString());
|
||||||
storage.setPath(UUID.randomUUID().toString());
|
storage.setPath(UUID.randomUUID().toString());
|
||||||
storage.setStorageProviderId(provider.getId());
|
storage.setStorageProviderName(provider.getName());
|
||||||
StoragePoolVO newStorage = storagePoolDao.persist(storage);
|
StoragePoolVO newStorage = storagePoolDao.persist(storage);
|
||||||
newStorageId = newStorage.getId();
|
newStorageId = newStorage.getId();
|
||||||
|
|
||||||
|
|||||||
@ -281,9 +281,9 @@ public class volumeServiceTest extends CloudStackTestNGBase {
|
|||||||
params.put("port", "1");
|
params.put("port", "1");
|
||||||
params.put("roles", DataStoreRole.Primary.toString());
|
params.put("roles", DataStoreRole.Primary.toString());
|
||||||
params.put("uuid", UUID.nameUUIDFromBytes(this.getPrimaryStorageUrl().getBytes()).toString());
|
params.put("uuid", UUID.nameUUIDFromBytes(this.getPrimaryStorageUrl().getBytes()).toString());
|
||||||
params.put("providerId", String.valueOf(provider.getId()));
|
params.put("providerName", String.valueOf(provider.getName()));
|
||||||
|
|
||||||
DataStoreLifeCycle lifeCycle = provider.getLifeCycle();
|
DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
|
||||||
this.primaryStore = lifeCycle.initialize(params);
|
this.primaryStore = lifeCycle.initialize(params);
|
||||||
ClusterScope scope = new ClusterScope(clusterId, podId, dcId);
|
ClusterScope scope = new ClusterScope(clusterId, podId, dcId);
|
||||||
lifeCycle.attachCluster(this.primaryStore, scope);
|
lifeCycle.attachCluster(this.primaryStore, scope);
|
||||||
@ -297,8 +297,8 @@ public class volumeServiceTest extends CloudStackTestNGBase {
|
|||||||
params.put("uuid", name);
|
params.put("uuid", name);
|
||||||
params.put("protocol", "http");
|
params.put("protocol", "http");
|
||||||
params.put("scope", ScopeType.GLOBAL.toString());
|
params.put("scope", ScopeType.GLOBAL.toString());
|
||||||
params.put("provider", Long.toString(provider.getId()));
|
params.put("providerName", name);
|
||||||
DataStoreLifeCycle lifeCycle = provider.getLifeCycle();
|
DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
|
||||||
DataStore store = lifeCycle.initialize(params);
|
DataStore store = lifeCycle.initialize(params);
|
||||||
return store;
|
return store;
|
||||||
}
|
}
|
||||||
@ -323,9 +323,9 @@ public class volumeServiceTest extends CloudStackTestNGBase {
|
|||||||
params.put("port", "1");
|
params.put("port", "1");
|
||||||
params.put("roles", DataStoreRole.Primary.toString());
|
params.put("roles", DataStoreRole.Primary.toString());
|
||||||
params.put("uuid", UUID.nameUUIDFromBytes(this.getPrimaryStorageUrl().getBytes()).toString());
|
params.put("uuid", UUID.nameUUIDFromBytes(this.getPrimaryStorageUrl().getBytes()).toString());
|
||||||
params.put("providerId", String.valueOf(provider.getId()));
|
params.put("providerName", String.valueOf(provider.getName()));
|
||||||
|
|
||||||
DataStoreLifeCycle lifeCycle = provider.getLifeCycle();
|
DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
|
||||||
DataStore store = lifeCycle.initialize(params);
|
DataStore store = lifeCycle.initialize(params);
|
||||||
ClusterScope scope = new ClusterScope(clusterId, podId, dcId);
|
ClusterScope scope = new ClusterScope(clusterId, podId, dcId);
|
||||||
lifeCycle.attachCluster(store, scope);
|
lifeCycle.attachCluster(store, scope);
|
||||||
|
|||||||
@ -242,16 +242,16 @@ public class PrimaryDataStoreEntityImpl implements StorageEntity {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public Long getStorageProviderId() {
|
|
||||||
// TODO Auto-generated method stub
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean isInMaintenance() {
|
public boolean isInMaintenance() {
|
||||||
// TODO Auto-generated method stub
|
// TODO Auto-generated method stub
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getStorageProviderName() {
|
||||||
|
// TODO Auto-generated method stub
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@ -26,6 +26,6 @@ public interface PrimaryDataStoreProviderManager {
|
|||||||
public PrimaryDataStore getPrimaryDataStore(long dataStoreId);
|
public PrimaryDataStore getPrimaryDataStore(long dataStoreId);
|
||||||
public PrimaryDataStore getPrimaryDataStore(String uuid);
|
public PrimaryDataStore getPrimaryDataStore(String uuid);
|
||||||
|
|
||||||
boolean registerDriver(String uuid, PrimaryDataStoreDriver driver);
|
boolean registerDriver(String providerName, PrimaryDataStoreDriver driver);
|
||||||
boolean registerHostListener(String uuid, HypervisorHostListener listener);
|
boolean registerHostListener(String providerName, HypervisorHostListener listener);
|
||||||
}
|
}
|
||||||
|
|||||||
@ -18,21 +18,28 @@
|
|||||||
*/
|
*/
|
||||||
package org.apache.cloudstack.storage.datastore.provider;
|
package org.apache.cloudstack.storage.datastore.provider;
|
||||||
|
|
||||||
|
import java.util.ArrayList;
|
||||||
import java.util.HashMap;
|
import java.util.HashMap;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
import java.util.UUID;
|
import java.util.Set;
|
||||||
|
|
||||||
import javax.inject.Inject;
|
import javax.inject.Inject;
|
||||||
import javax.naming.ConfigurationException;
|
import javax.naming.ConfigurationException;
|
||||||
|
|
||||||
|
import org.apache.cloudstack.api.response.StorageProviderResponse;
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider.DataStoreProviderType;
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataStoreProvider;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
|
||||||
|
import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager;
|
||||||
import org.apache.cloudstack.storage.datastore.db.DataStoreProviderDao;
|
import org.apache.cloudstack.storage.datastore.db.DataStoreProviderDao;
|
||||||
import org.apache.cloudstack.storage.datastore.db.DataStoreProviderVO;
|
|
||||||
import org.apache.log4j.Logger;
|
import org.apache.log4j.Logger;
|
||||||
import org.springframework.stereotype.Component;
|
import org.springframework.stereotype.Component;
|
||||||
|
|
||||||
|
import com.cloud.exception.InvalidParameterValueException;
|
||||||
import com.cloud.utils.component.ManagerBase;
|
import com.cloud.utils.component.ManagerBase;
|
||||||
|
|
||||||
@Component
|
@Component
|
||||||
@ -44,15 +51,11 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto
|
|||||||
@Inject
|
@Inject
|
||||||
DataStoreProviderDao providerDao;
|
DataStoreProviderDao providerDao;
|
||||||
protected Map<String, DataStoreProvider> providerMap = new HashMap<String, DataStoreProvider>();
|
protected Map<String, DataStoreProvider> providerMap = new HashMap<String, DataStoreProvider>();
|
||||||
@Override
|
@Inject
|
||||||
public DataStoreProvider getDataStoreProviderByUuid(String uuid) {
|
PrimaryDataStoreProviderManager primaryDataStoreProviderMgr;
|
||||||
return providerMap.get(uuid);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public DataStoreProvider getDataStoreProvider(String name) {
|
public DataStoreProvider getDataStoreProvider(String name) {
|
||||||
DataStoreProviderVO dspv = providerDao.findByName(name);
|
return providerMap.get(name);
|
||||||
return providerMap.get(dspv.getUuid());
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
@ -61,58 +64,85 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto
|
|||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public List<StorageProviderResponse> getPrimayrDataStoreProviders() {
|
||||||
|
List<StorageProviderResponse> providers = new ArrayList<StorageProviderResponse>();
|
||||||
|
for (DataStoreProvider provider : providerMap.values()) {
|
||||||
|
if (provider instanceof PrimaryDataStoreProvider) {
|
||||||
|
StorageProviderResponse response = new StorageProviderResponse();
|
||||||
|
response.setName(provider.getName());
|
||||||
|
response.setType(DataStoreProvider.DataStoreProviderType.PRIMARY.toString());
|
||||||
|
providers.add(response);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return providers;
|
||||||
|
}
|
||||||
|
|
||||||
|
public List<StorageProviderResponse> getImageDataStoreProviders() {
|
||||||
|
List<StorageProviderResponse> providers = new ArrayList<StorageProviderResponse>();
|
||||||
|
for (DataStoreProvider provider : providerMap.values()) {
|
||||||
|
if (provider instanceof ImageDataStoreProvider) {
|
||||||
|
StorageProviderResponse response = new StorageProviderResponse();
|
||||||
|
response.setName(provider.getName());
|
||||||
|
response.setType(DataStoreProvider.DataStoreProviderType.IMAGE.toString());
|
||||||
|
providers.add(response);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return providers;
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean configure(String name, Map<String, Object> params)
|
public boolean configure(String name, Map<String, Object> params)
|
||||||
throws ConfigurationException {
|
throws ConfigurationException {
|
||||||
Map<String, Object> copyParams = new HashMap<String, Object>(params);
|
Map<String, Object> copyParams = new HashMap<String, Object>(params);
|
||||||
|
|
||||||
//TODO: hold global lock
|
|
||||||
List<DataStoreProviderVO> providerVos = providerDao.listAll();
|
|
||||||
for (DataStoreProvider provider : providers) {
|
for (DataStoreProvider provider : providers) {
|
||||||
boolean existingProvider = false;
|
String providerName = provider.getName();
|
||||||
DataStoreProviderVO providerVO = null;
|
if (providerMap.get(providerName) != null) {
|
||||||
for (DataStoreProviderVO prov : providerVos) {
|
s_logger.debug("Failed to register data store provider, provider name: " + providerName + " is not unique");
|
||||||
if (prov.getName().equalsIgnoreCase(provider.getName())) {
|
return false;
|
||||||
existingProvider = true;
|
|
||||||
providerVO = prov;
|
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
String uuid = null;
|
s_logger.debug("registering data store provider:" + provider.getName());
|
||||||
if (!existingProvider) {
|
|
||||||
uuid = UUID.nameUUIDFromBytes(provider.getName().getBytes()).toString();
|
providerMap.put(providerName, provider);
|
||||||
providerVO = new DataStoreProviderVO();
|
|
||||||
providerVO.setName(provider.getName());
|
|
||||||
providerVO.setUuid(uuid);
|
|
||||||
providerVO = providerDao.persist(providerVO);
|
|
||||||
} else {
|
|
||||||
uuid = providerVO.getUuid();
|
|
||||||
}
|
|
||||||
copyParams.put("uuid", uuid);
|
|
||||||
copyParams.put("id", providerVO.getId());
|
|
||||||
providerMap.put(uuid, provider);
|
|
||||||
try {
|
try {
|
||||||
boolean registrationResult = provider.configure(copyParams);
|
boolean registrationResult = provider.configure(copyParams);
|
||||||
if (!registrationResult) {
|
if (!registrationResult) {
|
||||||
providerMap.remove(uuid);
|
providerMap.remove(providerName);
|
||||||
|
s_logger.debug("Failed to register data store provider: " + providerName);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
Set<DataStoreProviderType> types = provider.getTypes();
|
||||||
|
if (types.contains(DataStoreProviderType.PRIMARY)) {
|
||||||
|
primaryDataStoreProviderMgr.registerDriver(provider.getName(), (PrimaryDataStoreDriver)provider.getDataStoreDriver());
|
||||||
|
primaryDataStoreProviderMgr.registerHostListener(provider.getName(), provider.getHostListener());
|
||||||
}
|
}
|
||||||
} catch(Exception e) {
|
} catch(Exception e) {
|
||||||
s_logger.debug("configure provider failed", e);
|
s_logger.debug("configure provider failed", e);
|
||||||
providerMap.remove(uuid);
|
providerMap.remove(providerName);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public DataStoreProvider getDataStoreProviderById(long id) {
|
|
||||||
DataStoreProviderVO provider = providerDao.findById(id);
|
|
||||||
return providerMap.get(provider.getUuid());
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public DataStoreProvider getDefaultPrimaryDataStoreProvider() {
|
public DataStoreProvider getDefaultPrimaryDataStoreProvider() {
|
||||||
return this.getDataStoreProvider("ancient primary data store provider");
|
return this.getDataStoreProvider("ancient primary data store provider");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<StorageProviderResponse> getDataStoreProviders(String type) {
|
||||||
|
if (type == null) {
|
||||||
|
throw new InvalidParameterValueException("Invalid parameter, need to specify type: either primary or image");
|
||||||
|
}
|
||||||
|
if (type.equalsIgnoreCase(DataStoreProvider.DataStoreProviderType.PRIMARY.toString())) {
|
||||||
|
return this.getPrimayrDataStoreProviders();
|
||||||
|
} else if (type.equalsIgnoreCase(DataStoreProvider.DataStoreProviderType.IMAGE.toString())) {
|
||||||
|
return this.getImageDataStoreProviders();
|
||||||
|
} else {
|
||||||
|
throw new InvalidParameterValueException("Invalid parameter: " + type);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -1,23 +0,0 @@
|
|||||||
// Licensed to the Apache Software Foundation (ASF) under one
|
|
||||||
// or more contributor license agreements. See the NOTICE file
|
|
||||||
// distributed with this work for additional information
|
|
||||||
// regarding copyright ownership. The ASF licenses this file
|
|
||||||
// to you under the Apache License, Version 2.0 (the
|
|
||||||
// "License"); you may not use this file except in compliance
|
|
||||||
// with the License. You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing,
|
|
||||||
// software distributed under the License is distributed on an
|
|
||||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
||||||
// KIND, either express or implied. See the License for the
|
|
||||||
// specific language governing permissions and limitations
|
|
||||||
// under the License.
|
|
||||||
package org.apache.cloudstack.storage.datastore.provider;
|
|
||||||
|
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
|
|
||||||
|
|
||||||
|
|
||||||
public interface PrimaryDataStoreProvider extends DataStoreProvider {
|
|
||||||
}
|
|
||||||
@ -34,14 +34,14 @@ public class ImageDataStoreHelper {
|
|||||||
@Inject
|
@Inject
|
||||||
ImageDataStoreDao imageStoreDao;
|
ImageDataStoreDao imageStoreDao;
|
||||||
public ImageDataStoreVO createImageDataStore(Map<String, Object> params) {
|
public ImageDataStoreVO createImageDataStore(Map<String, Object> params) {
|
||||||
ImageDataStoreVO store = imageStoreDao.findByUuid((String)params.get("uuid"));
|
ImageDataStoreVO store = imageStoreDao.findByName((String)params.get("name"));
|
||||||
if (store != null) {
|
if (store != null) {
|
||||||
return store;
|
return store;
|
||||||
}
|
}
|
||||||
store = new ImageDataStoreVO();
|
store = new ImageDataStoreVO();
|
||||||
store.setName((String)params.get("name"));
|
store.setName((String)params.get("name"));
|
||||||
store.setProtocol((String)params.get("protocol"));
|
store.setProtocol((String)params.get("protocol"));
|
||||||
store.setProvider((Long)params.get("provider"));
|
store.setProviderName((String)params.get("providerName"));
|
||||||
store.setScope((ScopeType)params.get("scope"));
|
store.setScope((ScopeType)params.get("scope"));
|
||||||
store.setUuid((String)params.get("uuid"));
|
store.setUuid((String)params.get("uuid"));
|
||||||
store = imageStoreDao.persist(store);
|
store = imageStoreDao.persist(store);
|
||||||
|
|||||||
@ -45,8 +45,8 @@ public class ImageDataStoreVO {
|
|||||||
@Column(name = "protocol", nullable = false)
|
@Column(name = "protocol", nullable = false)
|
||||||
private String protocol;
|
private String protocol;
|
||||||
|
|
||||||
@Column(name = "image_provider_id", nullable = false)
|
@Column(name = "image_provider_name", nullable = false)
|
||||||
private long provider;
|
private String providerName;
|
||||||
|
|
||||||
@Column(name = "data_center_id")
|
@Column(name = "data_center_id")
|
||||||
private long dcId;
|
private long dcId;
|
||||||
@ -64,16 +64,16 @@ public class ImageDataStoreVO {
|
|||||||
return this.name;
|
return this.name;
|
||||||
}
|
}
|
||||||
|
|
||||||
public long getProvider() {
|
public String getProviderName() {
|
||||||
return this.provider;
|
return this.providerName;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setName(String name) {
|
public void setName(String name) {
|
||||||
this.name = name;
|
this.name = name;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setProvider(long provider) {
|
public void setProviderName(String provider) {
|
||||||
this.provider = provider;
|
this.providerName = provider;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setProtocol(String protocol) {
|
public void setProtocol(String protocol) {
|
||||||
|
|||||||
@ -18,57 +18,181 @@
|
|||||||
*/
|
*/
|
||||||
package org.apache.cloudstack.storage.volume.datastore;
|
package org.apache.cloudstack.storage.volume.datastore;
|
||||||
|
|
||||||
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
|
||||||
import javax.inject.Inject;
|
import javax.inject.Inject;
|
||||||
|
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
|
||||||
import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd;
|
import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd;
|
||||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||||
|
import org.apache.log4j.Logger;
|
||||||
import org.springframework.stereotype.Component;
|
import org.springframework.stereotype.Component;
|
||||||
|
|
||||||
import com.cloud.storage.Storage.StoragePoolType;
|
import com.cloud.agent.api.Answer;
|
||||||
|
import com.cloud.agent.api.DeleteStoragePoolCommand;
|
||||||
|
import com.cloud.agent.api.StoragePoolInfo;
|
||||||
|
import com.cloud.alert.AlertManager;
|
||||||
|
import com.cloud.capacity.Capacity;
|
||||||
|
import com.cloud.capacity.CapacityVO;
|
||||||
|
import com.cloud.capacity.dao.CapacityDao;
|
||||||
|
import com.cloud.storage.StorageManager;
|
||||||
|
import com.cloud.storage.StoragePool;
|
||||||
|
import com.cloud.storage.StoragePoolHostVO;
|
||||||
|
import com.cloud.storage.StoragePoolStatus;
|
||||||
|
import com.cloud.storage.dao.StoragePoolHostDao;
|
||||||
|
import com.cloud.utils.db.DB;
|
||||||
|
import com.cloud.utils.db.Transaction;
|
||||||
import com.cloud.utils.exception.CloudRuntimeException;
|
import com.cloud.utils.exception.CloudRuntimeException;
|
||||||
|
|
||||||
@Component
|
@Component
|
||||||
public class PrimaryDataStoreHelper {
|
public class PrimaryDataStoreHelper {
|
||||||
|
private static final Logger s_logger = Logger
|
||||||
|
.getLogger(PrimaryDataStoreHelper.class);
|
||||||
@Inject
|
@Inject
|
||||||
private PrimaryDataStoreDao dataStoreDao;
|
private PrimaryDataStoreDao dataStoreDao;
|
||||||
public StoragePoolVO createPrimaryDataStore(Map<String, Object> params) {
|
@Inject
|
||||||
StoragePoolVO dataStoreVO = dataStoreDao.findPoolByUUID((String)params.get("uuid"));
|
DataStoreManager dataStoreMgr;
|
||||||
|
@Inject
|
||||||
|
StorageManager storageMgr;
|
||||||
|
@Inject
|
||||||
|
protected CapacityDao _capacityDao;
|
||||||
|
@Inject
|
||||||
|
protected StoragePoolHostDao storagePoolHostDao;
|
||||||
|
public DataStore createPrimaryDataStore(PrimaryDataStoreParameters params) {
|
||||||
|
StoragePoolVO dataStoreVO = dataStoreDao.findPoolByUUID(params.getUuid());
|
||||||
if (dataStoreVO != null) {
|
if (dataStoreVO != null) {
|
||||||
throw new CloudRuntimeException("duplicate uuid: " + params.get("uuid"));
|
throw new CloudRuntimeException("duplicate uuid: " + params.getUuid());
|
||||||
}
|
}
|
||||||
|
|
||||||
dataStoreVO = new StoragePoolVO();
|
dataStoreVO = new StoragePoolVO();
|
||||||
dataStoreVO.setStorageProviderId(Long.parseLong((String)params.get("providerId")));
|
dataStoreVO.setStorageProviderName(params.getProviderName());
|
||||||
dataStoreVO.setHostAddress((String)params.get("server"));
|
dataStoreVO.setHostAddress(params.getHost());
|
||||||
dataStoreVO.setPath((String)params.get("path"));
|
dataStoreVO.setPath(params.getPath());
|
||||||
dataStoreVO.setPoolType((StoragePoolType)params.get("protocol"));
|
dataStoreVO.setPoolType(params.getType());
|
||||||
dataStoreVO.setPort(Integer.parseInt((String)params.get("port")));
|
dataStoreVO.setPort(params.getPort());
|
||||||
dataStoreVO.setName((String)params.get("name"));
|
dataStoreVO.setName(params.getName());
|
||||||
dataStoreVO.setUuid((String)params.get("uuid"));
|
dataStoreVO.setUuid(params.getUuid());
|
||||||
dataStoreVO = dataStoreDao.persist(dataStoreVO);
|
dataStoreVO.setDataCenterId(params.getZoneId());
|
||||||
return dataStoreVO;
|
dataStoreVO.setPodId(params.getPodId());
|
||||||
|
dataStoreVO.setClusterId(params.getClusterId());
|
||||||
|
dataStoreVO.setStatus(StoragePoolStatus.Initialized);
|
||||||
|
dataStoreVO.setUserInfo(params.getUserInfo());
|
||||||
|
|
||||||
|
Map<String, String> details = params.getDetails();
|
||||||
|
String tags = params.getTags();
|
||||||
|
if (tags != null) {
|
||||||
|
String[] tokens = tags.split(",");
|
||||||
|
|
||||||
|
for (String tag : tokens) {
|
||||||
|
tag = tag.trim();
|
||||||
|
if (tag.length() == 0) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
details.put(tag, "true");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public boolean deletePrimaryDataStore(long id) {
|
dataStoreVO = dataStoreDao.persist(dataStoreVO, details);
|
||||||
StoragePoolVO dataStoreVO = dataStoreDao.findById(id);
|
|
||||||
if (dataStoreVO == null) {
|
return dataStoreMgr.getDataStore(dataStoreVO.getId(), DataStoreRole.Primary);
|
||||||
throw new CloudRuntimeException("can't find store: " + id);
|
|
||||||
}
|
}
|
||||||
dataStoreDao.remove(id);
|
|
||||||
|
public DataStore attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
|
||||||
|
StoragePoolHostVO poolHost = storagePoolHostDao.findByPoolHost(store.getId(), scope.getScopeId());
|
||||||
|
if (poolHost == null) {
|
||||||
|
poolHost = new StoragePoolHostVO(store.getId(), scope.getScopeId(), existingInfo.getLocalPath());
|
||||||
|
storagePoolHostDao.persist(poolHost);
|
||||||
|
}
|
||||||
|
|
||||||
|
StoragePoolVO pool = this.dataStoreDao.findById(store.getId());
|
||||||
|
pool.setScope(scope.getScopeType());
|
||||||
|
pool.setAvailableBytes(existingInfo.getAvailableBytes());
|
||||||
|
pool.setCapacityBytes(existingInfo.getCapacityBytes());
|
||||||
|
pool.setStatus(StoragePoolStatus.Up);
|
||||||
|
this.dataStoreDao.update(pool.getId(), pool);
|
||||||
|
this.storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, pool.getCapacityBytes() - pool.getAvailableBytes());
|
||||||
|
return dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
|
||||||
|
}
|
||||||
|
|
||||||
|
public DataStore attachCluster(DataStore store) {
|
||||||
|
StoragePoolVO pool = this.dataStoreDao.findById(store.getId());
|
||||||
|
|
||||||
|
storageMgr.createCapacityEntry(pool.getId());
|
||||||
|
|
||||||
|
pool.setScope(ScopeType.CLUSTER);
|
||||||
|
pool.setStatus(StoragePoolStatus.Up);
|
||||||
|
this.dataStoreDao.update(pool.getId(), pool);
|
||||||
|
return dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary);
|
||||||
|
}
|
||||||
|
|
||||||
|
public DataStore attachZone(DataStore store) {
|
||||||
|
StoragePoolVO pool = this.dataStoreDao.findById(store.getId());
|
||||||
|
pool.setScope(ScopeType.ZONE);
|
||||||
|
pool.setStatus(StoragePoolStatus.Up);
|
||||||
|
this.dataStoreDao.update(pool.getId(), pool);
|
||||||
|
return dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary);
|
||||||
|
}
|
||||||
|
|
||||||
|
public boolean maintain(DataStore store) {
|
||||||
|
StoragePoolVO pool = this.dataStoreDao.findById(store.getId());
|
||||||
|
pool.setStatus(StoragePoolStatus.Maintenance);
|
||||||
|
this.dataStoreDao.update(pool.getId(), pool);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void attachCluster(DataStore dataStore) {
|
public boolean cancelMaintain(DataStore store) {
|
||||||
//send down AttachPrimaryDataStoreCmd command to all the hosts in the cluster
|
StoragePoolVO pool = this.dataStoreDao.findById(store.getId());
|
||||||
AttachPrimaryDataStoreCmd cmd = new AttachPrimaryDataStoreCmd(dataStore.getUri());
|
pool.setStatus(StoragePoolStatus.Up);
|
||||||
/*for (EndPoint ep : dataStore.getEndPoints()) {
|
dataStoreDao.update(store.getId(), pool);
|
||||||
ep.sendMessage(cmd);
|
return true;
|
||||||
} */
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
protected boolean deletePoolStats(Long poolId) {
|
||||||
|
CapacityVO capacity1 = _capacityDao.findByHostIdType(poolId,
|
||||||
|
CapacityVO.CAPACITY_TYPE_STORAGE);
|
||||||
|
CapacityVO capacity2 = _capacityDao.findByHostIdType(poolId,
|
||||||
|
CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED);
|
||||||
|
if (capacity1 != null) {
|
||||||
|
_capacityDao.remove(capacity1.getId());
|
||||||
|
}
|
||||||
|
|
||||||
|
if (capacity2 != null) {
|
||||||
|
_capacityDao.remove(capacity2.getId());
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
public boolean deletePrimaryDataStore(DataStore store) {
|
||||||
|
List<StoragePoolHostVO> hostPoolRecords = this.storagePoolHostDao
|
||||||
|
.listByPoolId(store.getId());
|
||||||
|
StoragePoolVO poolVO = this.dataStoreDao.findById(store.getId());
|
||||||
|
Transaction txn = Transaction.currentTxn();
|
||||||
|
txn.start();
|
||||||
|
for (StoragePoolHostVO host : hostPoolRecords) {
|
||||||
|
storagePoolHostDao.deleteStoragePoolHostDetails(
|
||||||
|
host.getHostId(), host.getPoolId());
|
||||||
|
}
|
||||||
|
poolVO.setUuid(null);
|
||||||
|
this.dataStoreDao.update(poolVO.getId(), poolVO);
|
||||||
|
dataStoreDao.remove(poolVO.getId());
|
||||||
|
deletePoolStats(poolVO.getId());
|
||||||
|
// Delete op_host_capacity entries
|
||||||
|
this._capacityDao.removeBy(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED,
|
||||||
|
null, null, null, poolVO.getId());
|
||||||
|
txn.commit();
|
||||||
|
|
||||||
|
s_logger.debug("Storage pool id=" + poolVO.getId()
|
||||||
|
+ " is removed successfully");
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@ -331,13 +331,13 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore {
|
|||||||
return this.pdsv.getPodId();
|
return this.pdsv.getPodId();
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public Long getStorageProviderId() {
|
|
||||||
return this.pdsv.getStorageProviderId();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean isInMaintenance() {
|
public boolean isInMaintenance() {
|
||||||
return this.getStatus() == StoragePoolStatus.Maintenance ? true : false;
|
return this.getStatus() == StoragePoolStatus.Maintenance ? true : false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getStorageProviderName() {
|
||||||
|
return this.pdsv.getStorageProviderName();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -1,963 +0,0 @@
|
|||||||
/*
|
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one
|
|
||||||
* or more contributor license agreements. See the NOTICE file
|
|
||||||
* distributed with this work for additional information
|
|
||||||
* regarding copyright ownership. The ASF licenses this file
|
|
||||||
* to you under the Apache License, Version 2.0 (the
|
|
||||||
* "License"); you may not use this file except in compliance
|
|
||||||
* with the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing,
|
|
||||||
* software distributed under the License is distributed on an
|
|
||||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
||||||
* KIND, either express or implied. See the License for the
|
|
||||||
* specific language governing permissions and limitations
|
|
||||||
* under the License.
|
|
||||||
*/
|
|
||||||
package org.apache.cloudstack.storage.datastore.lifecycle;
|
|
||||||
|
|
||||||
import java.net.URI;
|
|
||||||
import java.net.URISyntaxException;
|
|
||||||
import java.util.ArrayList;
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.Map;
|
|
||||||
import java.util.UUID;
|
|
||||||
|
|
||||||
import javax.inject.Inject;
|
|
||||||
|
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
|
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
|
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreStatus;
|
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
|
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
|
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
|
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
|
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
|
|
||||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
|
||||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
|
||||||
import org.apache.log4j.Logger;
|
|
||||||
|
|
||||||
import com.cloud.agent.AgentManager;
|
|
||||||
import com.cloud.agent.api.Answer;
|
|
||||||
import com.cloud.agent.api.CreateStoragePoolCommand;
|
|
||||||
import com.cloud.agent.api.DeleteStoragePoolCommand;
|
|
||||||
import com.cloud.agent.api.ModifyStoragePoolCommand;
|
|
||||||
import com.cloud.agent.api.StoragePoolInfo;
|
|
||||||
import com.cloud.alert.AlertManager;
|
|
||||||
import com.cloud.capacity.Capacity;
|
|
||||||
import com.cloud.capacity.CapacityVO;
|
|
||||||
import com.cloud.capacity.dao.CapacityDao;
|
|
||||||
import com.cloud.exception.DiscoveryException;
|
|
||||||
import com.cloud.exception.InvalidParameterValueException;
|
|
||||||
import com.cloud.host.Host;
|
|
||||||
import com.cloud.host.HostVO;
|
|
||||||
import com.cloud.host.Status;
|
|
||||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
|
||||||
import com.cloud.resource.ResourceManager;
|
|
||||||
import com.cloud.server.ManagementServer;
|
|
||||||
import com.cloud.storage.OCFS2Manager;
|
|
||||||
import com.cloud.storage.Storage.StoragePoolType;
|
|
||||||
import com.cloud.storage.StorageManager;
|
|
||||||
import com.cloud.storage.StoragePool;
|
|
||||||
import com.cloud.storage.StoragePoolDiscoverer;
|
|
||||||
import com.cloud.storage.StoragePoolHostVO;
|
|
||||||
import com.cloud.storage.StoragePoolStatus;
|
|
||||||
import com.cloud.storage.StoragePoolWorkVO;
|
|
||||||
import com.cloud.storage.VolumeVO;
|
|
||||||
import com.cloud.storage.dao.StoragePoolHostDao;
|
|
||||||
import com.cloud.storage.dao.StoragePoolWorkDao;
|
|
||||||
import com.cloud.storage.dao.VolumeDao;
|
|
||||||
import com.cloud.user.Account;
|
|
||||||
import com.cloud.user.User;
|
|
||||||
import com.cloud.user.UserContext;
|
|
||||||
import com.cloud.user.dao.UserDao;
|
|
||||||
import com.cloud.utils.NumbersUtil;
|
|
||||||
import com.cloud.utils.UriUtils;
|
|
||||||
import com.cloud.utils.db.DB;
|
|
||||||
import com.cloud.utils.db.Transaction;
|
|
||||||
import com.cloud.utils.exception.CloudRuntimeException;
|
|
||||||
import com.cloud.utils.exception.ExecutionException;
|
|
||||||
import com.cloud.vm.ConsoleProxyVO;
|
|
||||||
import com.cloud.vm.DomainRouterVO;
|
|
||||||
import com.cloud.vm.SecondaryStorageVmVO;
|
|
||||||
import com.cloud.vm.UserVmVO;
|
|
||||||
import com.cloud.vm.VMInstanceVO;
|
|
||||||
import com.cloud.vm.VirtualMachine;
|
|
||||||
import com.cloud.vm.VirtualMachine.State;
|
|
||||||
import com.cloud.vm.VirtualMachineManager;
|
|
||||||
import com.cloud.vm.dao.ConsoleProxyDao;
|
|
||||||
import com.cloud.vm.dao.DomainRouterDao;
|
|
||||||
import com.cloud.vm.dao.SecondaryStorageVmDao;
|
|
||||||
import com.cloud.vm.dao.UserVmDao;
|
|
||||||
import com.cloud.vm.dao.VMInstanceDao;
|
|
||||||
|
|
||||||
public class AncientPrimaryDataStoreLifeCycleImpl implements
|
|
||||||
PrimaryDataStoreLifeCycle {
|
|
||||||
private static final Logger s_logger = Logger
|
|
||||||
.getLogger(AncientPrimaryDataStoreLifeCycleImpl.class);
|
|
||||||
@Inject
|
|
||||||
protected ResourceManager _resourceMgr;
|
|
||||||
protected List<StoragePoolDiscoverer> _discoverers;
|
|
||||||
@Inject
|
|
||||||
PrimaryDataStoreDao primaryDataStoreDao;
|
|
||||||
@Inject
|
|
||||||
protected OCFS2Manager _ocfs2Mgr;
|
|
||||||
@Inject
|
|
||||||
DataStoreManager dataStoreMgr;
|
|
||||||
@Inject
|
|
||||||
AgentManager agentMgr;
|
|
||||||
@Inject
|
|
||||||
StorageManager storageMgr;
|
|
||||||
@Inject
|
|
||||||
protected CapacityDao _capacityDao;
|
|
||||||
|
|
||||||
@Inject
|
|
||||||
VolumeDao volumeDao;
|
|
||||||
@Inject
|
|
||||||
VMInstanceDao vmDao;
|
|
||||||
@Inject
|
|
||||||
ManagementServer server;
|
|
||||||
@Inject
|
|
||||||
protected VirtualMachineManager vmMgr;
|
|
||||||
@Inject
|
|
||||||
protected SecondaryStorageVmDao _secStrgDao;
|
|
||||||
@Inject
|
|
||||||
UserVmDao userVmDao;
|
|
||||||
@Inject
|
|
||||||
protected UserDao _userDao;
|
|
||||||
@Inject
|
|
||||||
protected DomainRouterDao _domrDao;
|
|
||||||
@Inject
|
|
||||||
protected StoragePoolHostDao _storagePoolHostDao;
|
|
||||||
@Inject
|
|
||||||
protected AlertManager _alertMgr;
|
|
||||||
@Inject
|
|
||||||
protected ConsoleProxyDao _consoleProxyDao;
|
|
||||||
|
|
||||||
@Inject
|
|
||||||
protected StoragePoolWorkDao _storagePoolWorkDao;
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public DataStore initialize(Map<String, Object> dsInfos) {
|
|
||||||
Long clusterId = (Long) dsInfos.get("clusterId");
|
|
||||||
Long podId = (Long) dsInfos.get("podId");
|
|
||||||
Long zoneId = (Long) dsInfos.get("zoneId");
|
|
||||||
String url = (String) dsInfos.get("url");
|
|
||||||
Long providerId = (Long)dsInfos.get("providerId");
|
|
||||||
if (clusterId != null && podId == null) {
|
|
||||||
throw new InvalidParameterValueException(
|
|
||||||
"Cluster id requires pod id");
|
|
||||||
}
|
|
||||||
|
|
||||||
URI uri = null;
|
|
||||||
try {
|
|
||||||
uri = new URI(UriUtils.encodeURIComponent(url));
|
|
||||||
if (uri.getScheme() == null) {
|
|
||||||
throw new InvalidParameterValueException("scheme is null "
|
|
||||||
+ url + ", add nfs:// as a prefix");
|
|
||||||
} else if (uri.getScheme().equalsIgnoreCase("nfs")) {
|
|
||||||
String uriHost = uri.getHost();
|
|
||||||
String uriPath = uri.getPath();
|
|
||||||
if (uriHost == null || uriPath == null
|
|
||||||
|| uriHost.trim().isEmpty() || uriPath.trim().isEmpty()) {
|
|
||||||
throw new InvalidParameterValueException(
|
|
||||||
"host or path is null, should be nfs://hostname/path");
|
|
||||||
}
|
|
||||||
} else if (uri.getScheme().equalsIgnoreCase("sharedMountPoint")) {
|
|
||||||
String uriPath = uri.getPath();
|
|
||||||
if (uriPath == null) {
|
|
||||||
throw new InvalidParameterValueException(
|
|
||||||
"host or path is null, should be sharedmountpoint://localhost/path");
|
|
||||||
}
|
|
||||||
} else if (uri.getScheme().equalsIgnoreCase("rbd")) {
|
|
||||||
String uriPath = uri.getPath();
|
|
||||||
if (uriPath == null) {
|
|
||||||
throw new InvalidParameterValueException(
|
|
||||||
"host or path is null, should be rbd://hostname/pool");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} catch (URISyntaxException e) {
|
|
||||||
throw new InvalidParameterValueException(url
|
|
||||||
+ " is not a valid uri");
|
|
||||||
}
|
|
||||||
|
|
||||||
String tags = (String) dsInfos.get("tags");
|
|
||||||
Map<String, String> details = (Map<String, String>) dsInfos
|
|
||||||
.get("details");
|
|
||||||
if (tags != null) {
|
|
||||||
String[] tokens = tags.split(",");
|
|
||||||
|
|
||||||
for (String tag : tokens) {
|
|
||||||
tag = tag.trim();
|
|
||||||
if (tag.length() == 0) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
details.put(tag, "true");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
String scheme = uri.getScheme();
|
|
||||||
String storageHost = uri.getHost();
|
|
||||||
String hostPath = uri.getPath();
|
|
||||||
Object localStorage = dsInfos.get("localStorage");
|
|
||||||
if (localStorage != null) {
|
|
||||||
hostPath = hostPath.replace("/", "");
|
|
||||||
}
|
|
||||||
String userInfo = uri.getUserInfo();
|
|
||||||
int port = uri.getPort();
|
|
||||||
StoragePoolVO pool = null;
|
|
||||||
if (s_logger.isDebugEnabled()) {
|
|
||||||
s_logger.debug("createPool Params @ scheme - " + scheme
|
|
||||||
+ " storageHost - " + storageHost + " hostPath - "
|
|
||||||
+ hostPath + " port - " + port);
|
|
||||||
}
|
|
||||||
if (scheme.equalsIgnoreCase("nfs")) {
|
|
||||||
if (port == -1) {
|
|
||||||
port = 2049;
|
|
||||||
}
|
|
||||||
pool = new StoragePoolVO(StoragePoolType.NetworkFilesystem,
|
|
||||||
storageHost, port, hostPath);
|
|
||||||
} else if (scheme.equalsIgnoreCase("file")) {
|
|
||||||
if (port == -1) {
|
|
||||||
port = 0;
|
|
||||||
}
|
|
||||||
pool = new StoragePoolVO(StoragePoolType.Filesystem,
|
|
||||||
"localhost", 0, hostPath);
|
|
||||||
} else if (scheme.equalsIgnoreCase("sharedMountPoint")) {
|
|
||||||
pool = new StoragePoolVO(StoragePoolType.SharedMountPoint,
|
|
||||||
storageHost, 0, hostPath);
|
|
||||||
} else if (scheme.equalsIgnoreCase("clvm")) {
|
|
||||||
pool = new StoragePoolVO(StoragePoolType.CLVM, storageHost, 0,
|
|
||||||
hostPath.replaceFirst("/", ""));
|
|
||||||
} else if (scheme.equalsIgnoreCase("rbd")) {
|
|
||||||
if (port == -1) {
|
|
||||||
port = 6789;
|
|
||||||
}
|
|
||||||
pool = new StoragePoolVO(StoragePoolType.RBD, storageHost,
|
|
||||||
port, hostPath.replaceFirst("/", ""));
|
|
||||||
pool.setUserInfo(userInfo);
|
|
||||||
} else if (scheme.equalsIgnoreCase("PreSetup")) {
|
|
||||||
pool = new StoragePoolVO(StoragePoolType.PreSetup,
|
|
||||||
storageHost, 0, hostPath);
|
|
||||||
} else if (scheme.equalsIgnoreCase("iscsi")) {
|
|
||||||
String[] tokens = hostPath.split("/");
|
|
||||||
int lun = NumbersUtil.parseInt(tokens[tokens.length - 1], -1);
|
|
||||||
if (port == -1) {
|
|
||||||
port = 3260;
|
|
||||||
}
|
|
||||||
if (lun != -1) {
|
|
||||||
if (clusterId == null) {
|
|
||||||
throw new IllegalArgumentException(
|
|
||||||
"IscsiLUN need to have clusters specified");
|
|
||||||
}
|
|
||||||
hostPath.replaceFirst("/", "");
|
|
||||||
pool = new StoragePoolVO(StoragePoolType.IscsiLUN,
|
|
||||||
storageHost, port, hostPath);
|
|
||||||
} else {
|
|
||||||
for (StoragePoolDiscoverer discoverer : _discoverers) {
|
|
||||||
Map<StoragePoolVO, Map<String, String>> pools;
|
|
||||||
try {
|
|
||||||
pools = discoverer.find(zoneId, podId, uri, details);
|
|
||||||
} catch (DiscoveryException e) {
|
|
||||||
throw new IllegalArgumentException(
|
|
||||||
"Not enough information for discovery " + uri,
|
|
||||||
e);
|
|
||||||
}
|
|
||||||
if (pools != null) {
|
|
||||||
Map.Entry<StoragePoolVO, Map<String, String>> entry = pools
|
|
||||||
.entrySet().iterator().next();
|
|
||||||
pool = entry.getKey();
|
|
||||||
details = entry.getValue();
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if (scheme.equalsIgnoreCase("iso")) {
|
|
||||||
if (port == -1) {
|
|
||||||
port = 2049;
|
|
||||||
}
|
|
||||||
pool = new StoragePoolVO(StoragePoolType.ISO, storageHost,
|
|
||||||
port, hostPath);
|
|
||||||
} else if (scheme.equalsIgnoreCase("vmfs")) {
|
|
||||||
pool = new StoragePoolVO(StoragePoolType.VMFS,
|
|
||||||
"VMFS datastore: " + hostPath, 0, hostPath);
|
|
||||||
} else if (scheme.equalsIgnoreCase("ocfs2")) {
|
|
||||||
port = 7777;
|
|
||||||
pool = new StoragePoolVO(StoragePoolType.OCFS2, "clustered",
|
|
||||||
port, hostPath);
|
|
||||||
} else {
|
|
||||||
StoragePoolType type = Enum.valueOf(StoragePoolType.class, scheme);
|
|
||||||
|
|
||||||
if (type != null) {
|
|
||||||
pool = new StoragePoolVO(type, storageHost,
|
|
||||||
0, hostPath);
|
|
||||||
} else {
|
|
||||||
s_logger.warn("Unable to figure out the scheme for URI: " + uri);
|
|
||||||
throw new IllegalArgumentException(
|
|
||||||
"Unable to figure out the scheme for URI: " + uri);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (pool == null) {
|
|
||||||
s_logger.warn("Unable to figure out the scheme for URI: " + uri);
|
|
||||||
throw new IllegalArgumentException(
|
|
||||||
"Unable to figure out the scheme for URI: " + uri);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (localStorage == null) {
|
|
||||||
List<StoragePoolVO> pools = primaryDataStoreDao
|
|
||||||
.listPoolByHostPath(storageHost, hostPath);
|
|
||||||
if (!pools.isEmpty() && !scheme.equalsIgnoreCase("sharedmountpoint")) {
|
|
||||||
Long oldPodId = pools.get(0).getPodId();
|
|
||||||
throw new CloudRuntimeException("Storage pool " + uri
|
|
||||||
+ " already in use by another pod (id=" + oldPodId + ")");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
long poolId = primaryDataStoreDao.getNextInSequence(Long.class, "id");
|
|
||||||
Object existingUuid = dsInfos.get("uuid");
|
|
||||||
String uuid = null;
|
|
||||||
|
|
||||||
if (existingUuid != null) {
|
|
||||||
uuid = (String)existingUuid;
|
|
||||||
} else if (scheme.equalsIgnoreCase("sharedmountpoint")
|
|
||||||
|| scheme.equalsIgnoreCase("clvm")) {
|
|
||||||
uuid = UUID.randomUUID().toString();
|
|
||||||
} else if (scheme.equalsIgnoreCase("PreSetup")) {
|
|
||||||
uuid = hostPath.replace("/", "");
|
|
||||||
} else {
|
|
||||||
uuid = UUID.nameUUIDFromBytes(
|
|
||||||
new String(storageHost + hostPath).getBytes()).toString();
|
|
||||||
}
|
|
||||||
|
|
||||||
List<StoragePoolVO> spHandles = primaryDataStoreDao
|
|
||||||
.findIfDuplicatePoolsExistByUUID(uuid);
|
|
||||||
if ((spHandles != null) && (spHandles.size() > 0)) {
|
|
||||||
if (s_logger.isDebugEnabled()) {
|
|
||||||
s_logger.debug("Another active pool with the same uuid already exists");
|
|
||||||
}
|
|
||||||
throw new CloudRuntimeException(
|
|
||||||
"Another active pool with the same uuid already exists");
|
|
||||||
}
|
|
||||||
|
|
||||||
String poolName = (String) dsInfos.get("name");
|
|
||||||
if (s_logger.isDebugEnabled()) {
|
|
||||||
s_logger.debug("In createPool Setting poolId - " + poolId
|
|
||||||
+ " uuid - " + uuid + " zoneId - " + zoneId + " podId - "
|
|
||||||
+ podId + " poolName - " + poolName);
|
|
||||||
}
|
|
||||||
|
|
||||||
pool.setId(poolId);
|
|
||||||
pool.setUuid(uuid);
|
|
||||||
pool.setDataCenterId(zoneId);
|
|
||||||
pool.setPodId(podId);
|
|
||||||
pool.setName(poolName);
|
|
||||||
pool.setClusterId(clusterId);
|
|
||||||
pool.setStorageProviderId(providerId);
|
|
||||||
pool.setStatus(StoragePoolStatus.Initialized);
|
|
||||||
pool = primaryDataStoreDao.persist(pool, details);
|
|
||||||
|
|
||||||
return dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
|
|
||||||
}
|
|
||||||
|
|
||||||
protected boolean createStoragePool(long hostId, StoragePool pool) {
|
|
||||||
s_logger.debug("creating pool " + pool.getName() + " on host "
|
|
||||||
+ hostId);
|
|
||||||
if (pool.getPoolType() != StoragePoolType.NetworkFilesystem
|
|
||||||
&& pool.getPoolType() != StoragePoolType.Filesystem
|
|
||||||
&& pool.getPoolType() != StoragePoolType.IscsiLUN
|
|
||||||
&& pool.getPoolType() != StoragePoolType.Iscsi
|
|
||||||
&& pool.getPoolType() != StoragePoolType.VMFS
|
|
||||||
&& pool.getPoolType() != StoragePoolType.SharedMountPoint
|
|
||||||
&& pool.getPoolType() != StoragePoolType.PreSetup
|
|
||||||
&& pool.getPoolType() != StoragePoolType.OCFS2
|
|
||||||
&& pool.getPoolType() != StoragePoolType.RBD
|
|
||||||
&& pool.getPoolType() != StoragePoolType.CLVM) {
|
|
||||||
s_logger.warn(" Doesn't support storage pool type "
|
|
||||||
+ pool.getPoolType());
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool);
|
|
||||||
final Answer answer = agentMgr.easySend(hostId, cmd);
|
|
||||||
if (answer != null && answer.getResult()) {
|
|
||||||
return true;
|
|
||||||
} else {
|
|
||||||
primaryDataStoreDao.expunge(pool.getId());
|
|
||||||
String msg = "";
|
|
||||||
if (answer != null) {
|
|
||||||
msg = "Can not create storage pool through host " + hostId
|
|
||||||
+ " due to " + answer.getDetails();
|
|
||||||
s_logger.warn(msg);
|
|
||||||
} else {
|
|
||||||
msg = "Can not create storage pool through host " + hostId
|
|
||||||
+ " due to CreateStoragePoolCommand returns null";
|
|
||||||
s_logger.warn(msg);
|
|
||||||
}
|
|
||||||
throw new CloudRuntimeException(msg);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public boolean attachCluster(DataStore store, ClusterScope scope) {
|
|
||||||
PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store;
|
|
||||||
// Check if there is host up in this cluster
|
|
||||||
List<HostVO> allHosts = _resourceMgr.listAllUpAndEnabledHosts(
|
|
||||||
Host.Type.Routing, primarystore.getClusterId(),
|
|
||||||
primarystore.getPodId(), primarystore.getDataCenterId());
|
|
||||||
if (allHosts.isEmpty()) {
|
|
||||||
throw new CloudRuntimeException(
|
|
||||||
"No host up to associate a storage pool with in cluster "
|
|
||||||
+ primarystore.getClusterId());
|
|
||||||
}
|
|
||||||
|
|
||||||
if (primarystore.getPoolType() == StoragePoolType.OCFS2
|
|
||||||
&& !_ocfs2Mgr.prepareNodes(allHosts, primarystore)) {
|
|
||||||
s_logger.warn("Can not create storage pool " + primarystore
|
|
||||||
+ " on cluster " + primarystore.getClusterId());
|
|
||||||
primaryDataStoreDao.expunge(primarystore.getId());
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
boolean success = false;
|
|
||||||
for (HostVO h : allHosts) {
|
|
||||||
success = createStoragePool(h.getId(), primarystore);
|
|
||||||
if (success) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
s_logger.debug("In createPool Adding the pool to each of the hosts");
|
|
||||||
List<HostVO> poolHosts = new ArrayList<HostVO>();
|
|
||||||
for (HostVO h : allHosts) {
|
|
||||||
try {
|
|
||||||
this.storageMgr.connectHostToSharedPool(h.getId(),
|
|
||||||
primarystore.getId());
|
|
||||||
poolHosts.add(h);
|
|
||||||
} catch (Exception e) {
|
|
||||||
s_logger.warn("Unable to establish a connection between " + h
|
|
||||||
+ " and " + primarystore, e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (poolHosts.isEmpty()) {
|
|
||||||
s_logger.warn("No host can access storage pool " + primarystore
|
|
||||||
+ " on cluster " + primarystore.getClusterId());
|
|
||||||
primaryDataStoreDao.expunge(primarystore.getId());
|
|
||||||
return false;
|
|
||||||
} else {
|
|
||||||
storageMgr.createCapacityEntry(primarystore.getId());
|
|
||||||
}
|
|
||||||
StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId());
|
|
||||||
pool.setScope(ScopeType.CLUSTER);
|
|
||||||
pool.setStatus(StoragePoolStatus.Up);
|
|
||||||
this.primaryDataStoreDao.update(pool.getId(), pool);
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public boolean attachZone(DataStore dataStore, ZoneScope scope) {
|
|
||||||
List<HostVO> hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId());
|
|
||||||
for (HostVO host : hosts) {
|
|
||||||
try {
|
|
||||||
this.storageMgr.connectHostToSharedPool(host.getId(),
|
|
||||||
dataStore.getId());
|
|
||||||
} catch (Exception e) {
|
|
||||||
s_logger.warn("Unable to establish a connection between " + host
|
|
||||||
+ " and " + dataStore, e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
StoragePoolVO pool = this.primaryDataStoreDao.findById(dataStore.getId());
|
|
||||||
|
|
||||||
pool.setScope(ScopeType.ZONE);
|
|
||||||
pool.setStatus(StoragePoolStatus.Up);
|
|
||||||
this.primaryDataStoreDao.update(pool.getId(), pool);
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public boolean dettach() {
|
|
||||||
// TODO Auto-generated method stub
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public boolean unmanaged() {
|
|
||||||
// TODO Auto-generated method stub
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public boolean maintain(long storeId) {
|
|
||||||
Long userId = UserContext.current().getCallerUserId();
|
|
||||||
User user = _userDao.findById(userId);
|
|
||||||
Account account = UserContext.current().getCaller();
|
|
||||||
StoragePoolVO pool = this.primaryDataStoreDao.findById(storeId);
|
|
||||||
try {
|
|
||||||
StoragePool storagePool = (StoragePool) this.dataStoreMgr
|
|
||||||
.getDataStore(storeId, DataStoreRole.Primary);
|
|
||||||
List<HostVO> hosts = _resourceMgr.listHostsInClusterByStatus(
|
|
||||||
pool.getClusterId(), Status.Up);
|
|
||||||
if (hosts == null || hosts.size() == 0) {
|
|
||||||
pool.setStatus(StoragePoolStatus.Maintenance);
|
|
||||||
primaryDataStoreDao.update(pool.getId(), pool);
|
|
||||||
return true;
|
|
||||||
} else {
|
|
||||||
// set the pool state to prepare for maintenance
|
|
||||||
pool.setStatus(StoragePoolStatus.PrepareForMaintenance);
|
|
||||||
primaryDataStoreDao.update(pool.getId(), pool);
|
|
||||||
}
|
|
||||||
// remove heartbeat
|
|
||||||
for (HostVO host : hosts) {
|
|
||||||
ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(
|
|
||||||
false, storagePool);
|
|
||||||
final Answer answer = agentMgr.easySend(host.getId(), cmd);
|
|
||||||
if (answer == null || !answer.getResult()) {
|
|
||||||
if (s_logger.isDebugEnabled()) {
|
|
||||||
s_logger.debug("ModifyStoragePool false failed due to "
|
|
||||||
+ ((answer == null) ? "answer null" : answer
|
|
||||||
.getDetails()));
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if (s_logger.isDebugEnabled()) {
|
|
||||||
s_logger.debug("ModifyStoragePool false secceeded");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// check to see if other ps exist
|
|
||||||
// if they do, then we can migrate over the system vms to them
|
|
||||||
// if they dont, then just stop all vms on this one
|
|
||||||
List<StoragePoolVO> upPools = primaryDataStoreDao
|
|
||||||
.listByStatusInZone(pool.getDataCenterId(),
|
|
||||||
StoragePoolStatus.Up);
|
|
||||||
boolean restart = true;
|
|
||||||
if (upPools == null || upPools.size() == 0) {
|
|
||||||
restart = false;
|
|
||||||
}
|
|
||||||
|
|
||||||
// 2. Get a list of all the ROOT volumes within this storage pool
|
|
||||||
List<VolumeVO> allVolumes = this.volumeDao.findByPoolId(pool
|
|
||||||
.getId());
|
|
||||||
|
|
||||||
// 3. Enqueue to the work queue
|
|
||||||
for (VolumeVO volume : allVolumes) {
|
|
||||||
VMInstanceVO vmInstance = vmDao
|
|
||||||
.findById(volume.getInstanceId());
|
|
||||||
|
|
||||||
if (vmInstance == null) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
// enqueue sp work
|
|
||||||
if (vmInstance.getState().equals(State.Running)
|
|
||||||
|| vmInstance.getState().equals(State.Starting)
|
|
||||||
|| vmInstance.getState().equals(State.Stopping)) {
|
|
||||||
|
|
||||||
try {
|
|
||||||
StoragePoolWorkVO work = new StoragePoolWorkVO(
|
|
||||||
vmInstance.getId(), pool.getId(), false, false,
|
|
||||||
server.getId());
|
|
||||||
_storagePoolWorkDao.persist(work);
|
|
||||||
} catch (Exception e) {
|
|
||||||
if (s_logger.isDebugEnabled()) {
|
|
||||||
s_logger.debug("Work record already exists, re-using by re-setting values");
|
|
||||||
}
|
|
||||||
StoragePoolWorkVO work = _storagePoolWorkDao
|
|
||||||
.findByPoolIdAndVmId(pool.getId(),
|
|
||||||
vmInstance.getId());
|
|
||||||
work.setStartedAfterMaintenance(false);
|
|
||||||
work.setStoppedForMaintenance(false);
|
|
||||||
work.setManagementServerId(server.getId());
|
|
||||||
_storagePoolWorkDao.update(work.getId(), work);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// 4. Process the queue
|
|
||||||
List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao
|
|
||||||
.listPendingWorkForPrepareForMaintenanceByPoolId(pool
|
|
||||||
.getId());
|
|
||||||
|
|
||||||
for (StoragePoolWorkVO work : pendingWork) {
|
|
||||||
// shut down the running vms
|
|
||||||
VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
|
|
||||||
|
|
||||||
if (vmInstance == null) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
// if the instance is of type consoleproxy, call the console
|
|
||||||
// proxy
|
|
||||||
if (vmInstance.getType().equals(
|
|
||||||
VirtualMachine.Type.ConsoleProxy)) {
|
|
||||||
// call the consoleproxymanager
|
|
||||||
ConsoleProxyVO consoleProxy = _consoleProxyDao
|
|
||||||
.findById(vmInstance.getId());
|
|
||||||
if (!vmMgr.advanceStop(consoleProxy, true, user, account)) {
|
|
||||||
String errorMsg = "There was an error stopping the console proxy id: "
|
|
||||||
+ vmInstance.getId()
|
|
||||||
+ " ,cannot enable storage maintenance";
|
|
||||||
s_logger.warn(errorMsg);
|
|
||||||
throw new CloudRuntimeException(errorMsg);
|
|
||||||
} else {
|
|
||||||
// update work status
|
|
||||||
work.setStoppedForMaintenance(true);
|
|
||||||
_storagePoolWorkDao.update(work.getId(), work);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (restart) {
|
|
||||||
|
|
||||||
if (this.vmMgr.advanceStart(consoleProxy, null, user,
|
|
||||||
account) == null) {
|
|
||||||
String errorMsg = "There was an error starting the console proxy id: "
|
|
||||||
+ vmInstance.getId()
|
|
||||||
+ " on another storage pool, cannot enable primary storage maintenance";
|
|
||||||
s_logger.warn(errorMsg);
|
|
||||||
} else {
|
|
||||||
// update work status
|
|
||||||
work.setStartedAfterMaintenance(true);
|
|
||||||
_storagePoolWorkDao.update(work.getId(), work);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// if the instance is of type uservm, call the user vm manager
|
|
||||||
if (vmInstance.getType().equals(VirtualMachine.Type.User)) {
|
|
||||||
UserVmVO userVm = userVmDao.findById(vmInstance.getId());
|
|
||||||
if (!vmMgr.advanceStop(userVm, true, user, account)) {
|
|
||||||
String errorMsg = "There was an error stopping the user vm id: "
|
|
||||||
+ vmInstance.getId()
|
|
||||||
+ " ,cannot enable storage maintenance";
|
|
||||||
s_logger.warn(errorMsg);
|
|
||||||
throw new CloudRuntimeException(errorMsg);
|
|
||||||
} else {
|
|
||||||
// update work status
|
|
||||||
work.setStoppedForMaintenance(true);
|
|
||||||
_storagePoolWorkDao.update(work.getId(), work);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// if the instance is of type secondary storage vm, call the
|
|
||||||
// secondary storage vm manager
|
|
||||||
if (vmInstance.getType().equals(
|
|
||||||
VirtualMachine.Type.SecondaryStorageVm)) {
|
|
||||||
SecondaryStorageVmVO secStrgVm = _secStrgDao
|
|
||||||
.findById(vmInstance.getId());
|
|
||||||
if (!vmMgr.advanceStop(secStrgVm, true, user, account)) {
|
|
||||||
String errorMsg = "There was an error stopping the ssvm id: "
|
|
||||||
+ vmInstance.getId()
|
|
||||||
+ " ,cannot enable storage maintenance";
|
|
||||||
s_logger.warn(errorMsg);
|
|
||||||
throw new CloudRuntimeException(errorMsg);
|
|
||||||
} else {
|
|
||||||
// update work status
|
|
||||||
work.setStoppedForMaintenance(true);
|
|
||||||
_storagePoolWorkDao.update(work.getId(), work);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (restart) {
|
|
||||||
if (vmMgr.advanceStart(secStrgVm, null, user, account) == null) {
|
|
||||||
String errorMsg = "There was an error starting the ssvm id: "
|
|
||||||
+ vmInstance.getId()
|
|
||||||
+ " on another storage pool, cannot enable primary storage maintenance";
|
|
||||||
s_logger.warn(errorMsg);
|
|
||||||
} else {
|
|
||||||
// update work status
|
|
||||||
work.setStartedAfterMaintenance(true);
|
|
||||||
_storagePoolWorkDao.update(work.getId(), work);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// if the instance is of type domain router vm, call the network
|
|
||||||
// manager
|
|
||||||
if (vmInstance.getType().equals(
|
|
||||||
VirtualMachine.Type.DomainRouter)) {
|
|
||||||
DomainRouterVO domR = _domrDao.findById(vmInstance.getId());
|
|
||||||
if (!vmMgr.advanceStop(domR, true, user, account)) {
|
|
||||||
String errorMsg = "There was an error stopping the domain router id: "
|
|
||||||
+ vmInstance.getId()
|
|
||||||
+ " ,cannot enable primary storage maintenance";
|
|
||||||
s_logger.warn(errorMsg);
|
|
||||||
throw new CloudRuntimeException(errorMsg);
|
|
||||||
} else {
|
|
||||||
// update work status
|
|
||||||
work.setStoppedForMaintenance(true);
|
|
||||||
_storagePoolWorkDao.update(work.getId(), work);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (restart) {
|
|
||||||
if (vmMgr.advanceStart(domR, null, user, account) == null) {
|
|
||||||
String errorMsg = "There was an error starting the domain router id: "
|
|
||||||
+ vmInstance.getId()
|
|
||||||
+ " on another storage pool, cannot enable primary storage maintenance";
|
|
||||||
s_logger.warn(errorMsg);
|
|
||||||
} else {
|
|
||||||
// update work status
|
|
||||||
work.setStartedAfterMaintenance(true);
|
|
||||||
_storagePoolWorkDao.update(work.getId(), work);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// 5. Update the status
|
|
||||||
pool.setStatus(StoragePoolStatus.Maintenance);
|
|
||||||
this.primaryDataStoreDao.update(pool.getId(), pool);
|
|
||||||
|
|
||||||
return true;
|
|
||||||
} catch (Exception e) {
|
|
||||||
s_logger.error(
|
|
||||||
"Exception in enabling primary storage maintenance:", e);
|
|
||||||
setPoolStateToError(pool);
|
|
||||||
throw new CloudRuntimeException(e.getMessage());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private void setPoolStateToError(StoragePoolVO primaryStorage) {
|
|
||||||
primaryStorage.setStatus(StoragePoolStatus.ErrorInMaintenance);
|
|
||||||
this.primaryDataStoreDao.update(primaryStorage.getId(), primaryStorage);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public boolean cancelMaintain(long storageId) {
|
|
||||||
// Change the storage state back to up
|
|
||||||
Long userId = UserContext.current().getCallerUserId();
|
|
||||||
User user = _userDao.findById(userId);
|
|
||||||
Account account = UserContext.current().getCaller();
|
|
||||||
StoragePoolVO poolVO = this.primaryDataStoreDao
|
|
||||||
.findById(storageId);
|
|
||||||
StoragePool pool = (StoragePool) this.dataStoreMgr.getDataStore(
|
|
||||||
storageId, DataStoreRole.Primary);
|
|
||||||
poolVO.setStatus(StoragePoolStatus.Up);
|
|
||||||
primaryDataStoreDao.update(storageId, poolVO);
|
|
||||||
|
|
||||||
List<HostVO> hosts = _resourceMgr.listHostsInClusterByStatus(
|
|
||||||
pool.getClusterId(), Status.Up);
|
|
||||||
if (hosts == null || hosts.size() == 0) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
// add heartbeat
|
|
||||||
for (HostVO host : hosts) {
|
|
||||||
ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand(
|
|
||||||
true, pool);
|
|
||||||
final Answer answer = agentMgr.easySend(host.getId(), msPoolCmd);
|
|
||||||
if (answer == null || !answer.getResult()) {
|
|
||||||
if (s_logger.isDebugEnabled()) {
|
|
||||||
s_logger.debug("ModifyStoragePool add failed due to "
|
|
||||||
+ ((answer == null) ? "answer null" : answer
|
|
||||||
.getDetails()));
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if (s_logger.isDebugEnabled()) {
|
|
||||||
s_logger.debug("ModifyStoragePool add secceeded");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// 2. Get a list of pending work for this queue
|
|
||||||
List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao
|
|
||||||
.listPendingWorkForCancelMaintenanceByPoolId(poolVO.getId());
|
|
||||||
|
|
||||||
// 3. work through the queue
|
|
||||||
for (StoragePoolWorkVO work : pendingWork) {
|
|
||||||
try {
|
|
||||||
VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
|
|
||||||
|
|
||||||
if (vmInstance == null) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
// if the instance is of type consoleproxy, call the console
|
|
||||||
// proxy
|
|
||||||
if (vmInstance.getType().equals(
|
|
||||||
VirtualMachine.Type.ConsoleProxy)) {
|
|
||||||
|
|
||||||
ConsoleProxyVO consoleProxy = _consoleProxyDao
|
|
||||||
.findById(vmInstance.getId());
|
|
||||||
if (vmMgr.advanceStart(consoleProxy, null, user, account) == null) {
|
|
||||||
String msg = "There was an error starting the console proxy id: "
|
|
||||||
+ vmInstance.getId()
|
|
||||||
+ " on storage pool, cannot complete primary storage maintenance";
|
|
||||||
s_logger.warn(msg);
|
|
||||||
throw new ExecutionException(msg);
|
|
||||||
} else {
|
|
||||||
// update work queue
|
|
||||||
work.setStartedAfterMaintenance(true);
|
|
||||||
_storagePoolWorkDao.update(work.getId(), work);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// if the instance is of type ssvm, call the ssvm manager
|
|
||||||
if (vmInstance.getType().equals(
|
|
||||||
VirtualMachine.Type.SecondaryStorageVm)) {
|
|
||||||
SecondaryStorageVmVO ssVm = _secStrgDao.findById(vmInstance
|
|
||||||
.getId());
|
|
||||||
if (vmMgr.advanceStart(ssVm, null, user, account) == null) {
|
|
||||||
String msg = "There was an error starting the ssvm id: "
|
|
||||||
+ vmInstance.getId()
|
|
||||||
+ " on storage pool, cannot complete primary storage maintenance";
|
|
||||||
s_logger.warn(msg);
|
|
||||||
throw new ExecutionException(msg);
|
|
||||||
} else {
|
|
||||||
// update work queue
|
|
||||||
work.setStartedAfterMaintenance(true);
|
|
||||||
_storagePoolWorkDao.update(work.getId(), work);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// if the instance is of type ssvm, call the ssvm manager
|
|
||||||
if (vmInstance.getType().equals(
|
|
||||||
VirtualMachine.Type.DomainRouter)) {
|
|
||||||
DomainRouterVO domR = _domrDao.findById(vmInstance.getId());
|
|
||||||
if (vmMgr.advanceStart(domR, null, user, account) == null) {
|
|
||||||
String msg = "There was an error starting the domR id: "
|
|
||||||
+ vmInstance.getId()
|
|
||||||
+ " on storage pool, cannot complete primary storage maintenance";
|
|
||||||
s_logger.warn(msg);
|
|
||||||
throw new ExecutionException(msg);
|
|
||||||
} else {
|
|
||||||
// update work queue
|
|
||||||
work.setStartedAfterMaintenance(true);
|
|
||||||
_storagePoolWorkDao.update(work.getId(), work);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// if the instance is of type user vm, call the user vm manager
|
|
||||||
if (vmInstance.getType().equals(VirtualMachine.Type.User)) {
|
|
||||||
UserVmVO userVm = userVmDao.findById(vmInstance.getId());
|
|
||||||
|
|
||||||
if (vmMgr.advanceStart(userVm, null, user, account) == null) {
|
|
||||||
|
|
||||||
String msg = "There was an error starting the user vm id: "
|
|
||||||
+ vmInstance.getId()
|
|
||||||
+ " on storage pool, cannot complete primary storage maintenance";
|
|
||||||
s_logger.warn(msg);
|
|
||||||
throw new ExecutionException(msg);
|
|
||||||
} else {
|
|
||||||
// update work queue
|
|
||||||
work.setStartedAfterMaintenance(true);
|
|
||||||
_storagePoolWorkDao.update(work.getId(), work);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} catch (Exception e) {
|
|
||||||
s_logger.debug("Failed start vm", e);
|
|
||||||
throw new CloudRuntimeException(e.toString());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
@DB
|
|
||||||
@Override
|
|
||||||
public boolean deleteDataStore(long storeId) {
|
|
||||||
// for the given pool id, find all records in the storage_pool_host_ref
|
|
||||||
List<StoragePoolHostVO> hostPoolRecords = this._storagePoolHostDao
|
|
||||||
.listByPoolId(storeId);
|
|
||||||
StoragePoolVO poolVO = this.primaryDataStoreDao.findById(storeId);
|
|
||||||
StoragePool pool = (StoragePool)this.dataStoreMgr.getDataStore(storeId, DataStoreRole.Primary);
|
|
||||||
boolean deleteFlag = false;
|
|
||||||
Transaction txn = Transaction.currentTxn();
|
|
||||||
try {
|
|
||||||
// if not records exist, delete the given pool (base case)
|
|
||||||
if (hostPoolRecords.size() == 0) {
|
|
||||||
|
|
||||||
txn.start();
|
|
||||||
poolVO.setUuid(null);
|
|
||||||
this.primaryDataStoreDao.update(poolVO.getId(), poolVO);
|
|
||||||
primaryDataStoreDao.remove(poolVO.getId());
|
|
||||||
deletePoolStats(poolVO.getId());
|
|
||||||
txn.commit();
|
|
||||||
|
|
||||||
deleteFlag = true;
|
|
||||||
return true;
|
|
||||||
} else {
|
|
||||||
// Remove the SR associated with the Xenserver
|
|
||||||
for (StoragePoolHostVO host : hostPoolRecords) {
|
|
||||||
DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand(
|
|
||||||
pool);
|
|
||||||
final Answer answer = agentMgr.easySend(host.getHostId(),
|
|
||||||
deleteCmd);
|
|
||||||
|
|
||||||
if (answer != null && answer.getResult()) {
|
|
||||||
deleteFlag = true;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} finally {
|
|
||||||
if (deleteFlag) {
|
|
||||||
// now delete the storage_pool_host_ref and storage_pool records
|
|
||||||
txn.start();
|
|
||||||
for (StoragePoolHostVO host : hostPoolRecords) {
|
|
||||||
_storagePoolHostDao.deleteStoragePoolHostDetails(
|
|
||||||
host.getHostId(), host.getPoolId());
|
|
||||||
}
|
|
||||||
poolVO.setUuid(null);
|
|
||||||
this.primaryDataStoreDao.update(poolVO.getId(), poolVO);
|
|
||||||
primaryDataStoreDao.remove(poolVO.getId());
|
|
||||||
deletePoolStats(poolVO.getId());
|
|
||||||
// Delete op_host_capacity entries
|
|
||||||
this._capacityDao.removeBy(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED,
|
|
||||||
null, null, null, poolVO.getId());
|
|
||||||
txn.commit();
|
|
||||||
|
|
||||||
s_logger.debug("Storage pool id=" + poolVO.getId()
|
|
||||||
+ " is removed successfully");
|
|
||||||
return true;
|
|
||||||
} else {
|
|
||||||
// alert that the storage cleanup is required
|
|
||||||
s_logger.warn("Failed to Delete storage pool id: " + poolVO.getId());
|
|
||||||
_alertMgr
|
|
||||||
.sendAlert(AlertManager.ALERT_TYPE_STORAGE_DELETE,
|
|
||||||
poolVO.getDataCenterId(), poolVO.getPodId(),
|
|
||||||
"Unable to delete storage pool id= " + poolVO.getId(),
|
|
||||||
"Delete storage pool command failed. Please check logs.");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
@DB
|
|
||||||
private boolean deletePoolStats(Long poolId) {
|
|
||||||
CapacityVO capacity1 = _capacityDao.findByHostIdType(poolId,
|
|
||||||
CapacityVO.CAPACITY_TYPE_STORAGE);
|
|
||||||
CapacityVO capacity2 = _capacityDao.findByHostIdType(poolId,
|
|
||||||
CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED);
|
|
||||||
Transaction txn = Transaction.currentTxn();
|
|
||||||
txn.start();
|
|
||||||
if (capacity1 != null) {
|
|
||||||
_capacityDao.remove(capacity1.getId());
|
|
||||||
}
|
|
||||||
|
|
||||||
if (capacity2 != null) {
|
|
||||||
_capacityDao.remove(capacity2.getId());
|
|
||||||
}
|
|
||||||
|
|
||||||
txn.commit();
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
|
|
||||||
StoragePoolHostVO poolHost = _storagePoolHostDao.findByPoolHost(store.getId(), scope.getScopeId());
|
|
||||||
if (poolHost == null) {
|
|
||||||
poolHost = new StoragePoolHostVO(store.getId(), scope.getScopeId(), existingInfo.getLocalPath());
|
|
||||||
_storagePoolHostDao.persist(poolHost);
|
|
||||||
}
|
|
||||||
|
|
||||||
StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId());
|
|
||||||
pool.setScope(scope.getScopeType());
|
|
||||||
pool.setAvailableBytes(existingInfo.getAvailableBytes());
|
|
||||||
pool.setCapacityBytes(existingInfo.getCapacityBytes());
|
|
||||||
pool.setStatus(StoragePoolStatus.Up);
|
|
||||||
this.primaryDataStoreDao.update(pool.getId(), pool);
|
|
||||||
this.storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, pool.getCapacityBytes() - pool.getAvailableBytes());
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
@ -60,8 +60,8 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif
|
|||||||
@Override
|
@Override
|
||||||
public DataStore initialize(Map<String, Object> dsInfos) {
|
public DataStore initialize(Map<String, Object> dsInfos) {
|
||||||
|
|
||||||
StoragePoolVO storeVO = primaryStoreHelper.createPrimaryDataStore(dsInfos);
|
DataStore store = primaryStoreHelper.createPrimaryDataStore(null);
|
||||||
return providerMgr.getPrimaryDataStore(storeVO.getId());
|
return providerMgr.getPrimaryDataStore(store.getId());
|
||||||
}
|
}
|
||||||
|
|
||||||
protected void attachCluster(DataStore store) {
|
protected void attachCluster(DataStore store) {
|
||||||
@ -113,26 +113,6 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public boolean maintain(long storeId) {
|
|
||||||
// TODO Auto-generated method stub
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public boolean cancelMaintain(long storeId) {
|
|
||||||
// TODO Auto-generated method stub
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public boolean deleteDataStore(long storeId) {
|
|
||||||
// TODO Auto-generated method stub
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean attachZone(DataStore dataStore, ZoneScope scope) {
|
public boolean attachZone(DataStore dataStore, ZoneScope scope) {
|
||||||
// TODO Auto-generated method stub
|
// TODO Auto-generated method stub
|
||||||
@ -146,4 +126,22 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean maintain(DataStore store) {
|
||||||
|
// TODO Auto-generated method stub
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean cancelMaintain(DataStore store) {
|
||||||
|
// TODO Auto-generated method stub
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean deleteDataStore(DataStore store) {
|
||||||
|
// TODO Auto-generated method stub
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@ -57,18 +57,18 @@ public class DefaultPrimaryDataStoreProviderManagerImpl implements PrimaryDataSt
|
|||||||
@Override
|
@Override
|
||||||
public PrimaryDataStore getPrimaryDataStore(long dataStoreId) {
|
public PrimaryDataStore getPrimaryDataStore(long dataStoreId) {
|
||||||
StoragePoolVO dataStoreVO = dataStoreDao.findById(dataStoreId);
|
StoragePoolVO dataStoreVO = dataStoreDao.findById(dataStoreId);
|
||||||
long providerId = dataStoreVO.getStorageProviderId();
|
String providerName = dataStoreVO.getStorageProviderName();
|
||||||
DataStoreProvider provider = providerManager.getDataStoreProviderById(providerId);
|
DataStoreProvider provider = providerManager.getDataStoreProvider(providerName);
|
||||||
DefaultPrimaryDataStore dataStore = DefaultPrimaryDataStore.createDataStore(dataStoreVO, driverMaps.get(provider.getUuid()), provider);
|
DefaultPrimaryDataStore dataStore = DefaultPrimaryDataStore.createDataStore(dataStoreVO, driverMaps.get(provider.getName()), provider);
|
||||||
return dataStore;
|
return dataStore;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean registerDriver(String uuid, PrimaryDataStoreDriver driver) {
|
public boolean registerDriver(String providerName, PrimaryDataStoreDriver driver) {
|
||||||
if (driverMaps.get(uuid) != null) {
|
if (driverMaps.get(providerName) != null) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
driverMaps.put(uuid, driver);
|
driverMaps.put(providerName, driver);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -79,7 +79,7 @@ public class DefaultPrimaryDataStoreProviderManagerImpl implements PrimaryDataSt
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean registerHostListener(String uuid, HypervisorHostListener listener) {
|
public boolean registerHostListener(String providerName, HypervisorHostListener listener) {
|
||||||
return storageMgr.registerHostListener(uuid, listener);
|
return storageMgr.registerHostListener(providerName, listener);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -16,24 +16,29 @@
|
|||||||
// under the License.
|
// under the License.
|
||||||
package org.apache.cloudstack.storage.datastore.provider;
|
package org.apache.cloudstack.storage.datastore.provider;
|
||||||
|
|
||||||
|
import java.util.HashSet;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
import java.util.Set;
|
||||||
|
|
||||||
import javax.inject.Inject;
|
import javax.inject.Inject;
|
||||||
|
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
|
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
|
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider.DataStoreProviderType;
|
||||||
import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager;
|
import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager;
|
||||||
import org.apache.cloudstack.storage.datastore.driver.DefaultPrimaryDataStoreDriverImpl;
|
import org.apache.cloudstack.storage.datastore.driver.DefaultPrimaryDataStoreDriverImpl;
|
||||||
import org.apache.cloudstack.storage.datastore.lifecycle.DefaultPrimaryDataStoreLifeCycleImpl;
|
import org.apache.cloudstack.storage.datastore.lifecycle.DefaultPrimaryDataStoreLifeCycleImpl;
|
||||||
import org.springframework.stereotype.Component;
|
|
||||||
|
|
||||||
import com.cloud.utils.component.ComponentContext;
|
import com.cloud.utils.component.ComponentContext;
|
||||||
|
|
||||||
@Component
|
|
||||||
public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProvider {
|
public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProvider {
|
||||||
private final String providerName = "default primary data store provider";
|
private final String providerName = "default primary data store provider";
|
||||||
protected PrimaryDataStoreDriver driver;
|
protected PrimaryDataStoreDriver driver;
|
||||||
|
protected HypervisorHostListener listener;
|
||||||
@Inject
|
@Inject
|
||||||
PrimaryDataStoreProviderManager storeMgr;
|
PrimaryDataStoreProviderManager storeMgr;
|
||||||
|
|
||||||
@ -46,7 +51,7 @@ public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProv
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public DataStoreLifeCycle getLifeCycle() {
|
public DataStoreLifeCycle getDataStoreLifeCycle() {
|
||||||
return this.lifecyle;
|
return this.lifecyle;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -54,22 +59,25 @@ public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProv
|
|||||||
public boolean configure(Map<String, Object> params) {
|
public boolean configure(Map<String, Object> params) {
|
||||||
lifecyle = ComponentContext.inject(DefaultPrimaryDataStoreLifeCycleImpl.class);
|
lifecyle = ComponentContext.inject(DefaultPrimaryDataStoreLifeCycleImpl.class);
|
||||||
driver = ComponentContext.inject(DefaultPrimaryDataStoreDriverImpl.class);
|
driver = ComponentContext.inject(DefaultPrimaryDataStoreDriverImpl.class);
|
||||||
HypervisorHostListener listener = ComponentContext.inject(DefaultHostListener.class);
|
listener = ComponentContext.inject(DefaultHostListener.class);
|
||||||
uuid = (String)params.get("uuid");
|
|
||||||
id = (Long)params.get("id");
|
|
||||||
storeMgr.registerDriver(uuid, this.driver);
|
|
||||||
storeMgr.registerHostListener(uuid, listener);
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String getUuid() {
|
public PrimaryDataStoreDriver getDataStoreDriver() {
|
||||||
return this.uuid;
|
return this.driver;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public long getId() {
|
public HypervisorHostListener getHostListener() {
|
||||||
return this.id;
|
return this.listener;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Set<DataStoreProviderType> getTypes() {
|
||||||
|
Set<DataStoreProviderType> types = new HashSet<DataStoreProviderType>();
|
||||||
|
types.add(DataStoreProviderType.PRIMARY);
|
||||||
|
return types;
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@ -28,7 +28,7 @@ import java.util.Map;
|
|||||||
import javax.inject.Inject;
|
import javax.inject.Inject;
|
||||||
import javax.naming.ConfigurationException;
|
import javax.naming.ConfigurationException;
|
||||||
|
|
||||||
import org.apache.cloudstack.storage.datastore.provider.PrimaryDataStoreProvider;
|
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
|
||||||
import org.junit.Before;
|
import org.junit.Before;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
import org.junit.runner.RunWith;
|
import org.junit.runner.RunWith;
|
||||||
|
|||||||
@ -57,6 +57,7 @@
|
|||||||
<module>network-elements/dns-notifier</module>
|
<module>network-elements/dns-notifier</module>
|
||||||
<module>storage/image/s3</module>
|
<module>storage/image/s3</module>
|
||||||
<module>storage/volume/solidfire</module>
|
<module>storage/volume/solidfire</module>
|
||||||
|
<module>storage/volume/default</module>
|
||||||
<module>alert-handlers/snmp-alerts</module>
|
<module>alert-handlers/snmp-alerts</module>
|
||||||
</modules>
|
</modules>
|
||||||
|
|
||||||
|
|||||||
56
plugins/storage/volume/default/pom.xml
Normal file
56
plugins/storage/volume/default/pom.xml
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor
|
||||||
|
license agreements. See the NOTICE file distributed with this work for additional
|
||||||
|
information regarding copyright ownership. The ASF licenses this file to
|
||||||
|
you under the Apache License, Version 2.0 (the "License"); you may not use
|
||||||
|
this file except in compliance with the License. You may obtain a copy of
|
||||||
|
the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
|
||||||
|
by applicable law or agreed to in writing, software distributed under the
|
||||||
|
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
|
||||||
|
OF ANY KIND, either express or implied. See the License for the specific
|
||||||
|
language governing permissions and limitations under the License. -->
|
||||||
|
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||||
|
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||||
|
<modelVersion>4.0.0</modelVersion>
|
||||||
|
<artifactId>cloud-plugin-storage-volume-default</artifactId>
|
||||||
|
<name>Apache CloudStack Plugin - Storage Volume default provider</name>
|
||||||
|
<parent>
|
||||||
|
<groupId>org.apache.cloudstack</groupId>
|
||||||
|
<artifactId>cloudstack-plugins</artifactId>
|
||||||
|
<version>4.2.0-SNAPSHOT</version>
|
||||||
|
<relativePath>../../../pom.xml</relativePath>
|
||||||
|
</parent>
|
||||||
|
<dependencies>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.cloudstack</groupId>
|
||||||
|
<artifactId>cloud-engine-storage-volume</artifactId>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>mysql</groupId>
|
||||||
|
<artifactId>mysql-connector-java</artifactId>
|
||||||
|
<version>${cs.mysql.version}</version>
|
||||||
|
<scope>provided</scope>
|
||||||
|
</dependency>
|
||||||
|
</dependencies>
|
||||||
|
<build>
|
||||||
|
<defaultGoal>install</defaultGoal>
|
||||||
|
<sourceDirectory>src</sourceDirectory>
|
||||||
|
<testSourceDirectory>test</testSourceDirectory>
|
||||||
|
<plugins>
|
||||||
|
<plugin>
|
||||||
|
<artifactId>maven-surefire-plugin</artifactId>
|
||||||
|
<configuration>
|
||||||
|
<skipTests>true</skipTests>
|
||||||
|
</configuration>
|
||||||
|
<executions>
|
||||||
|
<execution>
|
||||||
|
<phase>integration-test</phase>
|
||||||
|
<goals>
|
||||||
|
<goal>test</goal>
|
||||||
|
</goals>
|
||||||
|
</execution>
|
||||||
|
</executions>
|
||||||
|
</plugin>
|
||||||
|
</plugins>
|
||||||
|
</build>
|
||||||
|
</project>
|
||||||
@ -71,9 +71,9 @@ import com.cloud.utils.exception.CloudRuntimeException;
|
|||||||
import com.cloud.vm.DiskProfile;
|
import com.cloud.vm.DiskProfile;
|
||||||
import com.cloud.vm.dao.VMInstanceDao;
|
import com.cloud.vm.dao.VMInstanceDao;
|
||||||
|
|
||||||
public class AncientPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver {
|
public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver {
|
||||||
private static final Logger s_logger = Logger
|
private static final Logger s_logger = Logger
|
||||||
.getLogger(AncientPrimaryDataStoreDriverImpl.class);
|
.getLogger(CloudStackPrimaryDataStoreDriverImpl.class);
|
||||||
@Inject DiskOfferingDao diskOfferingDao;
|
@Inject DiskOfferingDao diskOfferingDao;
|
||||||
@Inject VMTemplateDao templateDao;
|
@Inject VMTemplateDao templateDao;
|
||||||
@Inject VolumeDao volumeDao;
|
@Inject VolumeDao volumeDao;
|
||||||
@ -0,0 +1,542 @@
|
|||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.cloudstack.storage.datastore.lifecycle;
|
||||||
|
|
||||||
|
import java.net.URI;
|
||||||
|
import java.net.URISyntaxException;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.UUID;
|
||||||
|
|
||||||
|
import javax.inject.Inject;
|
||||||
|
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
|
||||||
|
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||||
|
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||||
|
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
|
||||||
|
import org.apache.log4j.Logger;
|
||||||
|
|
||||||
|
import com.cloud.agent.AgentManager;
|
||||||
|
import com.cloud.agent.api.Answer;
|
||||||
|
import com.cloud.agent.api.CreateStoragePoolCommand;
|
||||||
|
import com.cloud.agent.api.DeleteStoragePoolCommand;
|
||||||
|
import com.cloud.agent.api.ModifyStoragePoolCommand;
|
||||||
|
import com.cloud.agent.api.StoragePoolInfo;
|
||||||
|
import com.cloud.alert.AlertManager;
|
||||||
|
import com.cloud.capacity.Capacity;
|
||||||
|
import com.cloud.capacity.CapacityVO;
|
||||||
|
import com.cloud.capacity.dao.CapacityDao;
|
||||||
|
import com.cloud.exception.DiscoveryException;
|
||||||
|
import com.cloud.exception.InvalidParameterValueException;
|
||||||
|
import com.cloud.host.Host;
|
||||||
|
import com.cloud.host.HostVO;
|
||||||
|
import com.cloud.host.Status;
|
||||||
|
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||||
|
import com.cloud.resource.ResourceManager;
|
||||||
|
import com.cloud.server.ManagementServer;
|
||||||
|
import com.cloud.storage.OCFS2Manager;
|
||||||
|
import com.cloud.storage.Storage.StoragePoolType;
|
||||||
|
import com.cloud.storage.StorageManager;
|
||||||
|
import com.cloud.storage.StoragePool;
|
||||||
|
import com.cloud.storage.StoragePoolAutomation;
|
||||||
|
import com.cloud.storage.StoragePoolDiscoverer;
|
||||||
|
import com.cloud.storage.StoragePoolHostVO;
|
||||||
|
import com.cloud.storage.StoragePoolStatus;
|
||||||
|
import com.cloud.storage.StoragePoolWorkVO;
|
||||||
|
import com.cloud.storage.VolumeVO;
|
||||||
|
import com.cloud.storage.dao.StoragePoolHostDao;
|
||||||
|
import com.cloud.storage.dao.StoragePoolWorkDao;
|
||||||
|
import com.cloud.storage.dao.VolumeDao;
|
||||||
|
import com.cloud.user.Account;
|
||||||
|
import com.cloud.user.User;
|
||||||
|
import com.cloud.user.UserContext;
|
||||||
|
import com.cloud.user.dao.UserDao;
|
||||||
|
import com.cloud.utils.NumbersUtil;
|
||||||
|
import com.cloud.utils.UriUtils;
|
||||||
|
import com.cloud.utils.db.DB;
|
||||||
|
import com.cloud.utils.db.Transaction;
|
||||||
|
import com.cloud.utils.exception.CloudRuntimeException;
|
||||||
|
import com.cloud.utils.exception.ExecutionException;
|
||||||
|
import com.cloud.vm.ConsoleProxyVO;
|
||||||
|
import com.cloud.vm.DomainRouterVO;
|
||||||
|
import com.cloud.vm.SecondaryStorageVmVO;
|
||||||
|
import com.cloud.vm.UserVmVO;
|
||||||
|
import com.cloud.vm.VMInstanceVO;
|
||||||
|
import com.cloud.vm.VirtualMachine;
|
||||||
|
import com.cloud.vm.VirtualMachine.State;
|
||||||
|
import com.cloud.vm.VirtualMachineManager;
|
||||||
|
import com.cloud.vm.dao.ConsoleProxyDao;
|
||||||
|
import com.cloud.vm.dao.DomainRouterDao;
|
||||||
|
import com.cloud.vm.dao.SecondaryStorageVmDao;
|
||||||
|
import com.cloud.vm.dao.UserVmDao;
|
||||||
|
import com.cloud.vm.dao.VMInstanceDao;
|
||||||
|
|
||||||
|
public class CloudStackPrimaryDataStoreLifeCycleImpl implements
|
||||||
|
PrimaryDataStoreLifeCycle {
|
||||||
|
private static final Logger s_logger = Logger
|
||||||
|
.getLogger(CloudStackPrimaryDataStoreLifeCycleImpl.class);
|
||||||
|
@Inject
|
||||||
|
protected ResourceManager _resourceMgr;
|
||||||
|
protected List<StoragePoolDiscoverer> _discoverers;
|
||||||
|
@Inject
|
||||||
|
PrimaryDataStoreDao primaryDataStoreDao;
|
||||||
|
@Inject
|
||||||
|
protected OCFS2Manager _ocfs2Mgr;
|
||||||
|
@Inject
|
||||||
|
DataStoreManager dataStoreMgr;
|
||||||
|
@Inject
|
||||||
|
AgentManager agentMgr;
|
||||||
|
@Inject
|
||||||
|
StorageManager storageMgr;
|
||||||
|
|
||||||
|
|
||||||
|
@Inject
|
||||||
|
VolumeDao volumeDao;
|
||||||
|
@Inject
|
||||||
|
VMInstanceDao vmDao;
|
||||||
|
@Inject
|
||||||
|
ManagementServer server;
|
||||||
|
@Inject
|
||||||
|
protected VirtualMachineManager vmMgr;
|
||||||
|
@Inject
|
||||||
|
protected SecondaryStorageVmDao _secStrgDao;
|
||||||
|
@Inject
|
||||||
|
UserVmDao userVmDao;
|
||||||
|
@Inject
|
||||||
|
protected UserDao _userDao;
|
||||||
|
@Inject
|
||||||
|
protected DomainRouterDao _domrDao;
|
||||||
|
@Inject
|
||||||
|
protected StoragePoolHostDao _storagePoolHostDao;
|
||||||
|
@Inject
|
||||||
|
protected AlertManager _alertMgr;
|
||||||
|
@Inject
|
||||||
|
protected ConsoleProxyDao _consoleProxyDao;
|
||||||
|
|
||||||
|
@Inject
|
||||||
|
protected StoragePoolWorkDao _storagePoolWorkDao;
|
||||||
|
@Inject
|
||||||
|
PrimaryDataStoreHelper dataStoreHelper;
|
||||||
|
@Inject
|
||||||
|
StoragePoolAutomation storagePoolAutmation;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public DataStore initialize(Map<String, Object> dsInfos) {
|
||||||
|
Long clusterId = (Long) dsInfos.get("clusterId");
|
||||||
|
Long podId = (Long) dsInfos.get("podId");
|
||||||
|
Long zoneId = (Long) dsInfos.get("zoneId");
|
||||||
|
String url = (String) dsInfos.get("url");
|
||||||
|
String providerName = (String)dsInfos.get("providerName");
|
||||||
|
if (clusterId != null && podId == null) {
|
||||||
|
throw new InvalidParameterValueException(
|
||||||
|
"Cluster id requires pod id");
|
||||||
|
}
|
||||||
|
|
||||||
|
PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
|
||||||
|
|
||||||
|
URI uri = null;
|
||||||
|
try {
|
||||||
|
uri = new URI(UriUtils.encodeURIComponent(url));
|
||||||
|
if (uri.getScheme() == null) {
|
||||||
|
throw new InvalidParameterValueException("scheme is null "
|
||||||
|
+ url + ", add nfs:// as a prefix");
|
||||||
|
} else if (uri.getScheme().equalsIgnoreCase("nfs")) {
|
||||||
|
String uriHost = uri.getHost();
|
||||||
|
String uriPath = uri.getPath();
|
||||||
|
if (uriHost == null || uriPath == null
|
||||||
|
|| uriHost.trim().isEmpty() || uriPath.trim().isEmpty()) {
|
||||||
|
throw new InvalidParameterValueException(
|
||||||
|
"host or path is null, should be nfs://hostname/path");
|
||||||
|
}
|
||||||
|
} else if (uri.getScheme().equalsIgnoreCase("sharedMountPoint")) {
|
||||||
|
String uriPath = uri.getPath();
|
||||||
|
if (uriPath == null) {
|
||||||
|
throw new InvalidParameterValueException(
|
||||||
|
"host or path is null, should be sharedmountpoint://localhost/path");
|
||||||
|
}
|
||||||
|
} else if (uri.getScheme().equalsIgnoreCase("rbd")) {
|
||||||
|
String uriPath = uri.getPath();
|
||||||
|
if (uriPath == null) {
|
||||||
|
throw new InvalidParameterValueException(
|
||||||
|
"host or path is null, should be rbd://hostname/pool");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (URISyntaxException e) {
|
||||||
|
throw new InvalidParameterValueException(url
|
||||||
|
+ " is not a valid uri");
|
||||||
|
}
|
||||||
|
|
||||||
|
String tags = (String) dsInfos.get("tags");
|
||||||
|
Map<String, String> details = (Map<String, String>) dsInfos
|
||||||
|
.get("details");
|
||||||
|
|
||||||
|
parameters.setTags(tags);
|
||||||
|
parameters.setDetails(details);
|
||||||
|
|
||||||
|
String scheme = uri.getScheme();
|
||||||
|
String storageHost = uri.getHost();
|
||||||
|
String hostPath = uri.getPath();
|
||||||
|
Object localStorage = dsInfos.get("localStorage");
|
||||||
|
if (localStorage != null) {
|
||||||
|
hostPath = hostPath.replace("/", "");
|
||||||
|
}
|
||||||
|
String userInfo = uri.getUserInfo();
|
||||||
|
int port = uri.getPort();
|
||||||
|
StoragePoolVO pool = null;
|
||||||
|
if (s_logger.isDebugEnabled()) {
|
||||||
|
s_logger.debug("createPool Params @ scheme - " + scheme
|
||||||
|
+ " storageHost - " + storageHost + " hostPath - "
|
||||||
|
+ hostPath + " port - " + port);
|
||||||
|
}
|
||||||
|
if (scheme.equalsIgnoreCase("nfs")) {
|
||||||
|
if (port == -1) {
|
||||||
|
port = 2049;
|
||||||
|
}
|
||||||
|
parameters.setType(StoragePoolType.NetworkFilesystem);
|
||||||
|
parameters.setHost(storageHost);
|
||||||
|
parameters.setPort(port);
|
||||||
|
parameters.setPath(hostPath);
|
||||||
|
} else if (scheme.equalsIgnoreCase("file")) {
|
||||||
|
if (port == -1) {
|
||||||
|
port = 0;
|
||||||
|
}
|
||||||
|
parameters.setType(StoragePoolType.Filesystem);
|
||||||
|
parameters.setHost("localhost");
|
||||||
|
parameters.setPort(0);
|
||||||
|
parameters.setPath(hostPath);
|
||||||
|
} else if (scheme.equalsIgnoreCase("sharedMountPoint")) {
|
||||||
|
parameters.setType(StoragePoolType.SharedMountPoint);
|
||||||
|
parameters.setHost(storageHost);
|
||||||
|
parameters.setPort(0);
|
||||||
|
parameters.setPath(hostPath);
|
||||||
|
} else if (scheme.equalsIgnoreCase("clvm")) {
|
||||||
|
parameters.setType(StoragePoolType.CLVM);
|
||||||
|
parameters.setHost(storageHost);
|
||||||
|
parameters.setPort(0);
|
||||||
|
parameters.setPath(hostPath.replaceFirst("/", ""));
|
||||||
|
} else if (scheme.equalsIgnoreCase("rbd")) {
|
||||||
|
if (port == -1) {
|
||||||
|
port = 6789;
|
||||||
|
}
|
||||||
|
parameters.setType(StoragePoolType.RBD);
|
||||||
|
parameters.setHost(storageHost);
|
||||||
|
parameters.setPort(port);
|
||||||
|
parameters.setPath(hostPath.replaceFirst("/", ""));
|
||||||
|
parameters.setUserInfo(userInfo);
|
||||||
|
} else if (scheme.equalsIgnoreCase("PreSetup")) {
|
||||||
|
parameters.setType(StoragePoolType.PreSetup);
|
||||||
|
parameters.setHost(storageHost);
|
||||||
|
parameters.setPort(0);
|
||||||
|
parameters.setPath(hostPath);
|
||||||
|
} else if (scheme.equalsIgnoreCase("iscsi")) {
|
||||||
|
String[] tokens = hostPath.split("/");
|
||||||
|
int lun = NumbersUtil.parseInt(tokens[tokens.length - 1], -1);
|
||||||
|
if (port == -1) {
|
||||||
|
port = 3260;
|
||||||
|
}
|
||||||
|
if (lun != -1) {
|
||||||
|
if (clusterId == null) {
|
||||||
|
throw new IllegalArgumentException(
|
||||||
|
"IscsiLUN need to have clusters specified");
|
||||||
|
}
|
||||||
|
hostPath.replaceFirst("/", "");
|
||||||
|
parameters.setType(StoragePoolType.IscsiLUN);
|
||||||
|
parameters.setHost(storageHost);
|
||||||
|
parameters.setPort(port);
|
||||||
|
parameters.setPath(hostPath);
|
||||||
|
} else {
|
||||||
|
for (StoragePoolDiscoverer discoverer : _discoverers) {
|
||||||
|
Map<StoragePoolVO, Map<String, String>> pools;
|
||||||
|
try {
|
||||||
|
pools = discoverer.find(zoneId, podId, uri, details);
|
||||||
|
} catch (DiscoveryException e) {
|
||||||
|
throw new IllegalArgumentException(
|
||||||
|
"Not enough information for discovery " + uri,
|
||||||
|
e);
|
||||||
|
}
|
||||||
|
if (pools != null) {
|
||||||
|
Map.Entry<StoragePoolVO, Map<String, String>> entry = pools
|
||||||
|
.entrySet().iterator().next();
|
||||||
|
pool = entry.getKey();
|
||||||
|
details = entry.getValue();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if (scheme.equalsIgnoreCase("iso")) {
|
||||||
|
if (port == -1) {
|
||||||
|
port = 2049;
|
||||||
|
}
|
||||||
|
parameters.setType(StoragePoolType.ISO);
|
||||||
|
parameters.setHost(storageHost);
|
||||||
|
parameters.setPort(port);
|
||||||
|
parameters.setPath(hostPath);
|
||||||
|
} else if (scheme.equalsIgnoreCase("vmfs")) {
|
||||||
|
parameters.setType(StoragePoolType.VMFS);
|
||||||
|
parameters.setHost("VMFS datastore: " + hostPath);
|
||||||
|
parameters.setPort(0);
|
||||||
|
parameters.setPath(hostPath);
|
||||||
|
} else if (scheme.equalsIgnoreCase("ocfs2")) {
|
||||||
|
port = 7777;
|
||||||
|
parameters.setType(StoragePoolType.OCFS2);
|
||||||
|
parameters.setHost("clustered");
|
||||||
|
parameters.setPort(port);
|
||||||
|
parameters.setPath(hostPath);
|
||||||
|
} else {
|
||||||
|
StoragePoolType type = Enum.valueOf(StoragePoolType.class, scheme);
|
||||||
|
|
||||||
|
if (type != null) {
|
||||||
|
parameters.setType(type);
|
||||||
|
parameters.setHost(storageHost);
|
||||||
|
parameters.setPort(0);
|
||||||
|
parameters.setPath(hostPath);
|
||||||
|
} else {
|
||||||
|
s_logger.warn("Unable to figure out the scheme for URI: " + uri);
|
||||||
|
throw new IllegalArgumentException(
|
||||||
|
"Unable to figure out the scheme for URI: " + uri);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (localStorage == null) {
|
||||||
|
List<StoragePoolVO> pools = primaryDataStoreDao
|
||||||
|
.listPoolByHostPath(storageHost, hostPath);
|
||||||
|
if (!pools.isEmpty() && !scheme.equalsIgnoreCase("sharedmountpoint")) {
|
||||||
|
Long oldPodId = pools.get(0).getPodId();
|
||||||
|
throw new CloudRuntimeException("Storage pool " + uri
|
||||||
|
+ " already in use by another pod (id=" + oldPodId + ")");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Object existingUuid = dsInfos.get("uuid");
|
||||||
|
String uuid = null;
|
||||||
|
|
||||||
|
if (existingUuid != null) {
|
||||||
|
uuid = (String)existingUuid;
|
||||||
|
} else if (scheme.equalsIgnoreCase("sharedmountpoint")
|
||||||
|
|| scheme.equalsIgnoreCase("clvm")) {
|
||||||
|
uuid = UUID.randomUUID().toString();
|
||||||
|
} else if (scheme.equalsIgnoreCase("PreSetup")) {
|
||||||
|
uuid = hostPath.replace("/", "");
|
||||||
|
} else {
|
||||||
|
uuid = UUID.nameUUIDFromBytes(
|
||||||
|
new String(storageHost + hostPath).getBytes()).toString();
|
||||||
|
}
|
||||||
|
|
||||||
|
List<StoragePoolVO> spHandles = primaryDataStoreDao
|
||||||
|
.findIfDuplicatePoolsExistByUUID(uuid);
|
||||||
|
if ((spHandles != null) && (spHandles.size() > 0)) {
|
||||||
|
if (s_logger.isDebugEnabled()) {
|
||||||
|
s_logger.debug("Another active pool with the same uuid already exists");
|
||||||
|
}
|
||||||
|
throw new CloudRuntimeException(
|
||||||
|
"Another active pool with the same uuid already exists");
|
||||||
|
}
|
||||||
|
|
||||||
|
String poolName = (String) dsInfos.get("name");
|
||||||
|
|
||||||
|
parameters.setUuid(uuid);
|
||||||
|
parameters.setZoneId(zoneId);
|
||||||
|
parameters.setPodId(podId);
|
||||||
|
parameters.setName(poolName);
|
||||||
|
parameters.setClusterId(clusterId);
|
||||||
|
parameters.setProviderName(providerName);
|
||||||
|
|
||||||
|
return dataStoreHelper.createPrimaryDataStore(parameters);
|
||||||
|
}
|
||||||
|
|
||||||
|
protected boolean createStoragePool(long hostId, StoragePool pool) {
|
||||||
|
s_logger.debug("creating pool " + pool.getName() + " on host "
|
||||||
|
+ hostId);
|
||||||
|
if (pool.getPoolType() != StoragePoolType.NetworkFilesystem
|
||||||
|
&& pool.getPoolType() != StoragePoolType.Filesystem
|
||||||
|
&& pool.getPoolType() != StoragePoolType.IscsiLUN
|
||||||
|
&& pool.getPoolType() != StoragePoolType.Iscsi
|
||||||
|
&& pool.getPoolType() != StoragePoolType.VMFS
|
||||||
|
&& pool.getPoolType() != StoragePoolType.SharedMountPoint
|
||||||
|
&& pool.getPoolType() != StoragePoolType.PreSetup
|
||||||
|
&& pool.getPoolType() != StoragePoolType.OCFS2
|
||||||
|
&& pool.getPoolType() != StoragePoolType.RBD
|
||||||
|
&& pool.getPoolType() != StoragePoolType.CLVM) {
|
||||||
|
s_logger.warn(" Doesn't support storage pool type "
|
||||||
|
+ pool.getPoolType());
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool);
|
||||||
|
final Answer answer = agentMgr.easySend(hostId, cmd);
|
||||||
|
if (answer != null && answer.getResult()) {
|
||||||
|
return true;
|
||||||
|
} else {
|
||||||
|
primaryDataStoreDao.expunge(pool.getId());
|
||||||
|
String msg = "";
|
||||||
|
if (answer != null) {
|
||||||
|
msg = "Can not create storage pool through host " + hostId
|
||||||
|
+ " due to " + answer.getDetails();
|
||||||
|
s_logger.warn(msg);
|
||||||
|
} else {
|
||||||
|
msg = "Can not create storage pool through host " + hostId
|
||||||
|
+ " due to CreateStoragePoolCommand returns null";
|
||||||
|
s_logger.warn(msg);
|
||||||
|
}
|
||||||
|
throw new CloudRuntimeException(msg);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean attachCluster(DataStore store, ClusterScope scope) {
|
||||||
|
PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store;
|
||||||
|
// Check if there is host up in this cluster
|
||||||
|
List<HostVO> allHosts = _resourceMgr.listAllUpAndEnabledHosts(
|
||||||
|
Host.Type.Routing, primarystore.getClusterId(),
|
||||||
|
primarystore.getPodId(), primarystore.getDataCenterId());
|
||||||
|
if (allHosts.isEmpty()) {
|
||||||
|
throw new CloudRuntimeException(
|
||||||
|
"No host up to associate a storage pool with in cluster "
|
||||||
|
+ primarystore.getClusterId());
|
||||||
|
}
|
||||||
|
|
||||||
|
if (primarystore.getPoolType() == StoragePoolType.OCFS2
|
||||||
|
&& !_ocfs2Mgr.prepareNodes(allHosts, primarystore)) {
|
||||||
|
s_logger.warn("Can not create storage pool " + primarystore
|
||||||
|
+ " on cluster " + primarystore.getClusterId());
|
||||||
|
primaryDataStoreDao.expunge(primarystore.getId());
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
boolean success = false;
|
||||||
|
for (HostVO h : allHosts) {
|
||||||
|
success = createStoragePool(h.getId(), primarystore);
|
||||||
|
if (success) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
s_logger.debug("In createPool Adding the pool to each of the hosts");
|
||||||
|
List<HostVO> poolHosts = new ArrayList<HostVO>();
|
||||||
|
for (HostVO h : allHosts) {
|
||||||
|
try {
|
||||||
|
this.storageMgr.connectHostToSharedPool(h.getId(),
|
||||||
|
primarystore.getId());
|
||||||
|
poolHosts.add(h);
|
||||||
|
} catch (Exception e) {
|
||||||
|
s_logger.warn("Unable to establish a connection between " + h
|
||||||
|
+ " and " + primarystore, e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (poolHosts.isEmpty()) {
|
||||||
|
s_logger.warn("No host can access storage pool " + primarystore
|
||||||
|
+ " on cluster " + primarystore.getClusterId());
|
||||||
|
primaryDataStoreDao.expunge(primarystore.getId());
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
this.dataStoreHelper.attachCluster(store);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean attachZone(DataStore dataStore, ZoneScope scope) {
|
||||||
|
List<HostVO> hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId());
|
||||||
|
for (HostVO host : hosts) {
|
||||||
|
try {
|
||||||
|
this.storageMgr.connectHostToSharedPool(host.getId(),
|
||||||
|
dataStore.getId());
|
||||||
|
} catch (Exception e) {
|
||||||
|
s_logger.warn("Unable to establish a connection between " + host
|
||||||
|
+ " and " + dataStore, e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
this.dataStoreHelper.attachZone(dataStore);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean dettach() {
|
||||||
|
// TODO Auto-generated method stub
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean unmanaged() {
|
||||||
|
// TODO Auto-generated method stub
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean maintain(DataStore dataStore) {
|
||||||
|
storagePoolAutmation.maintain(dataStore);
|
||||||
|
this.dataStoreHelper.maintain(dataStore);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean cancelMaintain(DataStore store) {
|
||||||
|
this.dataStoreHelper.cancelMaintain(store);
|
||||||
|
storagePoolAutmation.cancelMaintain(store);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
@DB
|
||||||
|
@Override
|
||||||
|
public boolean deleteDataStore(DataStore store) {
|
||||||
|
List<StoragePoolHostVO> hostPoolRecords = this._storagePoolHostDao
|
||||||
|
.listByPoolId(store.getId());
|
||||||
|
StoragePool pool = (StoragePool)store;
|
||||||
|
boolean deleteFlag = false;
|
||||||
|
// Remove the SR associated with the Xenserver
|
||||||
|
for (StoragePoolHostVO host : hostPoolRecords) {
|
||||||
|
DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand(
|
||||||
|
pool);
|
||||||
|
final Answer answer = agentMgr.easySend(host.getHostId(),
|
||||||
|
deleteCmd);
|
||||||
|
|
||||||
|
if (answer != null && answer.getResult()) {
|
||||||
|
deleteFlag = true;
|
||||||
|
break;
|
||||||
|
} else {
|
||||||
|
if (answer != null) {
|
||||||
|
s_logger.debug("Failed to delete storage pool: " + answer.getResult());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!deleteFlag) {
|
||||||
|
throw new CloudRuntimeException("Failed to delete storage pool on host");
|
||||||
|
}
|
||||||
|
|
||||||
|
this.dataStoreHelper.deletePrimaryDataStore(store);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
|
||||||
|
this.dataStoreHelper.attachHost(store, scope, existingInfo);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
@ -18,61 +18,63 @@
|
|||||||
*/
|
*/
|
||||||
package org.apache.cloudstack.storage.datastore.provider;
|
package org.apache.cloudstack.storage.datastore.provider;
|
||||||
|
|
||||||
|
import java.util.HashSet;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
import java.util.Set;
|
||||||
import javax.inject.Inject;
|
|
||||||
|
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
|
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
|
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
|
||||||
import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager;
|
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
|
||||||
import org.apache.cloudstack.storage.datastore.driver.AncientPrimaryDataStoreDriverImpl;
|
import org.apache.cloudstack.storage.datastore.driver.CloudStackPrimaryDataStoreDriverImpl;
|
||||||
import org.apache.cloudstack.storage.datastore.lifecycle.AncientPrimaryDataStoreLifeCycleImpl;
|
import org.apache.cloudstack.storage.datastore.lifecycle.CloudStackPrimaryDataStoreLifeCycleImpl;
|
||||||
import org.springframework.stereotype.Component;
|
|
||||||
|
|
||||||
import com.cloud.utils.component.ComponentContext;
|
import com.cloud.utils.component.ComponentContext;
|
||||||
|
|
||||||
@Component
|
/**
 * Default primary data store provider. Wires together the default lifecycle,
 * driver, and hypervisor host listener implementations via ComponentContext
 * injection when {@link #configure(Map)} is called.
 */
public class CloudStackPrimaryDataStoreProviderImpl implements
        PrimaryDataStoreProvider {

    // Registered provider name; retains the legacy "ancient" label for
    // backward compatibility — presumably existing DB rows reference it,
    // TODO confirm before renaming.
    private final String providerName = "ancient primary data store provider";
    protected PrimaryDataStoreDriver driver;
    protected HypervisorHostListener listener;
    // [sic] "lifecyle" — field name kept as-is to match existing code.
    protected DataStoreLifeCycle lifecyle;

    CloudStackPrimaryDataStoreProviderImpl() {

    }

    @Override
    public String getName() {
        return providerName;
    }

    @Override
    public DataStoreLifeCycle getDataStoreLifeCycle() {
        return this.lifecyle;
    }

    /**
     * Instantiates and injects the default lifecycle, driver, and host
     * listener. The {@code params} map is accepted for interface compliance
     * but not read here.
     */
    @Override
    public boolean configure(Map<String, Object> params) {
        lifecyle = ComponentContext.inject(CloudStackPrimaryDataStoreLifeCycleImpl.class);
        driver = ComponentContext.inject(CloudStackPrimaryDataStoreDriverImpl.class);
        listener = ComponentContext.inject(DefaultHostListener.class);
        return true;
    }

    @Override
    public PrimaryDataStoreDriver getDataStoreDriver() {
        return this.driver;
    }

    @Override
    public HypervisorHostListener getHostListener() {
        return this.listener;
    }

    /** This provider only handles PRIMARY storage. */
    @Override
    public Set<DataStoreProviderType> getTypes() {
        Set<DataStoreProviderType> types = new HashSet<DataStoreProviderType>();
        types.add(DataStoreProviderType.PRIMARY);
        return types;
    }
}
|
||||||
@ -26,10 +26,10 @@ import javax.inject.Inject;
|
|||||||
import javax.naming.ConfigurationException;
|
import javax.naming.ConfigurationException;
|
||||||
|
|
||||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
|
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
|
||||||
import org.apache.cloudstack.storage.command.CreateObjectAnswer;
|
import org.apache.cloudstack.storage.command.CreateObjectAnswer;
|
||||||
import org.apache.cloudstack.storage.command.CreateVolumeFromBaseImageCommand;
|
import org.apache.cloudstack.storage.command.CreateVolumeFromBaseImageCommand;
|
||||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||||
import org.apache.cloudstack.storage.datastore.provider.PrimaryDataStoreProvider;
|
|
||||||
import org.junit.Before;
|
import org.junit.Before;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
import org.junit.runner.RunWith;
|
import org.junit.runner.RunWith;
|
||||||
|
|||||||
@ -2242,6 +2242,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||||||
cmdList.add(DeleteAlertsCmd.class);
|
cmdList.add(DeleteAlertsCmd.class);
|
||||||
cmdList.add(ArchiveEventsCmd.class);
|
cmdList.add(ArchiveEventsCmd.class);
|
||||||
cmdList.add(DeleteEventsCmd.class);
|
cmdList.add(DeleteEventsCmd.class);
|
||||||
|
cmdList.add(ListStorageProvidersCmd.class);
|
||||||
return cmdList;
|
return cmdList;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -712,7 +712,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
DataStoreProvider provider = this.dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider();
|
DataStoreProvider provider = this.dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider();
|
||||||
DataStoreLifeCycle lifeCycle = provider.getLifeCycle();
|
DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
|
||||||
if (pool == null) {
|
if (pool == null) {
|
||||||
Map<String, Object> params = new HashMap<String, Object>();
|
Map<String, Object> params = new HashMap<String, Object>();
|
||||||
String name = (host.getName() + " Local Storage");
|
String name = (host.getName() + " Local Storage");
|
||||||
@ -724,7 +724,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||||||
params.put("localStorage", true);
|
params.put("localStorage", true);
|
||||||
params.put("details", pInfo.getDetails());
|
params.put("details", pInfo.getDetails());
|
||||||
params.put("uuid", pInfo.getUuid());
|
params.put("uuid", pInfo.getUuid());
|
||||||
params.put("providerId", provider.getId());
|
params.put("providerName", provider.getName());
|
||||||
|
|
||||||
store = lifeCycle.initialize(params);
|
store = lifeCycle.initialize(params);
|
||||||
} else {
|
} else {
|
||||||
@ -748,15 +748,15 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||||||
public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd)
|
public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd)
|
||||||
throws ResourceInUseException, IllegalArgumentException,
|
throws ResourceInUseException, IllegalArgumentException,
|
||||||
UnknownHostException, ResourceUnavailableException {
|
UnknownHostException, ResourceUnavailableException {
|
||||||
String providerUuid = cmd.getStorageProviderUuid();
|
String providerName = cmd.getStorageProviderName();
|
||||||
DataStoreProvider storeProvider = dataStoreProviderMgr
|
DataStoreProvider storeProvider = dataStoreProviderMgr
|
||||||
.getDataStoreProviderByUuid(providerUuid);
|
.getDataStoreProvider(providerName);
|
||||||
|
|
||||||
if (storeProvider == null) {
|
if (storeProvider == null) {
|
||||||
storeProvider = dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider();
|
storeProvider = dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider();
|
||||||
if (storeProvider == null) {
|
if (storeProvider == null) {
|
||||||
throw new InvalidParameterValueException(
|
throw new InvalidParameterValueException(
|
||||||
"can't find storage provider: " + providerUuid);
|
"can't find storage provider: " + providerName);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -821,9 +821,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||||||
params.put("tags", cmd.getTags());
|
params.put("tags", cmd.getTags());
|
||||||
params.put("name", cmd.getStoragePoolName());
|
params.put("name", cmd.getStoragePoolName());
|
||||||
params.put("details", details);
|
params.put("details", details);
|
||||||
params.put("providerId", storeProvider.getId());
|
params.put("providerName", storeProvider.getName());
|
||||||
|
|
||||||
DataStoreLifeCycle lifeCycle = storeProvider.getLifeCycle();
|
DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle();
|
||||||
DataStore store = null;
|
DataStore store = null;
|
||||||
try {
|
try {
|
||||||
store = lifeCycle.initialize(params);
|
store = lifeCycle.initialize(params);
|
||||||
@ -948,9 +948,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||||||
s_logger.trace("Released lock for storage pool " + id);
|
s_logger.trace("Released lock for storage pool " + id);
|
||||||
|
|
||||||
DataStoreProvider storeProvider = dataStoreProviderMgr
|
DataStoreProvider storeProvider = dataStoreProviderMgr
|
||||||
.getDataStoreProviderById(sPool.getStorageProviderId());
|
.getDataStoreProvider(sPool.getStorageProviderName());
|
||||||
DataStoreLifeCycle lifeCycle = storeProvider.getLifeCycle();
|
DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle();
|
||||||
lifeCycle.deleteDataStore(id);
|
DataStore store = dataStoreMgr.getDataStore(
|
||||||
|
sPool.getId(), DataStoreRole.Primary);
|
||||||
|
lifeCycle.deleteDataStore(store);
|
||||||
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
@ -963,8 +965,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||||||
s_logger.debug("Adding pool " + pool.getName() + " to host " + hostId);
|
s_logger.debug("Adding pool " + pool.getName() + " to host " + hostId);
|
||||||
|
|
||||||
DataStoreProvider provider = dataStoreProviderMgr
|
DataStoreProvider provider = dataStoreProviderMgr
|
||||||
.getDataStoreProviderById(pool.getStorageProviderId());
|
.getDataStoreProvider(pool.getStorageProviderName());
|
||||||
HypervisorHostListener listener = hostListeners.get(provider.getUuid());
|
HypervisorHostListener listener = hostListeners.get(provider.getName());
|
||||||
listener.hostConnect(hostId, pool.getId());
|
listener.hostConnect(hostId, pool.getId());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1415,19 +1417,16 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||||||
}
|
}
|
||||||
|
|
||||||
DataStoreProvider provider = dataStoreProviderMgr
|
DataStoreProvider provider = dataStoreProviderMgr
|
||||||
.getDataStoreProviderById(primaryStorage.getStorageProviderId());
|
.getDataStoreProvider(primaryStorage.getStorageProviderName());
|
||||||
DataStoreLifeCycle lifeCycle = provider.getLifeCycle();
|
DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
|
||||||
lifeCycle.maintain(primaryStorage.getId());
|
DataStore store = dataStoreMgr.getDataStore(
|
||||||
|
primaryStorage.getId(), DataStoreRole.Primary);
|
||||||
|
lifeCycle.maintain(store);
|
||||||
|
|
||||||
return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore(
|
return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore(
|
||||||
primaryStorage.getId(), DataStoreRole.Primary);
|
primaryStorage.getId(), DataStoreRole.Primary);
|
||||||
}
|
}
|
||||||
|
|
||||||
private void setPoolStateToError(StoragePoolVO primaryStorage) {
|
|
||||||
primaryStorage.setStatus(StoragePoolStatus.ErrorInMaintenance);
|
|
||||||
_storagePoolDao.update(primaryStorage.getId(), primaryStorage);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
@DB
|
@DB
|
||||||
public PrimaryDataStoreInfo cancelPrimaryStorageForMaintenance(
|
public PrimaryDataStoreInfo cancelPrimaryStorageForMaintenance(
|
||||||
@ -1457,29 +1456,16 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||||||
}
|
}
|
||||||
|
|
||||||
DataStoreProvider provider = dataStoreProviderMgr
|
DataStoreProvider provider = dataStoreProviderMgr
|
||||||
.getDataStoreProviderById(primaryStorage.getStorageProviderId());
|
.getDataStoreProvider(primaryStorage.getStorageProviderName());
|
||||||
DataStoreLifeCycle lifeCycle = provider.getLifeCycle();
|
DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
|
||||||
lifeCycle.cancelMaintain(primaryStorage.getId());
|
DataStore store = dataStoreMgr.getDataStore(
|
||||||
|
primaryStorage.getId(), DataStoreRole.Primary);
|
||||||
|
lifeCycle.cancelMaintain(store);
|
||||||
|
|
||||||
return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore(
|
return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore(
|
||||||
primaryStorage.getId(), DataStoreRole.Primary);
|
primaryStorage.getId(), DataStoreRole.Primary);
|
||||||
}
|
}
|
||||||
|
|
||||||
private boolean sendToVmResidesOn(StoragePoolVO PrimaryDataStoreVO,
|
|
||||||
Command cmd) {
|
|
||||||
ClusterVO cluster = _clusterDao.findById(PrimaryDataStoreVO
|
|
||||||
.getClusterId());
|
|
||||||
if ((cluster.getHypervisorType() == HypervisorType.KVM || cluster
|
|
||||||
.getHypervisorType() == HypervisorType.VMware)
|
|
||||||
&& ((cmd instanceof ManageSnapshotCommand) || (cmd instanceof BackupSnapshotCommand))) {
|
|
||||||
return true;
|
|
||||||
} else {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
protected class StorageGarbageCollector implements Runnable {
|
protected class StorageGarbageCollector implements Runnable {
|
||||||
|
|
||||||
public StorageGarbageCollector() {
|
public StorageGarbageCollector() {
|
||||||
@ -1845,9 +1831,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||||||
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public synchronized boolean registerHostListener(String providerUuid,
|
public synchronized boolean registerHostListener(String providerName,
|
||||||
HypervisorHostListener listener) {
|
HypervisorHostListener listener) {
|
||||||
hostListeners.put(providerUuid, listener);
|
hostListeners.put(providerName, listener);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
26
server/src/com/cloud/storage/StoragePoolAutomation.java
Normal file
26
server/src/com/cloud/storage/StoragePoolAutomation.java
Normal file
@ -0,0 +1,26 @@
|
|||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
package com.cloud.storage;
|
||||||
|
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||||
|
|
||||||
|
public interface StoragePoolAutomation {
|
||||||
|
public boolean maintain(DataStore store);
|
||||||
|
public boolean cancelMaintain(DataStore store);
|
||||||
|
}
|
||||||
456
server/src/com/cloud/storage/StoragePoolAutomationImpl.java
Normal file
456
server/src/com/cloud/storage/StoragePoolAutomationImpl.java
Normal file
@ -0,0 +1,456 @@
|
|||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
package com.cloud.storage;
|
||||||
|
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
import javax.inject.Inject;
|
||||||
|
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
|
||||||
|
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
|
||||||
|
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||||
|
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||||
|
import org.apache.log4j.Logger;
|
||||||
|
import org.springframework.stereotype.Component;
|
||||||
|
|
||||||
|
import com.cloud.agent.AgentManager;
|
||||||
|
import com.cloud.agent.api.Answer;
|
||||||
|
import com.cloud.agent.api.ModifyStoragePoolCommand;
|
||||||
|
import com.cloud.alert.AlertManager;
|
||||||
|
import com.cloud.host.HostVO;
|
||||||
|
import com.cloud.host.Status;
|
||||||
|
import com.cloud.resource.ResourceManager;
|
||||||
|
import com.cloud.server.ManagementServer;
|
||||||
|
import com.cloud.storage.dao.StoragePoolHostDao;
|
||||||
|
import com.cloud.storage.dao.StoragePoolWorkDao;
|
||||||
|
import com.cloud.storage.dao.VolumeDao;
|
||||||
|
import com.cloud.user.Account;
|
||||||
|
import com.cloud.user.User;
|
||||||
|
import com.cloud.user.UserContext;
|
||||||
|
import com.cloud.user.dao.UserDao;
|
||||||
|
import com.cloud.utils.exception.CloudRuntimeException;
|
||||||
|
import com.cloud.utils.exception.ExecutionException;
|
||||||
|
import com.cloud.vm.ConsoleProxyVO;
|
||||||
|
import com.cloud.vm.DomainRouterVO;
|
||||||
|
import com.cloud.vm.SecondaryStorageVmVO;
|
||||||
|
import com.cloud.vm.UserVmVO;
|
||||||
|
import com.cloud.vm.VMInstanceVO;
|
||||||
|
import com.cloud.vm.VirtualMachine;
|
||||||
|
import com.cloud.vm.VirtualMachine.State;
|
||||||
|
import com.cloud.vm.VirtualMachineManager;
|
||||||
|
import com.cloud.vm.dao.ConsoleProxyDao;
|
||||||
|
import com.cloud.vm.dao.DomainRouterDao;
|
||||||
|
import com.cloud.vm.dao.SecondaryStorageVmDao;
|
||||||
|
import com.cloud.vm.dao.UserVmDao;
|
||||||
|
import com.cloud.vm.dao.VMInstanceDao;
|
||||||
|
|
||||||
|
@Component
|
||||||
|
public class StoragePoolAutomationImpl implements StoragePoolAutomation {
|
||||||
|
private static final Logger s_logger = Logger
|
||||||
|
.getLogger(StoragePoolAutomationImpl.class);
|
||||||
|
@Inject
|
||||||
|
protected VirtualMachineManager vmMgr;
|
||||||
|
@Inject
|
||||||
|
protected SecondaryStorageVmDao _secStrgDao;
|
||||||
|
@Inject
|
||||||
|
UserVmDao userVmDao;
|
||||||
|
@Inject
|
||||||
|
protected UserDao _userDao;
|
||||||
|
@Inject
|
||||||
|
protected DomainRouterDao _domrDao;
|
||||||
|
@Inject
|
||||||
|
protected StoragePoolHostDao _storagePoolHostDao;
|
||||||
|
@Inject
|
||||||
|
protected AlertManager _alertMgr;
|
||||||
|
@Inject
|
||||||
|
protected ConsoleProxyDao _consoleProxyDao;
|
||||||
|
|
||||||
|
@Inject
|
||||||
|
protected StoragePoolWorkDao _storagePoolWorkDao;
|
||||||
|
@Inject
|
||||||
|
PrimaryDataStoreDao primaryDataStoreDao;
|
||||||
|
@Inject
|
||||||
|
DataStoreManager dataStoreMgr;
|
||||||
|
@Inject
|
||||||
|
protected ResourceManager _resourceMgr;
|
||||||
|
@Inject
|
||||||
|
AgentManager agentMgr;
|
||||||
|
@Inject
|
||||||
|
VolumeDao volumeDao;
|
||||||
|
@Inject
|
||||||
|
VMInstanceDao vmDao;
|
||||||
|
@Inject
|
||||||
|
ManagementServer server;
|
||||||
|
@Inject DataStoreProviderManager providerMgr;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean maintain(DataStore store) {
|
||||||
|
Long userId = UserContext.current().getCallerUserId();
|
||||||
|
User user = _userDao.findById(userId);
|
||||||
|
Account account = UserContext.current().getCaller();
|
||||||
|
StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId());
|
||||||
|
try {
|
||||||
|
StoragePool storagePool = (StoragePool) store;
|
||||||
|
List<HostVO> hosts = _resourceMgr.listHostsInClusterByStatus(
|
||||||
|
pool.getClusterId(), Status.Up);
|
||||||
|
if (hosts == null || hosts.size() == 0) {
|
||||||
|
pool.setStatus(StoragePoolStatus.Maintenance);
|
||||||
|
primaryDataStoreDao.update(pool.getId(), pool);
|
||||||
|
return true;
|
||||||
|
} else {
|
||||||
|
// set the pool state to prepare for maintenance
|
||||||
|
pool.setStatus(StoragePoolStatus.PrepareForMaintenance);
|
||||||
|
primaryDataStoreDao.update(pool.getId(), pool);
|
||||||
|
}
|
||||||
|
// remove heartbeat
|
||||||
|
for (HostVO host : hosts) {
|
||||||
|
ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(
|
||||||
|
false, storagePool);
|
||||||
|
final Answer answer = agentMgr.easySend(host.getId(), cmd);
|
||||||
|
if (answer == null || !answer.getResult()) {
|
||||||
|
if (s_logger.isDebugEnabled()) {
|
||||||
|
s_logger.debug("ModifyStoragePool false failed due to "
|
||||||
|
+ ((answer == null) ? "answer null" : answer
|
||||||
|
.getDetails()));
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if (s_logger.isDebugEnabled()) {
|
||||||
|
s_logger.debug("ModifyStoragePool false secceeded");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// check to see if other ps exist
|
||||||
|
// if they do, then we can migrate over the system vms to them
|
||||||
|
// if they dont, then just stop all vms on this one
|
||||||
|
List<StoragePoolVO> upPools = primaryDataStoreDao
|
||||||
|
.listByStatusInZone(pool.getDataCenterId(),
|
||||||
|
StoragePoolStatus.Up);
|
||||||
|
boolean restart = true;
|
||||||
|
if (upPools == null || upPools.size() == 0) {
|
||||||
|
restart = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. Get a list of all the ROOT volumes within this storage pool
|
||||||
|
List<VolumeVO> allVolumes = this.volumeDao.findByPoolId(pool
|
||||||
|
.getId());
|
||||||
|
|
||||||
|
// 3. Enqueue to the work queue
|
||||||
|
for (VolumeVO volume : allVolumes) {
|
||||||
|
VMInstanceVO vmInstance = vmDao
|
||||||
|
.findById(volume.getInstanceId());
|
||||||
|
|
||||||
|
if (vmInstance == null) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// enqueue sp work
|
||||||
|
if (vmInstance.getState().equals(State.Running)
|
||||||
|
|| vmInstance.getState().equals(State.Starting)
|
||||||
|
|| vmInstance.getState().equals(State.Stopping)) {
|
||||||
|
|
||||||
|
try {
|
||||||
|
StoragePoolWorkVO work = new StoragePoolWorkVO(
|
||||||
|
vmInstance.getId(), pool.getId(), false, false,
|
||||||
|
server.getId());
|
||||||
|
_storagePoolWorkDao.persist(work);
|
||||||
|
} catch (Exception e) {
|
||||||
|
if (s_logger.isDebugEnabled()) {
|
||||||
|
s_logger.debug("Work record already exists, re-using by re-setting values");
|
||||||
|
}
|
||||||
|
StoragePoolWorkVO work = _storagePoolWorkDao
|
||||||
|
.findByPoolIdAndVmId(pool.getId(),
|
||||||
|
vmInstance.getId());
|
||||||
|
work.setStartedAfterMaintenance(false);
|
||||||
|
work.setStoppedForMaintenance(false);
|
||||||
|
work.setManagementServerId(server.getId());
|
||||||
|
_storagePoolWorkDao.update(work.getId(), work);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 4. Process the queue
|
||||||
|
List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao
|
||||||
|
.listPendingWorkForPrepareForMaintenanceByPoolId(pool
|
||||||
|
.getId());
|
||||||
|
|
||||||
|
for (StoragePoolWorkVO work : pendingWork) {
|
||||||
|
// shut down the running vms
|
||||||
|
VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
|
||||||
|
|
||||||
|
if (vmInstance == null) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// if the instance is of type consoleproxy, call the console
|
||||||
|
// proxy
|
||||||
|
if (vmInstance.getType().equals(
|
||||||
|
VirtualMachine.Type.ConsoleProxy)) {
|
||||||
|
// call the consoleproxymanager
|
||||||
|
ConsoleProxyVO consoleProxy = _consoleProxyDao
|
||||||
|
.findById(vmInstance.getId());
|
||||||
|
if (!vmMgr.advanceStop(consoleProxy, true, user, account)) {
|
||||||
|
String errorMsg = "There was an error stopping the console proxy id: "
|
||||||
|
+ vmInstance.getId()
|
||||||
|
+ " ,cannot enable storage maintenance";
|
||||||
|
s_logger.warn(errorMsg);
|
||||||
|
throw new CloudRuntimeException(errorMsg);
|
||||||
|
} else {
|
||||||
|
// update work status
|
||||||
|
work.setStoppedForMaintenance(true);
|
||||||
|
_storagePoolWorkDao.update(work.getId(), work);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (restart) {
|
||||||
|
|
||||||
|
if (this.vmMgr.advanceStart(consoleProxy, null, user,
|
||||||
|
account) == null) {
|
||||||
|
String errorMsg = "There was an error starting the console proxy id: "
|
||||||
|
+ vmInstance.getId()
|
||||||
|
+ " on another storage pool, cannot enable primary storage maintenance";
|
||||||
|
s_logger.warn(errorMsg);
|
||||||
|
} else {
|
||||||
|
// update work status
|
||||||
|
work.setStartedAfterMaintenance(true);
|
||||||
|
_storagePoolWorkDao.update(work.getId(), work);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// if the instance is of type uservm, call the user vm manager
|
||||||
|
if (vmInstance.getType().equals(VirtualMachine.Type.User)) {
|
||||||
|
UserVmVO userVm = userVmDao.findById(vmInstance.getId());
|
||||||
|
if (!vmMgr.advanceStop(userVm, true, user, account)) {
|
||||||
|
String errorMsg = "There was an error stopping the user vm id: "
|
||||||
|
+ vmInstance.getId()
|
||||||
|
+ " ,cannot enable storage maintenance";
|
||||||
|
s_logger.warn(errorMsg);
|
||||||
|
throw new CloudRuntimeException(errorMsg);
|
||||||
|
} else {
|
||||||
|
// update work status
|
||||||
|
work.setStoppedForMaintenance(true);
|
||||||
|
_storagePoolWorkDao.update(work.getId(), work);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// if the instance is of type secondary storage vm, call the
|
||||||
|
// secondary storage vm manager
|
||||||
|
if (vmInstance.getType().equals(
|
||||||
|
VirtualMachine.Type.SecondaryStorageVm)) {
|
||||||
|
SecondaryStorageVmVO secStrgVm = _secStrgDao
|
||||||
|
.findById(vmInstance.getId());
|
||||||
|
if (!vmMgr.advanceStop(secStrgVm, true, user, account)) {
|
||||||
|
String errorMsg = "There was an error stopping the ssvm id: "
|
||||||
|
+ vmInstance.getId()
|
||||||
|
+ " ,cannot enable storage maintenance";
|
||||||
|
s_logger.warn(errorMsg);
|
||||||
|
throw new CloudRuntimeException(errorMsg);
|
||||||
|
} else {
|
||||||
|
// update work status
|
||||||
|
work.setStoppedForMaintenance(true);
|
||||||
|
_storagePoolWorkDao.update(work.getId(), work);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (restart) {
|
||||||
|
if (vmMgr.advanceStart(secStrgVm, null, user, account) == null) {
|
||||||
|
String errorMsg = "There was an error starting the ssvm id: "
|
||||||
|
+ vmInstance.getId()
|
||||||
|
+ " on another storage pool, cannot enable primary storage maintenance";
|
||||||
|
s_logger.warn(errorMsg);
|
||||||
|
} else {
|
||||||
|
// update work status
|
||||||
|
work.setStartedAfterMaintenance(true);
|
||||||
|
_storagePoolWorkDao.update(work.getId(), work);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// if the instance is of type domain router vm, call the network
|
||||||
|
// manager
|
||||||
|
if (vmInstance.getType().equals(
|
||||||
|
VirtualMachine.Type.DomainRouter)) {
|
||||||
|
DomainRouterVO domR = _domrDao.findById(vmInstance.getId());
|
||||||
|
if (!vmMgr.advanceStop(domR, true, user, account)) {
|
||||||
|
String errorMsg = "There was an error stopping the domain router id: "
|
||||||
|
+ vmInstance.getId()
|
||||||
|
+ " ,cannot enable primary storage maintenance";
|
||||||
|
s_logger.warn(errorMsg);
|
||||||
|
throw new CloudRuntimeException(errorMsg);
|
||||||
|
} else {
|
||||||
|
// update work status
|
||||||
|
work.setStoppedForMaintenance(true);
|
||||||
|
_storagePoolWorkDao.update(work.getId(), work);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (restart) {
|
||||||
|
if (vmMgr.advanceStart(domR, null, user, account) == null) {
|
||||||
|
String errorMsg = "There was an error starting the domain router id: "
|
||||||
|
+ vmInstance.getId()
|
||||||
|
+ " on another storage pool, cannot enable primary storage maintenance";
|
||||||
|
s_logger.warn(errorMsg);
|
||||||
|
} else {
|
||||||
|
// update work status
|
||||||
|
work.setStartedAfterMaintenance(true);
|
||||||
|
_storagePoolWorkDao.update(work.getId(), work);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
} catch(Exception e) {
|
||||||
|
s_logger.error(
|
||||||
|
"Exception in enabling primary storage maintenance:", e);
|
||||||
|
pool.setStatus(StoragePoolStatus.ErrorInMaintenance);
|
||||||
|
this.primaryDataStoreDao.update(pool.getId(), pool);
|
||||||
|
throw new CloudRuntimeException(e.getMessage());
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean cancelMaintain(DataStore store) {
|
||||||
|
// Change the storage state back to up
|
||||||
|
Long userId = UserContext.current().getCallerUserId();
|
||||||
|
User user = _userDao.findById(userId);
|
||||||
|
Account account = UserContext.current().getCaller();
|
||||||
|
StoragePoolVO poolVO = this.primaryDataStoreDao
|
||||||
|
.findById(store.getId());
|
||||||
|
StoragePool pool = (StoragePool)store;
|
||||||
|
|
||||||
|
List<HostVO> hosts = _resourceMgr.listHostsInClusterByStatus(
|
||||||
|
pool.getClusterId(), Status.Up);
|
||||||
|
if (hosts == null || hosts.size() == 0) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
// add heartbeat
|
||||||
|
for (HostVO host : hosts) {
|
||||||
|
ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand(
|
||||||
|
true, pool);
|
||||||
|
final Answer answer = agentMgr.easySend(host.getId(), msPoolCmd);
|
||||||
|
if (answer == null || !answer.getResult()) {
|
||||||
|
if (s_logger.isDebugEnabled()) {
|
||||||
|
s_logger.debug("ModifyStoragePool add failed due to "
|
||||||
|
+ ((answer == null) ? "answer null" : answer
|
||||||
|
.getDetails()));
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if (s_logger.isDebugEnabled()) {
|
||||||
|
s_logger.debug("ModifyStoragePool add secceeded");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. Get a list of pending work for this queue
|
||||||
|
List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao
|
||||||
|
.listPendingWorkForCancelMaintenanceByPoolId(poolVO.getId());
|
||||||
|
|
||||||
|
// 3. work through the queue
|
||||||
|
for (StoragePoolWorkVO work : pendingWork) {
|
||||||
|
try {
|
||||||
|
VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
|
||||||
|
|
||||||
|
if (vmInstance == null) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// if the instance is of type consoleproxy, call the console
|
||||||
|
// proxy
|
||||||
|
if (vmInstance.getType().equals(
|
||||||
|
VirtualMachine.Type.ConsoleProxy)) {
|
||||||
|
|
||||||
|
ConsoleProxyVO consoleProxy = _consoleProxyDao
|
||||||
|
.findById(vmInstance.getId());
|
||||||
|
if (vmMgr.advanceStart(consoleProxy, null, user, account) == null) {
|
||||||
|
String msg = "There was an error starting the console proxy id: "
|
||||||
|
+ vmInstance.getId()
|
||||||
|
+ " on storage pool, cannot complete primary storage maintenance";
|
||||||
|
s_logger.warn(msg);
|
||||||
|
throw new ExecutionException(msg);
|
||||||
|
} else {
|
||||||
|
// update work queue
|
||||||
|
work.setStartedAfterMaintenance(true);
|
||||||
|
_storagePoolWorkDao.update(work.getId(), work);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// if the instance is of type ssvm, call the ssvm manager
|
||||||
|
if (vmInstance.getType().equals(
|
||||||
|
VirtualMachine.Type.SecondaryStorageVm)) {
|
||||||
|
SecondaryStorageVmVO ssVm = _secStrgDao.findById(vmInstance
|
||||||
|
.getId());
|
||||||
|
if (vmMgr.advanceStart(ssVm, null, user, account) == null) {
|
||||||
|
String msg = "There was an error starting the ssvm id: "
|
||||||
|
+ vmInstance.getId()
|
||||||
|
+ " on storage pool, cannot complete primary storage maintenance";
|
||||||
|
s_logger.warn(msg);
|
||||||
|
throw new ExecutionException(msg);
|
||||||
|
} else {
|
||||||
|
// update work queue
|
||||||
|
work.setStartedAfterMaintenance(true);
|
||||||
|
_storagePoolWorkDao.update(work.getId(), work);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// if the instance is of type ssvm, call the ssvm manager
|
||||||
|
if (vmInstance.getType().equals(
|
||||||
|
VirtualMachine.Type.DomainRouter)) {
|
||||||
|
DomainRouterVO domR = _domrDao.findById(vmInstance.getId());
|
||||||
|
if (vmMgr.advanceStart(domR, null, user, account) == null) {
|
||||||
|
String msg = "There was an error starting the domR id: "
|
||||||
|
+ vmInstance.getId()
|
||||||
|
+ " on storage pool, cannot complete primary storage maintenance";
|
||||||
|
s_logger.warn(msg);
|
||||||
|
throw new ExecutionException(msg);
|
||||||
|
} else {
|
||||||
|
// update work queue
|
||||||
|
work.setStartedAfterMaintenance(true);
|
||||||
|
_storagePoolWorkDao.update(work.getId(), work);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// if the instance is of type user vm, call the user vm manager
|
||||||
|
if (vmInstance.getType().equals(VirtualMachine.Type.User)) {
|
||||||
|
UserVmVO userVm = userVmDao.findById(vmInstance.getId());
|
||||||
|
|
||||||
|
if (vmMgr.advanceStart(userVm, null, user, account) == null) {
|
||||||
|
|
||||||
|
String msg = "There was an error starting the user vm id: "
|
||||||
|
+ vmInstance.getId()
|
||||||
|
+ " on storage pool, cannot complete primary storage maintenance";
|
||||||
|
s_logger.warn(msg);
|
||||||
|
throw new ExecutionException(msg);
|
||||||
|
} else {
|
||||||
|
// update work queue
|
||||||
|
work.setStartedAfterMaintenance(true);
|
||||||
|
_storagePoolWorkDao.update(work.getId(), work);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
} catch (Exception e) {
|
||||||
|
s_logger.debug("Failed start vm", e);
|
||||||
|
throw new CloudRuntimeException(e.toString());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
@ -29,6 +29,7 @@ DELETE FROM `cloud`.`configuration` where name='vmware.percluster.host.max';
|
|||||||
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'AgentManager', 'xen.nics.max', '7', 'Maximum allowed nics for Vms created on Xen');
|
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'AgentManager', 'xen.nics.max', '7', 'Maximum allowed nics for Vms created on Xen');
|
||||||
ALTER TABLE `cloud`.`load_balancer_vm_map` ADD state VARCHAR(40) NULL COMMENT 'service status updated by LB healthcheck manager';
|
ALTER TABLE `cloud`.`load_balancer_vm_map` ADD state VARCHAR(40) NULL COMMENT 'service status updated by LB healthcheck manager';
|
||||||
|
|
||||||
|
alter table storage_pool change storage_provider_id storage_provider_name varchar(255);
|
||||||
alter table template_host_ref add state varchar(255);
|
alter table template_host_ref add state varchar(255);
|
||||||
alter table template_host_ref add update_count bigint unsigned;
|
alter table template_host_ref add update_count bigint unsigned;
|
||||||
alter table template_host_ref add updated datetime;
|
alter table template_host_ref add updated datetime;
|
||||||
@ -70,13 +71,12 @@ CREATE TABLE `cloud`.`data_store_provider` (
|
|||||||
CREATE TABLE `cloud`.`image_data_store` (
|
CREATE TABLE `cloud`.`image_data_store` (
|
||||||
`id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
`id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||||
`name` varchar(255) NOT NULL COMMENT 'name of data store',
|
`name` varchar(255) NOT NULL COMMENT 'name of data store',
|
||||||
`image_provider_id` bigint unsigned NOT NULL COMMENT 'id of image_data_store_provider',
|
`image_provider_name` varchar(255) NOT NULL COMMENT 'id of image_data_store_provider',
|
||||||
`protocol` varchar(255) NOT NULL COMMENT 'protocol of data store',
|
`protocol` varchar(255) NOT NULL COMMENT 'protocol of data store',
|
||||||
`data_center_id` bigint unsigned COMMENT 'datacenter id of data store',
|
`data_center_id` bigint unsigned COMMENT 'datacenter id of data store',
|
||||||
`scope` varchar(255) COMMENT 'scope of data store',
|
`scope` varchar(255) COMMENT 'scope of data store',
|
||||||
`uuid` varchar(255) COMMENT 'uuid of data store',
|
`uuid` varchar(255) COMMENT 'uuid of data store',
|
||||||
PRIMARY KEY(`id`),
|
PRIMARY KEY(`id`)
|
||||||
CONSTRAINT `fk_tags__image_data_store_provider_id` FOREIGN KEY(`image_provider_id`) REFERENCES `data_store_provider`(`id`)
|
|
||||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||||
|
|
||||||
ALTER TABLE `cloud`.`vm_template` ADD COLUMN `image_data_store_id` bigint unsigned;
|
ALTER TABLE `cloud`.`vm_template` ADD COLUMN `image_data_store_id` bigint unsigned;
|
||||||
|
|||||||
@ -95,6 +95,7 @@ known_categories = {
|
|||||||
'InstanceGroup': 'VM Group',
|
'InstanceGroup': 'VM Group',
|
||||||
'StorageMaintenance': 'Storage Pool',
|
'StorageMaintenance': 'Storage Pool',
|
||||||
'StoragePool': 'Storage Pool',
|
'StoragePool': 'Storage Pool',
|
||||||
|
'StorageProvider': 'Storage Pool',
|
||||||
'SecurityGroup': 'Security Group',
|
'SecurityGroup': 'Security Group',
|
||||||
'SSH': 'SSH',
|
'SSH': 'SSH',
|
||||||
'register': 'Registration',
|
'register': 'Registration',
|
||||||
|
|||||||
Loading…
x
Reference in New Issue
Block a user