Merge branch 'master' into ui-multiple-nics

This commit is contained in:
Brian Federle 2013-03-08 11:37:33 -08:00
commit a9387dfc58
126 changed files with 2662 additions and 1986 deletions

View File

@ -30,4 +30,5 @@ public interface Alert extends Identity, InternalIdentity {
Date getCreatedDate();
Date getLastSent();
Date getResolved();
boolean getArchived();
}

View File

@ -40,4 +40,5 @@ public interface Event extends ControlledEntity, Identity, InternalIdentity {
String getLevel();
long getStartId();
String getParameters();
boolean getArchived();
}

View File

@ -138,7 +138,6 @@ public interface Network extends ControlledEntity, StateObject<Network.State>, I
// NiciraNvp is not an "External" provider, otherwise we get in trouble with NetworkServiceImpl.providersConfiguredForExternalNetworking
public static final Provider NiciraNvp = new Provider("NiciraNvp", false);
public static final Provider MidokuraMidonet = new Provider("MidokuraMidonet", true);
public static final Provider VPCNetscaler = new Provider("VPCNetscaler", true);
private String name;
private boolean isExternal;

View File

@ -29,6 +29,8 @@ import org.apache.cloudstack.api.command.admin.domain.UpdateDomainCmd;
import org.apache.cloudstack.api.command.admin.host.ListHostsCmd;
import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd;
import org.apache.cloudstack.api.command.admin.pod.ListPodsByCmd;
import org.apache.cloudstack.api.command.admin.resource.ArchiveAlertsCmd;
import org.apache.cloudstack.api.command.admin.resource.DeleteAlertsCmd;
import org.apache.cloudstack.api.command.admin.resource.ListAlertsCmd;
import org.apache.cloudstack.api.command.admin.resource.ListCapacityCmd;
import org.apache.cloudstack.api.command.admin.resource.UploadCustomCertificateCmd;
@ -40,12 +42,12 @@ import org.apache.cloudstack.api.command.admin.systemvm.UpgradeSystemVMCmd;
import org.apache.cloudstack.api.command.admin.vlan.ListVlanIpRangesCmd;
import org.apache.cloudstack.api.command.user.address.ListPublicIpAddressesCmd;
import org.apache.cloudstack.api.command.user.config.ListCapabilitiesCmd;
import org.apache.cloudstack.api.command.user.event.ArchiveEventsCmd;
import org.apache.cloudstack.api.command.user.event.DeleteEventsCmd;
import org.apache.cloudstack.api.command.user.guest.ListGuestOsCategoriesCmd;
import org.apache.cloudstack.api.command.user.guest.ListGuestOsCmd;
import org.apache.cloudstack.api.command.user.iso.ListIsosCmd;
import org.apache.cloudstack.api.command.user.iso.UpdateIsoCmd;
import org.apache.cloudstack.api.command.user.offering.ListDiskOfferingsCmd;
import org.apache.cloudstack.api.command.user.offering.ListServiceOfferingsCmd;
import org.apache.cloudstack.api.command.user.ssh.CreateSSHKeyPairCmd;
import org.apache.cloudstack.api.command.user.ssh.DeleteSSHKeyPairCmd;
import org.apache.cloudstack.api.command.user.ssh.ListSSHKeyPairsCmd;
@ -55,12 +57,10 @@ import org.apache.cloudstack.api.command.user.template.UpdateTemplateCmd;
import org.apache.cloudstack.api.command.user.vm.GetVMPasswordCmd;
import org.apache.cloudstack.api.command.user.vmgroup.UpdateVMGroupCmd;
import org.apache.cloudstack.api.command.user.volume.ExtractVolumeCmd;
import org.apache.cloudstack.api.command.user.zone.ListZonesByCmd;
import com.cloud.alert.Alert;
import com.cloud.capacity.Capacity;
import com.cloud.configuration.Configuration;
import com.cloud.dc.DataCenter;
import com.cloud.dc.Pod;
import com.cloud.dc.Vlan;
import com.cloud.domain.Domain;
@ -72,8 +72,6 @@ import com.cloud.host.Host;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.hypervisor.HypervisorCapabilities;
import com.cloud.network.IpAddress;
import com.cloud.offering.DiskOffering;
import com.cloud.offering.ServiceOffering;
import com.cloud.org.Cluster;
import com.cloud.storage.GuestOS;
import com.cloud.storage.GuestOsCategory;
@ -194,6 +192,34 @@ public interface ManagementService {
*/
Pair<List<? extends Alert>, Integer> searchForAlerts(ListAlertsCmd cmd);
/**
* Archive alerts
* @param cmd
* @return True on success. False otherwise.
*/
boolean archiveAlerts(ArchiveAlertsCmd cmd);
/**
* Delete alerts
* @param cmd
* @return True on success. False otherwise.
*/
boolean deleteAlerts(DeleteAlertsCmd cmd);
/**
* Archive events
* @param cmd
* @return True on success. False otherwise.
*/
boolean archiveEvents(ArchiveEventsCmd cmd);
/**
* Delete events
* @param cmd
* @return True on success. False otherwise.
*/
boolean deleteEvents(DeleteEventsCmd cmd);
/**
* list all the capacity rows in capacity operations table
*

View File

@ -459,6 +459,7 @@ public class ApiConstants {
public static final String UCS_BLADE_DN = "bladedn";
public static final String UCS_BLADE_ID = "bladeid";
public static final String VM_GUEST_IP = "vmguestip";
public static final String OLDER_THAN = "olderthan";
public enum HostDetails {
all, capacity, events, stats, min;

View File

@ -78,6 +78,7 @@ public class CreateAccountCmd extends BaseCmd {
@Parameter(name = ApiConstants.ACCOUNT_DETAILS, type = CommandType.MAP, description = "details for account used to store specific parameters")
private Map<String, String> details;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -149,7 +150,8 @@ public class CreateAccountCmd extends BaseCmd {
@Override
public void execute(){
UserContext.current().setEventDetails("Account Name: "+getAccountName()+", Domain Id:"+getDomainId());
UserAccount userAccount = _accountService.createUserAccount(getUsername(), getPassword(), getFirstName(), getLastName(), getEmail(), getTimeZone(), getAccountName(), getAccountType(), getDomainId(), getNetworkDomain(), getDetails());
UserAccount userAccount = _accountService.createUserAccount(getUsername(), getPassword(), getFirstName(), getLastName(), getEmail(), getTimeZone(), getAccountName(), getAccountType(),
getDomainId(), getNetworkDomain(), getDetails());
if (userAccount != null) {
AccountResponse response = _responseGenerator.createUserAccountResponse(userAccount);
response.setResponseName(getCommandName());

View File

@ -49,7 +49,7 @@ public class DeleteAccountCmd extends BaseAsyncCmd {
private Long id;
@Inject RegionService _regionService;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -96,7 +96,7 @@ public class DeleteAccountCmd extends BaseAsyncCmd {
@Override
public void execute(){
UserContext.current().setEventDetails("Account Id: "+getId());
boolean result = _regionService.deleteUserAccount(this);
if (result) {
SuccessResponse response = new SuccessResponse(getCommandName());

View File

@ -59,7 +59,7 @@ public class DisableAccountCmd extends BaseAsyncCmd {
private Boolean lockRequested;
@Inject RegionService _regionService;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////

View File

@ -51,7 +51,7 @@ public class EnableAccountCmd extends BaseCmd {
private Long domainId;
@Inject RegionService _regionService;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -68,7 +68,6 @@ public class EnableAccountCmd extends BaseCmd {
return domainId;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@ -64,7 +64,7 @@ public class UpdateAccountCmd extends BaseCmd{
private Map details;
@Inject RegionService _regionService;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -99,7 +99,6 @@ public class UpdateAccountCmd extends BaseCmd{
return params;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@ -51,7 +51,7 @@ public class DeleteDomainCmd extends BaseAsyncCmd {
private Boolean cleanup;
@Inject RegionService _regionService;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////

View File

@ -53,7 +53,7 @@ public class UpdateDomainCmd extends BaseCmd {
private String networkDomain;
@Inject RegionService _regionService;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -88,7 +88,7 @@ public class UpdateDomainCmd extends BaseCmd {
public void execute(){
UserContext.current().setEventDetails("Domain Id: "+getId());
Domain domain = _regionService.updateDomain(this);
if (domain != null) {
DomainResponse response = _responseGenerator.createDomainResponse(domain);
response.setResponseName(getCommandName());

View File

@ -46,7 +46,7 @@ public class DeleteUserCmd extends BaseCmd {
private Long id;
@Inject RegionService _regionService;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////

View File

@ -50,7 +50,7 @@ public class DisableUserCmd extends BaseAsyncCmd {
private Long id;
@Inject RegionService _regionService;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -93,7 +93,7 @@ public class DisableUserCmd extends BaseAsyncCmd {
public void execute(){
UserContext.current().setEventDetails("UserId: "+getId());
UserAccount user = _regionService.disableUser(this);
if (user != null){
UserResponse response = _responseGenerator.createUserResponse(user);
response.setResponseName(getCommandName());

View File

@ -47,7 +47,7 @@ public class EnableUserCmd extends BaseCmd {
private Long id;
@Inject RegionService _regionService;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -79,7 +79,7 @@ public class EnableUserCmd extends BaseCmd {
public void execute(){
UserContext.current().setEventDetails("UserId: "+getId());
UserAccount user = _regionService.enableUser(this);
if (user != null){
UserResponse response = _responseGenerator.createUserResponse(user);
response.setResponseName(getCommandName());

View File

@ -72,7 +72,7 @@ public class UpdateUserCmd extends BaseCmd {
private String username;
@Inject RegionService _regionService;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -136,7 +136,7 @@ public class UpdateUserCmd extends BaseCmd {
public void execute(){
UserContext.current().setEventDetails("UserId: "+getId());
UserAccount user = _regionService.updateUser(this);
if (user != null){
UserResponse response = _responseGenerator.createUserResponse(user);
response.setResponseName(getCommandName());

View File

@ -38,41 +38,41 @@ import com.cloud.user.UserAccount;
public interface RegionService {
/**
* Adds a Region to the local Region
* @param id
* @param name
* @param endPoint
* @param apiKey
* @param secretKey
* @return Return added Region object
*/
public Region addRegion(int id, String name, String endPoint, String apiKey, String secretKey);
/**
* Update details of the Region with specified Id
* @param id
* @param name
* @param endPoint
* @param apiKey
* @param secretKey
* @return Return updated Region object
*/
public Region updateRegion(int id, String name, String endPoint, String apiKey, String secretKey);
/**
* @param id
* @return True if region is successfully removed
*/
public boolean removeRegion(int id);
/** List all Regions or by Id/Name
* @param id
* @param name
* @return List of Regions
*/
public List<? extends Region> listRegions(ListRegionsCmd cmd);
/**
* Adds a Region to the local Region
* @param id
* @param name
* @param endPoint
* @param apiKey
* @param secretKey
* @return Return added Region object
*/
public Region addRegion(int id, String name, String endPoint, String apiKey, String secretKey);
/**
* Update details of the Region with specified Id
* @param id
* @param name
* @param endPoint
* @param apiKey
* @param secretKey
* @return Return updated Region object
*/
public Region updateRegion(int id, String name, String endPoint, String apiKey, String secretKey);
/**
* @param id
* @return True if region is successfully removed
*/
public boolean removeRegion(int id);
/** List all Regions or by Id/Name
* @param id
* @param name
* @return List of Regions
*/
public List<? extends Region> listRegions(ListRegionsCmd cmd);
/**
* Deletes a user by userId
* isPopagate flag is set to true if sent from peer Region
@ -80,8 +80,8 @@ public interface RegionService {
*
* @return true if delete was successful, false otherwise
*/
boolean deleteUserAccount(DeleteAccountCmd cmd);
boolean deleteUserAccount(DeleteAccountCmd cmd);
/**
* Updates an account
* isPopagate falg is set to true if sent from peer Region
@ -91,22 +91,22 @@ public interface RegionService {
* @return updated account object
*/
Account updateAccount(UpdateAccountCmd cmd);
/**
* Disables an account by accountName and domainId or accountId
* @param cmd
* @return
* @throws ResourceUnavailableException
* @throws ConcurrentOperationException
*/
Account disableAccount(DisableAccountCmd cmd) throws ConcurrentOperationException, ResourceUnavailableException;
/**
* Enables an account by accountId
* @param cmd
* @return
*/
Account enableAccount(EnableAccountCmd cmd);
/**
* Disables an account by accountName and domainId or accountId
* @param cmd
* @return
* @throws ResourceUnavailableException
* @throws ConcurrentOperationException
*/
Account disableAccount(DisableAccountCmd cmd) throws ConcurrentOperationException, ResourceUnavailableException;
/**
* Enables an account by accountId
* @param cmd
* @return
*/
Account enableAccount(EnableAccountCmd cmd);
/**
* Deletes user by Id
@ -114,7 +114,7 @@ public interface RegionService {
* @return true if delete was successful, false otherwise
*/
boolean deleteUser(DeleteUserCmd deleteUserCmd);
/**
* update an existing domain
*
@ -122,36 +122,36 @@ public interface RegionService {
* - the command containing domainId and new domainName
* @return Domain object if the command succeeded
*/
public Domain updateDomain(UpdateDomainCmd updateDomainCmd);
/**
* Deletes domain
* @param cmd
* @return true if delete was successful, false otherwise
*/
public boolean deleteDomain(DeleteDomainCmd cmd);
public Domain updateDomain(UpdateDomainCmd updateDomainCmd);
/**
* Deletes domain
* @param cmd
* @return true if delete was successful, false otherwise
*/
public boolean deleteDomain(DeleteDomainCmd cmd);
/**
* Update a user by userId
*
* @param userId
* @return UserAccount object
*/
public UserAccount updateUser(UpdateUserCmd updateUserCmd);
public UserAccount updateUser(UpdateUserCmd updateUserCmd);
/**
* Disables a user by userId
*
* @param cmd
* @return UserAccount object
*/
public UserAccount disableUser(DisableUserCmd cmd);
public UserAccount disableUser(DisableUserCmd cmd);
/**
* Enables a user
*
* @param cmd
* @return UserAccount object
*/
public UserAccount enableUser(EnableUserCmd cmd);
public UserAccount enableUser(EnableUserCmd cmd);
}

View File

@ -218,9 +218,13 @@ listZones=15
#### events commands
listEvents=15
listEventTypes=15
archiveEvents=15
deleteEvents=15
#### alerts commands
listAlerts=3
archiveAlerts=1
deleteAlerts=1
#### system capacity commands
listCapacity=3

View File

@ -859,35 +859,29 @@ public class VirtualRoutingResource implements Manager {
}
public void assignVpcIpToRouter(final String routerIP, final boolean add, final String pubIP,
final String nicname, final String gateway, final String netmask, final String subnet) throws Exception {
try {
String args = "";
final String nicname, final String gateway, final String netmask, final String subnet) throws InternalErrorException {
String args = "";
if (add) {
args += " -A ";
} else {
args += " -D ";
}
if (add) {
args += " -A ";
} else {
args += " -D ";
}
args += " -l ";
args += pubIP;
args += " -c ";
args += nicname;
args += " -g ";
args += gateway;
args += " -m ";
args += netmask;
args += " -n ";
args += subnet;
args += " -l ";
args += pubIP;
args += " -c ";
args += nicname;
args += " -g ";
args += gateway;
args += " -m ";
args += netmask;
args += " -n ";
args += subnet;
String result = routerProxy("vpc_ipassoc.sh", routerIP, args);
if (result != null) {
throw new InternalErrorException("KVM plugin \"vpc_ipassoc\" failed:"+result);
}
} catch (Exception e) {
String msg = "Unable to assign public IP address due to " + e.toString();
s_logger.warn(msg, e);
throw new Exception(msg);
String result = routerProxy("vpc_ipassoc.sh", routerIP, args);
if (result != null) {
throw new InternalErrorException("KVM plugin \"vpc_ipassoc\" failed:"+result);
}
}

View File

@ -28,9 +28,7 @@ import javax.persistence.Table;
import javax.persistence.Temporal;
import javax.persistence.TemporalType;
import org.apache.cloudstack.api.Identity;
import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.api.InternalIdentity;
@Entity
@Table(name="alert")
@ -68,16 +66,19 @@ public class AlertVO implements Alert {
@Temporal(TemporalType.TIMESTAMP)
@Column(name="resolved", updatable=true, nullable=true)
private Date resolved;
@Column(name="uuid")
private String uuid;
@Column(name="archived")
private boolean archived;
public AlertVO() {
this.uuid = UUID.randomUUID().toString();
this.uuid = UUID.randomUUID().toString();
}
public AlertVO(Long id) {
this.id = id;
this.uuid = UUID.randomUUID().toString();
this.uuid = UUID.randomUUID().toString();
}
@Override
@ -103,12 +104,12 @@ public class AlertVO implements Alert {
}
public Long getClusterId() {
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
@Override
return clusterId;
}
public void setClusterId(Long clusterId) {
this.clusterId = clusterId;
}
@Override
public Long getPodId() {
return podId;
}
@ -164,10 +165,19 @@ public class AlertVO implements Alert {
@Override
public String getUuid() {
return this.uuid;
return this.uuid;
}
public void setUuid(String uuid) {
this.uuid = uuid;
this.uuid = uuid;
}
@Override
public boolean getArchived() {
return archived;
}
public void setArchived(Boolean archived) {
this.archived = archived;
}
}

View File

@ -29,74 +29,75 @@ import javax.persistence.Id;
import javax.persistence.Table;
import javax.persistence.Transient;
import org.apache.cloudstack.api.Identity;
import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.api.InternalIdentity;
@Entity
@Table(name="event")
public class EventVO implements Event {
@Id
@Id
@GeneratedValue(strategy=GenerationType.IDENTITY)
@Column(name="id")
private long id = -1;
private long id = -1;
@Column(name="type")
private String type;
@Enumerated(value=EnumType.STRING)
@Column(name="state")
@Column(name="type")
private String type;
@Enumerated(value=EnumType.STRING)
@Column(name="state")
private State state = State.Completed;
@Column(name="description", length=1024)
private String description;
@Column(name="description", length=1024)
private String description;
@Column(name=GenericDao.CREATED_COLUMN)
private Date createDate;
@Column(name=GenericDao.CREATED_COLUMN)
private Date createDate;
@Column(name="user_id")
private long userId;
@Column(name="account_id")
private long accountId;
@Column(name="account_id")
private long accountId;
@Column(name="domain_id")
private long domainId;
@Column(name="level")
private String level = LEVEL_INFO;
@Column(name="start_id")
@Column(name="level")
private String level = LEVEL_INFO;
@Column(name="start_id")
private long startId;
@Column(name="parameters", length=1024)
private String parameters;
@Column(name="uuid")
private String uuid;
@Column(name="parameters", length=1024)
private String parameters;
@Transient
private int totalSize;
@Column(name="uuid")
private String uuid;
public static final String LEVEL_INFO = "INFO";
public static final String LEVEL_WARN = "WARN";
public static final String LEVEL_ERROR = "ERROR";
public EventVO() {
this.uuid = UUID.randomUUID().toString();
}
public long getId() {
return id;
}
@Override
@Column(name="archived")
private boolean archived;
@Transient
private int totalSize;
public static final String LEVEL_INFO = "INFO";
public static final String LEVEL_WARN = "WARN";
public static final String LEVEL_ERROR = "ERROR";
public EventVO() {
this.uuid = UUID.randomUUID().toString();
}
public long getId() {
return id;
}
@Override
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
@Override
return type;
}
public void setType(String type) {
this.type = type;
}
@Override
public State getState() {
return state;
}
@ -105,27 +106,27 @@ public class EventVO implements Event {
this.state = state;
}
@Override
@Override
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
@Override
return description;
}
public void setDescription(String description) {
this.description = description;
}
@Override
public Date getCreateDate() {
return createDate;
}
public void setCreatedDate(Date createdDate) {
createDate = createdDate;
}
@Override
return createDate;
}
public void setCreatedDate(Date createdDate) {
createDate = createdDate;
}
@Override
public long getUserId() {
return userId;
}
public void setUserId(long userId) {
this.userId = userId;
}
return userId;
}
public void setUserId(long userId) {
this.userId = userId;
}
@Override
public long getAccountId() {
return accountId;
@ -165,21 +166,29 @@ public class EventVO implements Event {
this.startId = startId;
}
@Override
@Override
public String getParameters() {
return parameters;
}
public void setParameters(String parameters) {
this.parameters = parameters;
}
@Override
public String getUuid() {
return this.uuid;
}
public void setUuid(String uuid) {
this.uuid = uuid;
}
return parameters;
}
public void setParameters(String parameters) {
this.parameters = parameters;
}
@Override
public String getUuid() {
return this.uuid;
}
public void setUuid(String uuid) {
this.uuid = uuid;
}
@Override
public boolean getArchived() {
return archived;
}
public void setArchived(Boolean archived) {
this.archived = archived;
}
}

View File

@ -30,4 +30,9 @@ public interface EventDao extends GenericDao<EventVO, Long> {
public List<EventVO> listOlderEvents(Date oldTime);
EventVO findCompletedEvent(long startId);
public List<EventVO> listToArchiveOrDeleteEvents(List<Long> ids, String type, Date olderThan, Long accountId);
public void archiveEvents(List<EventVO> events);
}

View File

@ -30,24 +30,34 @@ import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.SearchCriteria.Op;
@Component
@Local(value={EventDao.class})
public class EventDaoImpl extends GenericDaoBase<EventVO, Long> implements EventDao {
public static final Logger s_logger = Logger.getLogger(EventDaoImpl.class.getName());
protected final SearchBuilder<EventVO> CompletedEventSearch;
public EventDaoImpl () {
CompletedEventSearch = createSearchBuilder();
CompletedEventSearch.and("state",CompletedEventSearch.entity().getState(),SearchCriteria.Op.EQ);
CompletedEventSearch.and("startId", CompletedEventSearch.entity().getStartId(), SearchCriteria.Op.EQ);
CompletedEventSearch.done();
}
public static final Logger s_logger = Logger.getLogger(EventDaoImpl.class.getName());
protected final SearchBuilder<EventVO> CompletedEventSearch;
protected final SearchBuilder<EventVO> ToArchiveOrDeleteEventSearch;
@Override
public List<EventVO> searchAllEvents(SearchCriteria<EventVO> sc, Filter filter) {
return listIncludingRemovedBy(sc, filter);
}
public EventDaoImpl () {
CompletedEventSearch = createSearchBuilder();
CompletedEventSearch.and("state",CompletedEventSearch.entity().getState(),SearchCriteria.Op.EQ);
CompletedEventSearch.and("startId", CompletedEventSearch.entity().getStartId(), SearchCriteria.Op.EQ);
CompletedEventSearch.done();
ToArchiveOrDeleteEventSearch = createSearchBuilder();
ToArchiveOrDeleteEventSearch.and("id", ToArchiveOrDeleteEventSearch.entity().getId(), Op.IN);
ToArchiveOrDeleteEventSearch.and("type", ToArchiveOrDeleteEventSearch.entity().getType(), Op.EQ);
ToArchiveOrDeleteEventSearch.and("accountId", ToArchiveOrDeleteEventSearch.entity().getAccountId(), Op.EQ);
ToArchiveOrDeleteEventSearch.and("createDateL", ToArchiveOrDeleteEventSearch.entity().getCreateDate(), Op.LT);
ToArchiveOrDeleteEventSearch.done();
}
@Override
public List<EventVO> searchAllEvents(SearchCriteria<EventVO> sc, Filter filter) {
return listIncludingRemovedBy(sc, filter);
}
@Override
public List<EventVO> listOlderEvents(Date oldTime) {
@ -55,9 +65,8 @@ public class EventDaoImpl extends GenericDaoBase<EventVO, Long> implements Event
SearchCriteria<EventVO> sc = createSearchCriteria();
sc.addAnd("createDate", SearchCriteria.Op.LT, oldTime);
return listIncludingRemovedBy(sc, null);
}
@Override
public EventVO findCompletedEvent(long startId) {
SearchCriteria<EventVO> sc = CompletedEventSearch.create();
@ -65,4 +74,36 @@ public class EventDaoImpl extends GenericDaoBase<EventVO, Long> implements Event
sc.setParameters("startId", startId);
return findOneIncludingRemovedBy(sc);
}
@Override
public List<EventVO> listToArchiveOrDeleteEvents(List<Long> ids, String type, Date olderThan, Long accountId) {
SearchCriteria<EventVO> sc = ToArchiveOrDeleteEventSearch.create();
if (ids != null) {
sc.setParameters("id", ids.toArray(new Object[ids.size()]));
}
if (type != null) {
sc.setParameters("type", type);
}
if (olderThan != null) {
sc.setParameters("createDateL", olderThan);
}
if (accountId != null) {
sc.setParameters("accountId", accountId);
}
return search(sc, null);
}
@Override
public void archiveEvents(List<EventVO> events) {
Transaction txn = Transaction.currentTxn();
txn.start();
for (EventVO event : events) {
event = lockRow(event.getId(), true);
event.setArchived(true);
update(event.getId(), event);
txn.commit();
}
txn.close();
}
}

View File

@ -68,32 +68,32 @@ public class AccountVO implements Account {
@Column(name="region_id")
private int regionId;
public AccountVO() {
this.uuid = UUID.randomUUID().toString();
this.uuid = UUID.randomUUID().toString();
}
public AccountVO(long id) {
this.id = id;
this.uuid = UUID.randomUUID().toString();
this.uuid = UUID.randomUUID().toString();
}
public AccountVO(String accountName, long domainId, String networkDomain, short type, int regionId) {
public AccountVO(String accountName, long domainId, String networkDomain, short type, String uuid, int regionId) {
this.accountName = accountName;
this.domainId = domainId;
this.networkDomain = networkDomain;
this.type = type;
this.state = State.enabled;
this.uuid = UUID.randomUUID().toString();
this.uuid = uuid;
this.regionId = regionId;
}
public void setNeedsCleanup(boolean value) {
needsCleanup = value;
needsCleanup = value;
}
public boolean getNeedsCleanup() {
return needsCleanup;
return needsCleanup;
}
@Override
@ -102,10 +102,10 @@ public class AccountVO implements Account {
}
public void setId(long id) {
this.id = id;
}
this.id = id;
}
@Override
@Override
public String getAccountName() {
return accountName;
}
@ -134,11 +134,11 @@ public class AccountVO implements Account {
@Override
public Long getDefaultZoneId() {
return defaultZoneId;
return defaultZoneId;
}
public void setDefaultZoneId(Long defaultZoneId) {
this.defaultZoneId = defaultZoneId;
this.defaultZoneId = defaultZoneId;
}
@Override
@ -176,14 +176,18 @@ public class AccountVO implements Account {
@Override
public String getUuid() {
return this.uuid;
return this.uuid;
}
public void setUuid(String uuid) {
this.uuid = uuid;
this.uuid = uuid;
}
public int getRegionId() {
return regionId;
}
public int getRegionId() {
return regionId;
}
public void setRegionId(int regionId) {
this.regionId = regionId;
}
}

View File

@ -95,7 +95,7 @@ public class UserVO implements User, Identity, InternalIdentity {
@Column(name="region_id")
private int regionId;
public UserVO() {
this.uuid = UUID.randomUUID().toString();
}
@ -104,8 +104,8 @@ public class UserVO implements User, Identity, InternalIdentity {
this.id = id;
this.uuid = UUID.randomUUID().toString();
}
public UserVO(long accountId, String username, String password, String firstName, String lastName, String email, String timezone, int regionId) {
public UserVO(long accountId, String username, String password, String firstName, String lastName, String email, String timezone, String uuid, int regionId) {
this.accountId = accountId;
this.username = username;
this.password = password;
@ -114,10 +114,10 @@ public class UserVO implements User, Identity, InternalIdentity {
this.email = email;
this.timezone = timezone;
this.state = State.enabled;
this.uuid = UUID.randomUUID().toString();
this.regionId = regionId;
this.uuid = uuid;
this.regionId = regionId;
}
@Override
public long getId() {
return id;
@ -265,9 +265,12 @@ public class UserVO implements User, Identity, InternalIdentity {
public void setUuid(String uuid) {
this.uuid = uuid;
}
public int getRegionId() {
return regionId;
}
public int getRegionId() {
return regionId;
}
public void setRegionId(int regionId) {
this.regionId = regionId;
}
}

View File

@ -23,3 +23,4 @@
/usr/bin/cloud-ssh
/var/log/cloudstack/agent
/usr/share/cloudstack-agent/lib/*
/usr/share/cloudstack-agent/plugins

2
debian/control vendored
View File

@ -22,7 +22,7 @@ Description: CloudStack server library
Package: cloudstack-agent
Architecture: all
Depends: openjdk-6-jre | openjdk-7-jre, cloudstack-common (= ${source:Version}), lsb-base (>= 3.2), libcommons-daemon-java, libjna-java, openssh-client, libvirt0, sysvinit-utils, chkconfig, qemu-kvm, libvirt-bin, uuid-runtime, rsync, grep, iproute, ebtables, vlan, wget, jsvc
Depends: openjdk-6-jre | openjdk-7-jre, cloudstack-common (= ${source:Version}), lsb-base (>= 3.2), libcommons-daemon-java, libjna-java, openssh-client, libvirt0, sysvinit-utils, chkconfig, qemu-kvm, libvirt-bin, uuid-runtime, rsync, grep, iproute, perl-base, perl-modules, ebtables, vlan, wget, jsvc
Conflicts: cloud-agent, cloud-agent-libs, cloud-agent-deps, cloud-agent-scripts
Description: CloudStack agent
The CloudStack agent is in charge of managing shared computing resources in

134
debian/rules vendored
View File

@ -14,6 +14,7 @@ DEBVERS := $(shell dpkg-parsechangelog | sed -n -e 's/^Version: //p')
VERSION := $(shell echo '$(DEBVERS)' | sed -e 's/^[[:digit:]]*://' -e 's/[~-].*//')
PACKAGE = $(shell dh_listpackages|head -n 1|cut -d '-' -f 1)
SYSCONFDIR = "/etc"
DESTDIR = "debian/tmp"
# Uncomment this to turn on verbose mode.
export DH_VERBOSE=1
@ -50,83 +51,86 @@ install:
dh_prep -s
# Common packages
mkdir -p debian/tmp$(SYSCONFDIR)/$(PACKAGE)
mkdir -p debian/tmp$(SYSCONFDIR)/init.d
mkdir -p debian/tmp/var/cache/$(PACKAGE)
mkdir -p debian/tmp/var/log/$(PACKAGE)
mkdir -p debian/tmp/var/lib/$(PACKAGE)
mkdir -p debian/tmp/usr/bin
mkdir -p $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)
mkdir -p $(DESTDIR)/$(SYSCONFDIR)/init.d
mkdir -p $(DESTDIR)/var/cache/$(PACKAGE)
mkdir -p $(DESTDIR)/var/log/$(PACKAGE)
mkdir -p $(DESTDIR)/var/lib/$(PACKAGE)
mkdir -p $(DESTDIR)/usr/bin
mkdir -p $(DESTDIR)/usr/share
# cloudstack-agent
mkdir debian/tmp$(SYSCONFDIR)/$(PACKAGE)/agent
mkdir debian/tmp/var/log/$(PACKAGE)/agent
install -D agent/target/cloud-agent-4.2.0-SNAPSHOT.jar debian/tmp/usr/share/$(PACKAGE)-agent/lib/$(PACKAGE)-agent.jar
install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-4.2.0-SNAPSHOT.jar debian/tmp/usr/share/$(PACKAGE)-agent/lib/
install -D plugins/hypervisors/kvm/target/dependencies/* debian/tmp/usr/share/$(PACKAGE)-agent/lib/
install -D packaging/debian/init/cloud-agent debian/tmp$(SYSCONFDIR)/init.d/$(PACKAGE)-agent
install -D agent/bindir/cloud-setup-agent.in debian/tmp/usr/bin/cloud-setup-agent
install -D agent/bindir/cloud-ssh.in debian/tmp/usr/bin/cloud-ssh
install -D agent/target/transformed/* debian/tmp$(SYSCONFDIR)/$(PACKAGE)/agent
mkdir $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/agent
mkdir $(DESTDIR)/var/log/$(PACKAGE)/agent
mkdir $(DESTDIR)/usr/share/$(PACKAGE)-agent
mkdir $(DESTDIR)/usr/share/$(PACKAGE)-agent/plugins
install -D agent/target/cloud-agent-4.2.0-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/$(PACKAGE)-agent.jar
install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-4.2.0-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/
install -D plugins/hypervisors/kvm/target/dependencies/* $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/
install -D packaging/debian/init/cloud-agent $(DESTDIR)/$(SYSCONFDIR)/init.d/$(PACKAGE)-agent
install -D agent/bindir/cloud-setup-agent.in $(DESTDIR)/usr/bin/cloud-setup-agent
install -D agent/bindir/cloud-ssh.in $(DESTDIR)/usr/bin/cloud-ssh
install -D agent/target/transformed/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/agent
# cloudstack-management
mkdir debian/tmp$(SYSCONFDIR)/$(PACKAGE)/server
mkdir debian/tmp$(SYSCONFDIR)/$(PACKAGE)/management
mkdir -p debian/tmp/usr/share/$(PACKAGE)-management
mkdir -p debian/tmp/usr/share/$(PACKAGE)-management/webapps/client
mkdir debian/tmp/usr/share/$(PACKAGE)-management/setup
mkdir debian/tmp/var/log/$(PACKAGE)/management
mkdir debian/tmp/var/cache/$(PACKAGE)/management
mkdir debian/tmp/var/cache/$(PACKAGE)/management/work
mkdir debian/tmp/var/cache/$(PACKAGE)/management/temp
mkdir debian/tmp/var/log/$(PACKAGE)/ipallocator
mkdir debian/tmp/var/lib/$(PACKAGE)/management
mkdir debian/tmp/var/lib/$(PACKAGE)/mnt
cp -r client/target/utilities/scripts/db/* debian/tmp/usr/share/$(PACKAGE)-management/setup/
cp -r client/target/cloud-client-ui-4.2.0-SNAPSHOT/* debian/tmp/usr/share/$(PACKAGE)-management/webapps/client/
cp server/target/conf/* debian/tmp$(SYSCONFDIR)/$(PACKAGE)/server/
cp client/target/conf/* debian/tmp$(SYSCONFDIR)/$(PACKAGE)/management/
ln -s tomcat6-nonssl.conf debian/tmp$(SYSCONFDIR)/$(PACKAGE)/management/tomcat6.conf
mkdir -p debian/tmp$(SYSCONFDIR)/$(PACKAGE)/management/Catalina/localhost/client
install -D packaging/debian/init/cloud-management debian/tmp$(SYSCONFDIR)/init.d/$(PACKAGE)-management
install -D client/bindir/cloud-update-xenserver-licenses.in debian/tmp/usr/bin/cloud-update-xenserver-licenses
install -D server/target/cloud-server-4.2.0-SNAPSHOT.jar debian/tmp/usr/share/$(PACKAGE)-management/lib/$(PACKAGE)-server.jar
ln -s /usr/share/tomcat6/bin debian/tmp/usr/share/$(PACKAGE)-management/bin
ln -s ../../..$(SYSCONFDIR)/$(PACKAGE)/management debian/tmp/usr/share/$(PACKAGE)-management/conf
ln -s /usr/share/tomcat6/lib debian/tmp/usr/share/$(PACKAGE)-management/lib
ln -s ../../../var/log/$(PACKAGE)/management debian/tmp/usr/share/$(PACKAGE)-management/logs
ln -s ../../../var/cache/$(PACKAGE)/management/temp debian/tmp/usr/share/$(PACKAGE)-management/temp
ln -s ../../../var/cache/$(PACKAGE)/management/work debian/tmp/usr/share/$(PACKAGE)-management/work
mkdir $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/server
mkdir $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management
mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management
mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management/webapps/client
mkdir $(DESTDIR)/usr/share/$(PACKAGE)-management/setup
mkdir $(DESTDIR)/var/log/$(PACKAGE)/management
mkdir $(DESTDIR)/var/cache/$(PACKAGE)/management
mkdir $(DESTDIR)/var/cache/$(PACKAGE)/management/work
mkdir $(DESTDIR)/var/cache/$(PACKAGE)/management/temp
mkdir $(DESTDIR)/var/log/$(PACKAGE)/ipallocator
mkdir $(DESTDIR)/var/lib/$(PACKAGE)/management
mkdir $(DESTDIR)/var/lib/$(PACKAGE)/mnt
cp -r client/target/utilities/scripts/db/* $(DESTDIR)/usr/share/$(PACKAGE)-management/setup/
cp -r client/target/cloud-client-ui-4.2.0-SNAPSHOT/* $(DESTDIR)/usr/share/$(PACKAGE)-management/webapps/client/
cp server/target/conf/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/server/
cp client/target/conf/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/
ln -s tomcat6-nonssl.conf $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/tomcat6.conf
mkdir -p $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/Catalina/localhost/client
install -D packaging/debian/init/cloud-management $(DESTDIR)/$(SYSCONFDIR)/init.d/$(PACKAGE)-management
install -D client/bindir/cloud-update-xenserver-licenses.in $(DESTDIR)/usr/bin/cloud-update-xenserver-licenses
install -D server/target/cloud-server-4.2.0-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-management/lib/$(PACKAGE)-server.jar
ln -s /usr/share/tomcat6/bin $(DESTDIR)/usr/share/$(PACKAGE)-management/bin
ln -s ../../..$(SYSCONFDIR)/$(PACKAGE)/management $(DESTDIR)/usr/share/$(PACKAGE)-management/conf
ln -s /usr/share/tomcat6/lib $(DESTDIR)/usr/share/$(PACKAGE)-management/lib
ln -s ../../../var/log/$(PACKAGE)/management $(DESTDIR)/usr/share/$(PACKAGE)-management/logs
ln -s ../../../var/cache/$(PACKAGE)/management/temp $(DESTDIR)/usr/share/$(PACKAGE)-management/temp
ln -s ../../../var/cache/$(PACKAGE)/management/work $(DESTDIR)/usr/share/$(PACKAGE)-management/work
# cloudstack-common
mkdir -p debian/tmp/usr/share/$(PACKAGE)-common
mkdir debian/tmp/usr/share/$(PACKAGE)-common/scripts
mkdir debian/tmp/usr/share/$(PACKAGE)-common/setup
cp -r scripts/installer debian/tmp/usr/share/$(PACKAGE)-common/scripts
cp -r scripts/network debian/tmp/usr/share/$(PACKAGE)-common/scripts
cp -r scripts/storage debian/tmp/usr/share/$(PACKAGE)-common/scripts
cp -r scripts/util debian/tmp/usr/share/$(PACKAGE)-common/scripts
cp -r scripts/vm debian/tmp/usr/share/$(PACKAGE)-common/scripts
install -D client/target/utilities/bin/cloud-migrate-databases debian/tmp/usr/bin
install -D client/target/utilities/bin/cloud-set-guest-password debian/tmp/usr/bin
install -D client/target/utilities/bin/cloud-set-guest-sshkey debian/tmp/usr/bin
install -D client/target/utilities/bin/cloud-setup-databases debian/tmp/usr/bin
install -D client/target/utilities/bin/cloud-setup-management debian/tmp/usr/bin
install -D services/console-proxy/server/dist/systemvm.iso debian/tmp/usr/share/$(PACKAGE)-common/vms/systemvm.iso
mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-common
mkdir $(DESTDIR)/usr/share/$(PACKAGE)-common/scripts
mkdir $(DESTDIR)/usr/share/$(PACKAGE)-common/setup
cp -r scripts/installer $(DESTDIR)/usr/share/$(PACKAGE)-common/scripts
cp -r scripts/network $(DESTDIR)/usr/share/$(PACKAGE)-common/scripts
cp -r scripts/storage $(DESTDIR)/usr/share/$(PACKAGE)-common/scripts
cp -r scripts/util $(DESTDIR)/usr/share/$(PACKAGE)-common/scripts
cp -r scripts/vm $(DESTDIR)/usr/share/$(PACKAGE)-common/scripts
install -D client/target/utilities/bin/cloud-migrate-databases $(DESTDIR)/usr/bin
install -D client/target/utilities/bin/cloud-set-guest-password $(DESTDIR)/usr/bin
install -D client/target/utilities/bin/cloud-set-guest-sshkey $(DESTDIR)/usr/bin
install -D client/target/utilities/bin/cloud-setup-databases $(DESTDIR)/usr/bin
install -D client/target/utilities/bin/cloud-setup-management $(DESTDIR)/usr/bin
install -D services/console-proxy/server/dist/systemvm.iso $(DESTDIR)/usr/share/$(PACKAGE)-common/vms/systemvm.iso
# cloudstack-python
mkdir -p debian/tmp/usr/lib/python2.7/dist-packages
cp -r python/lib/cloud* debian/tmp/usr/lib/python2.7/dist-packages
mkdir -p $(DESTDIR)/usr/lib/python2.7/dist-packages
cp -r python/lib/cloud* $(DESTDIR)/usr/lib/python2.7/dist-packages
# cloudstack-usage
mkdir debian/tmp$(SYSCONFDIR)/$(PACKAGE)/usage
mkdir debian/tmp/var/log/$(PACKAGE)/usage
install -D usage/target/cloud-usage-4.2.0-SNAPSHOT.jar debian/tmp/usr/share/$(PACKAGE)-usage/lib/$(PACKAGE)-usage.jar
cp usage/target/transformed/* debian/tmp$(SYSCONFDIR)/$(PACKAGE)/usage/
ln -s ../management/db.properties debian/tmp$(SYSCONFDIR)/$(PACKAGE)/usage/db.properties
install -D packaging/debian/init/cloud-usage debian/tmp$(SYSCONFDIR)/init.d/$(PACKAGE)-usage
mkdir $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/usage
mkdir $(DESTDIR)/var/log/$(PACKAGE)/usage
install -D usage/target/cloud-usage-4.2.0-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-usage/lib/$(PACKAGE)-usage.jar
cp usage/target/transformed/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/usage/
ln -s ../management/db.properties $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/usage/db.properties
install -D packaging/debian/init/cloud-usage $(DESTDIR)/$(SYSCONFDIR)/init.d/$(PACKAGE)-usage
# cloudstack-awsapi
mkdir debian/tmp/var/log/$(PACKAGE)/awsapi
mkdir $(DESTDIR)/var/log/$(PACKAGE)/awsapi
dh_installdirs
dh_install

View File

@ -11,9 +11,7 @@
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@ -22,6 +20,12 @@
under the License.
-->
<section id="advanced-zone-guest-ip-addresses">
<title>Advanced Zone Guest IP Addresses</title>
<para>When advanced networking is used, the administrator can create additional networks for use by the guests. These networks can span the zone and be available to all accounts, or they can be scoped to a single account, in which case only the named account may create guests that attach to these networks. The networks are defined by a VLAN ID, IP range, and gateway. The administrator may provision thousands of these networks if desired.</para>
<title>Advanced Zone Guest IP Addresses</title>
<para>When advanced networking is used, the administrator can create additional networks for use
by the guests. These networks can span the zone and be available to all accounts, or they can be
scoped to a single account, in which case only the named account may create guests that attach
to these networks. The networks are defined by a VLAN ID, IP range, and gateway. The
administrator may provision thousands of these networks if desired. Additionally, the
administrator can reserve a part of the IP address space for non-&PRODUCT; VMs and
servers.</para>
</section>

View File

@ -51,7 +51,7 @@
and packaging them into DEBs by issuing the following command.
</para>
<screen>
<command>$ dpkg-buildpackge -uc -us</command>
<command>$ dpkg-buildpackage -uc -us</command>
</screen>
<para>

View File

@ -5,78 +5,82 @@
]>
<!-- Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<section id="sect-source-buildrpm">
<title>Building RPMs</title>
<para>
While we have defined, and you have presumably already installed the
bootstrap prerequisites, there are a number of build time prerequisites
that need to be resolved. &PRODUCT; uses maven for dependency resolution.
You can resolve the buildtime depdencies for CloudStack by running the
following command:
<programlisting><prompt>$</prompt> <command>mvn</command> -P deps</programlisting>
</para>
<para>
Now that we have resolved the dependencies we can move on to building &PRODUCT;
and packaging them into RPMs by issuing the following command.
<programlisting><prompt>$</prompt> <command>./waf</command> rpm</programlisting>
</para>
<para>
Once this completes, you should find assembled RPMs in
<filename>artifacts/rpmbuild/RPMS/x86_64</filename>
</para>
<section id="sect-source-buildrpm-repo">
<title>Creating a yum repo</title>
<para>
While RPMs is an ideal packaging format - it's most easily consumed from
yum repositories over a network. We'll move into the directory with the
newly created RPMs by issuing the following command:
<programlisting><prompt>$</prompt> <command>cd</command> artifacts/rpmbuild/RPMS/x86_64</programlisting>
</para>
<para>
Next we'll issue a command to create the repository metadata by
issuing the following command:
<programlisting><prompt>$</prompt> <command>createrepo</command> ./</programlisting>
</para>
<para>
The files and directories within our current working directory can now
be uploaded to a web server and serve as a yum repository
</para>
<title>Building RPMs from Source</title>
<para>As mentioned previously in <xref linkend="sect-source-prereq" />, you will need to install several prerequisites before you can build packages for &PRODUCT;. Here we'll assume you're working with a 64-bit build of CentOS or Red Hat Enterprise Linux.</para>
<para><programlisting># yum groupinstall "Development Tools"</programlisting></para>
<para><programlisting># yum install java-1.6.0-openjdk-devel.x86_64 genisoimage mysql mysql-server ws-common-utils MySQL-python tomcat6 createrepo</programlisting></para>
<para>Next, you'll need to install build-time dependencies for CloudStack with
Maven. We're using Maven 3, so you'll want to
<ulink url="http://maven.apache.org/download.cgi">grab a Maven 3 tarball</ulink>
and uncompress it in your home directory (or whatever location you prefer):</para>
<para><programlisting>$ tar zxvf apache-maven-3.0.4-bin.tar.gz</programlisting></para>
<para><programlisting>$ export PATH=/usr/local/apache-maven-3.0.4//bin:$PATH</programlisting></para>
<para>Maven also needs to know where Java is, and expects the JAVA_HOME environment
variable to be set:</para>
<para><programlisting>$ export JAVA_HOME=/usr/lib/jvm/jre-1.6.0-openjdk.x86_64/</programlisting></para>
<para>Verify that Maven is installed correctly:</para>
<para><programlisting>$ mvn --version</programlisting></para>
<para>You probably want to ensure that your environment variables will survive a logout/reboot.
Be sure to update <filename>~/.bashrc</filename> with the PATH and JAVA_HOME variables.</para>
<para>Building RPMs for &PRODUCT; is fairly simple. Assuming you already have the source downloaded and have uncompressed the tarball into a local directory, you're going to be able to generate packages in just a few minutes.</para>
<note><title>Packaging has Changed</title>
<para>If you've created packages for &PRODUCT; previously, you should be aware that the process has changed considerably since the project has moved to using Apache Maven. Please be sure to follow the steps in this section closely.</para>
</note>
<section id="generating-rpms">
<title>Generating RPMS</title>
<para>Now that we have the prerequisites and source, you will cd to the <filename>packaging/centos63/</filename> directory.</para>
<para>Generating RPMs is done using the <filename>package.sh</filename> script:
<programlisting><prompt>$</prompt>./package.sh</programlisting>
</para>
<para>That will run for a bit and then place the finished packages in <filename>dist/rpmbuild/RPMS/x86_64/</filename>.</para>
<para>You should see seven RPMs in that directory: <filename>cloudstack-agent-4.1.0-SNAPSHOT.el6.x86_64.rpm</filename>, <filename>cloudstack-awsapi-4.1.0-SNAPSHOT.el6.x86_64.rpm</filename>, <filename>cloudstack-cli-4.1.0-SNAPSHOT.el6.x86_64.rpm</filename>, <filename>cloudstack-common-4.1.0-SNAPSHOT.el6.x86_64.rpm</filename>, <filename>cloudstack-docs-4.1.0-SNAPSHOT.el6.x86_64.rpm</filename>, <filename>cloudstack-management-4.1.0-SNAPSHOT.el6.x86_64.rpm</filename>, and <filename>cloudstack-usage-4.1.0-SNAPSHOT.el6.x86_64.rpm</filename>.</para>
<section id="sect-source-buildrpm-repo">
<title>Creating a yum repo</title>
<para>
While RPMs is a useful packaging format - it's most easily consumed from Yum repositories over a network. The next step is to create a Yum Repo with the finished packages:
<programlisting><prompt>$</prompt> mkdir -p ~/tmp/repo</programlisting>
<programlisting><prompt>$</prompt> cp dist/rpmbuild/RPMS/x86_64/*rpm ~/tmp/repo/</programlisting>
<programlisting><prompt>$</prompt> createrepo ~/tmp/repo</programlisting>
</para>
<para>
The files and directories within <filename>~/tmp/repo</filename> can now be uploaded to a web server and serve as a yum repository.
</para>
</section>
<section id="sect-source-buildrpm-repo2">
<title>Configuring your systems to use your new yum repository</title>
<para>
Now that your yum repository is populated with RPMs and metadata
we need to configure the machines that need to install &PRODUCT;.
Create a file named <filename>/etc/yum.repos.d/cloudstack.repo</filename> with this information:
<programlisting>
[apache-cloudstack]
name=Apache CloudStack
baseurl=http://<replaceable>webserver.tld/path/to/repo</replaceable>
enabled=1
gpgcheck=0
</programlisting>
</para>
<para> Completing this step will allow you to easily install &PRODUCT; on a number of machines across the network.
</para>
</section>
</section>
</section>
<section id="sect-source-buildrpm-repo2">
<title>Configuring your systems to use your new yum repository</title>
<para>
Now that your yum repository is populated with RPMs and metadata
we need to configure our machines that need to install CloudStack.
We will create a file at <filename>/etc/yum.repos.d/cloudstack.repo</filename>
with the following content:
<programlisting>
[apache-cloudstack]
name=Apache CloudStack
baseurl=http://<replaceable>webserver.tld/path/to/repo</replaceable>
enabled=1
gpgcheck=0
</programlisting>
</para>
<para>
Completing this step will allow you to easily install CloudStack on a number of
machines across the network.
</para>
</section>
</section>

View File

@ -29,35 +29,13 @@
<ulink url="http://incubator.apache.org/cloudstack/downloads.html">
Apache CloudStack project download page</ulink>.
</para>
<para>
You'll notice several links under the 'Latest release' section.
</para>
<orderedlist>
<listitem>
<para>
<ulink url="http://www.apache.org/dyn/closer.cgi/dist/incubator/cloudstack/releases/4.0.0-incubating/apache-cloudstack-4.0.0-incubating-src.tar.bz2">
apache-cloudstack-4.0.0-incubating-src.tar.bz2</ulink>
- This is the link to the release itself.
</para>
</listitem>
<listitem>
<para>
<ulink url="http://www.apache.org/dist/incubator/cloudstack/releases/4.0.0-incubating/apache-cloudstack-4.0.0-incubating-src.tar.bz2.asc">PGP</ulink>
- This is a detached cryptographic signature that can be used to help
verify the authenticity of the release.
</para>
</listitem>
<listitem>
<para>
<ulink url="http://www.apache.org/dist/incubator/cloudstack/releases/4.0.0-incubating/apache-cloudstack-4.0.0-incubating-src.tar.bz2.md5">MD5</ulink>
- An MD5 hash of the release to aid in verify the validity of the release download.
</para>
</listitem>
<listitem>
<para>
<ulink url="http://www.apache.org/dist/incubator/cloudstack/releases/4.0.0-incubating/apache-cloudstack-4.0.0-incubating-src.tar.bz2.sha">SHA512</ulink>
- A SHA512 hash of the release to aid in verify the validity of the release download.
</para>
</listitem>
</orderedlist>
<para>Prior releases are available via archive.apache.org at <ulink url="http://archive.apache.org/dist/incubator/cloudstack/releases/">http://archive.apache.org/dist/incubator/cloudstack/releases/</ulink>.
</para>
<para>You'll notice several links under the 'Latest release' section. A link to a file ending in <filename>tar.bz2</filename>, as well as a PGP/GPG signature, MD5, and SHA512 file.</para>
<itemizedlist>
<listitem><para>The <filename>tar.bz2</filename> file contains the Bzip2-compressed tarball with the source code.</para></listitem>
<listitem><para>The <filename>.asc</filename> file is a detached cryptographic signature that can be used to help verify the authenticity of the release.</para></listitem>
<listitem><para>The <filename>.md5</filename> file is an MD5 hash of the release to aid in verifying the validity of the release download.</para></listitem>
<listitem><para>The <filename>.sha</filename> file is a SHA512 hash of the release to aid in verifying the validity of the release download.</para></listitem>
</itemizedlist>
</section>

View File

@ -22,7 +22,7 @@
under the License.
-->
<section id="globally-configured-limit">
<section id="globally-configured-limits">
<title>Globally Configured Limits</title>
<para>In a zone, the guest virtual network has a 24 bit CIDR by default. This limits the guest virtual network to 254 running instances. It can be adjusted as needed, but this must be done before any instances are created in the zone. For example, 10.1.1.0/22 would provide for ~1000 addresses.</para>
<para>The following table lists limits set in the Global Configuration:</para>

View File

@ -23,7 +23,14 @@
-->
<section id="guest-traffic">
<title>Guest Traffic</title>
<para>A network can carry guest traffic only between VMs within one zone. Virtual machines in different zones cannot communicate with each other using their IP addresses; they must communicate with each other by routing through a public IP address.</para>
<para>A network can carry guest traffic only between VMs within one zone. Virtual machines in different zones cannot communicate with each other using their IP addresses; they must communicate with each other by routing through a public IP address.</para>
<para>This figure illustrates a typical guest traffic setup:</para>
<mediaobject>
<imageobject>
<imagedata fileref="./images/guest-traffic-setup.png" />
</imageobject>
<textobject><phrase>Depicts a guest traffic setup.</phrase></textobject>
</mediaobject>
<para>The Management Server automatically creates a virtual router for each network. A virtual router is a special virtual machine that runs on the hosts. Each virtual router has three network interfaces. Its eth0 interface serves as the gateway for the guest traffic and has the IP address of 10.1.1.1. Its eth1 interface is used by the system to configure the virtual router. Its eth2 interface is assigned a public IP address for public traffic.</para>
<para>The virtual router provides DHCP and will automatically assign an IP address for each guest VM within the IP range assigned for the network. The user can manually reconfigure guest VMs to assume different IP addresses.</para>
<para>Source NAT is automatically configured in the virtual router to forward outbound traffic for all guest VMs</para>

View File

@ -31,6 +31,7 @@
<xi:include href="advanced-zone-physical-network-configuration.xml"
xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="using-multiple-guest-networks.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="reserved-ip-addresses-non-csvms.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="security-groups.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="external-firewalls-and-load-balancers.xml"
xmlns:xi="http://www.w3.org/2001/XInclude"/>

View File

@ -0,0 +1,163 @@
<?xml version='1.0' encoding='utf-8' ?>
<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
%BOOK_ENTITIES;
]>
<!-- Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<section id="reserved-ip-addresses-non-csvms">
<title>IP Reservation in Isolated Guest Networks</title>
<para>In isolated guest networks, a part of the guest IP address space can be reserved for
non-&PRODUCT; VMs or physical servers. To do so, you configure a range of Reserved IP addresses
by specifying the CIDR when a guest network is in Implemented state. If your customers wish to
have non-&PRODUCT; controlled VMs or physical servers on the same network, they can share a part
of the IP address space that is primarily provided to the guest network.</para>
<para>In an Advanced zone, an IP address range or a CIDR is assigned to a network when the network
is defined. The &PRODUCT; virtual router acts as the DHCP server and uses CIDR for assigning IP
addresses to the guest VMs. If you decide to reserve IP ranges for non-&PRODUCT; purposes, you
can specify a part of the IP address range or the CIDR that should only be allocated by the DHCP
service of the virtual router to the guest VMs created in &PRODUCT;. The remaining IPs in that
network are called Reserved IP Range. When IP reservation is configured, the administrator can
add additional VMs or physical servers that are not part of &PRODUCT; to the same network and
assign them the Reserved IP addresses. &PRODUCT; guest VMs cannot acquire IPs from the Reserved
IP Range.</para>
<section id="ip-reserve-consider">
<title>IP Reservation Considerations</title>
<para>Consider the following before you reserve an IP range for non-&PRODUCT; machines:</para>
<itemizedlist>
<listitem>
<para>IP Reservation can be applied only when the network is in Implemented state.</para>
</listitem>
<listitem>
<para>No IP Reservation is done by default.</para>
</listitem>
<listitem>
<para>Guest VM CIDR you specify must be a subset of the network CIDR.</para>
</listitem>
<listitem>
<para>Specify a valid Guest VM CIDR. IP Reservation is applied only if no active IPs exist
outside the Guest VM CIDR. </para>
<para>You cannot apply IP Reservation if any VM is allotted an IP address that is
outside the Guest VM CIDR.</para>
</listitem>
<listitem>
<para>To reset an existing IP Reservation, apply IP reservation by specifying the value of
network CIDR in the CIDR field.</para>
<para>For example, the following table describes three scenarios of guest network
creation:</para>
<informaltable>
<tgroup cols="5" align="left" colsep="1" rowsep="1">
<colspec colnum="1" colname="c1" colwidth="33.0pt"/>
<colspec colnum="2" colname="c2" colwidth="84.75pt"/>
<colspec colnum="3" colname="c3" colwidth="97.5pt"/>
<colspec colnum="4" colname="c4" colwidth="129.0pt"/>
<colspec colnum="5" colname="c5" colwidth="336.0pt"/>
<thead>
<row>
<entry><para>Case</para></entry>
<entry><para> CIDR</para></entry>
<entry><para>Network CIDR</para></entry>
<entry><para>Reserved IP Range for Non-&PRODUCT; VMs</para></entry>
<entry><para>Description</para></entry>
</row>
</thead>
<tbody>
<row>
<entry><para>1</para></entry>
<entry><para>10.1.1.0/24</para></entry>
<entry><para> None</para></entry>
<entry><para>None</para></entry>
<entry><para>No IP Reservation.</para></entry>
</row>
<row>
<entry><para>2</para></entry>
<entry><para>10.1.1.0/26</para></entry>
<entry><para>10.1.1.0/24</para></entry>
<entry><para>10.1.1.64 to 10.1.1.254</para></entry>
<entry><para>IP Reservation configured by the UpdateNetwork API with
guestvmcidr=10.1.1.0/26 or enter 10.1.1.0/26 in the CIDR field in the
UI.</para></entry>
</row>
<row>
<entry><para>3</para></entry>
<entry><para>10.1.1.0/24</para></entry>
<entry><para>None</para></entry>
<entry><para>None</para></entry>
<entry><para>Removing IP Reservation by the UpdateNetwork API with
guestvmcidr=10.1.1.0/24 or enter 10.1.1.0/24 in the CIDR field in the UI.
</para></entry>
</row>
</tbody>
</tgroup>
</informaltable>
</listitem>
</itemizedlist>
</section>
<section id="ip-reserv-limition">
<title>Limitations</title>
<itemizedlist>
<listitem>
<para>IP Reservation is not supported if active IPs are found outside the Guest VM
CIDR.</para>
</listitem>
<listitem>
<para>Upgrading a network offering that causes a change in CIDR (such as upgrading from an
offering with no external devices to one with external devices) voids any existing
IP Reservation. Reconfigure IP Reservation in the newly re-implemented network.</para>
</listitem>
</itemizedlist>
</section>
<section id="best-practice-ipreserv">
<title>Best Practices</title>
<para>Apply IP Reservation to the guest network as soon as the network state changes to
Implemented. If you apply the reservation soon after the first guest VM is deployed, fewer
conflicts occur while applying the reservation.</para>
</section>
<section id="reserve-ip">
<title>Reserving an IP Range</title>
<orderedlist>
<listitem>
<para>Log in to the &PRODUCT; UI as an administrator or end user.</para>
</listitem>
<listitem>
<para>In the left navigation, choose Network.</para>
</listitem>
<listitem>
<para>Click the name of the network you want to modify.</para>
</listitem>
<listitem>
<para>In the Details tab, click Edit.<inlinemediaobject>
<imageobject>
<imagedata fileref="./images/edit-icon.png"/>
</imageobject>
<textobject>
<phrase>edit-icon.png: button to edit a network</phrase>
</textobject>
</inlinemediaobject></para>
<para>The CIDR field changes to editable one.</para>
</listitem>
<listitem>
<para>In CIDR, specify the Guest VM CIDR.</para>
</listitem>
<listitem>
<para>Click Apply.</para>
<para>Wait for the update to complete. The Network CIDR and the Reserved IP Range are
displayed on the Details page.</para>
</listitem>
</orderedlist>
</section>
</section>

View File

@ -30,12 +30,15 @@
for package management.
</para>
<para>
The minimum bootstrapped prerequisites for building &PRODUCT; includes
the following:
You will need, at a minimum, the following to compile &PRODUCT;:
<orderedlist>
<listitem><para>ant</para></listitem>
<listitem><para>maven (version 3)</para></listitem>
<listitem><para>Java (Java 6/OpenJDK 1.6) </para></listitem>
<listitem><para>Maven (version 3)</para></listitem>
<listitem><para>Java (OpenJDK 1.6 or Java 7/OpenJDK 1.7)</para></listitem>
<listitem><para>Apache Web Services Common Utilities (ws-commons-util)</para></listitem>
<listitem><para>MySQL</para></listitem>
<listitem><para>MySQLdb (provides Python database API)</para></listitem>
<listitem><para>Tomcat 6 (not 6.0.35)</para></listitem>
<listitem><para>genisoimage</para></listitem>
<listitem><para>rpmbuild or dpkg-dev</para></listitem>
</orderedlist>
</para>

View File

@ -24,20 +24,10 @@
<chapter id="sect-source">
<title>Building from Source</title>
<para>The official &PRODUCT; release is always in source code form. While there may
exist convenience binaries in various forms from a number of places, the
source is the canonical release. In this document we'll cover
acquiring the source release and building it into binary, deployable packages.
</para>
<para>
While building and deploying directly from source is certainly possible, the reality
of Infrastructure-as-a-Service cloud computing implies a need to deploy packages on
a potentially large number of systems, which RPMs and DEBs fill nicely.
</para>
<para>
Building and deploying directly from source is thus outside the scope of this
document, but is documented in the INSTALL.md file in the release.
</para>
<para>The official &PRODUCT; release is always in source code form. While you will likely be able to find "convenience binaries," the source is the canonical release. In this section, we'll cover acquiring the source release and building it so that you can deploy it using Maven or create Debian packages or RPMs.</para>
<para>Note that building and deploying directly from source is typically not the most efficient way to deploy an IaaS. However, we will cover that method as well as building RPMs or Debian packages for deploying &PRODUCT;.</para>
<para>The instructions here are likely version-specific. That is, the method for building from source for the 4.0.x series is different from the 4.1.x series.</para>
<para>If you are working with an unreleased version of &PRODUCT;, see the INSTALL.md file in the top-level directory of the release.</para>
<xi:include href="getting-release.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="verifying-source.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="source-prereqs.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />

View File

@ -36,7 +36,7 @@
file.
</para>
<para>
You next need to import those keys, which you can do by running the following command:
You next need to import those keys, which you can do by running:
<programlisting><prompt>#</prompt> <command>gpg</command> --import KEYS</programlisting>
</para>
</section>

View File

@ -29,4 +29,8 @@
<para>Users can create snapshots manually or by setting up automatic recurring snapshot policies. Users can also create disk volumes from snapshots, which may be attached to a VM like any other disk volume. Snapshots of both root disks and data disks are supported. However, &PRODUCT; does not currently support booting a VM from a recovered root disk. A disk recovered from snapshot of a root disk is treated as a regular data disk; the data on recovered disk can be accessed by attaching the disk to a VM.</para>
<para>A completed snapshot is copied from primary storage to secondary storage, where it is stored until deleted or purged by a newer snapshot.</para>
<xi:include href="snapshot-throttling.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="automatic-snapshot-creation-retention.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="incremental-snapshots-backup.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="volume-status.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="snapshot-restore.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
</section>

View File

@ -31,4 +31,5 @@
<xi:include href="usage-types.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="example-response-from-listUsageRecords.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="dates-in-usage-record.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="globally-configured-limits.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
</chapter>

View File

@ -62,7 +62,7 @@ msgstr ""
#. Tag: screen
#, no-c-format
msgid "\n"
"<command>$ dpkg-buildpackge -uc -us</command>\n"
"<command>$ dpkg-buildpackage -uc -us</command>\n"
""
msgstr ""

View File

@ -1,116 +1,112 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.db;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreStatus;
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.utils.db.GenericDao;
/**
* Data Access Object for storage_pool table
*/
// NOTE(review): this span is rendered diff residue -- the pre-merge and post-merge
// versions of this interface are interleaved with the +/- markers stripped. Several
// members are declared twice (sometimes with diverging signatures), and there is a
// stray closing brace partway down. Reconcile to a single version before compiling;
// as written, the duplicate same-signature declarations will not compile.
public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
/**
* @param datacenterId
* -- the id of the datacenter (availability zone)
*/
List<StoragePoolVO> listByDataCenterId(long datacenterId);
/**
* @param datacenterId
* -- the id of the datacenter (availability zone)
*/
// NOTE(review): pre-merge variant; the post-merge variant below adds a ScopeType parameter.
List<StoragePoolVO> listBy(long datacenterId, long podId, Long clusterId);
/**
* Set capacity of storage pool in bytes
*
* @param id
* pool id.
* @param capacity
* capacity in bytes
*/
// NOTE(review): duplicate declaration from the post-merge side of the diff.
/**
* @param datacenterId -- the id of the datacenter (availability zone)
*/
List<StoragePoolVO> listByDataCenterId(long datacenterId);
/**
* @param datacenterId -- the id of the datacenter (availability zone)
*/
List<StoragePoolVO> listBy(long datacenterId, long podId, Long clusterId, ScopeType scope);
/**
* Set capacity of storage pool in bytes
* @param id pool id.
* @param capacity capacity in bytes
*/
void updateCapacity(long id, long capacity);
/**
* Set available bytes of storage pool in bytes
*
* @param id
* pool id.
* @param available
* available capacity in bytes
*/
/**
* Set available bytes of storage pool in bytes
* @param id pool id.
* @param available available capacity in bytes
*/
void updateAvailable(long id, long available);
// Persists the pool together with its detail key/value pairs.
StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details);
/**
* Find pool by name.
*
* @param name
* name of pool.
* @return the single StoragePoolVO
* @param name name of pool.
* @return the single StoragePoolVO
*/
List<StoragePoolVO> findPoolByName(String name);
/**
* Find pools by the pod that matches the details.
*
* @param podId
* pod id to find the pools in.
* @param details
* details to match. All must match for the pool to be returned.
* @param podId pod id to find the pools in.
* @param details details to match. All must match for the pool to be returned.
* @return List of StoragePoolVO
*/
// NOTE(review): the first pair below is the pre-merge signature set
// (details without scope; tags with a Boolean shared flag); the second pair
// is the post-merge set (details with ScopeType; tags without the flag).
List<StoragePoolVO> findPoolsByDetails(long dcId, long podId, Long clusterId, Map<String, String> details);
List<StoragePoolVO> findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, Boolean shared);
List<StoragePoolVO> findPoolsByDetails(long dcId, long podId, Long clusterId, Map<String, String> details, ScopeType scope);
List<StoragePoolVO> findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags);
/**
* Find pool by UUID.
*
* @param uuid
* uuid of pool.
* @return the single StoragePoolVO
* @param uuid uuid of pool.
* @return the single StoragePoolVO
*/
StoragePoolVO findPoolByUUID(String uuid);
List<StoragePoolVO> listByStorageHost(String hostFqdnOrIp);
StoragePoolVO findPoolByHostPath(long dcId, Long podId, String host, String path, String uuid);
List<StoragePoolVO> listPoolByHostPath(String host, String path);
void updateDetails(long poolId, Map<String, String> details);
Map<String, String> getDetails(long poolId);
// NOTE(review): each of the next three pairs is the same declaration repeated
// from both sides of the diff; keep one of each.
List<String> searchForStoragePoolDetails(long poolId, String value);
List<String> searchForStoragePoolDetails(long poolId, String value);
List<StoragePoolVO> findIfDuplicatePoolsExistByUUID(String uuid);
List<StoragePoolVO> findIfDuplicatePoolsExistByUUID(String uuid);
// NOTE(review): the merge replaces DataStoreStatus with StoragePoolStatus in
// these status-query signatures; both variants appear below.
List<StoragePoolVO> listByStatus(StoragePoolStatus status);
List<StoragePoolVO> listByStatus(DataStoreStatus status);
long countPoolsByStatus(DataStoreStatus... statuses);
List<StoragePoolVO> listByStatusInZone(long dcId, DataStoreStatus status);
long countPoolsByStatus(StoragePoolStatus... statuses);
List<StoragePoolVO> listByStatusInZone(long dcId, StoragePoolStatus status);
List<StoragePoolVO> listPoolsByCluster(long clusterId);
// NOTE(review): stray closing brace from the pre-merge side of the diff; the
// members after it belong inside the interface in the post-merge version.
}
List<StoragePoolVO> findLocalStoragePoolsByTags(long dcId, long podId,
Long clusterId, String[] tags);
List<StoragePoolVO> findZoneWideStoragePoolsByTags(long dcId, String[] tags);
}

View File

@ -1,21 +1,19 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.db;
import java.sql.PreparedStatement;
@ -26,12 +24,17 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreStatus;
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.springframework.stereotype.Component;
import com.cloud.host.Status;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
@ -39,128 +42,147 @@ import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Func;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.SearchCriteria2;
import com.cloud.utils.db.SearchCriteriaService;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.exception.CloudRuntimeException;
@Component
public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long> implements PrimaryDataStoreDao {
@Local(value={PrimaryDataStoreDao.class}) @DB(txn=false)
public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long> implements PrimaryDataStoreDao {
protected final SearchBuilder<StoragePoolVO> AllFieldSearch;
protected final SearchBuilder<StoragePoolVO> DcPodSearch;
protected final SearchBuilder<StoragePoolVO> DcPodSearch;
protected final SearchBuilder<StoragePoolVO> DcPodAnyClusterSearch;
protected final SearchBuilder<StoragePoolVO> DeleteLvmSearch;
protected final GenericSearchBuilder<StoragePoolVO, Long> StatusCountSearch;
@Inject protected PrimaryDataStoreDetailsDao _detailsDao;
private final String DetailsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and (";
private final String DetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?";
private final String FindPoolTagDetails = "SELECT storage_pool_details.name FROM storage_pool_details WHERE pool_id = ? and value = ?";
@Inject protected StoragePoolDetailsDao _detailsDao;
private final String DetailsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? and (";
private final String DetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?";
private final String ZoneWideDetailsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and storage_pool.scope = ? and (";
private final String ZoneWideDetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?";
private final String FindPoolTagDetails = "SELECT storage_pool_details.name FROM storage_pool_details WHERE pool_id = ? and value = ?";
public PrimaryDataStoreDaoImpl() {
AllFieldSearch = createSearchBuilder();
AllFieldSearch.and("name", AllFieldSearch.entity().getName(), SearchCriteria.Op.EQ);
AllFieldSearch.and("uuid", AllFieldSearch.entity().getUuid(), SearchCriteria.Op.EQ);
AllFieldSearch.and("datacenterId", AllFieldSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
AllFieldSearch.and("hostAddress", AllFieldSearch.entity().getHostAddress(), SearchCriteria.Op.EQ);
AllFieldSearch.and("status", AllFieldSearch.entity().getStatus(), SearchCriteria.Op.EQ);
AllFieldSearch.and("status",AllFieldSearch.entity().getStatus(),SearchCriteria.Op.EQ);
AllFieldSearch.and("path", AllFieldSearch.entity().getPath(), SearchCriteria.Op.EQ);
AllFieldSearch.and("podId", AllFieldSearch.entity().getPodId(), Op.EQ);
AllFieldSearch.and("clusterId", AllFieldSearch.entity().getClusterId(), Op.EQ);
AllFieldSearch.done();
DcPodSearch = createSearchBuilder();
DcPodSearch.and("datacenterId", DcPodSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
DcPodSearch.and().op("nullpod", DcPodSearch.entity().getPodId(), SearchCriteria.Op.NULL);
DcPodSearch.or("podId", DcPodSearch.entity().getPodId(), SearchCriteria.Op.EQ);
DcPodSearch.cp();
DcPodSearch.and().op("nullcluster", DcPodSearch.entity().getClusterId(), SearchCriteria.Op.NULL);
DcPodSearch.or("cluster", DcPodSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
DcPodSearch.cp();
DcPodSearch.done();
DcPodAnyClusterSearch = createSearchBuilder();
AllFieldSearch.done();
DcPodSearch = createSearchBuilder();
DcPodSearch.and("datacenterId", DcPodSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
DcPodSearch.and("status", DcPodSearch.entity().getStatus(), SearchCriteria.Op.EQ);
DcPodSearch.and("scope", DcPodSearch.entity().getScope(), SearchCriteria.Op.EQ);
DcPodSearch.and().op("nullpod", DcPodSearch.entity().getPodId(), SearchCriteria.Op.NULL);
DcPodSearch.or("podId", DcPodSearch.entity().getPodId(), SearchCriteria.Op.EQ);
DcPodSearch.cp();
DcPodSearch.and().op("nullcluster", DcPodSearch.entity().getClusterId(), SearchCriteria.Op.NULL);
DcPodSearch.or("cluster", DcPodSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
DcPodSearch.cp();
DcPodSearch.done();
DcPodAnyClusterSearch = createSearchBuilder();
DcPodAnyClusterSearch.and("datacenterId", DcPodAnyClusterSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
DcPodAnyClusterSearch.and("status", DcPodAnyClusterSearch.entity().getStatus(), SearchCriteria.Op.EQ);
DcPodAnyClusterSearch.and("scope", DcPodAnyClusterSearch.entity().getScope(), SearchCriteria.Op.EQ);
DcPodAnyClusterSearch.and().op("nullpod", DcPodAnyClusterSearch.entity().getPodId(), SearchCriteria.Op.NULL);
DcPodAnyClusterSearch.or("podId", DcPodAnyClusterSearch.entity().getPodId(), SearchCriteria.Op.EQ);
DcPodAnyClusterSearch.cp();
DcPodAnyClusterSearch.done();
DeleteLvmSearch = createSearchBuilder();
DeleteLvmSearch.and("ids", DeleteLvmSearch.entity().getId(), SearchCriteria.Op.IN);
DeleteLvmSearch.and().op("LVM", DeleteLvmSearch.entity().getPoolType(), SearchCriteria.Op.EQ);
DeleteLvmSearch.or("Filesystem", DeleteLvmSearch.entity().getPoolType(), SearchCriteria.Op.EQ);
DeleteLvmSearch.cp();
DeleteLvmSearch.done();
DeleteLvmSearch.done();
StatusCountSearch = createSearchBuilder(Long.class);
StatusCountSearch.and("status", StatusCountSearch.entity().getStatus(), SearchCriteria.Op.IN);
StatusCountSearch.select(null, Func.COUNT, null);
StatusCountSearch.done();
}
@Override
public List<StoragePoolVO> findPoolByName(String name) {
SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
}
@Override
public List<StoragePoolVO> findPoolByName(String name) {
SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
sc.setParameters("name", name);
return listIncludingRemovedBy(sc);
}
}
@Override
public StoragePoolVO findPoolByUUID(String uuid) {
SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
@Override
public StoragePoolVO findPoolByUUID(String uuid) {
SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
sc.setParameters("uuid", uuid);
return findOneIncludingRemovedBy(sc);
}
}
@Override
public List<StoragePoolVO> findIfDuplicatePoolsExistByUUID(String uuid) {
SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
@Override
public List<StoragePoolVO> findIfDuplicatePoolsExistByUUID(String uuid) {
SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
sc.setParameters("uuid", uuid);
return listBy(sc);
}
}
@Override
public List<StoragePoolVO> listByDataCenterId(long datacenterId) {
SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
@Override
public List<StoragePoolVO> listByDataCenterId(long datacenterId) {
SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
sc.setParameters("datacenterId", datacenterId);
return listBy(sc);
}
}
@Override
public void updateAvailable(long id, long available) {
StoragePoolVO pool = createForUpdate(id);
pool.setAvailableBytes(available);
update(id, pool);
}
@Override
public void updateCapacity(long id, long capacity) {
StoragePoolVO pool = createForUpdate(id);
pool.setCapacityBytes(capacity);
update(id, pool);
@Override
public void updateAvailable(long id, long available) {
StoragePoolVO pool = createForUpdate(id);
pool.setAvailableBytes(available);
update(id, pool);
}
}
@Override
public void updateCapacity(long id, long capacity) {
StoragePoolVO pool = createForUpdate(id);
pool.setCapacityBytes(capacity);
update(id, pool);
}
@Override
public List<StoragePoolVO> listByStorageHost(String hostFqdnOrIp) {
SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
sc.setParameters("hostAddress", hostFqdnOrIp);
return listIncludingRemovedBy(sc);
}
@Override
public List<StoragePoolVO> listByStatus(DataStoreStatus status) {
public List<StoragePoolVO> listByStatus(StoragePoolStatus status){
SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
sc.setParameters("status", status);
return listBy(sc);
sc.setParameters("status", status);
return listBy(sc);
}
@Override
public List<StoragePoolVO> listByStatusInZone(long dcId, DataStoreStatus status) {
public List<StoragePoolVO> listByStatusInZone(long dcId, StoragePoolStatus status){
SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
sc.setParameters("status", status);
sc.setParameters("datacenterId", dcId);
return listBy(sc);
sc.setParameters("status", status);
sc.setParameters("datacenterId", dcId);
return listBy(sc);
}
@Override
@ -171,190 +193,238 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
sc.setParameters("datacenterId", datacenterId);
sc.setParameters("podId", podId);
sc.setParameters("uuid", uuid);
return findOneBy(sc);
}
@Override
public List<StoragePoolVO> listBy(long datacenterId, long podId, Long clusterId) {
if (clusterId != null) {
SearchCriteria<StoragePoolVO> sc = DcPodSearch.create();
@Override
public List<StoragePoolVO> listBy(long datacenterId, long podId, Long clusterId, ScopeType scope) {
if (clusterId != null) {
SearchCriteria<StoragePoolVO> sc = DcPodSearch.create();
sc.setParameters("datacenterId", datacenterId);
sc.setParameters("podId", podId);
sc.setParameters("status", Status.Up);
sc.setParameters("scope", scope);
sc.setParameters("cluster", clusterId);
return listBy(sc);
} else {
SearchCriteria<StoragePoolVO> sc = DcPodAnyClusterSearch.create();
sc.setParameters("datacenterId", datacenterId);
sc.setParameters("podId", podId);
return listBy(sc);
}
}
} else {
SearchCriteria<StoragePoolVO> sc = DcPodAnyClusterSearch.create();
sc.setParameters("datacenterId", datacenterId);
sc.setParameters("podId", podId);
sc.setParameters("status", Status.Up);
sc.setParameters("scope", scope);
return listBy(sc);
}
}
@Override
public List<StoragePoolVO> listPoolByHostPath(String host, String path) {
@Override
public List<StoragePoolVO> listPoolByHostPath(String host, String path) {
SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
sc.setParameters("hostAddress", host);
sc.setParameters("path", path);
return listBy(sc);
}
public StoragePoolVO listById(Integer id) {
}
public StoragePoolVO listById(Integer id)
{
SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
sc.setParameters("id", id);
return findOneIncludingRemovedBy(sc);
}
@Override @DB
public StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details) {
Transaction txn = Transaction.currentTxn();
txn.start();
pool = super.persist(pool);
if (details != null) {
for (Map.Entry<String, String> detail : details.entrySet()) {
StoragePoolDetailVO vo = new StoragePoolDetailVO(pool.getId(), detail.getKey(), detail.getValue());
_detailsDao.persist(vo);
}
}
txn.commit();
return pool;
}
@DB
@Override
public List<StoragePoolVO> findPoolsByDetails(long dcId, long podId, Long clusterId, Map<String, String> details, ScopeType scope) {
StringBuilder sql = new StringBuilder(DetailsSqlPrefix);
if (clusterId != null) {
sql.append("storage_pool.cluster_id = ? OR storage_pool.cluster_id IS NULL) AND (");
}
for (Map.Entry<String, String> detail : details.entrySet()) {
sql.append("((storage_pool_details.name='").append(detail.getKey()).append("') AND (storage_pool_details.value='").append(detail.getValue()).append("')) OR ");
}
sql.delete(sql.length() - 4, sql.length());
sql.append(DetailsSqlSuffix);
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
pstmt = txn.prepareAutoCloseStatement(sql.toString());
int i = 1;
pstmt.setLong(i++, dcId);
pstmt.setLong(i++, podId);
pstmt.setString(i++, scope.toString());
if (clusterId != null) {
pstmt.setLong(i++, clusterId);
}
pstmt.setInt(i++, details.size());
ResultSet rs = pstmt.executeQuery();
List<StoragePoolVO> pools = new ArrayList<StoragePoolVO>();
while (rs.next()) {
pools.add(toEntityBean(rs, false));
}
return pools;
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to execute " + pstmt, e);
}
}
protected Map<String, String> tagsToDetails(String[] tags) {
Map<String, String> details = new HashMap<String, String>(tags.length);
for (String tag: tags) {
details.put(tag, "true");
}
return details;
}
@Override
public List<StoragePoolVO> findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags) {
List<StoragePoolVO> storagePools = null;
if (tags == null || tags.length == 0) {
storagePools = listBy(dcId, podId, clusterId, ScopeType.CLUSTER);
} else {
Map<String, String> details = tagsToDetails(tags);
storagePools = findPoolsByDetails(dcId, podId, clusterId, details, ScopeType.CLUSTER);
}
return storagePools;
}
@Override
public List<StoragePoolVO> findLocalStoragePoolsByTags(long dcId, long podId, Long clusterId, String[] tags) {
List<StoragePoolVO> storagePools = null;
if (tags == null || tags.length == 0) {
storagePools = listBy(dcId, podId, clusterId, ScopeType.HOST);
} else {
Map<String, String> details = tagsToDetails(tags);
storagePools = findPoolsByDetails(dcId, podId, clusterId, details, ScopeType.HOST);
}
return storagePools;
}
@Override
public List<StoragePoolVO> findZoneWideStoragePoolsByTags(long dcId, String[] tags) {
List<StoragePoolVO> storagePools = null;
if (tags == null || tags.length == 0) {
SearchCriteriaService<StoragePoolVO, StoragePoolVO> sc = SearchCriteria2.create(StoragePoolVO.class);
sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dcId);
sc.addAnd(sc.getEntity().getStatus(), Op.EQ, Status.Up);
sc.addAnd(sc.getEntity().getScope(), Op.EQ, ScopeType.ZONE);
return sc.list();
} else {
Map<String, String> details = tagsToDetails(tags);
StringBuilder sql = new StringBuilder(ZoneWideDetailsSqlPrefix);
for (Map.Entry<String, String> detail : details.entrySet()) {
sql.append("((storage_pool_details.name='").append(detail.getKey()).append("') AND (storage_pool_details.value='").append(detail.getValue()).append("')) OR ");
}
sql.delete(sql.length() - 4, sql.length());
sql.append(ZoneWideDetailsSqlSuffix);
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
pstmt = txn.prepareAutoCloseStatement(sql.toString());
int i = 1;
pstmt.setLong(i++, dcId);
pstmt.setString(i++, ScopeType.ZONE.toString());
pstmt.setInt(i++, details.size());
ResultSet rs = pstmt.executeQuery();
List<StoragePoolVO> pools = new ArrayList<StoragePoolVO>();
while (rs.next()) {
pools.add(toEntityBean(rs, false));
}
return pools;
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to execute " + pstmt, e);
}
}
}
@Override
@DB
public List<String> searchForStoragePoolDetails(long poolId, String value){
StringBuilder sql = new StringBuilder(FindPoolTagDetails);
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
pstmt = txn.prepareAutoCloseStatement(sql.toString());
pstmt.setLong(1, poolId);
pstmt.setString(2, value);
ResultSet rs = pstmt.executeQuery();
List<String> tags = new ArrayList<String>();
while (rs.next()) {
tags.add(rs.getString("name"));
}
return tags;
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to execute " + pstmt.toString(), e);
}
}
@Override
public void updateDetails(long poolId, Map<String, String> details) {
if (details != null) {
_detailsDao.update(poolId, details);
}
}
@Override
@DB
public StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details) {
Transaction txn = Transaction.currentTxn();
txn.start();
pool = super.persist(pool);
if (details != null) {
for (Map.Entry<String, String> detail : details.entrySet()) {
PrimaryDataStoreDetailVO vo = new PrimaryDataStoreDetailVO(pool.getId(), detail.getKey(), detail.getValue());
_detailsDao.persist(vo);
}
}
txn.commit();
return pool;
}
@DB
@Override
public List<StoragePoolVO> findPoolsByDetails(long dcId, long podId, Long clusterId, Map<String, String> details) {
StringBuilder sql = new StringBuilder(DetailsSqlPrefix);
if (clusterId != null) {
sql.append("storage_pool.cluster_id = ? OR storage_pool.cluster_id IS NULL) AND (");
}
for (Map.Entry<String, String> detail : details.entrySet()) {
sql.append("((storage_pool_details.name='").append(detail.getKey()).append("') AND (storage_pool_details.value='").append(detail.getValue()).append("')) OR ");
}
sql.delete(sql.length() - 4, sql.length());
sql.append(DetailsSqlSuffix);
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
pstmt = txn.prepareAutoCloseStatement(sql.toString());
int i = 1;
pstmt.setLong(i++, dcId);
pstmt.setLong(i++, podId);
if (clusterId != null) {
pstmt.setLong(i++, clusterId);
}
pstmt.setInt(i++, details.size());
ResultSet rs = pstmt.executeQuery();
List<StoragePoolVO> pools = new ArrayList<StoragePoolVO>();
while (rs.next()) {
pools.add(toEntityBean(rs, false));
}
return pools;
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to execute " + pstmt, e);
}
}
protected Map<String, String> tagsToDetails(String[] tags) {
Map<String, String> details = new HashMap<String, String>(tags.length);
for (String tag : tags) {
details.put(tag, "true");
}
return details;
}
@Override
public List<StoragePoolVO> findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, Boolean shared) {
List<StoragePoolVO> storagePools = null;
if (tags == null || tags.length == 0) {
storagePools = listBy(dcId, podId, clusterId);
} else {
Map<String, String> details = tagsToDetails(tags);
storagePools = findPoolsByDetails(dcId, podId, clusterId, details);
}
if (shared == null) {
return storagePools;
} else {
List<StoragePoolVO> filteredStoragePools = new ArrayList<StoragePoolVO>(storagePools);
for (StoragePoolVO pool : storagePools) {
/*
* if (shared != pool.isShared()) {
* filteredStoragePools.remove(pool); }
*/
}
return filteredStoragePools;
}
}
@Override
@DB
public List<String> searchForStoragePoolDetails(long poolId, String value) {
StringBuilder sql = new StringBuilder(FindPoolTagDetails);
Transaction txn = Transaction.currentTxn();
PreparedStatement pstmt = null;
try {
pstmt = txn.prepareAutoCloseStatement(sql.toString());
pstmt.setLong(1, poolId);
pstmt.setString(2, value);
ResultSet rs = pstmt.executeQuery();
List<String> tags = new ArrayList<String>();
while (rs.next()) {
tags.add(rs.getString("name"));
}
return tags;
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to execute " + pstmt.toString(), e);
}
}
@Override
public void updateDetails(long poolId, Map<String, String> details) {
if (details != null) {
_detailsDao.update(poolId, details);
}
}
@Override
public Map<String, String> getDetails(long poolId) {
return _detailsDao.getDetails(poolId);
}
@Override
@Override
public Map<String, String> getDetails(long poolId) {
return _detailsDao.getDetails(poolId);
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
super.configure(name, params);
_detailsDao.configure("DetailsDao", params);
return true;
}
super.configure(name, params);
_detailsDao.configure("DetailsDao", params);
return true;
}
@Override
public long countPoolsByStatus(DataStoreStatus... statuses) {
public long countPoolsByStatus( StoragePoolStatus... statuses) {
SearchCriteria<Long> sc = StatusCountSearch.create();
sc.setParameters("status", (Object[]) statuses);
sc.setParameters("status", (Object[])statuses);
List<Long> rs = customSearchIncludingRemoved(sc, null);
if (rs.size() == 0) {
return 0;
}
return rs.get(0);
}
@Override
public List<StoragePoolVO> listPoolsByCluster(long clusterId) {
SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
sc.setParameters("clusterId", clusterId);
return listBy(sc);
}
}

View File

@ -14,7 +14,7 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage;
package org.apache.cloudstack.storage.datastore.db;
import org.apache.cloudstack.api.InternalIdentity;

View File

@ -14,11 +14,11 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage.dao;
package org.apache.cloudstack.storage.datastore.db;
import java.util.Map;
import com.cloud.storage.StoragePoolDetailVO;
import com.cloud.utils.db.GenericDao;
public interface StoragePoolDetailsDao extends GenericDao<StoragePoolDetailVO, Long> {

View File

@ -32,12 +32,13 @@ import javax.persistence.TemporalType;
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.utils.db.GenericDao;
@Entity
@Table(name="storage_pool")
public class StoragePoolVO {
public class StoragePoolVO implements StoragePool{
@Id
@TableGenerator(name = "storage_pool_sq", table = "sequence", pkColumnName = "name", valueColumnName = "value", pkColumnValue = "storage_pool_seq", allocationSize = 1)
@Column(name = "id", updatable = false, nullable = false)
@ -301,4 +302,10 @@ public class StoragePoolVO {
public boolean isLocal() {
return !isShared();
}
@Override
public boolean isInMaintenance() {
// TODO Auto-generated method stub
return false;
}
}

View File

@ -28,6 +28,7 @@ import org.apache.cloudstack.engine.cloud.entity.api.db.VMReservationVO;
import org.apache.cloudstack.engine.cloud.entity.api.db.dao.VMEntityDao;
import org.apache.cloudstack.engine.cloud.entity.api.db.dao.VMReservationDao;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.springframework.stereotype.Component;
import com.cloud.dc.DataCenter;
@ -50,7 +51,6 @@ import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.dao.AccountDao;
@ -101,7 +101,7 @@ public class VMEntityManagerImpl implements VMEntityManager {
protected VolumeDao _volsDao;
@Inject
protected StoragePoolDao _storagePoolDao;
protected PrimaryDataStoreDao _storagePoolDao;
@Inject
DataStoreManager dataStoreMgr;

View File

@ -30,6 +30,9 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.junit.Before;
import org.junit.Test;
@ -54,14 +57,11 @@ import com.cloud.org.Managed.ManagedState;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolDetailVO;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.StoragePoolDetailsDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.utils.component.ComponentContext;
import com.cloud.vm.DiskProfile;
@ -71,7 +71,7 @@ import com.cloud.vm.VirtualMachineProfile;
@ContextConfiguration(locations = "classpath:/storageContext.xml")
public class StorageAllocatorTest {
@Inject
StoragePoolDao storagePoolDao;
PrimaryDataStoreDao storagePoolDao;
@Inject
StorageManager storageMgr;
@Inject

View File

@ -19,6 +19,7 @@ package org.apache.cloudstack.storage.allocator;
import java.io.IOException;
import org.apache.cloudstack.storage.allocator.StorageAllocatorTestConfiguration.Library;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDaoImpl;
import org.mockito.Mockito;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
@ -33,7 +34,6 @@ import com.cloud.dc.dao.DataCenterDaoImpl;
import com.cloud.domain.dao.DomainDaoImpl;
import com.cloud.host.dao.HostDaoImpl;
import com.cloud.storage.StorageManager;
import com.cloud.storage.dao.StoragePoolDaoImpl;
import com.cloud.storage.dao.StoragePoolDetailsDaoImpl;
import com.cloud.storage.dao.VMTemplateDaoImpl;
import com.cloud.utils.component.SpringComponentScanUtils;
@ -43,7 +43,7 @@ import com.cloud.vm.UserVmManager;
@Configuration
@ComponentScan(basePackageClasses={
StoragePoolDetailsDaoImpl.class,
StoragePoolDaoImpl.class,
PrimaryDataStoreDaoImpl.class,
VMTemplateDaoImpl.class,
HostDaoImpl.class,
DomainDaoImpl.class,

View File

@ -44,6 +44,7 @@ import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.framework.async.AsyncRpcConext;
import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.motion.DataMotionService;
import org.apache.cloudstack.storage.snapshot.SnapshotObject;
import org.apache.cloudstack.storage.snapshot.SnapshotStateMachineManager;
@ -71,7 +72,6 @@ import com.cloud.storage.StoragePool;
import com.cloud.storage.VolumeManager;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.s3.S3Manager;
import com.cloud.storage.snapshot.SnapshotManager;
@ -96,7 +96,7 @@ public class AncientSnasphotStrategy implements SnapshotStrategy {
@Inject
protected UserVmDao _vmDao;
@Inject
protected StoragePoolDao _storagePoolDao;
protected PrimaryDataStoreDao _storagePoolDao;
@Inject
protected ClusterDao _clusterDao;
@Inject

View File

@ -29,6 +29,7 @@ import javax.naming.ConfigurationException;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.log4j.Logger;
import com.cloud.configuration.dao.ConfigurationDao;
@ -43,7 +44,6 @@ import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume;
import com.cloud.storage.Volume.Type;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.Account;
import com.cloud.utils.NumbersUtil;
@ -55,7 +55,7 @@ import com.cloud.vm.VirtualMachineProfile;
public abstract class AbstractStoragePoolAllocator extends AdapterBase implements StoragePoolAllocator {
private static final Logger s_logger = Logger.getLogger(AbstractStoragePoolAllocator.class);
@Inject StorageManager storageMgr;
protected @Inject StoragePoolDao _storagePoolDao;
protected @Inject PrimaryDataStoreDao _storagePoolDao;
@Inject VolumeDao _volumeDao;
@Inject ConfigurationDao _configDao;
@Inject ClusterDao _clusterDao;

View File

@ -22,6 +22,7 @@ import java.util.List;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
@ -31,7 +32,6 @@ import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
@ -39,7 +39,7 @@ import com.cloud.vm.VirtualMachineProfile;
@Component
public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
private static final Logger s_logger = Logger.getLogger(ZoneWideStoragePoolAllocator.class);
@Inject StoragePoolDao _storagePoolDao;
@Inject PrimaryDataStoreDao _storagePoolDao;
@Inject DataStoreManager dataStoreMgr;
@Override

View File

@ -532,7 +532,7 @@ public class AncientPrimaryDataStoreLifeCycleImpl implements
// if they dont, then just stop all vms on this one
List<StoragePoolVO> upPools = primaryDataStoreDao
.listByStatusInZone(pool.getDataCenterId(),
DataStoreStatus.Up);
StoragePoolStatus.Up);
boolean restart = true;
if (upPools == null || upPools.size() == 0) {
restart = false;

View File

@ -57,11 +57,12 @@ done
export JAVA_HOME
ACP=`ls /usr/share/cloudstack-agent/lib/*.jar | tr '\n' ':' | sed s'/.$//'`
PCP=`ls /usr/share/cloudstack-agent/plugins/*.jar 2>/dev/null | tr '\n' ':' | sed s'/.$//'`
# We need to append the JSVC daemon JAR to the classpath
# AgentShell implements the JSVC daemon methods
# We also need JNA in the classpath (from the distribution) for the Libvirt Java bindings
export CLASSPATH="/usr/share/java/commons-daemon.jar:/usr/share/java/jna.jar:$ACP:/etc/cloudstack/agent:/usr/share/cloudstack-common/scripts"
export CLASSPATH="/usr/share/java/commons-daemon.jar:/usr/share/java/jna.jar:$ACP:$PCP:/etc/cloudstack/agent:/usr/share/cloudstack-common/scripts"
start() {
echo -n $"Starting $PROGNAME: "

View File

@ -116,6 +116,7 @@ Requires: ebtables
Requires: jsvc
Requires: jakarta-commons-daemon
Requires: jakarta-commons-daemon-jsvc
Requires: perl
Provides: cloud-agent
Obsoletes: cloud-agent < 4.1.0
Obsoletes: cloud-test < 4.1.0
@ -256,6 +257,7 @@ chmod 770 ${RPM_BUILD_ROOT}%{_localstatedir}/log/%{name}/agent
mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent
mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/log/%{name}/agent
mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib
mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/plugins
install -D packaging/centos63/cloud-agent.rc ${RPM_BUILD_ROOT}%{_sysconfdir}/init.d/%{name}-agent
install -D agent/target/transformed/agent.properties ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent/agent.properties
install -D agent/target/transformed/environment.properties ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent/environment.properties
@ -395,6 +397,7 @@ fi
%config(noreplace) %{_sysconfdir}/%{name}/agent
%dir %{_localstatedir}/log/%{name}/agent
%attr(0644,root,root) %{_datadir}/%{name}-agent/lib/*.jar
%dir %{_datadir}/%{name}-agent/plugins
%doc LICENSE
%doc NOTICE

View File

@ -55,12 +55,13 @@ for jdir in $JDK_DIRS; do
done
export JAVA_HOME
ACP=`ls /usr/share/cloudstack-agent/lib/* | tr '\n' ':' | sed s'/.$//'`
ACP=`ls /usr/share/cloudstack-agent/lib/*.jar | tr '\n' ':' | sed s'/.$//'`
PCP=`ls /usr/share/cloudstack-agent/plugins/*.jar 2>/dev/null | tr '\n' ':' | sed s'/.$//'`
# We need to append the JSVC daemon JAR to the classpath
# AgentShell implements the JSVC daemon methods
# We also need JNA in the classpath (from the distribution) for the Libvirt Java bindings
export CLASSPATH="/usr/share/java/commons-daemon.jar:/usr/share/java/jna.jar:$ACP:/etc/cloudstack/agent"
export CLASSPATH="/usr/share/java/commons-daemon.jar:/usr/share/java/jna.jar:$ACP:$PCP:/etc/cloudstack/agent"
wait_for_network() {
i=1

View File

@ -108,14 +108,17 @@ get_boot_params() {
sed -i "s/%/ /g" /var/cache/cloud/cmdline
;;
kvm)
# KVM needs to mount another disk, to get cmdline
mkdir -p $EXTRA_MOUNT
mount /dev/vdb $EXTRA_MOUNT
cp -f $EXTRA_MOUNT/cmdline /var/cache/cloud/cmdline
cp -f $EXTRA_MOUNT/authorized_keys /var/cache/cloud/authorized_keys
privkey=/var/cache/cloud/authorized_keys
umount $EXTRA_MOUNT
cp -f $privkey /root/.ssh/ && chmod go-rwx /root/.ssh/authorized_keys
while read line; do
if [[ $line == cmdline:* ]]; then
cmd=${line//cmdline:/}
echo $cmd > /var/cache/cloud/cmdline
elif [[ $line == pubkey:* ]]; then
pubkey=${line//pubkey:/}
echo $pubkey > /var/cache/cloud/authorized_keys
echo $pubkey > /root/.ssh/authorized_keys
fi
done < /dev/vport0p1
chmod go-rwx /root/.ssh/authorized_keys
;;
vmware)
vmtoolsd --cmd 'machine.id.get' > /var/cache/cloud/cmdline

View File

@ -23,6 +23,7 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.apache.log4j.Logger;
import org.libvirt.LibvirtException;
import org.libvirt.StoragePool;
import org.libvirt.StoragePoolInfo;
@ -33,6 +34,7 @@ import com.cloud.utils.script.OutputInterpreter.AllLinesParser;
import com.cloud.utils.script.Script;
public class KVMHABase {
private static final Logger s_logger = Logger.getLogger(KVMHABase.class);
private long _timeout = 60000; /* 1 minutes */
protected static String _heartBeatPath;
protected long _heartBeatUpdateTimeout = 60000;
@ -124,14 +126,14 @@ public class KVMHABase {
}
poolName = pool.getName();
} catch (LibvirtException e) {
s_logger.debug("Ignoring libvirt error.", e);
} finally {
try {
if (pool != null) {
pool.free();
}
} catch (LibvirtException e) {
s_logger.debug("Ignoring libvirt error.", e);
}
}

View File

@ -255,7 +255,7 @@ ServerResource {
private String _modifyVlanPath;
private String _versionstringpath;
private String _patchdomrPath;
private String _patchViaSocketPath;
private String _createvmPath;
private String _manageSnapshotPath;
private String _resizeVolumePath;
@ -521,10 +521,10 @@ ServerResource {
throw new ConfigurationException("Unable to find versions.sh");
}
_patchdomrPath = Script.findScript(kvmScriptsDir + "/patch/",
"rundomrpre.sh");
if (_patchdomrPath == null) {
throw new ConfigurationException("Unable to find rundomrpre.sh");
_patchViaSocketPath = Script.findScript(kvmScriptsDir + "/patch/",
"patchviasocket.pl");
if (_patchViaSocketPath == null) {
throw new ConfigurationException("Unable to find patchviasocket.pl");
}
_heartBeatPath = Script.findScript(kvmScriptsDir, "kvmheartbeat.sh");
@ -692,7 +692,7 @@ ServerResource {
_hvVersion = conn.getVersion();
_hvVersion = (_hvVersion % 1000000) / 1000;
} catch (LibvirtException e) {
s_logger.trace("Ignoring libvirt error.", e);
}
String[] info = NetUtils.getNetworkParams(_privateNic);
@ -764,8 +764,8 @@ ServerResource {
if (tokens.length == 2) {
try {
_migrateSpeed = Integer.parseInt(tokens[0]);
} catch (Exception e) {
} catch (NumberFormatException e) {
s_logger.trace("Ignoring migrateSpeed extraction error.", e);
}
s_logger.debug("device " + _pifs.get("public") + " has speed: " + String.valueOf(_migrateSpeed));
}
@ -846,8 +846,8 @@ ServerResource {
throw new ConfigurationException("Unable to find class for libvirt.vif.driver " + e);
} catch (InstantiationException e) {
throw new ConfigurationException("Unable to instantiate class for libvirt.vif.driver " + e);
} catch (Exception e) {
throw new ConfigurationException("Failed to initialize libvirt.vif.driver " + e);
} catch (IllegalAccessException e) {
throw new ConfigurationException("Unable to instantiate class for libvirt.vif.driver " + e);
}
return vifDriver;
}
@ -1014,13 +1014,11 @@ ServerResource {
return vnetId;
}
private void patchSystemVm(String cmdLine, String dataDiskPath,
String vmName) throws InternalErrorException {
private void passCmdLine(String vmName, String cmdLine)
throws InternalErrorException {
final Script command = new Script(_patchViaSocketPath, _timeout, s_logger);
String result;
final Script command = new Script(_patchdomrPath, _timeout, s_logger);
command.add("-l", vmName);
command.add("-t", "all");
command.add("-d", dataDiskPath);
command.add("-n",vmName);
command.add("-p", cmdLine.replaceAll(" ", "%"));
result = command.execute();
if (result != null) {
@ -1043,7 +1041,6 @@ ServerResource {
protected String startVM(Connect conn, String vmName, String domainXML)
throws LibvirtException, InternalErrorException {
Domain dm = null;
try {
/*
We create a transient domain here. When this method gets
@ -1053,12 +1050,11 @@ ServerResource {
This also makes sure we never have any old "garbage" defined
in libvirt which might haunt us.
*/
dm = conn.domainCreateXML(domainXML, 0);
conn.domainCreateXML(domainXML, 0);
} catch (final LibvirtException e) {
s_logger.warn("Failed to start domain " + vmName + ": "
+ e.getMessage());
+ e.getMessage(), e);
}
return null;
}
@ -1068,6 +1064,7 @@ ServerResource {
Connect conn = LibvirtConnection.getConnection();
conn.close();
} catch (LibvirtException e) {
s_logger.trace("Ignoring libvirt error.", e);
}
return true;
@ -1461,24 +1458,6 @@ ServerResource {
pool.deletePhysicalDisk(vol.getPath());
String vmName = cmd.getVmName();
String poolPath = pool.getLocalPath();
/* if vol is a root disk for a system vm, try to remove accompanying patch disk as well
this is a bit tricky since the patchdisk is only a LibvirtComputingResource construct
and not tracked anywhere in cloudstack */
if (vol.getType() == Volume.Type.ROOT && vmName.matches("^[rsv]-\\d+-.+$")) {
File patchVbd = new File(poolPath + File.separator + vmName + "-patchdisk");
if(patchVbd.exists()){
try {
_storagePoolMgr.deleteVbdByPath(vol.getPoolType(),patchVbd.getAbsolutePath());
} catch(CloudRuntimeException e) {
s_logger.warn("unable to destroy patch disk '" + patchVbd.getAbsolutePath() +
"' while removing root disk for " + vmName + " : " + e);
}
} else {
s_logger.debug("file '" +patchVbd.getAbsolutePath()+ "' not found");
}
}
return new Answer(cmd, true, "Success");
} catch (CloudRuntimeException e) {
s_logger.debug("Failed to delete volume: " + e.toString());
@ -1516,11 +1495,10 @@ ServerResource {
}
private PlugNicAnswer execute(PlugNicCommand cmd) {
Connect conn;
NicTO nic = cmd.getNic();
String vmName = cmd.getVmName();
try {
conn = LibvirtConnection.getConnection();
Connect conn = LibvirtConnection.getConnection();
Domain vm = getDomain(conn, vmName);
List<InterfaceDef> pluggedNics = getInterfaces(conn, vmName);
Integer nicnum = 0;
@ -1533,7 +1511,11 @@ ServerResource {
}
vm.attachDevice(getVifDriver(nic.getType()).plug(nic, "Other PV (32-bit)").toString());
return new PlugNicAnswer(cmd, true, "success");
} catch (Exception e) {
} catch (LibvirtException e) {
String msg = " Plug Nic failed due to " + e.toString();
s_logger.warn(msg, e);
return new PlugNicAnswer(cmd, false, msg);
} catch (InternalErrorException e) {
String msg = " Plug Nic failed due to " + e.toString();
s_logger.warn(msg, e);
return new PlugNicAnswer(cmd, false, msg);
@ -1555,7 +1537,7 @@ ServerResource {
}
}
return new UnPlugNicAnswer(cmd, true, "success");
} catch (Exception e) {
} catch (LibvirtException e) {
String msg = " Unplug Nic failed due to " + e.toString();
s_logger.warn(msg, e);
return new UnPlugNicAnswer(cmd, false, msg);
@ -1609,7 +1591,7 @@ ServerResource {
return new SetupGuestNetworkAnswer(cmd, false, "Creating guest network failed due to " + result);
}
return new SetupGuestNetworkAnswer(cmd, true, "success");
} catch (Exception e) {
} catch (LibvirtException e) {
String msg = "Creating guest network failed due to " + e.toString();
s_logger.warn(msg, e);
return new SetupGuestNetworkAnswer(cmd, false, msg);
@ -1649,7 +1631,7 @@ ServerResource {
}
return new SetNetworkACLAnswer(cmd, true, results);
} catch (Exception e) {
} catch (LibvirtException e) {
String msg = "SetNetworkACL failed due to " + e.toString();
s_logger.error(msg, e);
return new SetNetworkACLAnswer(cmd, false, results);
@ -1694,7 +1676,7 @@ ServerResource {
return new SetSourceNatAnswer(cmd, false, "KVM plugin \"vpc_snat\" failed:"+result);
}
return new SetSourceNatAnswer(cmd, true, "success");
} catch (Exception e) {
} catch (LibvirtException e) {
String msg = "Ip SNAT failure due to " + e.toString();
s_logger.error(msg, e);
return new SetSourceNatAnswer(cmd, false, msg);
@ -1739,7 +1721,10 @@ ServerResource {
results[i++] = ip.getPublicIp() + " - success";
}
} catch (Exception e) {
} catch (LibvirtException e) {
s_logger.error("Ip Assoc failure on applying one ip due to exception: ", e);
results[i++] = IpAssocAnswer.errorResult;
} catch (InternalErrorException e) {
s_logger.error("Ip Assoc failure on applying one ip due to exception: ", e);
results[i++] = IpAssocAnswer.errorResult;
}
@ -1819,7 +1804,7 @@ ServerResource {
vm = getDomain(conn, cmd.getVmName());
state = vm.getInfo().state;
} catch (LibvirtException e) {
s_logger.trace("Ignoring libvirt error.", e);
}
}
@ -1938,7 +1923,7 @@ ServerResource {
vm = getDomain(conn, cmd.getVmName());
state = vm.getInfo().state;
} catch (LibvirtException e) {
s_logger.trace("Ignoring libvirt error.", e);
}
}
@ -2380,7 +2365,7 @@ ServerResource {
Connect conn = LibvirtConnection.getConnection();
Integer vncPort = getVncPort(conn, cmd.getName());
return new GetVncPortAnswer(cmd, _privateIp, 5900 + vncPort);
} catch (Exception e) {
} catch (LibvirtException e) {
return new GetVncPortAnswer(cmd, e.toString());
}
}
@ -2501,16 +2486,13 @@ ServerResource {
} catch (final LibvirtException e) {
s_logger.warn("Can't get vm state " + vmName + e.getMessage()
+ "retry:" + retry);
} catch (Exception e) {
s_logger.warn("Can't get vm state " + vmName + e.getMessage()
+ "retry:" + retry);
} finally {
try {
if (vms != null) {
vms.free();
}
} catch (final LibvirtException e) {
} catch (final LibvirtException l) {
s_logger.trace("Ignoring libvirt error.", l);
}
}
}
@ -2603,9 +2585,6 @@ ServerResource {
} catch (LibvirtException e) {
s_logger.debug("Can't migrate domain: " + e.getMessage());
result = e.getMessage();
} catch (Exception e) {
s_logger.debug("Can't migrate domain: " + e.getMessage());
result = e.getMessage();
} finally {
try {
if (dm != null) {
@ -2618,7 +2597,7 @@ ServerResource {
destDomain.free();
}
} catch (final LibvirtException e) {
s_logger.trace("Ignoring libvirt error.", e);
}
}
@ -2795,8 +2774,8 @@ ServerResource {
Integer vncPort = null;
try {
vncPort = getVncPort(conn, cmd.getVmName());
} catch (Exception e) {
} catch (LibvirtException e) {
s_logger.trace("Ignoring libvirt error.", e);
}
get_rule_logs_for_vms();
return new RebootAnswer(cmd, null, vncPort);
@ -3122,10 +3101,27 @@ ServerResource {
}
}
// pass cmdline info to system vms
if (vmSpec.getType() != VirtualMachine.Type.User) {
passCmdLine(vmName, vmSpec.getBootArgs() );
}
state = State.Running;
return new StartAnswer(cmd);
} catch (Exception e) {
s_logger.warn("Exception ", e);
} catch (LibvirtException e) {
s_logger.warn("LibvirtException ", e);
if (conn != null) {
handleVmStartFailure(conn, vmName, vm);
}
return new StartAnswer(cmd, e.getMessage());
} catch (InternalErrorException e) {
s_logger.warn("InternalErrorException ", e);
if (conn != null) {
handleVmStartFailure(conn, vmName, vm);
}
return new StartAnswer(cmd, e.getMessage());
} catch (URISyntaxException e) {
s_logger.warn("URISyntaxException ", e);
if (conn != null) {
handleVmStartFailure(conn, vmName, vm);
}
@ -3237,8 +3233,6 @@ ServerResource {
iso.defISODisk(_sysvmISOPath);
vm.getDevices().addDevice(iso);
}
createPatchVbd(conn, vmName, vm, vmSpec);
}
}
@ -3252,64 +3246,6 @@ ServerResource {
return null;
}
private void createPatchVbd(Connect conn, String vmName, LibvirtVMDef vm,
VirtualMachineTO vmSpec) throws LibvirtException,
InternalErrorException {
List<DiskDef> disks = vm.getDevices().getDisks();
DiskDef rootDisk = disks.get(0);
VolumeTO rootVol = getVolume(vmSpec, Volume.Type.ROOT);
String patchName = vmName + "-patchdisk";
KVMStoragePool pool = _storagePoolMgr.getStoragePool(
rootVol.getPoolType(),
rootVol.getPoolUuid());
String patchDiskPath = pool.getLocalPath() + "/" + patchName;
List<KVMPhysicalDisk> phyDisks = pool.listPhysicalDisks();
boolean foundDisk = false;
for (KVMPhysicalDisk phyDisk : phyDisks) {
if (phyDisk.getPath().equals(patchDiskPath)) {
foundDisk = true;
break;
}
}
if (!foundDisk) {
s_logger.debug("generating new patch disk for " + vmName + " since none was found");
KVMPhysicalDisk disk = pool.createPhysicalDisk(patchName, KVMPhysicalDisk.PhysicalDiskFormat.RAW,
10L * 1024 * 1024);
} else {
s_logger.debug("found existing patch disk at " + patchDiskPath + " using it for " + vmName);
}
/* Format/create fs on this disk */
final Script command = new Script(_createvmPath, _timeout, s_logger);
command.add("-f", patchDiskPath);
String result = command.execute();
if (result != null) {
s_logger.debug("Failed to create data disk: " + result);
throw new InternalErrorException("Failed to create data disk: "
+ result);
}
/* add patch disk */
DiskDef patchDisk = new DiskDef();
if (pool.getType() == StoragePoolType.CLVM) {
patchDisk.defBlockBasedDisk(patchDiskPath, 1, rootDisk.getBusType());
} else {
patchDisk.defFileBasedDisk(patchDiskPath, 1, rootDisk.getBusType(),
DiskDef.diskFmtType.RAW);
}
disks.add(patchDisk);
String bootArgs = vmSpec.getBootArgs();
patchSystemVm(bootArgs, patchDiskPath, vmName);
}
private void createVif(LibvirtVMDef vm, NicTO nic)
throws InternalErrorException, LibvirtException {
vm.getDevices().addDevice(
@ -3325,14 +3261,10 @@ ServerResource {
s_logger.debug("Ping command port, " + privateIp + ":" + cmdPort);
}
try {
String result = _virtRouterResource.connect(privateIp, cmdPort);
if (result != null) {
return new CheckSshAnswer(cmd, "Can not ping System vm "
+ vmName + "due to:" + result);
}
} catch (Exception e) {
return new CheckSshAnswer(cmd, e);
String result = _virtRouterResource.connect(privateIp, cmdPort);
if (result != null) {
return new CheckSshAnswer(cmd, "Can not ping System vm "
+ vmName + "due to:" + result);
}
if (s_logger.isDebugEnabled()) {
@ -3479,14 +3411,12 @@ ServerResource {
+ e.getMessage());
}
throw e;
} catch (Exception e) {
throw new InternalErrorException(e.toString());
} finally {
if (dm != null) {
try {
dm.free();
} catch (LibvirtException l) {
s_logger.trace("Ignoring libvirt error.", l);
}
}
}
@ -3704,22 +3634,21 @@ ServerResource {
return convertToState(vps);
}
} catch (final LibvirtException e) {
s_logger.trace(e.getMessage());
} catch (Exception e) {
s_logger.trace(e.getMessage());
s_logger.trace("Ignoring libvirt error.", e);
} finally {
try {
if (dm != null) {
dm.free();
}
} catch (final LibvirtException e) {
} catch (final LibvirtException l) {
s_logger.trace("Ignoring libvirt error.", l);
}
}
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
s_logger.trace("Ignoring InterruptedException.", e);
}
}
return State.Stopped;
@ -3757,7 +3686,7 @@ ServerResource {
dm.free();
}
} catch (final LibvirtException e) {
s_logger.trace("Ignoring libvirt error.", e);
}
}
}
@ -3812,7 +3741,7 @@ ServerResource {
dm.free();
}
} catch (LibvirtException e) {
s_logger.trace("Ignoring libvirt error.", e);
}
}
}
@ -3832,15 +3761,13 @@ ServerResource {
vmStates.put(vmName, state);
} catch (final LibvirtException e) {
s_logger.warn("Unable to get vms", e);
} catch (Exception e) {
s_logger.warn("Unable to get vms", e);
} finally {
try {
if (dm != null) {
dm.free();
}
} catch (LibvirtException e) {
s_logger.trace("Ignoring libvirt error.", e);
}
}
}
@ -3891,7 +3818,7 @@ ServerResource {
}
}
} catch (LibvirtException e) {
s_logger.trace("Ignoring libvirt error.", e);
}
if (isSnapshotSupported()) {
@ -3938,7 +3865,7 @@ ServerResource {
} catch (LibvirtException e) {
s_logger.warn("Failed to create vm", e);
msg = e.getMessage();
} catch (Exception e) {
} catch (InternalErrorException e) {
s_logger.warn("Failed to create vm", e);
msg = e.getMessage();
} finally {
@ -3947,7 +3874,7 @@ ServerResource {
dm.free();
}
} catch (LibvirtException e) {
s_logger.trace("Ignoring libvirt error.", e);
}
}
@ -3977,15 +3904,13 @@ ServerResource {
break;
} catch (LibvirtException e) {
s_logger.debug("Failed to get vm status:" + e.getMessage());
} catch (Exception e) {
s_logger.debug("Failed to get vm status:" + e.getMessage());
} finally {
try {
if (dm != null) {
dm.free();
}
} catch (LibvirtException l) {
s_logger.trace("Ignoring libvirt error.", l);
}
}
}
@ -4040,15 +3965,13 @@ ServerResource {
} catch (InterruptedException ie) {
s_logger.debug("Interrupted sleep");
return ie.getMessage();
} catch (Exception e) {
s_logger.debug("Failed to stop VM :" + vmName + " :", e);
return e.getMessage();
} finally {
try {
if (dm != null) {
dm.free();
}
} catch (LibvirtException e) {
s_logger.trace("Ignoring libvirt error.", e);
}
}
@ -4094,7 +4017,7 @@ ServerResource {
dm.free();
}
} catch (LibvirtException l) {
s_logger.trace("Ignoring libvirt error.", l);
}
}
}
@ -4110,7 +4033,7 @@ ServerResource {
}
}
} catch (LibvirtException e) {
s_logger.trace("Ignoring libvirt error.", e);
}
return false;
}
@ -4135,8 +4058,7 @@ ServerResource {
parser.parseDomainXML(xmlDesc);
return parser.getDescription();
} catch (LibvirtException e) {
return null;
} catch (Exception e) {
s_logger.trace("Ignoring libvirt error.", e);
return null;
} finally {
try {
@ -4144,7 +4066,7 @@ ServerResource {
dm.free();
}
} catch (LibvirtException l) {
s_logger.trace("Ignoring libvirt error.", l);
}
}
}
@ -4242,16 +4164,13 @@ ServerResource {
} catch (LibvirtException e) {
s_logger.debug("Failed to get dom xml: " + e.toString());
return new ArrayList<InterfaceDef>();
} catch (Exception e) {
s_logger.debug("Failed to get dom xml: " + e.toString());
return new ArrayList<InterfaceDef>();
} finally {
try {
if (dm != null) {
dm.free();
}
} catch (LibvirtException e) {
s_logger.trace("Ignoring libvirt error.", e);
}
}
}
@ -4268,16 +4187,13 @@ ServerResource {
} catch (LibvirtException e) {
s_logger.debug("Failed to get dom xml: " + e.toString());
return new ArrayList<DiskDef>();
} catch (Exception e) {
s_logger.debug("Failed to get dom xml: " + e.toString());
return new ArrayList<DiskDef>();
} finally {
try {
if (dm != null) {
dm.free();
}
} catch (LibvirtException e) {
s_logger.trace("Ignoring libvirt error.", e);
}
}
}
@ -4625,8 +4541,7 @@ ServerResource {
conn = LibvirtConnection.getConnection();
success = default_network_rules_for_systemvm(conn, cmd.getVmName());
} catch (LibvirtException e) {
// TODO Auto-generated catch block
e.printStackTrace();
s_logger.trace("Ignoring libvirt error.", e);
}
return new Answer(cmd, success, "");

View File

@ -864,8 +864,8 @@ public class LibvirtVMDef {
virtioSerialBuilder.append("<channel type='unix'>\n");
virtioSerialBuilder.append("<source mode='bind' path='" + _path
+ "/" + _name + ".agent'/>\n");
virtioSerialBuilder.append("<target type='virtio' name='org.qemu.guest_agent.0'/>\n");
virtioSerialBuilder.append("<address type='virtio-serial' controller='0' bus='0' port='1'/>\n");
virtioSerialBuilder.append("<target type='virtio' name='" + _name + ".vport'/>\n");
virtioSerialBuilder.append("<address type='virtio-serial'/>\n");
virtioSerialBuilder.append("</channel>\n");
return virtioSerialBuilder.toString();
}

View File

@ -19,6 +19,7 @@ package com.cloud.hypervisor.kvm.resource;
import java.io.IOException;
import java.io.StringReader;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
@ -40,13 +41,14 @@ public class LibvirtXMLParser extends DefaultHandler {
protected boolean _initialized = false;
public LibvirtXMLParser() {
try {
_sp = s_spf.newSAXParser();
_initialized = true;
} catch (Exception ex) {
} catch (ParserConfigurationException e) {
s_logger.trace("Ignoring xml parser error.", e);
} catch (SAXException e) {
s_logger.trace("Ignoring xml parser error.", e);
}
}
public boolean parseDomainXML(String domXML) {

View File

@ -345,6 +345,7 @@
<exclude>**/*.patch</exclude>
<exclude>**/.classpath</exclude>
<exclude>**/.project</exclude>
<exclude>**/.idea/**</exclude>
<exclude>**/*.iml</exclude>
<exclude>**/.settings/**</exclude>
<exclude>.metadata/**</exclude>
@ -362,6 +363,7 @@
<exclude>**/*.zip</exclude>
<exclude>**/target/**</exclude>
<exclude>**/.vagrant</exclude>
<exclude>awsapi/overlays/**</exclude>
<exclude>build/build.number</exclude>
<exclude>services/console-proxy/server/js/jquery.js</exclude>
<exclude>debian/compat</exclude>
@ -436,7 +438,6 @@
<exclude>patches/systemvm/debian/config/var/www/html/userdata/.htaccess</exclude>
<exclude>patches/systemvm/debian/config/var/www/html/latest/.htaccess</exclude>
<exclude>patches/systemvm/debian/vpn/etc/ipsec.d/l2tp.conf</exclude>
</excludes>
</configuration>
</plugin>

View File

@ -0,0 +1,58 @@
#!/usr/bin/perl -w
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#############################################################
# This script connects to the system vm socket and writes the
# authorized_keys and cmdline data to it. The system VM then
# reads it from /dev/vport0p1 in cloud_early_config
#
# Options: -n <vm name>   -p <encoded cmdline ('%' = space)>
#############################################################
use strict;
use Getopt::Std;
use IO::Socket;
$|=1;   # autoflush so error messages are emitted immediately
# Both -n and -p take an argument (see Getopt::Std::getopt).
my $opts = {};
getopt('pn',$opts);
my $name = $opts->{n};
my $cmdline = $opts->{p};
# Fail fast on missing arguments instead of building a bogus socket path.
if (!defined $name || !defined $cmdline) {
    print "ERROR: both -n <vm name> and -p <cmdline> are required\n";
    exit 1;
}
my $sockfile = "/var/lib/libvirt/qemu/$name.agent";   # per-VM agent socket
my $pubkeyfile = "/root/.ssh/id_rsa.pub.cloud";       # host-side cloud SSH public key
if (! -S $sockfile) {
    print "ERROR: $sockfile socket not found\n";
    exit 1;
}
if (! -f $pubkeyfile) {
    print "ERROR: ssh public key not found on host at $pubkeyfile\n";
    exit 1;
}
# Read the single-line public key (three-arg open with a lexical filehandle).
open(my $fh, '<', $pubkeyfile) or die "ERROR: unable to open $pubkeyfile - $^E";
my $key = <$fh>;
close $fh;
# '%' is used as a space placeholder on the command line; restore spaces.
$cmdline =~ s/%/ /g;
my $msg = "pubkey:" . $key . "\ncmdline:" . $cmdline;
# Send the payload to the system VM through its unix socket.
my $socket = IO::Socket::UNIX->new(Peer=>$sockfile,Type=>SOCK_STREAM)
    or die "ERROR: unable to connect to $sockfile - $^E\n";
print $socket "$msg\r\n";
close $socket;

View File

@ -1,147 +0,0 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# $Id: rundomrpre.sh 10427 2010-07-09 03:30:48Z edison $ $HeadURL: svn://svn.lab.vmops.com/repos/vmdev/java/scripts/vm/hypervisor/kvm/rundomrpre.sh $
# Trace every command as it executes (debug aid).
set -x
# Host-side SSH public key; patch_all copies it into the VM disk as authorized_keys.
pubKey="/root/.ssh/id_rsa.pub.cloud"
# Print the per-VM mount point under /mnt, creating it if it does not exist.
# $1 = VM name. Expansions are quoted so names containing spaces or glob
# characters cannot break the test/mkdir or undergo word splitting.
mntpath() {
local vmname="$1"
if [ ! -d "/mnt/$vmname" ]
then
mkdir -p "/mnt/$vmname"
fi
echo "/mnt/$vmname"
}
# Mount a VM's raw data disk at the per-VM mount point.
# $1 = VM name, $2 = disk path (block device, or plain file loop-mounted).
# Returns 2 if the disk path does not exist; otherwise returns 0.
mount_raw_disk() {
local vmname=$1
local datadisk=$2
local path=$(mntpath $vmname)
# The disk must be either a regular file or a block device.
if [ ! -f $datadisk -a ! -b $datadisk ]
then
printf "$datadisk doesn't exist" >&2
return 2
fi
# Retry the mount up to 10 times in case the disk is not ready yet.
retry=10
while [ $retry -gt 0 ]
do
if [ -b $datadisk ]; then
mount $datadisk $path &>/dev/null
ret=$?
else
# Regular file: attach via a loop device.
mount $datadisk $path -o loop &>/dev/null
ret=$?
fi
# NOTE(review): this sleep runs even when the mount succeeded, delaying every
# call by 10s — presumably it was meant for the failure branch only; confirm.
sleep 10
if [ $ret -gt 0 ]
then
sleep 5
else
break
fi
retry=$(($retry-1))
done
# NOTE(review): returns 0 unconditionally, so callers cannot detect that all
# mount attempts failed.
return 0
}
# Unmount a VM's data disk and remove its mount point.
# $1 = VM name, $2 = disk path (unused; kept for call symmetry with mount_raw_disk).
umount_raw_disk() {
local vmname=$1
local datadisk=$2
local path=$(mntpath $vmname)
retry=10
# Flush pending writes before attempting to unmount.
sync
# Retry up to 10 times; -d also detaches the loop device if one was used.
while [ $retry -gt 0 ]
do
umount -d $path &>/dev/null
if [ $? -gt 0 ]
then
sleep 5
else
rm -rf $path
break
fi
retry=$(($retry-1))
done
# NOTE(review): $? here is the status of the last command run (rm or the
# retry arithmetic), not of the umount itself — confirm that is intended.
return $?
}
# Populate the mounted VM disk with the host's SSH key and the boot cmdline.
# $1 = VM name, $2 = encoded cmdline ('%' stands for a space),
# $3 = disk path (accepted but not used here).
patch_all() {
local vmname=$1
local cmdline=$2
local datadisk=$3
local path=$(mntpath $vmname)
# Copy the host public key so the management server can SSH into the VM.
if [ -f $pubKey ]
then
cp $pubKey $path/authorized_keys
fi
# Write the cmdline and turn the '%' placeholders back into spaces.
echo $cmdline > $path/cmdline
sed -i "s/%/\ /g" $path/cmdline
return 0
}
# --- main ---
# Parse options. Only -l (VM name), -t (VM type), -d (root disk) and
# -p (cmdline) are consumed below; the other letters are accepted (and
# ignored) for compatibility with the caller's argument list.
lflag=
dflag=
while getopts 't:v:i:m:e:E:a:A:g:l:n:d:b:B:p:I:N:Mx:X:' OPTION
do
case $OPTION in
l) lflag=1
vmname="$OPTARG"
;;
t) tflag=1
vmtype="$OPTARG"
;;
d) dflag=1
rootdisk="$OPTARG"
;;
p) pflag=1
cmdline="$OPTARG"
;;
*) ;;
esac
done
# -l, -t and -d are all mandatory.
if [ "$lflag$tflag$dflag" != "111" ]
then
printf "Error: No enough parameter\n" >&2
exit 1
fi
# Only the "all" VM type gets its root disk mounted and patched.
if [ "$vmtype" = "all" ]
then
mount_raw_disk $vmname $rootdisk
if [ $? -gt 0 ]
then
# NOTE(review): the "exit $?" below exits with printf's status (almost
# always 0), not the mount failure code — presumably the mount status
# should have been saved before printing.
printf "Failed to mount $rootdisk"
exit $?
fi
patch_all $vmname $cmdline $rootdisk
umount_raw_disk $vmname $rootdisk
exit $?
fi
exit $?

View File

@ -39,6 +39,7 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
@ -100,7 +101,6 @@ import com.cloud.resource.ServerResource;
import com.cloud.server.ManagementService;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StorageService;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.resource.DummySecondaryStorageResource;
@ -172,7 +172,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
@Inject
protected ConfigurationDao _configDao = null;
@Inject
protected StoragePoolDao _storagePoolDao = null;
protected PrimaryDataStoreDao _storagePoolDao = null;
@Inject
protected StoragePoolHostDao _storagePoolHostDao = null;
@Inject

View File

@ -27,6 +27,7 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
@ -49,7 +50,6 @@ import com.cloud.host.dao.HostDao;
import com.cloud.org.Grouping;
import com.cloud.resource.ResourceManager;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.utils.Pair;
import com.cloud.vm.VirtualMachine;
@ -61,7 +61,7 @@ public class RecreateHostAllocator extends FirstFitRoutingAllocator {
private final static Logger s_logger = Logger.getLogger(RecreateHostAllocator.class);
@Inject HostPodDao _podDao;
@Inject StoragePoolDao _poolDao;
@Inject PrimaryDataStoreDao _poolDao;
@Inject ClusterDao _clusterDao;
@Inject VolumeDao _volsDao;
@Inject DataCenterDao _dcDao;

View File

@ -16,6 +16,9 @@
// under the License.
package com.cloud.alert.dao;
import java.util.Date;
import java.util.List;
import com.cloud.alert.AlertVO;
import com.cloud.utils.db.GenericDao;
@ -23,4 +26,8 @@ public interface AlertDao extends GenericDao<AlertVO, Long> {
AlertVO getLastAlert(short type, long dataCenterId, Long podId, Long clusterId);
// This is for backward compatibility
AlertVO getLastAlert(short type, long dataCenterId, Long podId);
public boolean deleteAlert(List<Long> Ids, String type, Date olderThan, Long zoneId);
public boolean archiveAlert(List<Long> Ids, String type, Date olderThan, Long zoneId);
public List<AlertVO> listOlderAlerts(Date oldTime);
}

View File

@ -16,6 +16,7 @@
// under the License.
package com.cloud.alert.dao;
import java.util.Date;
import java.util.List;
import javax.ejb.Local;
@ -25,11 +26,26 @@ import org.springframework.stereotype.Component;
import com.cloud.alert.AlertVO;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.Transaction;
@Component
@Local(value = { AlertDao.class })
public class AlertDaoImpl extends GenericDaoBase<AlertVO, Long> implements AlertDao {
// Reusable search builder matching alerts by any combination of id list,
// type, creation-date cutoff and zone (data center) id. All conditions are
// optional at query time; only the parameters that get set are applied.
protected final SearchBuilder<AlertVO> AlertSearchByIdsAndType;
public AlertDaoImpl() {
AlertSearchByIdsAndType = createSearchBuilder();
AlertSearchByIdsAndType.and("id", AlertSearchByIdsAndType.entity().getId(), Op.IN);
AlertSearchByIdsAndType.and("type", AlertSearchByIdsAndType.entity().getType(), Op.EQ);
// Op.LT: matches alerts created strictly before the supplied date.
AlertSearchByIdsAndType.and("createdDateL", AlertSearchByIdsAndType.entity().getCreatedDate(), Op.LT);
AlertSearchByIdsAndType.and("data_center_id", AlertSearchByIdsAndType.entity().getDataCenterId(), Op.EQ);
AlertSearchByIdsAndType.done();
}
@Override
public AlertVO getLastAlert(short type, long dataCenterId, Long podId, Long clusterId) {
Filter searchFilter = new Filter(AlertVO.class, "createdDate", Boolean.FALSE, Long.valueOf(0), Long.valueOf(1));
@ -68,4 +84,73 @@ public class AlertDaoImpl extends GenericDaoBase<AlertVO, Long> implements Alert
}
return null;
}
/**
 * Marks the matching alerts as archived.
 *
 * All filters are optional and are ANDed together: an explicit id list, an
 * alert type, a zone (data center) id, and a created-before cutoff date.
 * When an explicit id list is given and some of those alerts no longer
 * exist, nothing is archived and {@code false} is returned.
 *
 * @param ids       alert ids to archive, or null to select by the other filters
 * @param type      alert type to match, or null for any type
 * @param olderThan only archive alerts created strictly before this date, or null
 * @param zoneId    only archive alerts of this zone, or null for any zone
 * @return true if the matched alerts were archived; false if some requested ids were missing
 */
@Override
public boolean archiveAlert(List<Long> ids, String type, Date olderThan, Long zoneId) {
    SearchCriteria<AlertVO> sc = AlertSearchByIdsAndType.create();
    if (ids != null) {
        sc.setParameters("id", ids.toArray(new Object[ids.size()]));
    }
    if (type != null) {
        sc.setParameters("type", type);
    }
    if (zoneId != null) {
        sc.setParameters("data_center_id", zoneId);
    }
    if (olderThan != null) {
        sc.setParameters("createdDateL", olderThan);
    }

    List<AlertVO> alerts = listBy(sc);
    if (ids != null && alerts.size() < ids.size()) {
        // Some requested ids no longer exist; report failure without archiving anything.
        return false;
    }

    Transaction txn = Transaction.currentTxn();
    txn.start();
    for (AlertVO alert : alerts) {
        // Lock each row before flipping the flag to avoid racing concurrent updates.
        alert = lockRow(alert.getId(), true);
        alert.setArchived(true);
        update(alert.getId(), alert);
    }
    // Commit once after all rows are updated (the original committed inside the
    // loop, which defeated the purpose of the enclosing transaction).
    txn.commit();
    txn.close();
    return true;
}
/**
 * Deletes the alerts selected by the given optional filters (ANDed together):
 * an explicit id list, an alert type, a zone id, and a created-before cutoff.
 * If an id list is supplied but some of those alerts are already gone,
 * nothing is deleted and {@code false} is returned.
 *
 * @param ids       alert ids to delete, or null to select by the other filters
 * @param type      alert type to match, or null for any type
 * @param olderThan only delete alerts created strictly before this date, or null
 * @param zoneId    only delete alerts of this zone, or null for any zone
 * @return true when the matched alerts were removed; false if requested ids were missing
 */
@Override
public boolean deleteAlert(List<Long> ids, String type, Date olderThan, Long zoneId) {
    SearchCriteria<AlertVO> criteria = AlertSearchByIdsAndType.create();
    if (ids != null) {
        criteria.setParameters("id", ids.toArray(new Object[ids.size()]));
    }
    if (type != null) {
        criteria.setParameters("type", type);
    }
    if (zoneId != null) {
        criteria.setParameters("data_center_id", zoneId);
    }
    if (olderThan != null) {
        criteria.setParameters("createdDateL", olderThan);
    }

    // When explicit ids were requested, verify they all still exist first.
    List<AlertVO> matched = listBy(criteria);
    if (ids != null && matched.size() < ids.size()) {
        return false;
    }

    remove(criteria);
    return true;
}
/**
 * Lists every alert (including removed ones) created strictly before the
 * given time; used by the alert purge task.
 *
 * @param oldTime cutoff date; returns null when null is passed
 * @return alerts created before the cutoff, including removed rows
 */
@Override
public List<AlertVO> listOlderAlerts(Date oldTime) {
if (oldTime == null) return null;
SearchCriteria<AlertVO> sc = createSearchCriteria();
// NOTE(review): the attribute name here is "createDate" while the search
// builder above uses entity().getCreatedDate() ("createdDate") — confirm
// this matches the actual AlertVO field name.
sc.addAnd("createDate", SearchCriteria.Op.LT, oldTime);
return listIncludingRemovedBy(sc, null);
}
}

View File

@ -45,6 +45,7 @@ import org.apache.cloudstack.api.response.UserResponse;
import org.apache.cloudstack.api.response.UserVmResponse;
import org.apache.cloudstack.api.response.VolumeResponse;
import org.apache.cloudstack.api.response.ZoneResponse;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.springframework.stereotype.Component;
@ -254,7 +255,7 @@ public class ApiDBUtils {
static HostPodDao _podDao;
static ServiceOfferingDao _serviceOfferingDao;
static SnapshotDao _snapshotDao;
static StoragePoolDao _storagePoolDao;
static PrimaryDataStoreDao _storagePoolDao;
static VMTemplateDao _templateDao;
static VMTemplateDetailsDao _templateDetailsDao;
static VMTemplateHostDao _templateHostDao;
@ -357,7 +358,7 @@ public class ApiDBUtils {
@Inject private HostPodDao podDao;
@Inject private ServiceOfferingDao serviceOfferingDao;
@Inject private SnapshotDao snapshotDao;
@Inject private StoragePoolDao storagePoolDao;
@Inject private PrimaryDataStoreDao storagePoolDao;
@Inject private VMTemplateDao templateDao;
@Inject private VMTemplateDetailsDao templateDetailsDao;
@Inject private VMTemplateHostDao templateHostDao;

View File

@ -50,6 +50,8 @@ import org.apache.cloudstack.api.InternalIdentity;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.Validate;
import org.apache.cloudstack.api.command.user.event.ArchiveEventsCmd;
import org.apache.cloudstack.api.command.user.event.DeleteEventsCmd;
import org.apache.cloudstack.api.command.user.event.ListEventsCmd;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
@ -391,7 +393,7 @@ public class ApiDispatcher {
// This piece of code is for maintaining backward compatibility
// and support both the date formats(Bug 9724)
// Do the date messaging for ListEventsCmd only
if (cmdObj instanceof ListEventsCmd) {
if (cmdObj instanceof ListEventsCmd || cmdObj instanceof DeleteEventsCmd || cmdObj instanceof ArchiveEventsCmd) {
boolean isObjInNewDateFormat = isObjInNewDateFormat(paramObj.toString());
if (isObjInNewDateFormat) {
DateFormat newFormat = BaseCmd.NEW_INPUT_FORMAT;
@ -406,6 +408,8 @@ public class ApiDispatcher {
date = messageDate(date, 0, 0, 0);
} else if (field.getName().equals("endDate")) {
date = messageDate(date, 23, 59, 59);
} else if (field.getName().equals("olderThan")) {
date = messageDate(date, 0, 0, 0);
}
field.set(cmdObj, date);
}

View File

@ -397,6 +397,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService {
sb.and("state", sb.entity().getState(), SearchCriteria.Op.NEQ);
sb.and("startId", sb.entity().getStartId(), SearchCriteria.Op.EQ);
sb.and("createDate", sb.entity().getCreateDate(), SearchCriteria.Op.BETWEEN);
sb.and("archived", sb.entity().getArchived(), SearchCriteria.Op.EQ);
SearchCriteria<EventJoinVO> sc = sb.create();
// building ACL condition
@ -430,6 +431,8 @@ public class QueryManagerImpl extends ManagerBase implements QueryService {
sc.setParameters("createDateL", endDate);
}
sc.setParameters("archived", false);
Pair<List<EventJoinVO>, Integer> eventPair = null;
// event_view will not have duplicate rows for each event, so searchAndCount should be good enough.
if ((entryTime != null) && (duration != null)) {

View File

@ -170,6 +170,29 @@ public class DomainRouterJoinDaoImpl extends GenericDaoBase<DomainRouterJoinVO,
public DomainRouterResponse setDomainRouterResponse(DomainRouterResponse vrData, DomainRouterJoinVO vr) {
long nic_id = vr.getNicId();
if (nic_id > 0) {
TrafficType ty = vr.getTrafficType();
if (ty != null) {
// legacy code, public/control/guest nic info is kept in
// nics response object
if (ty == TrafficType.Public) {
vrData.setPublicIp(vr.getIpAddress());
vrData.setPublicMacAddress(vr.getMacAddress());
vrData.setPublicNetmask(vr.getNetmask());
vrData.setGateway(vr.getGateway());
vrData.setPublicNetworkId(vr.getNetworkUuid());
} else if (ty == TrafficType.Control) {
vrData.setLinkLocalIp(vr.getIpAddress());
vrData.setLinkLocalMacAddress(vr.getMacAddress());
vrData.setLinkLocalNetmask(vr.getNetmask());
vrData.setLinkLocalNetworkId(vr.getNetworkUuid());
} else if (ty == TrafficType.Guest) {
vrData.setGuestIpAddress(vr.getIpAddress());
vrData.setGuestMacAddress(vr.getMacAddress());
vrData.setGuestNetmask(vr.getNetmask());
vrData.setGuestNetworkId(vr.getNetworkUuid());
vrData.setNetworkDomain(vr.getNetworkDomain());
}
}
NicResponse nicResponse = new NicResponse();
nicResponse.setId(vr.getNicUuid());
nicResponse.setIpaddress(vr.getIpAddress());

View File

@ -104,6 +104,8 @@ public class EventJoinVO extends BaseViewVO implements ControlledViewEntity {
@Column(name="project_name")
private String projectName;
@Column(name="archived")
private boolean archived;
public EventJoinVO() {
@ -313,5 +315,12 @@ public class EventJoinVO extends BaseViewVO implements ControlledViewEntity {
this.parameters = parameters;
}
// Whether this event has been archived (filtered out of default event listings).
public boolean getArchived() {
return archived;
}
// NOTE(review): takes a boxed Boolean but assigns to a primitive field, so a
// null argument would throw an NPE on unboxing — confirm callers never pass null.
public void setArchived(Boolean archived) {
this.archived = archived;
}
}

View File

@ -27,6 +27,7 @@ import java.util.Map;
import javax.ejb.Local;
import javax.inject.Inject;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
@ -34,7 +35,6 @@ import org.springframework.stereotype.Component;
import com.cloud.capacity.Capacity;
import com.cloud.capacity.CapacityVO;
import com.cloud.storage.Storage;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.utils.Pair;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
@ -62,7 +62,7 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
private final SearchBuilder<CapacityVO> _hostIdTypeSearch;
private final SearchBuilder<CapacityVO> _hostOrPoolIdSearch;
private final SearchBuilder<CapacityVO> _allFieldsSearch;
@Inject protected StoragePoolDao _storagePoolDao;
@Inject protected PrimaryDataStoreDao _storagePoolDao;
private static final String LIST_HOSTS_IN_CLUSTER_WITH_ENOUGH_CAPACITY = " SELECT host_capacity.host_id FROM (`cloud`.`host` JOIN `cloud`.`op_host_capacity` host_capacity ON (host.id = host_capacity.host_id AND host.cluster_id = ?) JOIN `cloud`.`cluster_details` cluster_details ON (host_capacity.cluster_id = cluster_details.cluster_id) AND host.type = ? AND cluster_details.name='cpuOvercommitRatio' AND ((host_capacity.total_capacity *cluster_details.value ) - host_capacity.used_capacity) >= ? and host_capacity.capacity_type = '1' " +

View File

@ -204,9 +204,10 @@ public enum Config {
SecStorageSessionMax("Advanced", AgentManager.class, Integer.class, "secstorage.session.max", "50", "The max number of command execution sessions that a SSVM can handle", null),
SecStorageCmdExecutionTimeMax("Advanced", AgentManager.class, Integer.class, "secstorage.cmd.execution.time.max", "30", "The max command execution time in minute", null),
SecStorageProxy("Advanced", AgentManager.class, String.class, "secstorage.proxy", null, "http proxy used by ssvm, in http://username:password@proxyserver:port format", null),
AlertPurgeInterval("Advanced", ManagementServer.class, Integer.class, "alert.purge.interval", "86400", "The interval (in seconds) to wait before running the alert purge thread", null),
AlertPurgeDelay("Advanced", ManagementServer.class, Integer.class, "alert.purge.delay", "0", "Alerts older than specified number days will be purged. Set this value to 0 to never delete alerts", null),
DirectAttachNetworkEnabled("Advanced", ManagementServer.class, Boolean.class, "direct.attach.network.externalIpAllocator.enabled", "false", "Direct-attach VMs using external DHCP server", "true,false"),
DirectAttachNetworkEnabled("Advanced", ManagementServer.class, Boolean.class, "direct.attach.network.externalIpAllocator.enabled", "false", "Direct-attach VMs using external DHCP server", "true,false"),
DirectAttachNetworkExternalAPIURL("Advanced", ManagementServer.class, String.class, "direct.attach.network.externalIpAllocator.url", null, "Direct-attach VMs using external DHCP server (API url)", null),
CheckPodCIDRs("Advanced", ManagementServer.class, String.class, "check.pod.cidrs", "true", "If true, different pods must belong to different CIDR subnets.", "true,false"),
NetworkGcWait("Advanced", ManagementServer.class, Integer.class, "network.gc.wait", "600", "Time (in seconds) to wait before shutting down a network that's not in used", null),

View File

@ -2330,10 +2330,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
Vlan vlan = createVlanAndPublicIpRange(zoneId, networkId, physicalNetworkId, forVirtualNetwork, podId, startIP,
endIP, vlanGateway, vlanNetmask, vlanId, vlanOwner, startIPv6, endIPv6, ip6Gateway, ip6Cidr);
txn.commit();
if (associateIpRangeToAccount) {
_networkMgr.associateIpAddressListToAccount(userId, vlanOwner.getId(), zoneId, vlan.getId(), null);
}
txn.commit();
// Associate ips to the network
if (associateIpRangeToAccount) {

View File

@ -31,6 +31,7 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.log4j.Logger;
@ -115,7 +116,6 @@ import com.cloud.storage.VMTemplateHostVO;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.VMTemplateStorageResourceAssoc.Status;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplateHostDao;
import com.cloud.template.TemplateManager;
@ -221,7 +221,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy
@Inject
NetworkOfferingDao _networkOfferingDao;
@Inject
StoragePoolDao _storagePoolDao;
PrimaryDataStoreDao _storagePoolDao;
@Inject
UserVmDetailsDao _vmDetailsDao;
@Inject

View File

@ -29,6 +29,7 @@ import javax.naming.ConfigurationException;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.log4j.Logger;
import com.cloud.agent.manager.allocator.HostAllocator;
@ -67,7 +68,6 @@ import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.GuestOSCategoryDao;
import com.cloud.storage.dao.GuestOSDao;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.AccountManager;
@ -96,7 +96,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
@Inject protected VolumeDao _volsDao;
@Inject protected CapacityManager _capacityMgr;
@Inject protected ConfigurationDao _configDao;
@Inject protected StoragePoolDao _storagePoolDao;
@Inject protected PrimaryDataStoreDao _storagePoolDao;
@Inject protected CapacityDao _capacityDao;
@Inject protected AccountManager _accountMgr;
@Inject protected StorageManager _storageMgr;

View File

@ -21,12 +21,12 @@ import java.util.List;
import javax.ejb.Local;
import javax.inject.Inject;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.host.HostVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.utils.component.AdapterBase;
import com.cloud.vm.VMInstanceVO;
@ -37,7 +37,7 @@ import com.cloud.vm.VirtualMachine;
public class RecreatableFencer extends AdapterBase implements FenceBuilder {
private static final Logger s_logger = Logger.getLogger(RecreatableFencer.class);
@Inject VolumeDao _volsDao;
@Inject StoragePoolDao _poolDao;
@Inject PrimaryDataStoreDao _poolDao;
public RecreatableFencer() {
super();

View File

@ -819,7 +819,8 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase
}
} else {
s_logger.debug("Revoking a rule for an inline load balancer that has not been programmed yet.");
return null;
nic.setNic(null);
return nic;
}
}
@ -877,9 +878,9 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase
MappingNic nic = getLoadBalancingIpNic(zone, network, rule.getSourceIpAddressId(), revoked, null);
mappingStates.add(nic.getState());
NicVO loadBalancingIpNic = nic.getNic();
if (loadBalancingIpNic == null) {
continue;
}
if (loadBalancingIpNic == null) {
continue;
}
// Change the source IP address for the load balancing rule to be the load balancing IP address
srcIp = loadBalancingIpNic.getIp4Address();

View File

@ -1164,7 +1164,6 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{
if (vpcElements == null) {
vpcElements = new ArrayList<VpcProvider>();
vpcElements.add((VpcProvider)_ntwkModel.getElementImplementingProvider(Provider.VPCVirtualRouter.getName()));
vpcElements.add((VpcProvider)_ntwkModel.getElementImplementingProvider(Provider.VPCNetscaler.getName()));
}
if (vpcElements == null) {

View File

@ -204,7 +204,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager {
StringBuilder acctNm = new StringBuilder("PrjAcct-");
acctNm.append(name).append("-").append(owner.getDomainId());
Account projectAccount = _accountMgr.createAccount(acctNm.toString(), Account.ACCOUNT_TYPE_PROJECT, domainId, null, null);
Account projectAccount = _accountMgr.createAccount(acctNm.toString(), Account.ACCOUNT_TYPE_PROJECT, domainId, null, null, "", 0);
Project project = _projectDao.persist(new ProjectVO(name, displayText, owner.getDomainId(), projectAccount.getId()));

View File

@ -45,6 +45,7 @@ import org.apache.cloudstack.api.command.admin.storage.AddS3Cmd;
import org.apache.cloudstack.api.command.admin.storage.ListS3sCmd;
import org.apache.cloudstack.api.command.admin.swift.AddSwiftCmd;
import org.apache.cloudstack.api.command.admin.swift.ListSwiftsCmd;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
@ -117,7 +118,6 @@ import com.cloud.storage.Swift;
import com.cloud.storage.SwiftVO;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.dao.GuestOSCategoryDao;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.s3.S3Manager;
@ -193,7 +193,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
@Inject
protected GuestOSCategoryDao _guestOSCategoryDao;
@Inject
protected StoragePoolDao _storagePoolDao;
protected PrimaryDataStoreDao _storagePoolDao;
@Inject
protected DataCenterIpAddressDao _privateIPAddressDao;
@Inject

View File

@ -628,7 +628,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
ResourceType resourceType = null;
if (typeId != null) {
for (ResourceType type : resourceTypes) {
for (ResourceType type : Resource.ResourceType.values()) {
if (type.getOrdinal() == typeId.intValue()) {
resourceType = type;
}

View File

@ -603,8 +603,16 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
throw new CloudRuntimeException("No home directory was detected for the user '" + username + "'. Please check the profile of this user.");
}
File privkeyfile = new File(homeDir + "/.ssh/id_rsa");
File pubkeyfile = new File(homeDir + "/.ssh/id_rsa.pub");
// Using non-default file names (id_rsa.cloud and id_rsa.cloud.pub) in developer mode. This is to prevent SSH keys overwritten for user running management server
File privkeyfile = null;
File pubkeyfile = null;
if (devel) {
privkeyfile = new File(homeDir + "/.ssh/id_rsa.cloud");
pubkeyfile = new File(homeDir + "/.ssh/id_rsa.cloud.pub");
} else {
privkeyfile = new File(homeDir + "/.ssh/id_rsa");
pubkeyfile = new File(homeDir + "/.ssh/id_rsa.pub");
}
if (already == null || already.isEmpty()) {
if (s_logger.isInfoEnabled()) {
@ -661,13 +669,8 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
}
} else {
s_logger.info("Keypairs already in database");
if (username.equalsIgnoreCase("cloud")) {
s_logger.info("Keypairs already in database, updating local copy");
updateKeyPairsOnDisk(homeDir);
} else {
s_logger.info("Keypairs already in database, skip updating local copy (not running as cloud user)");
}
s_logger.info("Keypairs already in database, updating local copy");
updateKeyPairsOnDisk(homeDir);
}
s_logger.info("Going to update systemvm iso with generated keypairs if needed");
try {
@ -726,14 +729,22 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
/**
 * Writes the SSH keypair stored in the configuration table ("ssh.publickey" /
 * "ssh.privatekey") out to the given user's ~/.ssh directory, creating the
 * directory if it does not exist.
 *
 * In developer mode the keys go to non-default file names (id_rsa.cloud /
 * id_rsa.cloud.pub) so the SSH keys of the user running the management
 * server are not overwritten. (The original text contained both the
 * unconditional and the conditional writes — diff residue — which would have
 * written the keys twice; only the conditional form is kept.)
 *
 * @param homeDir home directory of the user whose keys are being updated
 */
private void updateKeyPairsOnDisk(String homeDir) {
    File keyDir = new File(homeDir + "/.ssh");
    Boolean devel = Boolean.valueOf(_configDao.getValue("developer"));
    if (!keyDir.isDirectory()) {
        // Only warn when the creation attempt actually fails (the original
        // logged "Failed to create" before even trying to create it).
        if (!keyDir.mkdirs()) {
            s_logger.warn("Failed to create " + homeDir + "/.ssh for storing the SSH keypairs");
        }
    }
    String pubKey = _configDao.getValue("ssh.publickey");
    String prvKey = _configDao.getValue("ssh.privatekey");
    // Non-default file names in developer mode to avoid clobbering the
    // developer's own id_rsa / id_rsa.pub.
    if (devel) {
        writeKeyToDisk(prvKey, homeDir + "/.ssh/id_rsa.cloud");
        writeKeyToDisk(pubKey, homeDir + "/.ssh/id_rsa.cloud.pub");
    } else {
        writeKeyToDisk(prvKey, homeDir + "/.ssh/id_rsa");
        writeKeyToDisk(pubKey, homeDir + "/.ssh/id_rsa.pub");
    }
}
protected void injectSshKeysIntoSystemVmIsoPatch(String publicKeyPath, String privKeyPath) {

View File

@ -47,6 +47,7 @@ import javax.management.MalformedObjectNameException;
import javax.management.NotCompliantMBeanException;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.acl.ControlledEntity;
import org.apache.cloudstack.acl.SecurityChecker.AccessType;
import org.apache.cloudstack.api.ApiConstants;
@ -109,6 +110,7 @@ import org.apache.cloudstack.api.command.user.vpn.*;
import org.apache.cloudstack.api.command.user.zone.*;
import org.apache.cloudstack.api.response.ExtractResponse;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.commons.codec.binary.Base64;
import org.apache.log4j.Logger;
@ -124,6 +126,7 @@ import com.cloud.alert.AlertManager;
import com.cloud.alert.AlertVO;
import com.cloud.alert.dao.AlertDao;
import com.cloud.api.ApiDBUtils;
import com.cloud.api.query.vo.EventJoinVO;
import com.cloud.async.AsyncJobExecutor;
import com.cloud.async.AsyncJobManager;
import com.cloud.async.AsyncJobResult;
@ -187,6 +190,7 @@ import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao;
import com.cloud.info.ConsoleProxyInfo;
import com.cloud.keystore.KeystoreManager;
import com.cloud.network.IpAddress;
import com.cloud.network.as.ConditionVO;
import com.cloud.network.dao.IPAddressDao;
import com.cloud.network.dao.IPAddressVO;
import com.cloud.network.dao.LoadBalancerDao;
@ -220,7 +224,6 @@ import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.GuestOSCategoryDao;
import com.cloud.storage.dao.GuestOSDao;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.UploadDao;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VolumeDao;
@ -262,6 +265,7 @@ import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GlobalLock;
import com.cloud.utils.db.JoinBuilder;
import com.cloud.utils.db.JoinBuilder.JoinType;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.Transaction;
@ -295,7 +299,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
public static final Logger s_logger = Logger.getLogger(ManagementServerImpl.class.getName());
@Inject
private AccountManager _accountMgr;
public AccountManager _accountMgr;
@Inject
private AgentManager _agentMgr;
@Inject
@ -311,7 +315,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
@Inject
private SecondaryStorageVmDao _secStorageVmDao;
@Inject
private EventDao _eventDao;
public EventDao _eventDao;
@Inject
private DataCenterDao _dcDao;
@Inject
@ -347,7 +351,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
@Inject
private AccountDao _accountDao;
@Inject
private AlertDao _alertDao;
public AlertDao _alertDao;
@Inject
private CapacityDao _capacityDao;
@Inject
@ -355,7 +359,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
@Inject
private GuestOSCategoryDao _guestOSCategoryDao;
@Inject
private StoragePoolDao _poolDao;
private PrimaryDataStoreDao _poolDao;
@Inject
private NetworkDao _networkDao;
@Inject
@ -371,6 +375,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
@Inject
private AsyncJobManager _asyncMgr;
private int _purgeDelay;
private int _alertPurgeDelay;
@Inject
private InstanceGroupDao _vmGroupDao;
@Inject
@ -417,6 +422,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
EventUtils _forceEventUtilsRef;
*/
private final ScheduledExecutorService _eventExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("EventChecker"));
private final ScheduledExecutorService _alertExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("AlertChecker"));
private KeystoreManager _ksMgr;
private Map<String, String> _configs;
@ -446,6 +452,15 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
_eventExecutor.scheduleAtFixedRate(new EventPurgeTask(), cleanup, cleanup, TimeUnit.SECONDS);
}
//Alerts purge configurations
int alertPurgeInterval = NumbersUtil.parseInt(_configDao.getValue(Config.AlertPurgeInterval.key()),
60 * 60 * 24); // 1 day.
_alertPurgeDelay = NumbersUtil.parseInt(_configDao.getValue(Config.AlertPurgeDelay.key()), 0);
if (_alertPurgeDelay != 0) {
_alertExecutor.scheduleAtFixedRate(new AlertPurgeTask(), alertPurgeInterval, alertPurgeInterval,
TimeUnit.SECONDS);
}
String[] availableIds = TimeZone.getAvailableIDs();
_availableIdsMap = new HashMap<String, Boolean>(availableIds.length);
for (String id : availableIds) {
@ -538,6 +553,42 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
return _eventDao.search(sc, null);
}
@Override
public boolean archiveEvents(ArchiveEventsCmd cmd) {
    // Resolve the events selected by the id list / type / age filter for this owner.
    List<Long> requestedIds = cmd.getIds();
    List<EventVO> matched = _eventDao.listToArchiveOrDeleteEvents(requestedIds, cmd.getType(), cmd.getOlderThan(), cmd.getEntityOwnerId());

    // The caller must have access to every matched event before anything is archived.
    ControlledEntity[] ownedEvents = matched.toArray(new ControlledEntity[matched.size()]);
    _accountMgr.checkAccess(UserContext.current().getCaller(), null, true, ownedEvents);

    // When explicit ids were supplied but some did not resolve, archive nothing.
    if (requestedIds != null && matched.size() < requestedIds.size()) {
        return false;
    }

    _eventDao.archiveEvents(matched);
    return true;
}
@Override
public boolean deleteEvents(DeleteEventsCmd cmd) {
    // Resolve the events selected by the id list / type / age filter for this owner.
    List<Long> requestedIds = cmd.getIds();
    List<EventVO> matched = _eventDao.listToArchiveOrDeleteEvents(requestedIds, cmd.getType(), cmd.getOlderThan(), cmd.getEntityOwnerId());

    // The caller must have access to every matched event before anything is removed.
    ControlledEntity[] ownedEvents = matched.toArray(new ControlledEntity[matched.size()]);
    _accountMgr.checkAccess(UserContext.current().getCaller(), null, true, ownedEvents);

    // When explicit ids were supplied but some did not resolve, delete nothing.
    if (requestedIds != null && matched.size() < requestedIds.size()) {
        return false;
    }

    // Soft-remove each matched event individually.
    for (EventVO event : matched) {
        _eventDao.remove(event.getId());
    }
    return true;
}
private Date massageDate(Date date, int hourOfDay, int minute, int second) {
Calendar cal = Calendar.getInstance();
cal.setTime(date);
@ -1663,10 +1714,25 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
sc.addAnd("type", SearchCriteria.Op.EQ, type);
}
sc.addAnd("archived", SearchCriteria.Op.EQ, false);
Pair<List<AlertVO>, Integer> result = _alertDao.searchAndCount(sc, searchFilter);
return new Pair<List<? extends Alert>, Integer>(result.first(), result.second());
}
@Override
public boolean archiveAlerts(ArchiveAlertsCmd cmd) {
    // Scope the operation to the zone the caller is authorized for, then delegate to the DAO.
    Long zoneId = _accountMgr.checkAccessAndSpecifyAuthority(UserContext.current().getCaller(), null);
    return _alertDao.archiveAlert(cmd.getIds(), cmd.getType(), cmd.getOlderThan(), zoneId);
}
@Override
public boolean deleteAlerts(DeleteAlertsCmd cmd) {
    // Scope the operation to the zone the caller is authorized for, then delegate to the DAO.
    Long zoneId = _accountMgr.checkAccessAndSpecifyAuthority(UserContext.current().getCaller(), null);
    return _alertDao.deleteAlert(cmd.getIds(), cmd.getType(), cmd.getOlderThan(), zoneId);
}
@Override
public List<CapacityVO> listTopConsumedResources(ListCapacityCmd cmd) {
@ -2168,6 +2234,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
cmdList.add(AddIpToVmNicCmd.class);
cmdList.add(RemoveIpFromVmNicCmd.class);
cmdList.add(ListNicsCmd.class);
cmdList.add(ArchiveAlertsCmd.class);
cmdList.add(DeleteAlertsCmd.class);
cmdList.add(ArchiveEventsCmd.class);
cmdList.add(DeleteEventsCmd.class);
return cmdList;
}
@ -2205,6 +2275,39 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
}
}
/**
 * Periodic task that expunges alerts older than {@code _alertPurgeDelay} days.
 * Scheduled on {@code _alertExecutor} at the configured purge interval; a global
 * lock keeps concurrent management servers from purging at the same time.
 */
protected class AlertPurgeTask implements Runnable {
    @Override
    public void run() {
        try {
            GlobalLock lock = GlobalLock.getInternLock("AlertPurge");
            if (lock == null) {
                s_logger.debug("Couldn't get the global lock");
                return;
            }
            // Give up quietly if another server holds the lock; the next run will retry.
            if (!lock.lock(30)) {
                s_logger.debug("Couldn't lock the db");
                return;
            }
            try {
                // Cutoff = now minus the configured retention, in days.
                final Calendar purgeCal = Calendar.getInstance();
                purgeCal.add(Calendar.DAY_OF_YEAR, -_alertPurgeDelay);
                Date purgeTime = purgeCal.getTime();
                s_logger.debug("Deleting alerts older than: " + purgeTime.toString());
                List<AlertVO> oldAlerts = _alertDao.listOlderAlerts(purgeTime);
                // Fixed: this message previously said "events" although alerts are purged here.
                s_logger.debug("Found " + oldAlerts.size() + " alerts to be purged");
                for (AlertVO alert : oldAlerts) {
                    _alertDao.expunge(alert.getId());
                }
            } catch (Exception e) {
                // Best-effort maintenance task: log and let the next scheduled run retry.
                s_logger.error("Exception ", e);
            } finally {
                lock.unlock();
            }
        } catch (Exception e) {
            s_logger.error("Exception ", e);
        }
    }
}
@Override
public Pair<List<StoragePoolVO>, Integer> searchForStoragePools(Criteria c) {

View File

@ -18,6 +18,7 @@ package com.cloud.storage;
import javax.inject.Inject;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.log4j.Logger;
import com.cloud.agent.Listener;
@ -33,13 +34,12 @@ import com.cloud.dc.dao.DataCenterDao;
import com.cloud.exception.ConnectionException;
import com.cloud.host.HostVO;
import com.cloud.host.Status;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.db.DB;
public class LocalStoragePoolListener implements Listener {
private final static Logger s_logger = Logger.getLogger(LocalStoragePoolListener.class);
@Inject StoragePoolDao _storagePoolDao;
@Inject PrimaryDataStoreDao _storagePoolDao;
@Inject StoragePoolHostDao _storagePoolHostDao;
@Inject CapacityDao _capacityDao;
@Inject StorageManager _storageMgr;

View File

@ -25,6 +25,7 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
@ -43,7 +44,6 @@ import com.cloud.resource.ResourceListener;
import com.cloud.resource.ResourceManager;
import com.cloud.resource.ServerResource;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.Ternary;
import com.cloud.utils.component.ManagerBase;
@ -63,7 +63,7 @@ public class OCFS2ManagerImpl extends ManagerBase implements OCFS2Manager, Resou
@Inject ClusterDao _clusterDao;
@Inject ResourceManager _resourceMgr;
@Inject StoragePoolHostDao _poolHostDao;
@Inject StoragePoolDao _poolDao;
@Inject PrimaryDataStoreDao _poolDao;
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {

View File

@ -1395,7 +1395,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
List<StoragePoolVO> spes = _storagePoolDao.listBy(
primaryStorage.getDataCenterId(), primaryStorage.getPodId(),
primaryStorage.getClusterId());
primaryStorage.getClusterId(), ScopeType.CLUSTER);
for (StoragePoolVO sp : spes) {
if (sp.getStatus() == StoragePoolStatus.PrepareForMaintenance) {
throw new CloudRuntimeException(

View File

@ -1444,8 +1444,8 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
List<StoragePoolVO> matchingVMPools = _storagePoolDao
.findPoolsByTags(vmRootVolumePool.getDataCenterId(),
vmRootVolumePool.getPodId(),
vmRootVolumePool.getClusterId(), volumeTags,
isVolumeOnSharedPool);
vmRootVolumePool.getClusterId(), volumeTags
);
boolean moveVolumeNeeded = true;
if (matchingVMPools.size() == 0) {

View File

@ -1,113 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage.dao;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.utils.db.GenericDao;
/**
 * Data Access Object for the {@code storage_pool} table.
 */
public interface StoragePoolDao extends GenericDao<StoragePoolVO, Long> {
    /**
     * Lists all pools in a datacenter (availability zone).
     * @param datacenterId -- the id of the datacenter (availability zone)
     */
    List<StoragePoolVO> listByDataCenterId(long datacenterId);

    /**
     * Lists pools in a datacenter/pod, optionally narrowed to one cluster,
     * restricted to the given scope.
     * @param datacenterId -- the id of the datacenter (availability zone)
     * @param podId pod id
     * @param clusterId cluster id; null means any cluster in the pod
     * @param scope scope (e.g. cluster-wide vs host-local) to match
     */
    List<StoragePoolVO> listBy(long datacenterId, long podId, Long clusterId, ScopeType scope);

    /**
     * Set capacity of storage pool in bytes
     * @param id pool id.
     * @param capacity capacity in bytes
     */
    void updateCapacity(long id, long capacity);

    /**
     * Set available bytes of storage pool in bytes
     * @param id pool id.
     * @param available available capacity in bytes
     */
    void updateAvailable(long id, long available);

    /**
     * Persists a pool together with its detail key/value pairs.
     * @param pool pool to persist
     * @param details detail entries stored alongside the pool; may be null
     */
    StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details);

    /**
     * Find pool by name.
     *
     * @param name name of pool.
     * @return the single StoragePoolVO
     */
    List<StoragePoolVO> findPoolByName(String name);

    /**
     * Find pools by the pod that matches the details.
     *
     * @param podId pod id to find the pools in.
     * @param details details to match. All must match for the pool to be returned.
     * @return List of StoragePoolVO
     */
    List<StoragePoolVO> findPoolsByDetails(long dcId, long podId, Long clusterId, Map<String, String> details, ScopeType scope);

    /**
     * Finds cluster-scope pools matching all given storage tags; a null/empty
     * tag array matches untagged listing.
     */
    List<StoragePoolVO> findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags);

    /**
     * Find pool by UUID.
     *
     * @param uuid uuid of pool.
     * @return the single StoragePoolVO
     */
    StoragePoolVO findPoolByUUID(String uuid);

    /** Lists pools (including removed) whose host address matches the given FQDN or IP. */
    List<StoragePoolVO> listByStorageHost(String hostFqdnOrIp);

    /** Finds the single pool matching host address, path, zone, pod and uuid. */
    StoragePoolVO findPoolByHostPath(long dcId, Long podId, String host, String path, String uuid);

    /** Lists pools matching a host address and mount path, across zones. */
    List<StoragePoolVO> listPoolByHostPath(String host, String path);

    /** Replaces the detail entries of a pool; a null map is a no-op. */
    void updateDetails(long poolId, Map<String, String> details);

    /** Returns the detail key/value pairs stored for a pool. */
    Map<String, String> getDetails(long poolId);

    /** Returns the names of detail entries of a pool whose value matches {@code value}. */
    List<String> searchForStoragePoolDetails(long poolId, String value);

    /** Lists all (non-removed) pools sharing the given uuid, for duplicate detection. */
    List<StoragePoolVO> findIfDuplicatePoolsExistByUUID(String uuid);

    /** Lists pools in the given status across all zones. */
    List<StoragePoolVO> listByStatus(StoragePoolStatus status);

    /** Counts pools whose status is any of the given statuses. */
    long countPoolsByStatus(StoragePoolStatus... statuses);

    /** Lists pools in the given status within one zone. */
    List<StoragePoolVO> listByStatusInZone(long dcId, StoragePoolStatus status);

    /** Lists pools belonging to a cluster. */
    List<StoragePoolVO> listPoolsByCluster(long clusterId);

    /** Like {@link #findPoolsByTags} but restricted to host-local (HOST scope) pools. */
    List<StoragePoolVO> findLocalStoragePoolsByTags(long dcId, long podId,
            Long clusterId, String[] tags);

    /** Finds zone-wide (ZONE scope) pools in a datacenter matching all given tags. */
    List<StoragePoolVO> findZoneWideStoragePoolsByTags(long dcId, String[] tags);
}

View File

@ -1,432 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage.dao;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.springframework.stereotype.Component;
import com.cloud.host.Status;
import com.cloud.storage.StoragePoolDetailVO;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Func;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.SearchCriteria2;
import com.cloud.utils.db.SearchCriteriaService;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.exception.CloudRuntimeException;
@Component
@Local(value={StoragePoolDao.class}) @DB(txn=false)
// DAO for the storage_pool table. Lookups are built either from SearchBuilder
// templates prepared once in the constructor, or from hand-built SQL when the
// query must join storage_pool_details with a variable number of conditions.
public class StoragePoolDaoImpl extends GenericDaoBase<StoragePoolVO, Long> implements StoragePoolDao {
    // Reusable search templates, initialized once in the constructor.
    protected final SearchBuilder<StoragePoolVO> AllFieldSearch;
    protected final SearchBuilder<StoragePoolVO> DcPodSearch;
    protected final SearchBuilder<StoragePoolVO> DcPodAnyClusterSearch;
    protected final SearchBuilder<StoragePoolVO> DeleteLvmSearch;
    protected final GenericSearchBuilder<StoragePoolVO, Long> StatusCountSearch;
    @Inject protected StoragePoolDetailsDao _detailsDao;

    // SQL fragments for detail-matching queries; the variable middle part
    // (one OR-clause per detail) is appended between prefix and suffix.
    private final String DetailsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? and (";
    private final String DetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?";
    private final String ZoneWideDetailsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and storage_pool.scope = ? and (";
    private final String ZoneWideDetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?";
    private final String FindPoolTagDetails = "SELECT storage_pool_details.name FROM storage_pool_details WHERE pool_id = ? and value = ?";

    protected StoragePoolDaoImpl() {
        // Equality conditions on individual columns; callers set only the
        // parameters they need — unset conditions are ignored.
        AllFieldSearch = createSearchBuilder();
        AllFieldSearch.and("name", AllFieldSearch.entity().getName(), SearchCriteria.Op.EQ);
        AllFieldSearch.and("uuid", AllFieldSearch.entity().getUuid(), SearchCriteria.Op.EQ);
        AllFieldSearch.and("datacenterId", AllFieldSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
        AllFieldSearch.and("hostAddress", AllFieldSearch.entity().getHostAddress(), SearchCriteria.Op.EQ);
        AllFieldSearch.and("status",AllFieldSearch.entity().getStatus(),SearchCriteria.Op.EQ);
        AllFieldSearch.and("path", AllFieldSearch.entity().getPath(), SearchCriteria.Op.EQ);
        AllFieldSearch.and("podId", AllFieldSearch.entity().getPodId(), Op.EQ);
        AllFieldSearch.and("clusterId", AllFieldSearch.entity().getClusterId(), Op.EQ);
        AllFieldSearch.done();

        // Zone/pod/scope lookup for a specific cluster; pod and cluster each
        // match either the given id or NULL (pod/cluster-less pools).
        DcPodSearch = createSearchBuilder();
        DcPodSearch.and("datacenterId", DcPodSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
        DcPodSearch.and("status", DcPodSearch.entity().getStatus(), SearchCriteria.Op.EQ);
        DcPodSearch.and("scope", DcPodSearch.entity().getScope(), SearchCriteria.Op.EQ);
        DcPodSearch.and().op("nullpod", DcPodSearch.entity().getPodId(), SearchCriteria.Op.NULL);
        DcPodSearch.or("podId", DcPodSearch.entity().getPodId(), SearchCriteria.Op.EQ);
        DcPodSearch.cp();
        DcPodSearch.and().op("nullcluster", DcPodSearch.entity().getClusterId(), SearchCriteria.Op.NULL);
        DcPodSearch.or("cluster", DcPodSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
        DcPodSearch.cp();
        DcPodSearch.done();

        // Same as DcPodSearch but without any cluster condition.
        DcPodAnyClusterSearch = createSearchBuilder();
        DcPodAnyClusterSearch.and("datacenterId", DcPodAnyClusterSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
        DcPodAnyClusterSearch.and("status", DcPodAnyClusterSearch.entity().getStatus(), SearchCriteria.Op.EQ);
        DcPodAnyClusterSearch.and("scope", DcPodAnyClusterSearch.entity().getScope(), SearchCriteria.Op.EQ);
        DcPodAnyClusterSearch.and().op("nullpod", DcPodAnyClusterSearch.entity().getPodId(), SearchCriteria.Op.NULL);
        DcPodAnyClusterSearch.or("podId", DcPodAnyClusterSearch.entity().getPodId(), SearchCriteria.Op.EQ);
        DcPodAnyClusterSearch.cp();
        DcPodAnyClusterSearch.done();

        // Matches pools by id whose type is LVM or Filesystem.
        DeleteLvmSearch = createSearchBuilder();
        DeleteLvmSearch.and("ids", DeleteLvmSearch.entity().getId(), SearchCriteria.Op.IN);
        DeleteLvmSearch.and().op("LVM", DeleteLvmSearch.entity().getPoolType(), SearchCriteria.Op.EQ);
        DeleteLvmSearch.or("Filesystem", DeleteLvmSearch.entity().getPoolType(), SearchCriteria.Op.EQ);
        DeleteLvmSearch.cp();
        DeleteLvmSearch.done();

        // COUNT(*) of pools whose status is IN the given set.
        StatusCountSearch = createSearchBuilder(Long.class);
        StatusCountSearch.and("status", StatusCountSearch.entity().getStatus(), SearchCriteria.Op.IN);
        StatusCountSearch.select(null, Func.COUNT, null);
        StatusCountSearch.done();
    }

    // Name lookup; includes removed pools.
    @Override
    public List<StoragePoolVO> findPoolByName(String name) {
        SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
        sc.setParameters("name", name);
        return listIncludingRemovedBy(sc);
    }

    // UUID lookup; includes removed pools.
    @Override
    public StoragePoolVO findPoolByUUID(String uuid) {
        SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
        sc.setParameters("uuid", uuid);
        return findOneIncludingRemovedBy(sc);
    }

    // UUID lookup restricted to non-removed pools, used for duplicate detection.
    @Override
    public List<StoragePoolVO> findIfDuplicatePoolsExistByUUID(String uuid) {
        SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
        sc.setParameters("uuid", uuid);
        return listBy(sc);
    }

    @Override
    public List<StoragePoolVO> listByDataCenterId(long datacenterId) {
        SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
        sc.setParameters("datacenterId", datacenterId);
        return listBy(sc);
    }

    // Updates only the available-bytes column via a partial-update bean.
    @Override
    public void updateAvailable(long id, long available) {
        StoragePoolVO pool = createForUpdate(id);
        pool.setAvailableBytes(available);
        update(id, pool);
    }

    // Updates only the capacity-bytes column via a partial-update bean.
    @Override
    public void updateCapacity(long id, long capacity) {
        StoragePoolVO pool = createForUpdate(id);
        pool.setCapacityBytes(capacity);
        update(id, pool);
    }

    @Override
    public List<StoragePoolVO> listByStorageHost(String hostFqdnOrIp) {
        SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
        sc.setParameters("hostAddress", hostFqdnOrIp);
        return listIncludingRemovedBy(sc);
    }

    @Override
    public List<StoragePoolVO> listByStatus(StoragePoolStatus status){
        SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
        sc.setParameters("status", status);
        return listBy(sc);
    }

    @Override
    public List<StoragePoolVO> listByStatusInZone(long dcId, StoragePoolStatus status){
        SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
        sc.setParameters("status", status);
        sc.setParameters("datacenterId", dcId);
        return listBy(sc);
    }

    @Override
    public StoragePoolVO findPoolByHostPath(long datacenterId, Long podId, String host, String path, String uuid) {
        SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
        sc.setParameters("hostAddress", host);
        sc.setParameters("path", path);
        sc.setParameters("datacenterId", datacenterId);
        sc.setParameters("podId", podId);
        sc.setParameters("uuid", uuid);
        return findOneBy(sc);
    }

    // Lists Up pools for zone/pod/scope; when clusterId is given, also matches
    // that cluster (or cluster-less pools), otherwise ignores the cluster column.
    @Override
    public List<StoragePoolVO> listBy(long datacenterId, long podId, Long clusterId, ScopeType scope) {
        if (clusterId != null) {
            SearchCriteria<StoragePoolVO> sc = DcPodSearch.create();
            sc.setParameters("datacenterId", datacenterId);
            sc.setParameters("podId", podId);
            sc.setParameters("status", Status.Up);
            sc.setParameters("scope", scope);
            sc.setParameters("cluster", clusterId);
            return listBy(sc);
        } else {
            SearchCriteria<StoragePoolVO> sc = DcPodAnyClusterSearch.create();
            sc.setParameters("datacenterId", datacenterId);
            sc.setParameters("podId", podId);
            sc.setParameters("status", Status.Up);
            sc.setParameters("scope", scope);
            return listBy(sc);
        }
    }

    @Override
    public List<StoragePoolVO> listPoolByHostPath(String host, String path) {
        SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
        sc.setParameters("hostAddress", host);
        sc.setParameters("path", path);
        return listBy(sc);
    }

    // NOTE(review): not part of the StoragePoolDao interface, and AllFieldSearch
    // defines no "id" condition — setParameters("id", ...) looks like it would
    // fail at runtime; appears to be dead/broken code. TODO confirm and remove.
    public StoragePoolVO listById(Integer id)
    {
        SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
        sc.setParameters("id", id);
        return findOneIncludingRemovedBy(sc);
    }

    // Persists the pool and its detail rows in a single transaction.
    @Override @DB
    public StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details) {
        Transaction txn = Transaction.currentTxn();
        txn.start();
        pool = super.persist(pool);
        if (details != null) {
            for (Map.Entry<String, String> detail : details.entrySet()) {
                StoragePoolDetailVO vo = new StoragePoolDetailVO(pool.getId(), detail.getKey(), detail.getValue());
                _detailsDao.persist(vo);
            }
        }
        txn.commit();
        return pool;
    }

    // Finds Up pools in a zone/pod (optionally cluster) that carry ALL of the
    // given detail key/value pairs, enforced via the HAVING COUNT(...) >= size
    // clause. Detail keys/values are concatenated into the SQL directly —
    // callers pass internal tag data, not untrusted input; assumes details is
    // non-empty (the trailing " OR " delete would corrupt the prefix otherwise).
    @DB
    @Override
    public List<StoragePoolVO> findPoolsByDetails(long dcId, long podId, Long clusterId, Map<String, String> details, ScopeType scope) {
        StringBuilder sql = new StringBuilder(DetailsSqlPrefix);
        if (clusterId != null) {
            sql.append("storage_pool.cluster_id = ? OR storage_pool.cluster_id IS NULL) AND (");
        }
        for (Map.Entry<String, String> detail : details.entrySet()) {
            sql.append("((storage_pool_details.name='").append(detail.getKey()).append("') AND (storage_pool_details.value='").append(detail.getValue()).append("')) OR ");
        }
        sql.delete(sql.length() - 4, sql.length()); // strip the trailing " OR "
        sql.append(DetailsSqlSuffix);
        Transaction txn = Transaction.currentTxn();
        PreparedStatement pstmt = null;
        try {
            pstmt = txn.prepareAutoCloseStatement(sql.toString());
            int i = 1;
            pstmt.setLong(i++, dcId);
            pstmt.setLong(i++, podId);
            pstmt.setString(i++, scope.toString());
            if (clusterId != null) {
                pstmt.setLong(i++, clusterId);
            }
            pstmt.setInt(i++, details.size());
            ResultSet rs = pstmt.executeQuery();
            List<StoragePoolVO> pools = new ArrayList<StoragePoolVO>();
            while (rs.next()) {
                pools.add(toEntityBean(rs, false));
            }
            return pools;
        } catch (SQLException e) {
            throw new CloudRuntimeException("Unable to execute " + pstmt, e);
        }
    }

    // Converts storage tags into the detail-map form ("tag" -> "true") used by
    // the detail-matching queries.
    protected Map<String, String> tagsToDetails(String[] tags) {
        Map<String, String> details = new HashMap<String, String>(tags.length);
        for (String tag: tags) {
            details.put(tag, "true");
        }
        return details;
    }

    // Cluster-scope pools matching all tags; no tags means a plain listing.
    @Override
    public List<StoragePoolVO> findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags) {
        List<StoragePoolVO> storagePools = null;
        if (tags == null || tags.length == 0) {
            storagePools = listBy(dcId, podId, clusterId, ScopeType.CLUSTER);
        } else {
            Map<String, String> details = tagsToDetails(tags);
            storagePools = findPoolsByDetails(dcId, podId, clusterId, details, ScopeType.CLUSTER);
        }
        return storagePools;
    }

    // Host-scope (local storage) variant of findPoolsByTags.
    @Override
    public List<StoragePoolVO> findLocalStoragePoolsByTags(long dcId, long podId, Long clusterId, String[] tags) {
        List<StoragePoolVO> storagePools = null;
        if (tags == null || tags.length == 0) {
            storagePools = listBy(dcId, podId, clusterId, ScopeType.HOST);
        } else {
            Map<String, String> details = tagsToDetails(tags);
            storagePools = findPoolsByDetails(dcId, podId, clusterId, details, ScopeType.HOST);
        }
        return storagePools;
    }

    // Zone-wide pools: criteria query when untagged, hand-built detail SQL
    // (same ALL-tags-must-match semantics as findPoolsByDetails) when tagged.
    @Override
    public List<StoragePoolVO> findZoneWideStoragePoolsByTags(long dcId, String[] tags) {
        List<StoragePoolVO> storagePools = null;
        if (tags == null || tags.length == 0) {
            SearchCriteriaService<StoragePoolVO, StoragePoolVO> sc = SearchCriteria2.create(StoragePoolVO.class);
            sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dcId);
            sc.addAnd(sc.getEntity().getStatus(), Op.EQ, Status.Up);
            sc.addAnd(sc.getEntity().getScope(), Op.EQ, ScopeType.ZONE);
            return sc.list();
        } else {
            Map<String, String> details = tagsToDetails(tags);
            StringBuilder sql = new StringBuilder(ZoneWideDetailsSqlPrefix);
            for (Map.Entry<String, String> detail : details.entrySet()) {
                sql.append("((storage_pool_details.name='").append(detail.getKey()).append("') AND (storage_pool_details.value='").append(detail.getValue()).append("')) OR ");
            }
            sql.delete(sql.length() - 4, sql.length()); // strip the trailing " OR "
            sql.append(ZoneWideDetailsSqlSuffix);
            Transaction txn = Transaction.currentTxn();
            PreparedStatement pstmt = null;
            try {
                pstmt = txn.prepareAutoCloseStatement(sql.toString());
                int i = 1;
                pstmt.setLong(i++, dcId);
                pstmt.setString(i++, ScopeType.ZONE.toString());
                pstmt.setInt(i++, details.size());
                ResultSet rs = pstmt.executeQuery();
                List<StoragePoolVO> pools = new ArrayList<StoragePoolVO>();
                while (rs.next()) {
                    pools.add(toEntityBean(rs, false));
                }
                return pools;
            } catch (SQLException e) {
                throw new CloudRuntimeException("Unable to execute " + pstmt, e);
            }
        }
    }

    // Returns the detail names of a pool whose value equals the given string.
    @Override
    @DB
    public List<String> searchForStoragePoolDetails(long poolId, String value){
        StringBuilder sql = new StringBuilder(FindPoolTagDetails);
        Transaction txn = Transaction.currentTxn();
        PreparedStatement pstmt = null;
        try {
            pstmt = txn.prepareAutoCloseStatement(sql.toString());
            pstmt.setLong(1, poolId);
            pstmt.setString(2, value);
            ResultSet rs = pstmt.executeQuery();
            List<String> tags = new ArrayList<String>();
            while (rs.next()) {
                tags.add(rs.getString("name"));
            }
            return tags;
        } catch (SQLException e) {
            throw new CloudRuntimeException("Unable to execute " + pstmt.toString(), e);
        }
    }

    @Override
    public void updateDetails(long poolId, Map<String, String> details) {
        if (details != null) {
            _detailsDao.update(poolId, details);
        }
    }

    @Override
    public Map<String, String> getDetails(long poolId) {
        return _detailsDao.getDetails(poolId);
    }

    // Also configures the nested details DAO with the same parameters.
    @Override
    public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
        super.configure(name, params);
        _detailsDao.configure("DetailsDao", params);
        return true;
    }

    // Counts pools in any of the given statuses; uses customSearchIncludingRemoved,
    // so removed pools are part of the count.
    @Override
    public long countPoolsByStatus( StoragePoolStatus... statuses) {
        SearchCriteria<Long> sc = StatusCountSearch.create();
        sc.setParameters("status", (Object[])statuses);
        List<Long> rs = customSearchIncludingRemoved(sc, null);
        if (rs.size() == 0) {
            return 0;
        }
        return rs.get(0);
    }

    @Override
    public List<StoragePoolVO> listPoolsByCluster(long clusterId) {
        SearchCriteria<StoragePoolVO> sc = AllFieldSearch.create();
        sc.setParameters("clusterId", clusterId);
        return listBy(sc);
    }
}

View File

@ -22,9 +22,10 @@ import java.util.Map;
import javax.ejb.Local;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.springframework.stereotype.Component;
import com.cloud.storage.StoragePoolDetailVO;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;

View File

@ -20,6 +20,7 @@ import java.util.List;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.log4j.Logger;
@ -75,7 +76,8 @@ public class StoragePoolMonitor implements Listener {
StartupRoutingCommand scCmd = (StartupRoutingCommand)cmd;
if (scCmd.getHypervisorType() == HypervisorType.XenServer || scCmd.getHypervisorType() == HypervisorType.KVM ||
scCmd.getHypervisorType() == HypervisorType.VMware || scCmd.getHypervisorType() == HypervisorType.Simulator || scCmd.getHypervisorType() == HypervisorType.Ovm) {
List<StoragePoolVO> pools = _poolDao.listBy(host.getDataCenterId(), host.getPodId(), host.getClusterId());
List<StoragePoolVO> pools = _poolDao.listBy(host.getDataCenterId(), host.getPodId(), host.getClusterId(), ScopeType.CLUSTER);
pools.addAll(_poolDao.findZoneWideStoragePoolsByTags(host.getDataCenterId(), null));
for (StoragePoolVO pool : pools) {
if (pool.getStatus() != StoragePoolStatus.Up) {
continue;

View File

@ -37,6 +37,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
@ -92,7 +93,6 @@ import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotPolicyDao;
import com.cloud.storage.dao.SnapshotScheduleDao;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.s3.S3Manager;
@ -158,7 +158,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager,
@Inject
protected SnapshotDao _snapshotDao;
@Inject
protected StoragePoolDao _storagePoolDao;
protected PrimaryDataStoreDao _storagePoolDao;
@Inject
protected EventDao _eventDao;
@Inject

View File

@ -66,6 +66,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
@ -143,7 +144,6 @@ import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.GuestOSDao;
import com.cloud.storage.dao.LaunchPermissionDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.UploadDao;
import com.cloud.storage.dao.VMTemplateDao;
@ -197,7 +197,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
@Inject
protected VMTemplateDetailsDao _templateDetailsDao;
@Inject VMInstanceDao _vmInstanceDao;
@Inject StoragePoolDao _poolDao;
@Inject PrimaryDataStoreDao _poolDao;
@Inject StoragePoolHostDao _poolHostDao;
@Inject EventDao _eventDao;
@Inject DownloadMonitor _downloadMonitor;

View File

@ -47,14 +47,14 @@ public interface AccountManager extends AccountService {
boolean deleteAccount(AccountVO account, long callerUserId, Account caller);
boolean cleanupAccount(AccountVO account, long callerUserId, Account caller);
Long checkAccessAndSpecifyAuthority(Account caller, Long zoneId);
Account createAccount(String accountName, short accountType, Long domainId, String networkDomain, Map details);
UserVO createUser(long accountId, String userName, String password, String firstName, String lastName, String email, String timezone);
boolean cleanupAccount(AccountVO account, long callerUserId, Account caller);
Long checkAccessAndSpecifyAuthority(Account caller, Long zoneId);
Account createAccount(String accountName, short accountType, Long domainId, String networkDomain, Map details, String uuid, int regionId);
UserVO createUser(long accountId, String userName, String password, String firstName, String lastName, String email, String timezone);
/**
* Logs out a user
* @param userId

View File

@ -763,8 +763,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M
@Override
@DB
@ActionEvent(eventType = EventTypes.EVENT_ACCOUNT_CREATE, eventDescription = "creating Account")
public UserAccount createUserAccount(String userName, String password, String firstName, String lastName, String email, String timezone, String accountName, short accountType, Long domainId, String networkDomain,
Map<String, String> details) {
public UserAccount createUserAccount(String userName, String password, String firstName, String lastName, String email, String timezone, String accountName, short accountType,
Long domainId, String networkDomain, Map<String, String> details) {
if (accountName == null) {
accountName = userName;
@ -806,27 +806,26 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M
}
}
Transaction txn = Transaction.currentTxn();
txn.start();
Transaction txn = Transaction.currentTxn();
txn.start();
// create account
AccountVO account = createAccount(accountName, accountType, domainId, networkDomain, details, UUID.randomUUID().toString(), _regionMgr.getId());
long accountId = account.getId();
// create account
AccountVO account = createAccount(accountName, accountType, domainId, networkDomain, details);
long accountId = account.getId();
// create the first user for the account
UserVO user = createUser(accountId, userName, password, firstName, lastName, email, timezone);
// create the first user for the account
UserVO user = createUser(accountId, userName, password, firstName, lastName, email, timezone);
if (accountType == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN) {
// set registration token
byte[] bytes = (domainId + accountName + userName + System.currentTimeMillis()).getBytes();
String registrationToken = UUID.nameUUIDFromBytes(bytes).toString();
user.setRegistrationToken(registrationToken);
}
txn.commit();
//check success
return _userAccountDao.findById(user.getId());
if (accountType == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN) {
// set registration token
byte[] bytes = (domainId + accountName + userName + System.currentTimeMillis()).getBytes();
String registrationToken = UUID.nameUUIDFromBytes(bytes).toString();
user.setRegistrationToken(registrationToken);
}
txn.commit();
//check success
return _userAccountDao.findById(user.getId());
}
@Override
@ -858,8 +857,9 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M
if (!_userAccountDao.validateUsernameInDomain(userName, domainId)) {
throw new CloudRuntimeException("The user " + userName + " already exists in domain " + domainId);
}
return createUser(account.getId(), userName, password, firstName, lastName, email, timeZone);
UserVO user = null;
user = createUser(account.getId(), userName, password, firstName, lastName, email, timeZone);
return user;
}
@Override
@ -1646,7 +1646,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M
@Override
@DB
public AccountVO createAccount(String accountName, short accountType, Long domainId, String networkDomain, Map details) {
public AccountVO createAccount(String accountName, short accountType, Long domainId, String networkDomain, Map details, String uuid, int regionId) {
// Validate domain
Domain domain = _domainMgr.getDomain(domainId);
if (domain == null) {
@ -1690,7 +1690,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M
Transaction txn = Transaction.currentTxn();
txn.start();
AccountVO account = _accountDao.persist(new AccountVO(accountName, domainId, networkDomain, accountType, _accountDao.getRegionId()));
AccountVO account = _accountDao.persist(new AccountVO(accountName, domainId, networkDomain, accountType, uuid, regionId));
if (account == null) {
throw new CloudRuntimeException("Failed to create account name " + accountName + " in domain id=" + domainId);
@ -1730,7 +1730,30 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M
throw new CloudRuntimeException("Failed to encode password");
}
UserVO user = _userDao.persist(new UserVO(accountId, userName, encodedPassword, firstName, lastName, email, timezone, _userDao.getRegionId()));
UserVO user = _userDao.persist(new UserVO(accountId, userName, encodedPassword, firstName, lastName, email, timezone, UUID.randomUUID().toString(), _regionMgr.getId()));
return user;
}
//ToDo Add events??
public UserVO createUser(long accountId, String userName, String password, String firstName, String lastName, String email, String timezone, String uuid, int regionId) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Creating user: " + userName + ", accountId: " + accountId + " timezone:" + timezone);
}
String encodedPassword = null;
for (Iterator<UserAuthenticator> en = _userAuthenticators.iterator(); en.hasNext();) {
UserAuthenticator authenticator = en.next();
encodedPassword = authenticator.encode(password);
if (encodedPassword != null) {
break;
}
}
if (encodedPassword == null) {
throw new CloudRuntimeException("Failed to encode password");
}
UserVO user = _userDao.persist(new UserVO(accountId, userName, encodedPassword, firstName, lastName, email, timezone, uuid, regionId));
return user;
}

View File

@ -161,7 +161,6 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom
throw new InvalidParameterValueException("Domain with name " + name + " already exists for the parent id=" + parentId);
}
Transaction txn = Transaction.currentTxn();
txn.start();
@ -471,8 +470,8 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom
// check if domain exists in the system
DomainVO domain = _domainDao.findById(domainId);
if (domain == null) {
InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find domain with specified domain id");
ex.addProxyObject(domain, domainId, "domainId");
InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find domain with specified domain id");
ex.addProxyObject(domain, domainId, "domainId");
throw ex;
} else if (domain.getParent() == null && domainName != null) {
// check if domain is ROOT domain - and deny to edit it with the new name
@ -494,7 +493,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom
if (!domains.isEmpty() && !sameDomain) {
InvalidParameterValueException ex = new InvalidParameterValueException("Failed to update specified domain id with name '" + domainName + "' since it already exists in the system");
ex.addProxyObject(domain, domainId, "domainId");
throw ex;
throw ex;
}
}
@ -552,5 +551,5 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom
_domainDao.update(dom.getId(), dom);
}
}
}

Some files were not shown because too many files have changed in this diff Show More