diff --git a/api/src/com/cloud/agent/api/to/NfsTO.java b/api/src/com/cloud/agent/api/to/NfsTO.java
index 5490fd1e588..415c95ce3f5 100644
--- a/api/src/com/cloud/agent/api/to/NfsTO.java
+++ b/api/src/com/cloud/agent/api/to/NfsTO.java
@@ -18,7 +18,7 @@ package com.cloud.agent.api.to;
 
 import com.cloud.storage.DataStoreRole;
 
-public final class NfsTO implements DataStoreTO {
+public class NfsTO implements DataStoreTO {
     private String _url;
     private DataStoreRole _role;
 
diff --git a/engine/storage/src/org/apache/cloudstack/storage/LocalHostEndpoint.java b/engine/storage/src/org/apache/cloudstack/storage/LocalHostEndpoint.java
index 18fcd7178d2..932a07fbd0f 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/LocalHostEndpoint.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/LocalHostEndpoint.java
@@ -16,10 +16,14 @@
 // under the License.
 package org.apache.cloudstack.storage;
 
+import java.io.File;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 
+import com.cloud.configuration.Config;
+import com.cloud.configuration.dao.ConfigurationDao;
+import com.cloud.configuration.dao.ConfigurationDaoImpl;
 import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 import org.apache.cloudstack.storage.command.CopyCommand;
@@ -31,12 +35,24 @@
 import com.cloud.agent.api.Command;
 import com.cloud.resource.ServerResource;
 import com.cloud.utils.net.NetUtils;
 
+import javax.inject.Inject;
+
 public class LocalHostEndpoint implements EndPoint {
     private ScheduledExecutorService executor;
     protected ServerResource resource;
+    @Inject
+    ConfigurationDao configDao;
 
     public LocalHostEndpoint() {
-        resource = new LocalNfsSecondaryStorageResource();
+        // Get the mount parent folder configured in the global setting; if set, it
+        // overrides _parent in NfsSecondaryStorageResource to work around
+        // permission issues with the default /mnt folder.
+        String mountParent = configDao.getValue(Config.MountParent.key());
+
+        String path = mountParent + File.separator + "secStorage";
+
+        LocalNfsSecondaryStorageResource localResource = new LocalNfsSecondaryStorageResource();
+        localResource.setParentPath(path);
+        resource = localResource;
         executor = Executors.newScheduledThreadPool(10);
     }
 
diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
index e51fbda07f7..da86612ebcc 100755
--- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
+++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
@@ -1072,13 +1072,13 @@ ServerResource {
 
     private void passCmdLine(String vmName, String cmdLine)
             throws InternalErrorException {
-        final Script command = new Script(_patchViaSocketPath, _timeout, s_logger);
+        final Script command = new Script(_patchViaSocketPath, 5*1000, s_logger);
         String result;
         command.add("-n",vmName);
         command.add("-p", cmdLine.replaceAll(" ", "%"));
         result = command.execute();
         if (result != null) {
-            throw new InternalErrorException(result);
+            s_logger.debug("passcmd failed: " + result);
         }
     }
 
diff --git a/scripts/storage/qcow2/managesnapshot.sh b/scripts/storage/qcow2/managesnapshot.sh
index 30148de66e7..368ff549ee6 100755
--- a/scripts/storage/qcow2/managesnapshot.sh
+++ b/scripts/storage/qcow2/managesnapshot.sh
@@ -67,8 +67,10 @@
create_snapshot() { local disk=$1 local snapshotname="$2" local failed=0 + is_lv ${disk} + islv_ret=$? - if [ ${dmsnapshot} = "yes" ] && is_lv ${disk}; then + if [ ${dmsnapshot} = "yes" ] && [ "$islv_ret" == "1" ]; then local lv=`get_lv ${disk}` local vg=`get_vg ${disk}` local lv_dm=`double_hyphens ${lv}` @@ -120,8 +122,10 @@ destroy_snapshot() { local disk=$1 local snapshotname="$2" local failed=0 + is_lv ${disk} + islv_ret=$? - if is_lv ${disk}; then + if [ "$islv_ret" == "1" ]; then local lv=`get_lv ${disk}` local vg=`get_vg ${disk}` local lv_dm=`double_hyphens ${lv}` @@ -187,7 +191,10 @@ backup_snapshot() { fi fi - if [ ${dmsnapshot} = "yes" ] && is_lv ${disk}; then + is_lv ${disk} + islv_ret=$? + + if [ ${dmsnapshot} = "yes" ] && [ "$islv_ret" == "1" ] ; then local vg=`get_vg ${disk}` local vg_dm=`double_hyphens ${vg}` local scriptdir=`dirname ${0}` diff --git a/scripts/storage/secondary/swift b/scripts/storage/secondary/swift index 8224b4d393b..4138db8b17e 100755 --- a/scripts/storage/secondary/swift +++ b/scripts/storage/secondary/swift @@ -1,5 +1,5 @@ #!/usr/bin/python -u -# Copyright (c) 2010-2012 OpenStack, LLC. +# Copyright (c) 2010-2011 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,51 +13,896 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. -import signal -import socket -import logging from errno import EEXIST, ENOENT from hashlib import md5 -from optparse import OptionParser, SUPPRESS_HELP -from os import environ, listdir, makedirs, utime, _exit as os_exit +from optparse import OptionParser +from os import environ, listdir, makedirs, utime from os.path import basename, dirname, getmtime, getsize, isdir, join -from Queue import Queue -from random import shuffle +from Queue import Empty, Queue from sys import argv, exc_info, exit, stderr, stdout -from threading import Thread -from time import sleep, time, gmtime, strftime +from threading import enumerate as threading_enumerate, Thread +from time import sleep from traceback import format_exception -from urllib import quote, unquote + + +# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # +# Inclusion of swift.common.client for convenience of single file distribution + +import socket +from cStringIO import StringIO +from re import compile, DOTALL +from tokenize import generate_tokens, STRING, NAME, OP +from urllib import quote as _quote, unquote +from urlparse import urlparse, urlunparse try: - import simplejson as json + from eventlet.green.httplib import HTTPException, HTTPSConnection except ImportError: - import json + from httplib import HTTPException, HTTPSConnection -from swiftclient import Connection, ClientException, HTTPException, utils -from swiftclient.version import version_info +try: + from eventlet import sleep +except ImportError: + from time import sleep + +try: + from swift.common.bufferedhttp \ + import BufferedHTTPConnection as HTTPConnection +except ImportError: + try: + from eventlet.green.httplib import HTTPConnection + except ImportError: + from httplib import HTTPConnection -def get_conn(options): +def quote(value, safe='/'): """ - Return a connection building it from the options. 
+ Patched version of urllib.quote that encodes utf8 strings before quoting """ - return Connection(options.auth, - options.user, - options.key, - auth_version=options.auth_version, - os_options=options.os_options, - snet=options.snet, - cacert=options.os_cacert, - insecure=options.insecure, - ssl_compression=options.ssl_compression) + if isinstance(value, unicode): + value = value.encode('utf8') + return _quote(value, safe) + + +# look for a real json parser first +try: + # simplejson is popular and pretty good + from simplejson import loads as json_loads +except ImportError: + try: + # 2.6 will have a json module in the stdlib + from json import loads as json_loads + except ImportError: + # fall back on local parser otherwise + comments = compile(r'/\*.*\*/|//[^\r\n]*', DOTALL) + + def json_loads(string): + ''' + Fairly competent json parser exploiting the python tokenizer and + eval(). -- From python-cloudfiles + + _loads(serialized_json) -> object + ''' + try: + res = [] + consts = {'true': True, 'false': False, 'null': None} + string = '(' + comments.sub('', string) + ')' + for type, val, _junk, _junk, _junk in \ + generate_tokens(StringIO(string).readline): + if (type == OP and val not in '[]{}:,()-') or \ + (type == NAME and val not in consts): + raise AttributeError() + elif type == STRING: + res.append('u') + res.append(val.replace('\\/', '/')) + else: + res.append(val) + return eval(''.join(res), {}, consts) + except Exception: + raise AttributeError() + + +class ClientException(Exception): + + def __init__(self, msg, http_scheme='', http_host='', http_port='', + http_path='', http_query='', http_status=0, http_reason='', + http_device=''): + Exception.__init__(self, msg) + self.msg = msg + self.http_scheme = http_scheme + self.http_host = http_host + self.http_port = http_port + self.http_path = http_path + self.http_query = http_query + self.http_status = http_status + self.http_reason = http_reason + self.http_device = http_device + + def __str__(self): + a = self.msg + b = '' + if self.http_scheme: + b += '%s://' % self.http_scheme + if self.http_host: + b += self.http_host + if self.http_port: + b += ':%s' % self.http_port + if self.http_path: + b += self.http_path + if self.http_query: + b += '?%s' % self.http_query + if self.http_status: + if b: + b = '%s %s' % (b, self.http_status) + else: + b = str(self.http_status) + if self.http_reason: + if b: + b = '%s %s' % (b, self.http_reason) + else: + b = '- %s' % self.http_reason + if self.http_device: + if b: + b = '%s: device %s' % (b, self.http_device) + else: + b = 'device %s' % self.http_device + return b and '%s: %s' % (a, b) or a + + +def http_connection(url, proxy=None): + """ + Make an HTTPConnection or HTTPSConnection + + :param url: url to connect to + :param proxy: proxy to connect through, if any; None by default; str of the + format 'http://127.0.0.1:8888' to set one + :returns: tuple of (parsed url, connection object) + :raises ClientException: Unable to handle protocol scheme + """ + parsed = urlparse(url) + proxy_parsed = urlparse(proxy) if proxy else None + if parsed.scheme == 'http': + conn = HTTPConnection((proxy_parsed if proxy else parsed).netloc) + elif parsed.scheme == 'https': + conn = HTTPSConnection((proxy_parsed if proxy else parsed).netloc) + else: + raise ClientException('Cannot handle protocol scheme %s for url %s' % + (parsed.scheme, repr(url))) + if proxy: + conn._set_tunnel(parsed.hostname, parsed.port) + return parsed, conn + + +def get_auth(url, user, key, snet=False): + """ + Get 
authentication/authorization credentials. + + The snet parameter is used for Rackspace's ServiceNet internal network + implementation. In this function, it simply adds *snet-* to the beginning + of the host name for the returned storage URL. With Rackspace Cloud Files, + use of this network path causes no bandwidth charges but requires the + client to be running on Rackspace's ServiceNet network. + + :param url: authentication/authorization URL + :param user: user to authenticate as + :param key: key or password for authorization + :param snet: use SERVICENET internal network (see above), default is False + :returns: tuple of (storage URL, auth token) + :raises ClientException: HTTP GET request to auth URL failed + """ + parsed, conn = http_connection(url) + conn.request('GET', parsed.path, '', + {'X-Auth-User': user, 'X-Auth-Key': key}) + resp = conn.getresponse() + resp.read() + if resp.status < 200 or resp.status >= 300: + raise ClientException('Auth GET failed', http_scheme=parsed.scheme, + http_host=conn.host, http_port=conn.port, + http_path=parsed.path, http_status=resp.status, + http_reason=resp.reason) + url = resp.getheader('x-storage-url') + if snet: + parsed = list(urlparse(url)) + # Second item in the list is the netloc + parsed[1] = 'snet-' + parsed[1] + url = urlunparse(parsed) + return url, resp.getheader('x-storage-token', + resp.getheader('x-auth-token')) + + +def get_account(url, token, marker=None, limit=None, prefix=None, + http_conn=None, full_listing=False): + """ + Get a listing of containers for the account. + + :param url: storage URL + :param token: auth token + :param marker: marker query + :param limit: limit query + :param prefix: prefix query + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :param full_listing: if True, return a full listing, else returns a max + of 10000 listings + :returns: a tuple of (response headers, a list of containers) The response + headers will be a dict and all header names will be lowercase. + :raises ClientException: HTTP GET request failed + """ + if not http_conn: + http_conn = http_connection(url) + if full_listing: + rv = get_account(url, token, marker, limit, prefix, http_conn) + listing = rv[1] + while listing: + marker = listing[-1]['name'] + listing = \ + get_account(url, token, marker, limit, prefix, http_conn)[1] + if listing: + rv[1].extend(listing) + return rv + parsed, conn = http_conn + qs = 'format=json' + if marker: + qs += '&marker=%s' % quote(marker) + if limit: + qs += '&limit=%d' % limit + if prefix: + qs += '&prefix=%s' % quote(prefix) + conn.request('GET', '%s?%s' % (parsed.path, qs), '', + {'X-Auth-Token': token}) + resp = conn.getresponse() + resp_headers = {} + for header, value in resp.getheaders(): + resp_headers[header.lower()] = value + if resp.status < 200 or resp.status >= 300: + resp.read() + raise ClientException('Account GET failed', http_scheme=parsed.scheme, + http_host=conn.host, http_port=conn.port, + http_path=parsed.path, http_query=qs, http_status=resp.status, + http_reason=resp.reason) + if resp.status == 204: + resp.read() + return resp_headers, [] + return resp_headers, json_loads(resp.read()) + + +def head_account(url, token, http_conn=None): + """ + Get account stats. 
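+
+    A minimal usage sketch (assumes a reachable auth endpoint; the URL and
+    credentials shown are illustrative):
+
+        url, token = get_auth('http://127.0.0.1:8080/auth/v1.0',
+                              'test:tester', 'testing')
+        headers = head_account(url, token)
+        containers = int(headers.get('x-account-container-count', 0))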
+
+    :param url: storage URL
+    :param token: auth token
+    :param http_conn: HTTP connection object (If None, it will create the
+                      conn object)
+    :returns: a dict containing the response's headers (all header names will
+              be lowercase)
+    :raises ClientException: HTTP HEAD request failed
+    """
+    if http_conn:
+        parsed, conn = http_conn
+    else:
+        parsed, conn = http_connection(url)
+    conn.request('HEAD', parsed.path, '', {'X-Auth-Token': token})
+    resp = conn.getresponse()
+    resp.read()
+    if resp.status < 200 or resp.status >= 300:
+        raise ClientException('Account HEAD failed', http_scheme=parsed.scheme,
+                http_host=conn.host, http_port=conn.port,
+                http_path=parsed.path, http_status=resp.status,
+                http_reason=resp.reason)
+    resp_headers = {}
+    for header, value in resp.getheaders():
+        resp_headers[header.lower()] = value
+    return resp_headers
+
+
+def post_account(url, token, headers, http_conn=None):
+    """
+    Update an account's metadata.
+
+    :param url: storage URL
+    :param token: auth token
+    :param headers: additional headers to include in the request
+    :param http_conn: HTTP connection object (If None, it will create the
+                      conn object)
+    :raises ClientException: HTTP POST request failed
+    """
+    if http_conn:
+        parsed, conn = http_conn
+    else:
+        parsed, conn = http_connection(url)
+    headers['X-Auth-Token'] = token
+    conn.request('POST', parsed.path, '', headers)
+    resp = conn.getresponse()
+    resp.read()
+    if resp.status < 200 or resp.status >= 300:
+        raise ClientException('Account POST failed',
+                http_scheme=parsed.scheme, http_host=conn.host,
+                http_port=conn.port, http_path=parsed.path,
+                http_status=resp.status, http_reason=resp.reason)
+
+
+def get_container(url, token, container, marker=None, limit=None,
+                  prefix=None, delimiter=None, http_conn=None,
+                  full_listing=False):
+    """
+    Get a listing of objects for the container.
+
+    :param url: storage URL
+    :param token: auth token
+    :param container: container name to get a listing for
+    :param marker: marker query
+    :param limit: limit query
+    :param prefix: prefix query
+    :param delimiter: string to delimit the queries on
+    :param http_conn: HTTP connection object (If None, it will create the
+                      conn object)
+    :param full_listing: if True, return a full listing, else returns a max
+                         of 10000 listings
+    :returns: a tuple of (response headers, a list of objects) The response
+              headers will be a dict and all header names will be lowercase.
+ :raises ClientException: HTTP GET request failed + """ + if not http_conn: + http_conn = http_connection(url) + if full_listing: + rv = get_container(url, token, container, marker, limit, prefix, + delimiter, http_conn) + listing = rv[1] + while listing: + if not delimiter: + marker = listing[-1]['name'] + else: + marker = listing[-1].get('name', listing[-1].get('subdir')) + listing = get_container(url, token, container, marker, limit, + prefix, delimiter, http_conn)[1] + if listing: + rv[1].extend(listing) + return rv + parsed, conn = http_conn + path = '%s/%s' % (parsed.path, quote(container)) + qs = 'format=json' + if marker: + qs += '&marker=%s' % quote(marker) + if limit: + qs += '&limit=%d' % limit + if prefix: + qs += '&prefix=%s' % quote(prefix) + if delimiter: + qs += '&delimiter=%s' % quote(delimiter) + conn.request('GET', '%s?%s' % (path, qs), '', {'X-Auth-Token': token}) + resp = conn.getresponse() + if resp.status < 200 or resp.status >= 300: + resp.read() + raise ClientException('Container GET failed', + http_scheme=parsed.scheme, http_host=conn.host, + http_port=conn.port, http_path=path, http_query=qs, + http_status=resp.status, http_reason=resp.reason) + resp_headers = {} + for header, value in resp.getheaders(): + resp_headers[header.lower()] = value + if resp.status == 204: + resp.read() + return resp_headers, [] + return resp_headers, json_loads(resp.read()) + + +def head_container(url, token, container, http_conn=None): + """ + Get container stats. + + :param url: storage URL + :param token: auth token + :param container: container name to get stats for + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :returns: a dict containing the response's headers (all header names will + be lowercase) + :raises ClientException: HTTP HEAD request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s' % (parsed.path, quote(container)) + conn.request('HEAD', path, '', {'X-Auth-Token': token}) + resp = conn.getresponse() + resp.read() + if resp.status < 200 or resp.status >= 300: + raise ClientException('Container HEAD failed', + http_scheme=parsed.scheme, http_host=conn.host, + http_port=conn.port, http_path=path, http_status=resp.status, + http_reason=resp.reason) + resp_headers = {} + for header, value in resp.getheaders(): + resp_headers[header.lower()] = value + return resp_headers + + +def put_container(url, token, container, headers=None, http_conn=None): + """ + Create a container + + :param url: storage URL + :param token: auth token + :param container: container name to create + :param headers: additional headers to include in the request + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :raises ClientException: HTTP PUT request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s' % (parsed.path, quote(container)) + if not headers: + headers = {} + headers['X-Auth-Token'] = token + conn.request('PUT', path, '', headers) + resp = conn.getresponse() + resp.read() + if resp.status < 200 or resp.status >= 300: + raise ClientException('Container PUT failed', + http_scheme=parsed.scheme, http_host=conn.host, + http_port=conn.port, http_path=path, http_status=resp.status, + http_reason=resp.reason) + + +def post_container(url, token, container, headers, http_conn=None): + """ + Update a container's metadata. 
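+
+    A short sketch of a metadata update (the container name and meta header
+    shown are illustrative):
+
+        post_container(url, token, 'photos',
+                       {'X-Container-Meta-Color': 'Blue'})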
+ + :param url: storage URL + :param token: auth token + :param container: container name to update + :param headers: additional headers to include in the request + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :raises ClientException: HTTP POST request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s' % (parsed.path, quote(container)) + headers['X-Auth-Token'] = token + conn.request('POST', path, '', headers) + resp = conn.getresponse() + resp.read() + if resp.status < 200 or resp.status >= 300: + raise ClientException('Container POST failed', + http_scheme=parsed.scheme, http_host=conn.host, + http_port=conn.port, http_path=path, http_status=resp.status, + http_reason=resp.reason) + + +def delete_container(url, token, container, http_conn=None): + """ + Delete a container + + :param url: storage URL + :param token: auth token + :param container: container name to delete + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :raises ClientException: HTTP DELETE request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s' % (parsed.path, quote(container)) + conn.request('DELETE', path, '', {'X-Auth-Token': token}) + resp = conn.getresponse() + resp.read() + if resp.status < 200 or resp.status >= 300: + raise ClientException('Container DELETE failed', + http_scheme=parsed.scheme, http_host=conn.host, + http_port=conn.port, http_path=path, http_status=resp.status, + http_reason=resp.reason) + + +def get_object(url, token, container, name, http_conn=None, + resp_chunk_size=None): + """ + Get an object + + :param url: storage URL + :param token: auth token + :param container: container name that the object is in + :param name: object name to get + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :param resp_chunk_size: if defined, chunk size of data to read. NOTE: If + you specify a resp_chunk_size you must fully read + the object's contents before making another + request. + :returns: a tuple of (response headers, the object's contents) The response + headers will be a dict and all header names will be lowercase. 
+ :raises ClientException: HTTP GET request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s/%s' % (parsed.path, quote(container), quote(name)) + conn.request('GET', path, '', {'X-Auth-Token': token}) + resp = conn.getresponse() + if resp.status < 200 or resp.status >= 300: + resp.read() + raise ClientException('Object GET failed', http_scheme=parsed.scheme, + http_host=conn.host, http_port=conn.port, http_path=path, + http_status=resp.status, http_reason=resp.reason) + if resp_chunk_size: + + def _object_body(): + buf = resp.read(resp_chunk_size) + while buf: + yield buf + buf = resp.read(resp_chunk_size) + object_body = _object_body() + else: + object_body = resp.read() + resp_headers = {} + for header, value in resp.getheaders(): + resp_headers[header.lower()] = value + return resp_headers, object_body + + +def head_object(url, token, container, name, http_conn=None): + """ + Get object info + + :param url: storage URL + :param token: auth token + :param container: container name that the object is in + :param name: object name to get info for + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :returns: a dict containing the response's headers (all header names will + be lowercase) + :raises ClientException: HTTP HEAD request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s/%s' % (parsed.path, quote(container), quote(name)) + conn.request('HEAD', path, '', {'X-Auth-Token': token}) + resp = conn.getresponse() + resp.read() + if resp.status < 200 or resp.status >= 300: + raise ClientException('Object HEAD failed', http_scheme=parsed.scheme, + http_host=conn.host, http_port=conn.port, http_path=path, + http_status=resp.status, http_reason=resp.reason) + resp_headers = {} + for header, value in resp.getheaders(): + resp_headers[header.lower()] = value + return resp_headers + + +def put_object(url, token=None, container=None, name=None, contents=None, + content_length=None, etag=None, chunk_size=65536, + content_type=None, headers=None, http_conn=None, proxy=None): + """ + Put an object + + :param url: storage URL + :param token: auth token; if None, no token will be sent + :param container: container name that the object is in; if None, the + container name is expected to be part of the url + :param name: object name to put; if None, the object name is expected to be + part of the url + :param contents: a string or a file like object to read object data from; + if None, a zero-byte put will be done + :param content_length: value to send as content-length header; also limits + the amount read from contents; if None, it will be + computed via the contents or chunked transfer + encoding will be used + :param etag: etag of contents; if None, no etag will be sent + :param chunk_size: chunk size of data to write; default 65536 + :param content_type: value to send as content-type header; if None, no + content-type will be set (remote end will likely try + to auto-detect it) + :param headers: additional headers to include in the request, if any + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :param proxy: proxy to connect through, if any; None by default; str of the + format 'http://127.0.0.1:8888' to set one + :returns: etag from server response + :raises ClientException: HTTP PUT request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = 
http_connection(url, proxy=proxy) + path = parsed.path + if container: + path = '%s/%s' % (path.rstrip('/'), quote(container)) + if name: + path = '%s/%s' % (path.rstrip('/'), quote(name)) + if headers: + headers = dict(headers) + else: + headers = {} + if token: + headers['X-Auth-Token'] = token + if etag: + headers['ETag'] = etag.strip('"') + if content_length is not None: + headers['Content-Length'] = str(content_length) + else: + for n, v in headers.iteritems(): + if n.lower() == 'content-length': + content_length = int(v) + if content_type is not None: + headers['Content-Type'] = content_type + if not contents: + headers['Content-Length'] = '0' + if hasattr(contents, 'read'): + conn.putrequest('PUT', path) + for header, value in headers.iteritems(): + conn.putheader(header, value) + if content_length is None: + conn.putheader('Transfer-Encoding', 'chunked') + conn.endheaders() + chunk = contents.read(chunk_size) + while chunk: + conn.send('%x\r\n%s\r\n' % (len(chunk), chunk)) + chunk = contents.read(chunk_size) + conn.send('0\r\n\r\n') + else: + conn.endheaders() + left = content_length + while left > 0: + size = chunk_size + if size > left: + size = left + chunk = contents.read(size) + conn.send(chunk) + left -= len(chunk) + else: + conn.request('PUT', path, contents, headers) + resp = conn.getresponse() + resp.read() + if resp.status < 200 or resp.status >= 300: + raise ClientException('Object PUT failed', http_scheme=parsed.scheme, + http_host=conn.host, http_port=conn.port, http_path=path, + http_status=resp.status, http_reason=resp.reason) + return resp.getheader('etag', '').strip('"') + + +def post_object(url, token, container, name, headers, http_conn=None): + """ + Update object metadata + + :param url: storage URL + :param token: auth token + :param container: container name that the object is in + :param name: name of the object to update + :param headers: additional headers to include in the request + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :raises ClientException: HTTP POST request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s/%s' % (parsed.path, quote(container), quote(name)) + headers['X-Auth-Token'] = token + conn.request('POST', path, '', headers) + resp = conn.getresponse() + resp.read() + if resp.status < 200 or resp.status >= 300: + raise ClientException('Object POST failed', http_scheme=parsed.scheme, + http_host=conn.host, http_port=conn.port, http_path=path, + http_status=resp.status, http_reason=resp.reason) + + +def delete_object(url, token=None, container=None, name=None, http_conn=None, + headers=None, proxy=None): + """ + Delete object + + :param url: storage URL + :param token: auth token; if None, no token will be sent + :param container: container name that the object is in; if None, the + container name is expected to be part of the url + :param name: object name to delete; if None, the object name is expected to + be part of the url + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :param headers: additional headers to include in the request + :param proxy: proxy to connect through, if any; None by default; str of the + format 'http://127.0.0.1:8888' to set one + :raises ClientException: HTTP DELETE request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url, proxy=proxy) + path = parsed.path + if container: + path = '%s/%s' % (path.rstrip('/'), 
quote(container))
+    if name:
+        path = '%s/%s' % (path.rstrip('/'), quote(name))
+    if headers:
+        headers = dict(headers)
+    else:
+        headers = {}
+    if token:
+        headers['X-Auth-Token'] = token
+    conn.request('DELETE', path, '', headers)
+    resp = conn.getresponse()
+    resp.read()
+    if resp.status < 200 or resp.status >= 300:
+        raise ClientException('Object DELETE failed',
+                http_scheme=parsed.scheme, http_host=conn.host,
+                http_port=conn.port, http_path=path, http_status=resp.status,
+                http_reason=resp.reason)
+
+
+class Connection(object):
+    """Convenience class to make requests that will also retry the request"""
+
+    def __init__(self, authurl, user, key, retries=5, preauthurl=None,
+                 preauthtoken=None, snet=False, starting_backoff=1):
+        """
+        :param authurl: authentication URL
+        :param user: user name to authenticate as
+        :param key: key/password to authenticate with
+        :param retries: Number of times to retry the request before failing
+        :param preauthurl: storage URL (if you have already authenticated)
+        :param preauthtoken: authentication token (if you have already
+                             authenticated)
+        :param snet: use SERVICENET internal network, default is False
+        """
+        self.authurl = authurl
+        self.user = user
+        self.key = key
+        self.retries = retries
+        self.http_conn = None
+        self.url = preauthurl
+        self.token = preauthtoken
+        self.attempts = 0
+        self.snet = snet
+        self.starting_backoff = starting_backoff
+
+    def get_auth(self):
+        return get_auth(self.authurl, self.user, self.key, snet=self.snet)
+
+    def http_connection(self):
+        return http_connection(self.url)
+
+    def _retry(self, reset_func, func, *args, **kwargs):
+        self.attempts = 0
+        backoff = self.starting_backoff
+        while self.attempts <= self.retries:
+            self.attempts += 1
+            try:
+                if not self.url or not self.token:
+                    self.url, self.token = self.get_auth()
+                    self.http_conn = None
+                if not self.http_conn:
+                    self.http_conn = self.http_connection()
+                kwargs['http_conn'] = self.http_conn
+                rv = func(self.url, self.token, *args, **kwargs)
+                return rv
+            except (socket.error, HTTPException):
+                if self.attempts > self.retries:
+                    raise
+                self.http_conn = None
+            except ClientException, err:
+                if self.attempts > self.retries:
+                    raise
+                if err.http_status == 401:
+                    self.url = self.token = None
+                    if self.attempts > 1:
+                        raise
+                elif err.http_status == 408:
+                    self.http_conn = None
+                elif 500 <= err.http_status <= 599:
+                    pass
+                else:
+                    raise
+            sleep(backoff)
+            backoff *= 2
+            if reset_func:
+                reset_func(func, *args, **kwargs)
+
+    def head_account(self):
+        """Wrapper for :func:`head_account`"""
+        return self._retry(None, head_account)
+
+    def get_account(self, marker=None, limit=None, prefix=None,
+                    full_listing=False):
+        """Wrapper for :func:`get_account`"""
+        # TODO(unknown): With full_listing=True this will restart the entire
+        # listing with each retry. Need to make a better version that just
+        # retries where it left off.
+        return self._retry(None, get_account, marker=marker, limit=limit,
+                           prefix=prefix, full_listing=full_listing)
+
+    def post_account(self, headers):
+        """Wrapper for :func:`post_account`"""
+        return self._retry(None, post_account, headers)
+
+    def head_container(self, container):
+        """Wrapper for :func:`head_container`"""
+        return self._retry(None, head_container, container)
+
+    def get_container(self, container, marker=None, limit=None, prefix=None,
+                      delimiter=None, full_listing=False):
+        """Wrapper for :func:`get_container`"""
+        # TODO(unknown): With full_listing=True this will restart the entire
+        # listing with each retry.
Need to make a better version that just + # retries where it left off. + return self._retry(None, get_container, container, marker=marker, + limit=limit, prefix=prefix, delimiter=delimiter, + full_listing=full_listing) + + def put_container(self, container, headers=None): + """Wrapper for :func:`put_container`""" + return self._retry(None, put_container, container, headers=headers) + + def post_container(self, container, headers): + """Wrapper for :func:`post_container`""" + return self._retry(None, post_container, container, headers) + + def delete_container(self, container): + """Wrapper for :func:`delete_container`""" + return self._retry(None, delete_container, container) + + def head_object(self, container, obj): + """Wrapper for :func:`head_object`""" + return self._retry(None, head_object, container, obj) + + def get_object(self, container, obj, resp_chunk_size=None): + """Wrapper for :func:`get_object`""" + return self._retry(None, get_object, container, obj, + resp_chunk_size=resp_chunk_size) + + def put_object(self, container, obj, contents, content_length=None, + etag=None, chunk_size=65536, content_type=None, + headers=None): + """Wrapper for :func:`put_object`""" + + def _default_reset(*args, **kwargs): + raise ClientException('put_object(%r, %r, ...) failure and no ' + 'ability to reset contents for reupload.' % (container, obj)) + + reset_func = _default_reset + tell = getattr(contents, 'tell', None) + seek = getattr(contents, 'seek', None) + if tell and seek: + orig_pos = tell() + reset_func = lambda *a, **k: seek(orig_pos) + elif not contents: + reset_func = lambda *a, **k: None + + return self._retry(reset_func, put_object, container, obj, contents, + content_length=content_length, etag=etag, chunk_size=chunk_size, + content_type=content_type, headers=headers) + + def post_object(self, container, obj, headers): + """Wrapper for :func:`post_object`""" + return self._retry(None, post_object, container, obj, headers) + + def delete_object(self, container, obj): + """Wrapper for :func:`delete_object`""" + return self._retry(None, delete_object, container, obj) + +# End inclusion of swift.common.client +# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # def mkdirs(path): try: makedirs(path) - except OSError as err: + except OSError, err: if err.errno != EEXIST: raise @@ -80,64 +925,39 @@ def put_errors_from_threads(threads, error_queue): return was_error -class StopWorkerThreadSignal(object): - pass - - class QueueFunctionThread(Thread): def __init__(self, queue, func, *args, **kwargs): - """ - Calls func for each item in queue; func is called with a queued - item as the first arg followed by *args and **kwargs. Use the - PriorityQueue for sending quit signal when Ctrl-C is pressed. - """ + """ Calls func for each item in queue; func is called with a queued + item as the first arg followed by *args and **kwargs. Use the abort + attribute to have the thread empty the queue (without processing) + and exit. 
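+
+        A rough usage sketch, mirroring how the st_* commands drive these
+        threads (the item and worker shown are illustrative):
+
+            queue = Queue(10000)
+            thread = QueueFunctionThread(queue, stdout.write)
+            thread.start()
+            queue.put('one item\n')
+            while not queue.empty():
+                sleep(0.01)
+            thread.abort = True
+            thread.join()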
""" Thread.__init__(self) + self.abort = False self.queue = queue self.func = func self.args = args self.kwargs = kwargs self.exc_infos = [] - self.results = [] - self.store_results = kwargs.pop('store_results', False) def run(self): - while True: - try: - item = self.queue.get() - if isinstance(item, StopWorkerThreadSignal): - break - except: - # This catch is important and it may occur when ctrl-C is - # pressed, in this case simply quit the thread - break - else: + try: + while True: try: - self.func(item, *self.args, **self.kwargs) - except Exception: - self.exc_infos.append(exc_info()) - - -def shutdown_worker_threads(queue, thread_list): - """ - Takes a job queue and a list of associated QueueFunctionThread objects, - puts a StopWorkerThreadSignal object into the queue, and waits for the - queue to flush. - """ - for thread in [t for t in thread_list if t.isAlive()]: - queue.put(StopWorkerThreadSignal()) - - while any(map(QueueFunctionThread.is_alive, thread_list)): - sleep(0.05) - - -def immediate_exit(signum, frame): - stderr.write(" Aborted\n") - os_exit(2) + item = self.queue.get_nowait() + if not self.abort: + self.func(item, *self.args, **self.kwargs) + self.queue.task_done() + except Empty: + if self.abort: + break + sleep(0.01) + except Exception: + self.exc_infos.append(exc_info()) st_delete_help = ''' -delete [options] --all OR delete container [options] [object] [object] ... +delete --all OR delete container [--leave-segments] [object] [object] ... Deletes everything in the account (with --all), or everything in a container, or a list of objects depending on the args given. Segments of manifest objects will be deleted as well, unless you specify the @@ -145,21 +965,12 @@ delete [options] --all OR delete container [options] [object] [object] ... 
def st_delete(parser, args, print_queue, error_queue): - parser.add_option( - '-a', '--all', action='store_true', dest='yes_all', + parser.add_option('-a', '--all', action='store_true', dest='yes_all', default=False, help='Indicates that you really want to delete ' 'everything in the account') - parser.add_option( - '', '--leave-segments', action='store_true', - dest='leave_segments', default=False, - help='Indicates that you want the segments of manifest' - 'objects left alone') - parser.add_option( - '', '--object-threads', type=int, - default=10, help='Number of threads to use for deleting objects') - parser.add_option('', '--container-threads', type=int, - default=10, help='Number of threads to use for ' - 'deleting containers') + parser.add_option('', '--leave-segments', action='store_true', + dest='leave_segments', default=False, help='Indicates that you want ' + 'the segments of manifest objects left alone') (options, args) = parse_args(parser, args) args = args[1:] if (not args and not options.yes_all) or (args and options.yes_all): @@ -181,34 +992,32 @@ def st_delete(parser, args, print_queue, error_queue): def _delete_object((container, obj), conn): try: old_manifest = None - query_string = None if not options.leave_segments: try: - headers = conn.head_object(container, obj) - old_manifest = headers.get('x-object-manifest') - if utils.config_true_value( - headers.get('x-static-large-object')): - query_string = 'multipart-manifest=delete' - except ClientException as err: + old_manifest = conn.head_object(container, obj).get( + 'x-object-manifest') + except ClientException, err: if err.http_status != 404: raise - conn.delete_object(container, obj, query_string=query_string) + conn.delete_object(container, obj) if old_manifest: segment_queue = Queue(10000) scontainer, sprefix = old_manifest.split('/', 1) - scontainer = unquote(scontainer) - sprefix = unquote(sprefix).rstrip('/') + '/' for delobj in conn.get_container(scontainer, prefix=sprefix)[1]: segment_queue.put((scontainer, delobj['name'])) if not segment_queue.empty(): - segment_threads = [QueueFunctionThread( - segment_queue, + segment_threads = [QueueFunctionThread(segment_queue, _delete_segment, create_connection()) for _junk in - xrange(options.object_threads)] + xrange(10)] for thread in segment_threads: thread.start() - shutdown_worker_threads(segment_queue, segment_threads) + while not segment_queue.empty(): + sleep(0.01) + for thread in segment_threads: + thread.abort = True + while thread.isAlive(): + thread.join(0.01) put_errors_from_threads(segment_threads, error_queue) if options.verbose: path = options.yes_all and join(container, obj) or obj @@ -219,7 +1028,7 @@ def st_delete(parser, args, print_queue, error_queue): (path, conn.attempts)) else: print_queue.put(path) - except ClientException as err: + except ClientException, err: if err.http_status != 404: raise error_queue.put('Object %s not found' % @@ -239,109 +1048,97 @@ def st_delete(parser, args, print_queue, error_queue): object_queue.put((container, obj)) marker = objects[-1] while not object_queue.empty(): - sleep(0.05) + sleep(0.01) attempts = 1 while True: try: conn.delete_container(container) break - except ClientException as err: + except ClientException, err: if err.http_status != 409: raise if attempts > 10: raise attempts += 1 sleep(1) - except ClientException as err: + except ClientException, err: if err.http_status != 404: raise error_queue.put('Container %s not found' % repr(container)) - create_connection = lambda: get_conn(options) - 
object_threads = \ - [QueueFunctionThread(object_queue, _delete_object, create_connection()) - for _junk in xrange(options.object_threads)] + url, token = get_auth(options.auth, options.user, options.key, + snet=options.snet) + create_connection = lambda: Connection(options.auth, options.user, + options.key, preauthurl=url, preauthtoken=token, snet=options.snet) + object_threads = [QueueFunctionThread(object_queue, _delete_object, + create_connection()) for _junk in xrange(10)] for thread in object_threads: thread.start() - container_threads = \ - [QueueFunctionThread(container_queue, _delete_container, - create_connection()) - for _junk in xrange(options.container_threads)] + container_threads = [QueueFunctionThread(container_queue, + _delete_container, create_connection()) for _junk in xrange(10)] for thread in container_threads: thread.start() - - try: - if not args: - conn = create_connection() - try: - marker = '' - while True: - containers = [ - c['name'] for c in conn.get_account(marker=marker)[1]] - if not containers: - break - for container in containers: - container_queue.put(container) - marker = containers[-1] - except ClientException as err: - if err.http_status != 404: - raise - error_queue.put('Account not found') - elif len(args) == 1: - if '/' in args[0]: - print >> stderr, 'WARNING: / in container name; you might ' \ - 'have meant %r instead of %r.' % ( - args[0].replace('/', ' ', 1), args[0]) - conn = create_connection() - _delete_container(args[0], conn) - else: - for obj in args[1:]: - object_queue.put((args[0], obj)) - finally: - shutdown_worker_threads(container_queue, container_threads) - put_errors_from_threads(container_threads, error_queue) - - shutdown_worker_threads(object_queue, object_threads) - put_errors_from_threads(object_threads, error_queue) + if not args: + conn = create_connection() + try: + marker = '' + while True: + containers = \ + [c['name'] for c in conn.get_account(marker=marker)[1]] + if not containers: + break + for container in containers: + container_queue.put(container) + marker = containers[-1] + while not container_queue.empty(): + sleep(0.01) + while not object_queue.empty(): + sleep(0.01) + except ClientException, err: + if err.http_status != 404: + raise + error_queue.put('Account not found') + elif len(args) == 1: + if '/' in args[0]: + print >> stderr, 'WARNING: / in container name; you might have ' \ + 'meant %r instead of %r.' % \ + (args[0].replace('/', ' ', 1), args[0]) + conn = create_connection() + _delete_container(args[0], conn) + else: + for obj in args[1:]: + object_queue.put((args[0], obj)) + while not container_queue.empty(): + sleep(0.01) + for thread in container_threads: + thread.abort = True + while thread.isAlive(): + thread.join(0.01) + put_errors_from_threads(container_threads, error_queue) + while not object_queue.empty(): + sleep(0.01) + for thread in object_threads: + thread.abort = True + while thread.isAlive(): + thread.join(0.01) + put_errors_from_threads(object_threads, error_queue) st_download_help = ''' -download --all [options] OR download container [options] [object] [object] ... - Downloads everything in the account (with --all), or everything in all - containers in the account matching a prefix (with --all and -p [--prefix]), - or everything in a container, or a subset of a container with -p - [--prefix], or a list of objects depending on the args given. -p or - --prefix is an option that will only download items beginning with that - prefix. 
For a single object download, you may use the -o [--output] - option to redirect the output to a specific file or if "-" then - just redirect to stdout.'''.strip('\n') +download --all OR download container [options] [object] [object] ... + Downloads everything in the account (with --all), or everything in a + container, or a list of objects depending on the args given. For a single + object download, you may use the -o [--output] option to + redirect the output to a specific file or if "-" then just redirect to + stdout.'''.strip('\n') -def st_download(parser, args, print_queue, error_queue): - parser.add_option( - '-a', '--all', action='store_true', dest='yes_all', +def st_download(options, args, print_queue, error_queue): + parser.add_option('-a', '--all', action='store_true', dest='yes_all', default=False, help='Indicates that you really want to download ' 'everything in the account') - parser.add_option( - '-m', '--marker', dest='marker', - default='', help='Marker to use when starting a container or ' - 'account download') - parser.add_option( - '-p', '--prefix', dest='prefix', - help='Will only download items beginning with the prefix') - parser.add_option( - '-o', '--output', dest='out_file', help='For a single ' + parser.add_option('-o', '--output', dest='out_file', help='For a single ' 'file download, stream the output to an alternate location ') - parser.add_option( - '', '--object-threads', type=int, - default=10, help='Number of threads to use for downloading objects') - parser.add_option( - '', '--container-threads', type=int, default=10, - help='Number of threads to use for listing containers') - parser.add_option( - '', '--no-download', action='store_true', - default=False, - help="Perform download(s), but don't actually write anything to disk") (options, args) = parse_args(parser, args) args = args[1:] if options.out_file == '-': @@ -364,10 +1161,8 @@ def st_download(parser, args, print_queue, error_queue): else: raise Exception("Invalid queue_arg length of %s" % len(queue_arg)) try: - start_time = time() headers, body = \ conn.get_object(container, obj, resp_chunk_size=65536) - header_receipt = time() content_type = headers.get('content-type') if 'content-length' in headers: content_length = int(headers.get('content-length')) @@ -378,13 +1173,12 @@ def st_download(parser, args, print_queue, error_queue): if path[:1] in ('/', '\\'): path = path[1:] md5sum = None - make_dir = not options.no_download and out_file != "-" + make_dir = out_file != "-" if content_type.split(';', 1)[0] == 'text/directory': if make_dir and not isdir(path): mkdirs(path) read_length = 0 - if 'x-object-manifest' not in headers and \ - 'x-static-large-object' not in headers: + if 'x-object-manifest' not in headers: md5sum = md5() for chunk in body: read_length += len(chunk) @@ -394,47 +1188,37 @@ def st_download(parser, args, print_queue, error_queue): dirpath = dirname(path) if make_dir and dirpath and not isdir(dirpath): mkdirs(dirpath) - if not options.no_download: - if out_file == "-": - fp = stdout - elif out_file: - fp = open(out_file, 'wb') - else: - fp = open(path, 'wb') + if out_file == "-": + fp = stdout + elif out_file: + fp = open(out_file, 'wb') + else: + fp = open(path, 'wb') read_length = 0 - if 'x-object-manifest' not in headers and \ - 'x-static-large-object' not in headers: + if 'x-object-manifest' not in headers: md5sum = md5() for chunk in body: - if not options.no_download: - fp.write(chunk) + fp.write(chunk) read_length += len(chunk) if md5sum: md5sum.update(chunk) - if not 
options.no_download:
-                fp.close()
+            fp.close()
             if md5sum and md5sum.hexdigest() != etag:
                 error_queue.put('%s: md5sum != etag, %s != %s' %
                                 (path, md5sum.hexdigest(), etag))
             if content_length is not None and read_length != content_length:
                 error_queue.put('%s: read_length != content_length, %d != %d' %
                                 (path, read_length, content_length))
-            if 'x-object-meta-mtime' in headers and not options.out_file \
-                    and not options.no_download:
-
+            if 'x-object-meta-mtime' in headers and not options.out_file:
                 mtime = float(headers['x-object-meta-mtime'])
                 utime(path, (mtime, mtime))
             if options.verbose:
-                finish_time = time()
-                time_str = 'headers %.3fs, total %.3fs, %.3fs MB/s' % (
-                    header_receipt - start_time, finish_time - start_time,
-                    float(read_length) / (finish_time - start_time) / 1000000)
                 if conn.attempts > 1:
-                    print_queue.put('%s [%s after %d attempts]' %
-                                    (path, time_str, conn.attempts))
+                    print_queue.put('%s [after %d attempts]' %
+                        (path, conn.attempts))
                 else:
-                    print_queue.put('%s [%s]' % (path, time_str))
+                    print_queue.put(path)
-        except ClientException as err:
+        except ClientException, err:
             if err.http_status != 404:
                 raise
             error_queue.put('Object %s not found' %
@@ -442,212 +1226,120 @@
     container_queue = Queue(10000)
 
-    def _download_container(container, conn, prefix=None):
+    def _download_container(container, conn):
         try:
-            marker = options.marker
+            marker = ''
             while True:
-                objects = [
-                    o['name'] for o in
-                    conn.get_container(container, marker=marker,
-                                       prefix=prefix)[1]]
+                objects = [o['name'] for o in
+                           conn.get_container(container, marker=marker)[1]]
                 if not objects:
                     break
-                marker = objects[-1]
-                shuffle(objects)
                 for obj in objects:
                     object_queue.put((container, obj))
+                marker = objects[-1]
-        except ClientException as err:
+        except ClientException, err:
             if err.http_status != 404:
                 raise
             error_queue.put('Container %s not found' % repr(container))
 
-    create_connection = lambda: get_conn(options)
-    object_threads = [QueueFunctionThread(
-        object_queue, _download_object,
-        create_connection()) for _junk in xrange(options.object_threads)]
+    url, token = get_auth(options.auth, options.user, options.key,
+        snet=options.snet)
+    create_connection = lambda: Connection(options.auth, options.user,
+        options.key, preauthurl=url, preauthtoken=token, snet=options.snet)
+    object_threads = [QueueFunctionThread(object_queue, _download_object,
+        create_connection()) for _junk in xrange(10)]
     for thread in object_threads:
         thread.start()
-    container_threads = [QueueFunctionThread(
-        container_queue,
-        _download_container, create_connection())
-        for _junk in xrange(options.container_threads)]
+    container_threads = [QueueFunctionThread(container_queue,
+        _download_container, create_connection()) for _junk in xrange(10)]
     for thread in container_threads:
         thread.start()
-
-    # We musn't let the main thread die with an exception while non-daemonic
-    # threads exist or the process with hang and ignore Ctrl-C. So we catch
-    # anything and tidy up the threads in a finally block.
- try: - if not args: - # --all case - conn = create_connection() - try: - marker = options.marker - while True: - containers = [ - c['name'] for c in conn.get_account( - marker=marker, prefix=options.prefix)[1]] - if not containers: - break - marker = containers[-1] - shuffle(containers) - for container in containers: - container_queue.put(container) - except ClientException as err: - if err.http_status != 404: - raise - error_queue.put('Account not found') - elif len(args) == 1: - if '/' in args[0]: - print >> stderr, ('WARNING: / in container name; you might ' - 'have meant %r instead of %r.' % ( - args[0].replace('/', ' ', 1), args[0])) - _download_container(args[0], create_connection(), - options.prefix) - else: - if len(args) == 2: - obj = args[1] - object_queue.put((args[0], obj, options.out_file)) - else: - for obj in args[1:]: - object_queue.put((args[0], obj)) - finally: - shutdown_worker_threads(container_queue, container_threads) - put_errors_from_threads(container_threads, error_queue) - - shutdown_worker_threads(object_queue, object_threads) - put_errors_from_threads(object_threads, error_queue) - - -def prt_bytes(bytes, human_flag): - """ - convert a number > 1024 to printable format, either in 4 char -h format as - with ls -lh or return as 12 char right justified string - """ - - if human_flag: - suffix = '' - mods = 'KMGTPEZY' - temp = float(bytes) - if temp > 0: - while (temp > 1023): - temp /= 1024.0 - suffix = mods[0] - mods = mods[1:] - if suffix != '': - if temp >= 10: - bytes = '%3d%s' % (temp, suffix) - else: - bytes = '%.1f%s' % (temp, suffix) - if suffix == '': # must be < 1024 - bytes = '%4s' % bytes + if not args: + conn = create_connection() + try: + marker = '' + while True: + containers = [c['name'] + for c in conn.get_account(marker=marker)[1]] + if not containers: + break + for container in containers: + container_queue.put(container) + marker = containers[-1] + except ClientException, err: + if err.http_status != 404: + raise + error_queue.put('Account not found') + elif len(args) == 1: + if '/' in args[0]: + print >> stderr, 'WARNING: / in container name; you might have ' \ + 'meant %r instead of %r.' % \ + (args[0].replace('/', ' ', 1), args[0]) + _download_container(args[0], create_connection()) else: - bytes = '%12s' % bytes - - return(bytes) + if len(args) == 2: + obj = args[1] + object_queue.put((args[0], obj, options.out_file)) + else: + for obj in args[1:]: + object_queue.put((args[0], obj)) + while not container_queue.empty(): + sleep(0.01) + for thread in container_threads: + thread.abort = True + while thread.isAlive(): + thread.join(0.01) + put_errors_from_threads(container_threads, error_queue) + while not object_queue.empty(): + sleep(0.01) + for thread in object_threads: + thread.abort = True + while thread.isAlive(): + thread.join(0.01) + put_errors_from_threads(object_threads, error_queue) st_list_help = ''' list [options] [container] Lists the containers for the account or the objects for a container. -p or --prefix is an option that will only list items beginning with that prefix. - -l produces output formatted like 'ls -l' and --lh like 'ls -lh'. - -t used with -l or --lh, only report totals -d or --delimiter is option (for container listings only) that will roll up - items with the given delimiter (see http://docs.openstack.org/ - api/openstack-object-storage/1.0/content/list-objects.html) + items with the given delimiter (see Cloud Files general documentation for + what this means). 
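+    A quick usage sketch (the container name is illustrative):
+        list -d / mycontainer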
'''.strip('\n') -def st_list(parser, args, print_queue, error_queue): - parser.add_option( - '-l', '--long', dest='long', help='Long listing ' - 'similar to ls -l command', action='store_true', default=False) - parser.add_option( - '--lh', dest='human', help='report sizes as human ' - "similar to ls -lh switch, but -h taken", action='store_true', - default=False) - parser.add_option( - '-t', dest='totals', help='used with -l or --ls, only report totals', - action='store_true', default=False) - parser.add_option( - '-p', '--prefix', dest='prefix', - help='Will only list items beginning with the prefix') - parser.add_option( - '-d', '--delimiter', dest='delimiter', - help='Will roll up items with the given delimiter' - ' (see OpenStack Swift API documentation for what this means)') +def st_list(options, args, print_queue, error_queue): + parser.add_option('-p', '--prefix', dest='prefix', help='Will only list ' + 'items beginning with the prefix') + parser.add_option('-d', '--delimiter', dest='delimiter', help='Will roll ' + 'up items with the given delimiter (see Cloud Files general ' + 'documentation for what this means)') (options, args) = parse_args(parser, args) args = args[1:] if options.delimiter and not args: exit('-d option only allowed for container listings') - if len(args) > 1 or len(args) == 1 and args[0].find('/') >= 0: + if len(args) > 1: error_queue.put('Usage: %s [options] %s' % (basename(argv[0]), st_list_help)) return - - conn = get_conn(options) + conn = Connection(options.auth, options.user, options.key, + snet=options.snet) try: marker = '' - total_count = total_bytes = 0 while True: if not args: items = \ conn.get_account(marker=marker, prefix=options.prefix)[1] else: - items = conn.get_container( - args[0], marker=marker, + items = conn.get_container(args[0], marker=marker, prefix=options.prefix, delimiter=options.delimiter)[1] if not items: break for item in items: - item_name = item.get('name') - - if not options.long and not options.human: - print_queue.put(item.get('name', item.get('subdir'))) - else: - item_bytes = item.get('bytes') - total_bytes += item_bytes - if len(args) == 0: # listing containers - bytes = prt_bytes(item_bytes, options.human) - count = item.get('count') - total_count += count - try: - meta = conn.head_container(item_name) - utc = gmtime(float(meta.get('x-timestamp'))) - datestamp = strftime('%Y-%m-%d %H:%M:%S', utc) - except ClientException: - datestamp = '????-??-?? ??:??:??' 
- if not options.totals: - print_queue.put("%5s %s %s %s" % - (count, bytes, datestamp, - item_name)) - else: # list container contents - subdir = item.get('subdir') - if subdir is None: - bytes = prt_bytes(item_bytes, options.human) - date, xtime = item.get('last_modified').split('T') - xtime = xtime.split('.')[0] - else: - bytes = prt_bytes(0, options.human) - date = xtime = '' - item_name = subdir - if not options.totals: - print_queue.put("%s %10s %8s %s" % - (bytes, date, xtime, item_name)) - - marker = items[-1].get('name', items[-1].get('subdir')) - - # report totals - if options.long or options.human: - if len(args) == 0: - print_queue.put("%5s %s" % (prt_bytes(total_count, True), - prt_bytes(total_bytes, - options.human))) - else: - print_queue.put("%s" % (prt_bytes(total_bytes, options.human))) - - except ClientException as err: + print_queue.put(item.get('name', item.get('subdir'))) + marker = items[-1].get('name', items[-1].get('subdir')) + except ClientException, err: if err.http_status != 404: raise if not args: @@ -655,20 +1347,17 @@ def st_list(parser, args, print_queue, error_queue): else: error_queue.put('Container %s not found' % repr(args[0])) + st_stat_help = ''' stat [container] [object] Displays information for the account, container, or object depending on the - args given (if any). --lh will print number of objects and total sizes - like 'list --lh' noting number of objs a multiple of 1024'''.strip('\n') + args given (if any).'''.strip('\n') -def st_stat(parser, args, print_queue, error_queue): - parser.add_option( - '--lh', dest='human', help="report totals like 'list --lh'", - action='store_true', default=False) +def st_stat(options, args, print_queue, error_queue): (options, args) = parse_args(parser, args) args = args[1:] - conn = get_conn(options) + conn = Connection(options.auth, options.user, options.key) if not args: try: headers = conn.head_account() @@ -678,20 +1367,17 @@ StorageURL: %s Auth Token: %s '''.strip('\n') % (conn.url, conn.token)) container_count = int(headers.get('x-account-container-count', 0)) - object_count = prt_bytes(headers.get('x-account-object-count', 0), - options.human).lstrip() - bytes_used = prt_bytes(headers.get('x-account-bytes-used', 0), - options.human).lstrip() + object_count = int(headers.get('x-account-object-count', 0)) + bytes_used = int(headers.get('x-account-bytes-used', 0)) print_queue.put(''' Account: %s Containers: %d - Objects: %s - Bytes: %s'''.strip('\n') % (conn.url.rsplit('/', 1)[-1], container_count, + Objects: %d + Bytes: %d'''.strip('\n') % (conn.url.rsplit('/', 1)[-1], container_count, object_count, bytes_used)) for key, value in headers.items(): if key.startswith('x-account-meta-'): - print_queue.put( - '%10s: %s' % ('Meta %s' % + print_queue.put('%10s: %s' % ('Meta %s' % key[len('x-account-meta-'):].title(), value)) for key, value in headers.items(): if not key.startswith('x-account-meta-') and key not in ( @@ -699,7 +1385,7 @@ Containers: %d 'x-account-object-count', 'x-account-bytes-used'): print_queue.put( '%10s: %s' % (key.title(), value)) - except ClientException as err: + except ClientException, err: if err.http_status != 404: raise error_queue.put('Account not found') @@ -710,16 +1396,13 @@ Containers: %d (args[0].replace('/', ' ', 1), args[0]) try: headers = conn.head_container(args[0]) - object_count = prt_bytes( - headers.get('x-container-object-count', 0), - options.human).lstrip() - bytes_used = prt_bytes(headers.get('x-container-bytes-used', 0), - options.human).lstrip() + object_count = 
int(headers.get('x-container-object-count', 0)) + bytes_used = int(headers.get('x-container-bytes-used', 0)) print_queue.put(''' Account: %s Container: %s - Objects: %s - Bytes: %s + Objects: %d + Bytes: %d Read ACL: %s Write ACL: %s Sync To: %s @@ -731,8 +1414,7 @@ Write ACL: %s headers.get('x-container-sync-key', ''))) for key, value in headers.items(): if key.startswith('x-container-meta-'): - print_queue.put( - '%9s: %s' % ('Meta %s' % + print_queue.put('%9s: %s' % ('Meta %s' % key[len('x-container-meta-'):].title(), value)) for key, value in headers.items(): if not key.startswith('x-container-meta-') and key not in ( @@ -742,7 +1424,7 @@ Write ACL: %s 'x-container-sync-key'): print_queue.put( '%9s: %s' % (key.title(), value)) - except ClientException as err: + except ClientException, err: if err.http_status != 404: raise error_queue.put('Container %s not found' % repr(args[0])) @@ -757,8 +1439,7 @@ Write ACL: %s args[1], headers.get('content-type'))) if 'content-length' in headers: print_queue.put('Content Length: %s' % - prt_bytes(headers['content-length'], - options.human).lstrip()) + headers['content-length']) if 'last-modified' in headers: print_queue.put(' Last Modified: %s' % headers['last-modified']) @@ -769,8 +1450,7 @@ Write ACL: %s headers['x-object-manifest']) for key, value in headers.items(): if key.startswith('x-object-meta-'): - print_queue.put( - '%14s: %s' % ('Meta %s' % + print_queue.put('%14s: %s' % ('Meta %s' % key[len('x-object-meta-'):].title(), value)) for key, value in headers.items(): if not key.startswith('x-object-meta-') and key not in ( @@ -778,7 +1458,7 @@ Write ACL: %s 'etag', 'date', 'x-object-manifest'): print_queue.put( '%14s: %s' % (key.title(), value)) - except ClientException as err: + except ClientException, err: if err.http_status != 404: raise error_queue.put('Object %s not found' % @@ -799,41 +1479,35 @@ post [options] [container] [object] post -m Color:Blue -m Size:Large'''.strip('\n') -def st_post(parser, args, print_queue, error_queue): - parser.add_option( - '-r', '--read-acl', dest='read_acl', help='Sets the ' +def st_post(options, args, print_queue, error_queue): + parser.add_option('-r', '--read-acl', dest='read_acl', help='Sets the ' 'Read ACL for containers. Quick summary of ACL syntax: .r:*, ' '.r:-.example.com, .r:www.example.com, account1, account2:user2') - parser.add_option( - '-w', '--write-acl', dest='write_acl', help='Sets the ' + parser.add_option('-w', '--write-acl', dest='write_acl', help='Sets the ' 'Write ACL for containers. Quick summary of ACL syntax: account1, ' 'account2:user2') - parser.add_option( - '-t', '--sync-to', dest='sync_to', help='Sets the ' + parser.add_option('-t', '--sync-to', dest='sync_to', help='Sets the ' 'Sync To for containers, for multi-cluster replication.') - parser.add_option( - '-k', '--sync-key', dest='sync_key', help='Sets the ' + parser.add_option('-k', '--sync-key', dest='sync_key', help='Sets the ' 'Sync Key for containers, for multi-cluster replication.') - parser.add_option( - '-m', '--meta', action='append', dest='meta', default=[], + parser.add_option('-m', '--meta', action='append', dest='meta', default=[], help='Sets a meta data item with the syntax name:value. This option ' 'may be repeated. Example: -m Color:Blue -m Size:Large') - parser.add_option( - '-H', '--header', action='append', dest='header', - default=[], help='Set request headers with the syntax header:value. ' - ' This option may be repeated. 
Example -H content-type:text/plain ' - '-H "Content-Length: 4000"') (options, args) = parse_args(parser, args) args = args[1:] if (options.read_acl or options.write_acl or options.sync_to or - options.sync_key) and not args: + options.sync_key) and not args: exit('-r, -w, -t, and -k options only allowed for containers') - conn = get_conn(options) + conn = Connection(options.auth, options.user, options.key) if not args: - headers = split_headers(options.meta, 'X-Account-Meta-', error_queue) + headers = {} + for item in options.meta: + split_item = item.split(':') + headers['X-Account-Meta-' + split_item[0]] = \ + len(split_item) > 1 and split_item[1] try: conn.post_account(headers=headers) - except ClientException as err: + except ClientException, err: if err.http_status != 404: raise error_queue.put('Account not found') @@ -842,7 +1516,11 @@ def st_post(parser, args, print_queue, error_queue): print >> stderr, 'WARNING: / in container name; you might have ' \ 'meant %r instead of %r.' % \ (args[0].replace('/', ' ', 1), args[0]) - headers = split_headers(options.meta, 'X-Container-Meta-', error_queue) + headers = {} + for item in options.meta: + split_item = item.split(':') + headers['X-Container-Meta-' + split_item[0]] = \ + len(split_item) > 1 and split_item[1] if options.read_acl is not None: headers['X-Container-Read'] = options.read_acl if options.write_acl is not None: @@ -853,17 +1531,19 @@ def st_post(parser, args, print_queue, error_queue): headers['X-Container-Sync-Key'] = options.sync_key try: conn.post_container(args[0], headers=headers) - except ClientException as err: + except ClientException, err: if err.http_status != 404: raise conn.put_container(args[0], headers=headers) elif len(args) == 2: - headers = split_headers(options.meta, 'X-Object-Meta-', error_queue) - # add header options to the headers object for the request. - headers.update(split_headers(options.header, '', error_queue)) + headers = {} + for item in options.meta: + split_item = item.split(':') + headers['X-Object-Meta-' + split_item[0]] = \ + len(split_item) > 1 and split_item[1] try: conn.post_object(args[0], args[1], headers=headers) - except ClientException as err: + except ClientException, err: if err.http_status != 404: raise error_queue.put('Object %s not found' % @@ -878,48 +1558,24 @@ upload [options] container file_or_directory [file_or_directory] [...] Uploads to the given container the files and directories specified by the remaining args. -c or --changed is an option that will only upload files that have changed since the last upload. -S or --segment-size - will upload the files in segments no larger than size. -C or - --segment-container will specify the location of the segments - to . --leave-segments are options as well (see --help for more). + and --leave-segments are options as well (see --help for more). 
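The options dropped here mean the restored uploader always derives segment placement from the target container instead of honouring a separate segment container. Roughly, and with illustrative names only, the plan it builds for a large file looks like this (the %08d index and the x-object-manifest prefix match the reinstated code further down):

    from os.path import getsize

    def plan_segments(container, obj, path, mtime, segment_size):
        # Sketch of the dynamic-large-object layout the old client uses:
        # segments land in '<container>_segments' under a common prefix,
        # and the manifest object only carries a header naming that prefix.
        full_size = getsize(path)
        jobs, segment, start = [], 0, 0
        while start < full_size:
            size = min(segment_size, full_size - start)
            jobs.append({'container': container + '_segments',
                         'obj': '%s/%s/%s/%08d' % (obj, mtime, full_size, segment),
                         'segment_start': start,
                         'segment_size': size})
            segment += 1
            start += size
        manifest = '%s_segments/%s/%s/%s/' % (container, obj, mtime, full_size)
        return jobs, manifest

Deleting or replacing a manifest object therefore also means listing and deleting everything under that prefix, which is what the old_manifest cleanup reinstated below does.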
'''.strip('\n') -def st_upload(parser, args, print_queue, error_queue): - parser.add_option( - '-c', '--changed', action='store_true', dest='changed', +def st_upload(options, args, print_queue, error_queue): + parser.add_option('-c', '--changed', action='store_true', dest='changed', default=False, help='Will only upload files that have changed since ' 'the last upload') - parser.add_option( - '-S', '--segment-size', dest='segment_size', help='Will ' + parser.add_option('-S', '--segment-size', dest='segment_size', help='Will ' 'upload files in segments no larger than and then create a ' '"manifest" file that will download all the segments as if it were ' - 'the original file.') - parser.add_option( - '-C', '--segment-container', dest='segment_container', - help='Will upload the segments into the specified container.' - 'If not specified, the segments will be uploaded to ' + 'the original file. The segments will be uploaded to a ' '_segments container so as to not pollute the main ' ' listings.') - parser.add_option( - '', '--leave-segments', action='store_true', + parser.add_option('', '--leave-segments', action='store_true', dest='leave_segments', default=False, help='Indicates that you want ' 'the older segments of manifest objects left alone (in the case of ' 'overwrites)') - parser.add_option( - '', '--object-threads', type=int, default=10, - help='Number of threads to use for uploading full objects') - parser.add_option( - '', '--segment-threads', type=int, default=10, - help='Number of threads to use for uploading object segments') - parser.add_option( - '-H', '--header', action='append', dest='header', - default=[], help='Set request headers with the syntax header:value. ' - ' This option may be repeated. Example -H content-type:text/plain ' - '-H "Content-Length: 4000"') - parser.add_option('', '--use-slo', action='store_true', default=False, - help='When used in conjuction with --segment-size will ' - 'create a Static Large Object instead of the default ' - 'Dynamic Large Object.') (options, args) = parse_args(parser, args) args = args[1:] if len(args) < 2: @@ -934,21 +1590,14 @@ def st_upload(parser, args, print_queue, error_queue): else: fp = open(job['path'], 'rb') fp.seek(job['segment_start']) - seg_container = args[0] + '_segments' - if options.segment_container: - seg_container = options.segment_container - etag = conn.put_object(job.get('container', seg_container), - job['obj'], fp, - content_length=job['segment_size']) - job['segment_location'] = '/%s/%s' % (seg_container, job['obj']) - job['segment_etag'] = etag + conn.put_object(job.get('container', args[0] + '_segments'), + job['obj'], fp, content_length=job['segment_size']) if options.verbose and 'log_line' in job: if conn.attempts > 1: print_queue.put('%s [after %d attempts]' % (job['log_line'], conn.attempts)) else: print_queue.put(job['log_line']) - return job def _object_job(job, conn): path = job['path'] @@ -960,7 +1609,7 @@ def st_upload(parser, args, print_queue, error_queue): obj = obj[2:] if obj.startswith('/'): obj = obj[1:] - put_headers = {'x-object-meta-mtime': "%f" % getmtime(path)} + put_headers = {'x-object-meta-mtime': str(getmtime(path))} if dir_marker: if options.changed: try: @@ -974,7 +1623,7 @@ def st_upload(parser, args, print_queue, error_queue): et == 'd41d8cd98f00b204e9800998ecf8427e' and \ mt == put_headers['x-object-meta-mtime']: return - except ClientException as err: + except ClientException, err: if err.http_status != 404: raise conn.put_object(container, obj, '', content_length=0, @@ 
-985,8 +1634,6 @@ def st_upload(parser, args, print_queue, error_queue): # manifest object and need to delete the old segments # ourselves. old_manifest = None - old_slo_manifest_paths = [] - new_slo_manifest_paths = set() if options.changed or not options.leave_segments: try: headers = conn.head_object(container, obj) @@ -997,134 +1644,73 @@ def st_upload(parser, args, print_queue, error_queue): return if not options.leave_segments: old_manifest = headers.get('x-object-manifest') - if utils.config_true_value( - headers.get('x-static-large-object')): - headers, manifest_data = conn.get_object( - container, obj, - query_string='multipart-manifest=get') - for old_seg in json.loads(manifest_data): - seg_path = old_seg['name'].lstrip('/') - if isinstance(seg_path, unicode): - seg_path = seg_path.encode('utf-8') - old_slo_manifest_paths.append(seg_path) - except ClientException as err: + except ClientException, err: if err.http_status != 404: raise - # Merge the command line header options to the put_headers - put_headers.update(split_headers(options.header, '', - error_queue)) - # Don't do segment job if object is not big enough if options.segment_size and \ - getsize(path) > int(options.segment_size): - seg_container = container + '_segments' - if options.segment_container: - seg_container = options.segment_container + getsize(path) < options.segment_size: full_size = getsize(path) segment_queue = Queue(10000) - segment_threads = [ - QueueFunctionThread( - segment_queue, _segment_job, - create_connection(), store_results=True) - for _junk in xrange(options.segment_threads)] + segment_threads = [QueueFunctionThread(segment_queue, + _segment_job, create_connection()) for _junk in + xrange(10)] for thread in segment_threads: thread.start() - try: - segment = 0 - segment_start = 0 - while segment_start < full_size: - segment_size = int(options.segment_size) - if segment_start + segment_size > full_size: - segment_size = full_size - segment_start - if options.use_slo: - segment_name = '%s/slo/%s/%s/%s/%08d' % ( - obj, put_headers['x-object-meta-mtime'], - full_size, options.segment_size, segment) - else: - segment_name = '%s/%s/%s/%s/%08d' % ( - obj, put_headers['x-object-meta-mtime'], - full_size, options.segment_size, segment) - segment_queue.put( - {'path': path, 'obj': segment_name, - 'segment_start': segment_start, - 'segment_size': segment_size, - 'segment_index': segment, - 'log_line': '%s segment %s' % (obj, segment)}) - segment += 1 - segment_start += segment_size - finally: - shutdown_worker_threads(segment_queue, segment_threads) - if put_errors_from_threads(segment_threads, - error_queue): - raise ClientException( - 'Aborting manifest creation ' - 'because not all segments could be uploaded. 
' - '%s/%s' % (container, obj)) - if options.use_slo: - slo_segments = [] - for thread in segment_threads: - slo_segments += thread.results - slo_segments.sort(key=lambda d: d['segment_index']) - for seg in slo_segments: - seg_loc = seg['segment_location'].lstrip('/') - if isinstance(seg_loc, unicode): - seg_loc = seg_loc.encode('utf-8') - new_slo_manifest_paths.add(seg_loc) - - manifest_data = json.dumps([ - {'path': d['segment_location'], - 'etag': d['segment_etag'], - 'size_bytes': d['segment_size']} - for d in slo_segments]) - - put_headers['x-static-large-object'] = 'true' - conn.put_object(container, obj, manifest_data, - headers=put_headers, - query_string='multipart-manifest=put') - else: - new_object_manifest = '%s/%s/%s/%s/%s/' % ( - quote(seg_container), quote(obj), - put_headers['x-object-meta-mtime'], full_size, - options.segment_size) - if old_manifest and old_manifest.rstrip('/') == \ - new_object_manifest.rstrip('/'): - old_manifest = None - put_headers['x-object-manifest'] = new_object_manifest - conn.put_object(container, obj, '', content_length=0, - headers=put_headers) + segment = 0 + segment_start = 0 + while segment_start < full_size: + segment_size = int(options.segment_size) + if segment_start + segment_size > full_size: + segment_size = full_size - segment_start + segment_queue.put({'path': path, + 'obj': '%s/%s/%s/%08d' % (obj, + put_headers['x-object-meta-mtime'], full_size, + segment), + 'segment_start': segment_start, + 'segment_size': segment_size, + 'log_line': '%s segment %s' % (obj, segment)}) + segment += 1 + segment_start += segment_size + while not segment_queue.empty(): + sleep(0.01) + for thread in segment_threads: + thread.abort = True + while thread.isAlive(): + thread.join(0.01) + if put_errors_from_threads(segment_threads, error_queue): + raise ClientException('Aborting manifest creation ' + 'because not all segments could be uploaded. 
%s/%s' + % (container, obj)) + new_object_manifest = '%s_segments/%s/%s/%s/' % ( + container, obj, put_headers['x-object-meta-mtime'], + full_size) + if old_manifest == new_object_manifest: + old_manifest = None + put_headers['x-object-manifest'] = new_object_manifest + conn.put_object(container, obj, '', content_length=0, + headers=put_headers) else: - conn.put_object( - container, obj, open(path, 'rb'), + conn.put_object(container, obj, open(path, 'rb'), content_length=getsize(path), headers=put_headers) - if old_manifest or old_slo_manifest_paths: + if old_manifest: segment_queue = Queue(10000) - if old_manifest: - scontainer, sprefix = old_manifest.split('/', 1) - scontainer = unquote(scontainer) - sprefix = unquote(sprefix).rstrip('/') + '/' - for delobj in conn.get_container(scontainer, - prefix=sprefix)[1]: - segment_queue.put( - {'delete': True, - 'container': scontainer, - 'obj': delobj['name']}) - if old_slo_manifest_paths: - for seg_to_delete in old_slo_manifest_paths: - if seg_to_delete in new_slo_manifest_paths: - continue - scont, sobj = \ - seg_to_delete.split('/', 1) - segment_queue.put( - {'delete': True, - 'container': scont, 'obj': sobj}) + scontainer, sprefix = old_manifest.split('/', 1) + for delobj in conn.get_container(scontainer, + prefix=sprefix)[1]: + segment_queue.put({'delete': True, + 'container': scontainer, 'obj': delobj['name']}) if not segment_queue.empty(): - segment_threads = [ - QueueFunctionThread( - segment_queue, - _segment_job, create_connection()) - for _junk in xrange(options.segment_threads)] + segment_threads = [QueueFunctionThread(segment_queue, + _segment_job, create_connection()) for _junk in + xrange(10)] for thread in segment_threads: thread.start() - shutdown_worker_threads(segment_queue, segment_threads) + while not segment_queue.empty(): + sleep(0.01) + for thread in segment_threads: + thread.abort = True + while thread.isAlive(): + thread.join(0.01) put_errors_from_threads(segment_threads, error_queue) if options.verbose: if conn.attempts > 1: @@ -1132,7 +1718,7 @@ def st_upload(parser, args, print_queue, error_queue): '%s [after %d attempts]' % (obj, conn.attempts)) else: print_queue.put(obj) - except OSError as err: + except OSError, err: if err.errno != ENOENT: raise error_queue.put('Local file %s not found' % repr(path)) @@ -1149,10 +1735,12 @@ def st_upload(parser, args, print_queue, error_queue): else: object_queue.put({'path': subpath}) - create_connection = lambda: get_conn(options) - object_threads = [ - QueueFunctionThread(object_queue, _object_job, create_connection()) - for _junk in xrange(options.object_threads)] + url, token = get_auth(options.auth, options.user, options.key, + snet=options.snet) + create_connection = lambda: Connection(options.auth, options.user, + options.key, preauthurl=url, preauthtoken=token, snet=options.snet) + object_threads = [QueueFunctionThread(object_queue, _object_job, + create_connection()) for _junk in xrange(10)] for thread in object_threads: thread.start() conn = create_connection() @@ -1163,110 +1751,42 @@ def st_upload(parser, args, print_queue, error_queue): try: conn.put_container(args[0]) if options.segment_size is not None: - seg_container = args[0] + '_segments' - if options.segment_container: - seg_container = options.segment_container - conn.put_container(seg_container) - except ClientException as err: - msg = ' '.join(str(x) for x in (err.http_status, err.http_reason)) - if err.http_response_content: - if msg: - msg += ': ' - msg += err.http_response_content[:60] - 
error_queue.put( - 'Error trying to create container %r: %s' % (args[0], msg)) - except Exception as err: - error_queue.put( - 'Error trying to create container %r: %s' % (args[0], err)) - + conn.put_container(args[0] + '_segments') + except Exception: + pass try: for arg in args[1:]: if isdir(arg): _upload_dir(arg) else: object_queue.put({'path': arg}) - except ClientException as err: + while not object_queue.empty(): + sleep(0.01) + for thread in object_threads: + thread.abort = True + while thread.isAlive(): + thread.join(0.01) + put_errors_from_threads(object_threads, error_queue) + except ClientException, err: if err.http_status != 404: raise error_queue.put('Account not found') - finally: - shutdown_worker_threads(object_queue, object_threads) - put_errors_from_threads(object_threads, error_queue) - - -def split_headers(options, prefix='', error_queue=None): - """ - Splits 'Key: Value' strings and returns them as a dictionary. - - :param options: An array of 'Key: Value' strings - :param prefix: String to prepend to all of the keys in the dictionary. - :param error_queue: Queue for thread safe error reporting. - """ - headers = {} - for item in options: - split_item = item.split(':', 1) - if len(split_item) == 2: - headers[(prefix + split_item[0]).title()] = split_item[1] - else: - error_string = "Metadata parameter %s must contain a ':'.\n%s" \ - % (item, st_post_help) - if error_queue: - error_queue.put(error_string) - else: - exit(error_string) - return headers def parse_args(parser, args, enforce_requires=True): if not args: args = ['-h'] (options, args) = parser.parse_args(args) - - if (not (options.auth and options.user and options.key)): - # Use 2.0 auth if none of the old args are present - options.auth_version = '2.0' - - # Use new-style args if old ones not present - if not options.auth and options.os_auth_url: - options.auth = options.os_auth_url - if not options.user and options.os_username: - options.user = options.os_username - if not options.key and options.os_password: - options.key = options.os_password - - # Specific OpenStack options - options.os_options = { - 'tenant_id': options.os_tenant_id, - 'tenant_name': options.os_tenant_name, - 'service_type': options.os_service_type, - 'endpoint_type': options.os_endpoint_type, - 'auth_token': options.os_auth_token, - 'object_storage_url': options.os_storage_url, - 'region_name': options.os_region_name, - } - - if (options.os_options.get('object_storage_url') and - options.os_options.get('auth_token') and - options.auth_version == '2.0'): - return options, args - if enforce_requires and \ not (options.auth and options.user and options.key): exit(''' -Auth version 1.0 requires ST_AUTH, ST_USER, and ST_KEY environment variables -to be set or overridden with -A, -U, or -K. - -Auth version 2.0 requires OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, and -OS_TENANT_NAME OS_TENANT_ID to be set or overridden with --os-auth-url, ---os-username, --os-password, --os-tenant-name or os-tenant-id. 
Note: -adding "-V 2" is necessary for this.'''.strip('\n')) +Requires ST_AUTH, ST_USER, and ST_KEY environment variables be set or +overridden with -A, -U, or -K.'''.strip('\n')) return options, args if __name__ == '__main__': - version = version_info.version_string() - parser = OptionParser(version='%%prog %s' % version, - usage=''' + parser = OptionParser(version='%prog 1.0', usage=''' Usage: %%prog [options] [args] Commands: @@ -1277,130 +1797,24 @@ Commands: %(st_download_help)s %(st_delete_help)s -Examples: +Example: %%prog -A https://auth.api.rackspacecloud.com/v1.0 -U user -K key stat - - %%prog --os-auth-url https://api.example.com/v2.0 --os-tenant-name tenant \\ - --os-usernameuser --os-password password list - - %%prog --os-auth-token 6ee5eb33efad4e45ab46806eac010566 \\ - --os-storage-url https://10.1.5.2:8080/v1/AUTH_ced809b6a4baea7aeab61a \\ - list - - %%prog list --lh '''.strip('\n') % globals()) parser.add_option('-s', '--snet', action='store_true', dest='snet', default=False, help='Use SERVICENET internal network') parser.add_option('-v', '--verbose', action='count', dest='verbose', default=1, help='Print more info') - parser.add_option('--debug', action='store_true', dest='debug', - default=False, help='Show the curl commands of all http ' - 'queries.') parser.add_option('-q', '--quiet', action='store_const', dest='verbose', const=0, default=1, help='Suppress status output') parser.add_option('-A', '--auth', dest='auth', default=environ.get('ST_AUTH'), help='URL for obtaining an auth token') - parser.add_option('-V', '--auth-version', - dest='auth_version', - default=environ.get('ST_AUTH_VERSION', '1.0'), - type=str, - help='Specify a version for authentication. ' - 'Defaults to 1.0.') parser.add_option('-U', '--user', dest='user', default=environ.get('ST_USER'), - help='User name for obtaining an auth token.') + help='User name for obtaining an auth token') parser.add_option('-K', '--key', dest='key', default=environ.get('ST_KEY'), - help='Key for obtaining an auth token.') - parser.add_option('--os-username', - metavar='', - default=environ.get('OS_USERNAME'), - help='Openstack username. Defaults to env[OS_USERNAME].') - parser.add_option('--os_username', - help=SUPPRESS_HELP) - parser.add_option('--os-password', - metavar='', - default=environ.get('OS_PASSWORD'), - help='Openstack password. Defaults to env[OS_PASSWORD].') - parser.add_option('--os_password', - help=SUPPRESS_HELP) - parser.add_option('--os-tenant-id', - metavar='', - default=environ.get('OS_TENANT_ID'), - help='OpenStack tenant ID. ' - 'Defaults to env[OS_TENANT_ID]') - parser.add_option('--os_tenant_id', - help=SUPPRESS_HELP) - parser.add_option('--os-tenant-name', - metavar='', - default=environ.get('OS_TENANT_NAME'), - help='Openstack tenant name. ' - 'Defaults to env[OS_TENANT_NAME].') - parser.add_option('--os_tenant_name', - help=SUPPRESS_HELP) - parser.add_option('--os-auth-url', - metavar='', - default=environ.get('OS_AUTH_URL'), - help='Openstack auth URL. Defaults to env[OS_AUTH_URL].') - parser.add_option('--os_auth_url', - help=SUPPRESS_HELP) - parser.add_option('--os-auth-token', - metavar='', - default=environ.get('OS_AUTH_TOKEN'), - help='Openstack token. Defaults to env[OS_AUTH_TOKEN]. ' - 'Used with --os-storage-url to bypass the ' - 'usual username/password authentication.') - parser.add_option('--os_auth_token', - help=SUPPRESS_HELP) - parser.add_option('--os-storage-url', - metavar='', - default=environ.get('OS_STORAGE_URL'), - help='Openstack storage URL. 
' - 'Defaults to env[OS_STORAGE_URL]. ' - 'Used with --os-auth-token to bypass the ' - 'usual username/password authentication.') - parser.add_option('--os_storage_url', - help=SUPPRESS_HELP) - parser.add_option('--os-region-name', - metavar='', - default=environ.get('OS_REGION_NAME'), - help='Openstack region name. ' - 'Defaults to env[OS_REGION_NAME]') - parser.add_option('--os_region_name', - help=SUPPRESS_HELP) - parser.add_option('--os-service-type', - metavar='', - default=environ.get('OS_SERVICE_TYPE'), - help='Openstack Service type. ' - 'Defaults to env[OS_SERVICE_TYPE]') - parser.add_option('--os_service_type', - help=SUPPRESS_HELP) - parser.add_option('--os-endpoint-type', - metavar='', - default=environ.get('OS_ENDPOINT_TYPE'), - help='Openstack Endpoint type. ' - 'Defaults to env[OS_ENDPOINT_TYPE]') - parser.add_option('--os-cacert', - metavar='', - default=environ.get('OS_CACERT'), - help='Specify a CA bundle file to use in verifying a ' - 'TLS (https) server certificate. ' - 'Defaults to env[OS_CACERT]') - default_val = utils.config_true_value(environ.get('SWIFTCLIENT_INSECURE')) - parser.add_option('--insecure', - action="store_true", dest="insecure", - default=default_val, - help='Allow swiftclient to access insecure keystone ' - 'server. The keystone\'s certificate will not ' - 'be verified. ' - 'Defaults to env[SWIFTCLIENT_INSECURE] ' - '(set to \'true\' to enable).') - parser.add_option('--no-ssl-compression', - action='store_false', dest='ssl_compression', - default=True, - help='Disable SSL compression when using https. ' - 'This may increase performance.') + help='Key for obtaining an auth token') parser.disable_interspersed_args() (options, args) = parse_args(parser, argv[1:], enforce_requires=False) parser.enable_interspersed_args() @@ -1412,12 +1826,6 @@ Examples: exit('no such command: %s' % args[0]) exit() - signal.signal(signal.SIGINT, immediate_exit) - - if options.debug: - logger = logging.getLogger("swiftclient") - logging.basicConfig(level=logging.DEBUG) - print_queue = Queue(10000) def _print(item): @@ -1428,12 +1836,9 @@ Examples: print_thread = QueueFunctionThread(print_queue, _print) print_thread.start() - error_count = 0 error_queue = Queue(10000) def _error(item): - global error_count - error_count += 1 if isinstance(item, unicode): item = item.encode('utf8') print >> stderr, item @@ -1441,15 +1846,24 @@ Examples: error_thread = QueueFunctionThread(error_queue, _error) error_thread.start() - parser.usage = globals()['st_%s_help' % args[0]] try: - globals()['st_%s' % args[0]](parser, argv[1:], print_queue, - error_queue) - except (ClientException, HTTPException, socket.error) as err: - error_queue.put(str(err)) - finally: - shutdown_worker_threads(print_queue, [print_thread]) - shutdown_worker_threads(error_queue, [error_thread]) - - if error_count: - exit(1) + parser.usage = globals()['st_%s_help' % args[0]] + try: + globals()['st_%s' % args[0]](parser, argv[1:], print_queue, + error_queue) + except (ClientException, HTTPException, socket.error), err: + error_queue.put(str(err)) + while not print_queue.empty(): + sleep(0.01) + print_thread.abort = True + while print_thread.isAlive(): + print_thread.join(0.01) + while not error_queue.empty(): + sleep(0.01) + error_thread.abort = True + while error_thread.isAlive(): + error_thread.join(0.01) + except (SystemExit, Exception): + for thread in threading_enumerate(): + thread.abort = True + raise diff --git a/server/test/com/cloud/async/CleanupDelegate.java b/server/test/com/cloud/async/CleanupDelegate.java 
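A pattern worth noting in the swift script above: every command now tears its workers down by polling rather than via the sentinel-based shutdown_worker_threads it previously called. A condensed sketch of that idiom, assuming a worker that checks an abort attribute the way the script's QueueFunctionThread does:

    from time import sleep

    def drain_and_stop(queue, threads):
        # Wait for queued work to be picked up, then flip each worker's
        # abort flag and join in short slices until it really exits.
        while not queue.empty():
            sleep(0.01)
        for thread in threads:
            thread.abort = True
            while thread.isAlive():
                thread.join(0.01)

The 10 ms polling is simple and dependency-free but spins on shutdown; the removed shutdown_worker_threads calls had centralised the same teardown in one helper.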
deleted file mode 100644 index 385e4cbdc12..00000000000 --- a/server/test/com/cloud/async/CleanupDelegate.java +++ /dev/null @@ -1,29 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.async; - -import org.apache.log4j.Logger; - -public class CleanupDelegate implements com.cloud.utils.CleanupDelegate { - private static final Logger s_logger = Logger.getLogger(CleanupDelegate.class); - - @Override - public boolean cleanup(String param, Object managerContext) { - s_logger.info("Action called with param: " + param); - return true; - } -} diff --git a/server/test/com/cloud/async/TestAsync.java b/server/test/com/cloud/async/TestAsync.java deleted file mode 100644 index 6f67fe2227f..00000000000 --- a/server/test/com/cloud/async/TestAsync.java +++ /dev/null @@ -1,281 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.async; - - -import java.util.List; - -import junit.framework.Assert; - -import org.apache.log4j.Logger; - -import com.cloud.cluster.CheckPointVO; -import com.cloud.cluster.dao.StackMaidDao; -import com.cloud.cluster.dao.StackMaidDaoImpl; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.testcase.Log4jEnabledTestCase; - - -public class TestAsync extends Log4jEnabledTestCase { - private static final Logger s_logger = Logger.getLogger(TestAsync.class); - - /* - public static class SampleAsyncResult { - @Param(name="name", propName="name") - private final String _name; - - @Param - private final int count; - - public SampleAsyncResult(String name, int count) { - _name = name; - this.count = count; - } - - public String getName() { return _name; } - public int getCount() { return count; } - } - - public void testDao() { - AsyncJobDao dao = new AsyncJobDaoImpl(); - AsyncJobVO job = new AsyncJobVO(1, 1, "TestCmd", null); - job.setInstanceType("user_vm"); - job.setInstanceId(1000L); - - char[] buf = new char[1024]; - for(int i = 0; i < 1024; i++) - buf[i] = 'a'; - - job.setResult(new String(buf)); - dao.persist(job); - - AsyncJobVO jobVerify = dao.findById(job.getId()); - - Assert.assertTrue(jobVerify.getCmd().equals(job.getCmd())); - Assert.assertTrue(jobVerify.getUserId() == 1); - Assert.assertTrue(jobVerify.getAccountId() == 1); - - String result = jobVerify.getResult(); - for(int i = 0; i < 1024; i++) - Assert.assertTrue(result.charAt(i) == 'a'); - - jobVerify = dao.findInstancePendingAsyncJob("user_vm", 1000L); - Assert.assertTrue(jobVerify != null); - Assert.assertTrue(jobVerify.getCmd().equals(job.getCmd())); - Assert.assertTrue(jobVerify.getUserId() == 1); - Assert.assertTrue(jobVerify.getAccountId() == 1); - } - - public void testSerialization() { - List> l; - int value = 1; - l = SerializerHelper.toPairList(value, "result"); - Assert.assertTrue(l.size() == 1); - Assert.assertTrue(l.get(0).first().equals("result")); - Assert.assertTrue(l.get(0).second().equals("1")); - l.clear(); - - SampleAsyncResult result = new SampleAsyncResult("vmops", 1); - l = SerializerHelper.toPairList(result, "result"); - - Assert.assertTrue(l.size() == 2); - Assert.assertTrue(l.get(0).first().equals("name")); - Assert.assertTrue(l.get(0).second().equals("vmops")); - Assert.assertTrue(l.get(1).first().equals("count")); - Assert.assertTrue(l.get(1).second().equals("1")); - } - - public void testAsyncResult() { - AsyncJobResult result = new AsyncJobResult(1); - - result.setResultObject(100); - Assert.assertTrue(result.getResult().equals("java.lang.Integer/100")); - - Object obj = result.getResultObject(); - Assert.assertTrue(obj instanceof Integer); - Assert.assertTrue(((Integer)obj).intValue() == 100); - } - - public void testTransaction() { - Transaction txn = Transaction.open("testTransaction"); - try { - txn.start(); - - AsyncJobDao dao = new AsyncJobDaoImpl(); - AsyncJobVO job = new AsyncJobVO(1, 1, "TestCmd", null); - job.setInstanceType("user_vm"); - job.setInstanceId(1000L); - job.setResult(""); - dao.persist(job); - txn.rollback(); - } finally { - txn.close(); - } - } - - public void testMorevingian() { - int threadCount = 10; - final int testCount = 10; - - Thread[] threads = new Thread[threadCount]; - for(int i = 0; i < threadCount; i++) { - final int threadNum = i + 1; - threads[i] = new Thread(new Runnable() { - public void run() { - for(int i = 0; i < testCount; i++) { - Transaction txn = Transaction.open(Transaction.CLOUD_DB); - try { - AsyncJobDao dao = new 
AsyncJobDaoImpl(); - - s_logger.info("Thread " + threadNum + " acquiring lock"); - AsyncJobVO job = dao.acquire(1L, 30); - if(job != null) { - s_logger.info("Thread " + threadNum + " acquired lock"); - - try { - Thread.sleep(Log4jEnabledTestCase.getRandomMilliseconds(1000, 3000)); - } catch (InterruptedException e) { - } - - s_logger.info("Thread " + threadNum + " acquiring lock nestly"); - AsyncJobVO job2 = dao.acquire(1L, 30); - if(job2 != null) { - s_logger.info("Thread " + threadNum + " acquired lock nestly"); - - try { - Thread.sleep(Log4jEnabledTestCase.getRandomMilliseconds(1000, 3000)); - } catch (InterruptedException e) { - } - - s_logger.info("Thread " + threadNum + " releasing lock (nestly acquired)"); - dao.release(1L); - s_logger.info("Thread " + threadNum + " released lock (nestly acquired)"); - - } else { - s_logger.info("Thread " + threadNum + " was unable to acquire lock nestly"); - } - - s_logger.info("Thread " + threadNum + " releasing lock"); - dao.release(1L); - s_logger.info("Thread " + threadNum + " released lock"); - } else { - s_logger.info("Thread " + threadNum + " was unable to acquire lock"); - } - } finally { - txn.close(); - } - - try { - Thread.sleep(Log4jEnabledTestCase.getRandomMilliseconds(1000, 10000)); - } catch (InterruptedException e) { - } - } - } - }); - } - - for(int i = 0; i < threadCount; i++) { - threads[i].start(); - } - - for(int i = 0; i < threadCount; i++) { - try { - threads[i].join(); - } catch (InterruptedException e) { - } - } - } - */ - - public void testMaid() { - Transaction txn = Transaction.open(Transaction.CLOUD_DB); - - StackMaidDao dao = new StackMaidDaoImpl(); - dao.pushCleanupDelegate(1L, 0, "delegate1", "Hello, world"); - dao.pushCleanupDelegate(1L, 1, "delegate2", new Long(100)); - dao.pushCleanupDelegate(1L, 2, "delegate3", null); - - CheckPointVO item = dao.popCleanupDelegate(1L); - Assert.assertTrue(item.getDelegate().equals("delegate3")); - Assert.assertTrue(item.getContext() == null); - - item = dao.popCleanupDelegate(1L); - Assert.assertTrue(item.getDelegate().equals("delegate2")); - s_logger.info(item.getContext()); - - item = dao.popCleanupDelegate(1L); - Assert.assertTrue(item.getDelegate().equals("delegate1")); - s_logger.info(item.getContext()); - - txn.close(); - } - - public void testMaidClear() { - Transaction txn = Transaction.open(Transaction.CLOUD_DB); - - StackMaidDao dao = new StackMaidDaoImpl(); - dao.pushCleanupDelegate(1L, 0, "delegate1", "Hello, world"); - dao.pushCleanupDelegate(1L, 1, "delegate2", new Long(100)); - dao.pushCleanupDelegate(1L, 2, "delegate3", null); - - dao.clearStack(1L); - Assert.assertTrue(dao.popCleanupDelegate(1L) == null); - txn.close(); - } - - - public void testMaidLeftovers() { - - Thread[] threads = new Thread[3]; - for(int i = 0; i < 3; i++) { - final int threadNum = i+1; - threads[i] = new Thread(new Runnable() { - @Override - public void run() { - Transaction txn = Transaction.open(Transaction.CLOUD_DB); - - StackMaidDao dao = new StackMaidDaoImpl(); - dao.pushCleanupDelegate(1L, 0, "delegate-" + threadNum, "Hello, world"); - dao.pushCleanupDelegate(1L, 1, "delegate-" + threadNum, new Long(100)); - dao.pushCleanupDelegate(1L, 2, "delegate-" + threadNum, null); - - txn.close(); - } - }); - - threads[i].start(); - } - - for(int i = 0; i < 3; i++) { - try { - threads[i].join(); - } catch (InterruptedException e) { - } - } - - - Transaction txn = Transaction.open(Transaction.CLOUD_DB); - - StackMaidDao dao = new StackMaidDaoImpl(); - List l = dao.listLeftoversByMsid(1L); - 
for(CheckPointVO maid : l) { - s_logger.info("" + maid.getThreadId() + " " + maid.getDelegate() + " " + maid.getContext()); - } - - txn.close(); - } -} diff --git a/server/test/com/cloud/async/TestAsyncJobManager.java b/server/test/com/cloud/async/TestAsyncJobManager.java deleted file mode 100644 index e3233939bc5..00000000000 --- a/server/test/com/cloud/async/TestAsyncJobManager.java +++ /dev/null @@ -1,252 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.async; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -import javax.inject.Inject; - -import junit.framework.Assert; -import junit.framework.TestCase; - -import org.apache.log4j.Logger; - -import com.cloud.domain.DomainVO; -import com.cloud.domain.dao.DomainDao; -import com.cloud.domain.dao.DomainDaoImpl; -import com.cloud.exception.PermissionDeniedException; -import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDao; -import com.cloud.host.dao.HostDaoImpl; -import com.cloud.utils.db.Transaction; - -public class TestAsyncJobManager extends TestCase { - public static final Logger s_logger = Logger.getLogger(TestAsyncJobManager.class.getName()); - - volatile long s_count = 0; - - @Inject AsyncJobManager asyncMgr; - - public void asyncCall() { -// long jobId = mgr.rebootVirtualMachineAsync(1, 1); - long jobId = 0L; - s_logger.info("Async-call job id: " + jobId); - - while(true) { - AsyncJobResult result; - try { - result = asyncMgr.queryAsyncJobResult(jobId); - - if(result.getJobStatus() != AsyncJobResult.STATUS_IN_PROGRESS) { - s_logger.info("Async-call completed, result: " + result.toString()); - break; - } - s_logger.info("Async-call is in progress, progress: " + result.toString()); - - } catch (PermissionDeniedException e1) { - } - - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - } - } - } - - public void sequence() { - final HostDao hostDao = new HostDaoImpl(); - long seq = hostDao.getNextSequence(1); - s_logger.info("******* seq : " + seq + " ********"); - - HashMap hashMap = new HashMap(); - final Map map = Collections.synchronizedMap(hashMap); - - s_count = 0; - final long maxCount = 1000000; // test one million times - - Thread t1 = new Thread(new Runnable() { - @Override - public void run() { - while(s_count < maxCount) { - s_count++; - long seq = hostDao.getNextSequence(1); - Assert.assertTrue(map.put(seq, seq) == null); - } - } - }); - - Thread t2 = new Thread(new Runnable() { - @Override - public void run() { - while(s_count < maxCount) { - s_count++; - long seq = hostDao.getNextSequence(1); - Assert.assertTrue(map.put(seq, seq) == null); - } - } - }); - - t1.start(); - t2.start(); - - try { - t1.join(); - t2.join(); - } catch (InterruptedException e) { - } - } - - /* - public void 
ipAssignment() { - final IPAddressDao ipAddressDao = new IPAddressDaoImpl(); - - final ConcurrentHashMap map = new ConcurrentHashMap(); - //final Map map = Collections.synchronizedMap(hashMap); - - s_count = 0; - final long maxCount = 1000000; // test one million times - - Thread t1 = new Thread(new Runnable() { - public void run() { - while(s_count < maxCount) { - s_count++; - - Transaction txn = Transaction.open("Alex1"); - try { - IPAddressVO addr = ipAddressDao.assignIpAddress(1, 0, 1, false); - IPAddressVO returnStr = map.put(addr.getAddress(), addr); - if(returnStr != null) { - System.out.println("addr : " + addr.getAddress()); - } - Assert.assertTrue(returnStr == null); - } finally { - txn.close(); - } - } - } - }); - - Thread t2 = new Thread(new Runnable() { - public void run() { - while(s_count < maxCount) { - s_count++; - - Transaction txn = Transaction.open("Alex2"); - try { - IPAddressVO addr = ipAddressDao.assignIpAddress(1, 0, 1, false); - Assert.assertTrue(map.put(addr.getAddress(), addr) == null); - } finally { - txn.close(); - } - } - } - }); - - t1.start(); - t2.start(); - - try { - t1.join(); - t2.join(); - } catch (InterruptedException e) { - } - } - */ - - private long getRandomLockId() { - return 1L; - - /* - * will use in the future test cases - int i = new Random().nextInt(); - if(i % 2 == 0) - return 1L; - return 2L; - */ - } - - public void tstLocking() { - - int testThreads = 20; - Thread[] threads = new Thread[testThreads]; - - for(int i = 0; i < testThreads; i++) { - final int current = i; - threads[i] = new Thread(new Runnable() { - @Override - public void run() { - - final HostDao hostDao = new HostDaoImpl(); - while(true) { - Transaction txn = Transaction.currentTxn(); - try { - HostVO host = hostDao.acquireInLockTable(getRandomLockId(), 10); - if(host != null) { - s_logger.info("Thread " + (current + 1) + " acquired lock"); - - try { Thread.sleep(1000); } catch (InterruptedException e) {} - - s_logger.info("Thread " + (current + 1) + " released lock"); - hostDao.releaseFromLockTable(host.getId()); - - try { Thread.sleep(1000); } catch (InterruptedException e) {} - } else { - s_logger.info("Thread " + (current + 1) + " is not able to acquire lock"); - } - } finally { - txn.close(); - } - } - } - }); - threads[i].start(); - } - - try { - for(int i = 0; i < testThreads; i++) - threads[i].join(); - } catch(InterruptedException e) { - } - } - - public void testDomain() { - DomainDao domainDao = new DomainDaoImpl(); - - DomainVO domain1 = new DomainVO("d1", 2L, 1L, null, 1); - domainDao.create(domain1); - - DomainVO domain2 = new DomainVO("d2", 2L, 1L, null, 1); - domainDao.create(domain2); - - DomainVO domain3 = new DomainVO("d3", 2L, 1L, null, 1); - domainDao.create(domain3); - - DomainVO domain11 = new DomainVO("d11", 2L, domain1.getId(), null, 1); - domainDao.create(domain11); - - domainDao.remove(domain11.getId()); - - DomainVO domain12 = new DomainVO("d12", 2L, domain1.getId(), null, 1); - domainDao.create(domain12); - - domainDao.remove(domain3.getId()); - DomainVO domain4 = new DomainVO("d4", 2L, 1L, null, 1); - domainDao.create(domain4); - } -} diff --git a/server/test/com/cloud/async/TestSyncQueueManager.java b/server/test/com/cloud/async/TestSyncQueueManager.java deleted file mode 100644 index 59afb11e1c6..00000000000 --- a/server/test/com/cloud/async/TestSyncQueueManager.java +++ /dev/null @@ -1,208 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. 
See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.async; - -import java.util.List; - -import javax.inject.Inject; - -import junit.framework.TestCase; - -import org.apache.log4j.Logger; -import org.junit.Assert; - - -public class TestSyncQueueManager extends TestCase { - public static final Logger s_logger = Logger.getLogger(TestSyncQueueManager.class.getName()); - - private volatile int count = 0; - private volatile long expectingCurrent = 1; - @Inject SyncQueueManager mgr; - - public void leftOverItems() { - - List l = mgr.getActiveQueueItems(1L, false); - if(l != null && l.size() > 0) { - for(SyncQueueItemVO item : l) { - s_logger.info("Left over item: " + item.toString()); - mgr.purgeItem(item.getId()); - } - } - } - - public void dequeueFromOneQueue() { - final int totalRuns = 5000; - final SyncQueueVO queue = mgr.queue("vm_instance", 1L, "Async-job", 1, 1); - for(int i = 1; i < totalRuns; i++) - mgr.queue("vm_instance", 1L, "Async-job", i+1, 1); - - count = 0; - expectingCurrent = 1; - Thread thread1 = new Thread(new Runnable() { - @Override - public void run() { - while(count < totalRuns) { - SyncQueueItemVO item = mgr.dequeueFromOne(queue.getId(), 1L); - if(item != null) { - s_logger.info("Thread 1 process item: " + item.toString()); - - Assert.assertEquals(expectingCurrent, item.getContentId().longValue()); - expectingCurrent++; - count++; - - mgr.purgeItem(item.getId()); - } - try { - Thread.sleep(100); - } catch (InterruptedException e) { - } - } - } - } - ); - - Thread thread2 = new Thread(new Runnable() { - @Override - public void run() { - while(count < totalRuns) { - SyncQueueItemVO item = mgr.dequeueFromOne(queue.getId(), 1L); - if(item != null) { - s_logger.info("Thread 2 process item: " + item.toString()); - - Assert.assertEquals(expectingCurrent, item.getContentId().longValue()); - expectingCurrent++; - count++; - mgr.purgeItem(item.getId()); - } - - try { - Thread.sleep(100); - } catch (InterruptedException e) { - } - } - } - } - ); - - thread1.start(); - thread2.start(); - try { - thread1.join(); - } catch (InterruptedException e) { - } - try { - thread2.join(); - } catch (InterruptedException e) { - } - - Assert.assertEquals(totalRuns, count); - } - - public void dequeueFromAnyQueue() { - // simulate 30 queues - final int queues = 30; - final int totalRuns = 100; - final int itemsPerRun = 20; - for(int q = 1; q <= queues; q++) - for(int i = 0; i < totalRuns; i++) - mgr.queue("vm_instance", q, "Async-job", i+1, 1); - - count = 0; - Thread thread1 = new Thread(new Runnable() { - @Override - public void run() { - while(count < totalRuns*queues) { - List l = mgr.dequeueFromAny(1L, itemsPerRun); - if(l != null && l.size() > 0) { - s_logger.info("Thread 1 get " + l.size() + " dequeued items"); - - for(SyncQueueItemVO item : l) { - s_logger.info("Thread 1 process item: " + item.toString()); - count++; - - 
mgr.purgeItem(item.getId()); - } - } - try { - Thread.sleep(100); - } catch (InterruptedException e) { - } - } - } - } - ); - - Thread thread2 = new Thread(new Runnable() { - @Override - public void run() { - while(count < totalRuns*queues) { - List l = mgr.dequeueFromAny(1L, itemsPerRun); - if(l != null && l.size() > 0) { - s_logger.info("Thread 2 get " + l.size() + " dequeued items"); - - for(SyncQueueItemVO item : l) { - s_logger.info("Thread 2 process item: " + item.toString()); - count++; - mgr.purgeItem(item.getId()); - } - } - - try { - Thread.sleep(100); - } catch (InterruptedException e) { - } - } - } - } - ); - - thread1.start(); - thread2.start(); - try { - thread1.join(); - } catch (InterruptedException e) { - } - try { - thread2.join(); - } catch (InterruptedException e) { - } - Assert.assertEquals(queues*totalRuns, count); - } - - public void testPopulateQueueData() { - final int queues = 30000; - final int totalRuns = 100; - - for(int q = 1; q <= queues; q++) - for(int i = 0; i < totalRuns; i++) - mgr.queue("vm_instance", q, "Async-job", i+1, 1); - } - - public void testSyncQueue() { - - mgr.queue("vm_instance", 1, "Async-job", 1, 1); - mgr.queue("vm_instance", 1, "Async-job", 2, 1); - mgr.queue("vm_instance", 1, "Async-job", 3, 1); - mgr.dequeueFromAny(100L, 1); - - List l = mgr.getBlockedQueueItems(100000, false); - for(SyncQueueItemVO item : l) { - System.out.println("Blocked item. " + item.getContentType() + "-" + item.getContentId()); - mgr.purgeItem(item.getId()); - } - } -} diff --git a/server/test/com/cloud/upgrade/AdvanceZone217To224UpgradeTest.java b/server/test/com/cloud/upgrade/AdvanceZone217To224UpgradeTest.java deleted file mode 100644 index 532a62f3cba..00000000000 --- a/server/test/com/cloud/upgrade/AdvanceZone217To224UpgradeTest.java +++ /dev/null @@ -1,117 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.upgrade; - - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; - -import javax.inject.Inject; - -import junit.framework.TestCase; - -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Before; - -import com.cloud.upgrade.dao.VersionDaoImpl; - -import com.cloud.utils.db.DbTestUtils; -import com.cloud.utils.db.Transaction; - -public class AdvanceZone217To224UpgradeTest extends TestCase { - private static final Logger s_logger = Logger.getLogger(AdvanceZone217To224UpgradeTest.class); - @Inject VersionDaoImpl dao; - @Inject DatabaseUpgradeChecker checker; - - @Override - @Before - public void setUp() throws Exception { - DbTestUtils.executeScript("cleanup.sql", false, true); - } - - @Override - @After - public void tearDown() throws Exception { - } - - public void test217to22Upgrade() throws SQLException { - s_logger.debug("Finding sample data from 2.1.7"); - DbTestUtils.executeScript("fake.sql", false, true); - - Connection conn; - PreparedStatement pstmt; - - String version = dao.getCurrentVersion(); - assert version.equals("2.1.7") : "Version returned is not 2.1.7 but " + version; - - checker.upgrade("2.1.7", "2.2.4"); - - conn = Transaction.getStandaloneConnection(); - try { - pstmt = conn.prepareStatement("SELECT version FROM version ORDER BY id DESC LIMIT 1"); - ResultSet rs = pstmt.executeQuery(); - assert rs.next() : "No version selected"; - assert rs.getString(1).equals("2.2.4") : "VERSION stored is not 2.2.4: " + rs.getString(1); - rs.close(); - pstmt.close(); - - pstmt = conn.prepareStatement("SELECT COUNT(*) FROM network_offerings"); - rs = pstmt.executeQuery(); - assert rs.next() : "Unable to get the count of network offerings."; - assert (rs.getInt(1) == 7) : "Didn't find 7 network offerings but found " + rs.getInt(1); - rs.close(); - pstmt.close(); - - pstmt = conn.prepareStatement("SELECT DISTINCT networktype FROM data_center"); - rs = pstmt.executeQuery(); - assert rs.next() && rs.getString(1).equals("Advanced") : "Network type is not advanced? " + rs.getString(1); - assert !rs.next() : "Why do we have another one? " + rs.getString(1); - rs.close(); - pstmt.close(); - - pstmt = conn.prepareStatement("SELECT COUNT(*) FROM disk_offering WHERE removed IS NULL AND system_use=1 AND type='Service' AND recreatable=1"); - rs = pstmt.executeQuery(); - assert (rs.next() && rs.getInt(1) == 3) : "DiskOffering for system VMs are incorrect. 
Expecting 3 but got " + rs.getInt(1); - rs.close(); - pstmt.close(); - - pstmt = conn.prepareStatement("SELECT COUNT(*) FROM op_dc_link_local_ip_address_alloc WHERE nic_id IS NOT NULL"); - rs = pstmt.executeQuery(); - rs.next(); - int controlNics = rs.getInt(1); - rs.close(); - pstmt.close(); - - pstmt = conn.prepareStatement("SELECT COUNT(*) FROM nics WHERE reserver_name='ControlNetworkGuru' and ip4_address is NOT NULL"); - rs = pstmt.executeQuery(); - assert (rs.next() && controlNics == rs.getInt(1)) : "Allocated nics should have been " + controlNics + " but it is " + rs.getInt(1); - rs.close(); - pstmt.close(); - - - } finally { - try { - conn.close(); - } catch (SQLException e) { - } - } - } - -} diff --git a/server/test/com/cloud/upgrade/AdvanceZone223To224UpgradeTest.java b/server/test/com/cloud/upgrade/AdvanceZone223To224UpgradeTest.java deleted file mode 100644 index 519ae704c91..00000000000 --- a/server/test/com/cloud/upgrade/AdvanceZone223To224UpgradeTest.java +++ /dev/null @@ -1,57 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.upgrade; - -import java.sql.SQLException; - -import javax.inject.Inject; - -import junit.framework.TestCase; - -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Before; - -import com.cloud.upgrade.dao.VersionDaoImpl; - - -public class AdvanceZone223To224UpgradeTest extends TestCase { - private static final Logger s_logger = Logger.getLogger(AdvanceZone223To224UpgradeTest.class); - @Inject VersionDaoImpl dao; - @Inject DatabaseUpgradeChecker checker; - - @Override - @Before - public void setUp() throws Exception { -// DbTestUtils.executeScript("PreviousDatabaseSchema/clean-db.sql", false, true); - } - - @Override - @After - public void tearDown() throws Exception { - } - - public void test223to224Upgrade() throws SQLException { - - - String version = dao.getCurrentVersion(); - assert version.equals("2.2.3") : "Version returned is not 2.2.3 but " + version; - - checker.upgrade("2.2.3", "2.2.4"); - } - -} diff --git a/server/test/com/cloud/upgrade/BasicZone218To224UpgradeTest.java b/server/test/com/cloud/upgrade/BasicZone218To224UpgradeTest.java deleted file mode 100644 index 8bd9f0625ef..00000000000 --- a/server/test/com/cloud/upgrade/BasicZone218To224UpgradeTest.java +++ /dev/null @@ -1,211 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.upgrade; - - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; - -import javax.inject.Inject; - -import junit.framework.TestCase; - -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Before; - -import com.cloud.upgrade.dao.VersionDaoImpl; - -import com.cloud.utils.db.DbTestUtils; -import com.cloud.utils.db.Transaction; - -public class BasicZone218To224UpgradeTest extends TestCase { - private static final Logger s_logger = Logger.getLogger(BasicZone218To224UpgradeTest.class); - - @Inject VersionDaoImpl dao; - @Inject DatabaseUpgradeChecker checker; - - @Override - @Before - public void setUp() throws Exception { - DbTestUtils.executeScript("cleanup.sql", false, true); - } - - @Override - @After - public void tearDown() throws Exception { - } - - public void test217to22Upgrade() throws SQLException { - s_logger.debug("Finding sample data from 2.1.8"); - DbTestUtils.executeScript("fake.sql", false, true); - - Connection conn = Transaction.getStandaloneConnection(); - PreparedStatement pstmt; - - String version = dao.getCurrentVersion(); - - if (!version.equals("2.1.8")) { - s_logger.error("Version returned is not 2.1.8 but " + version); - } else { - s_logger.debug("Basic zone test version is " + version); - } - - checker.upgrade("2.1.8", "2.2.4"); - - conn = Transaction.getStandaloneConnection(); - try { - - s_logger.debug("Starting tesing upgrade from 2.1.8 to 2.2.4 for Basic zone..."); - - //Version check - pstmt = conn.prepareStatement(" SELECT version FROM version ORDER BY id DESC LIMIT 1"); - ResultSet rs = pstmt.executeQuery(); - - if (!rs.next()) { - s_logger.error("ERROR: No version selected"); - } else if (!rs.getString(1).equals("2.2.4")) { - s_logger.error("ERROR: VERSION stored is not 2.2.4: " + rs.getString(1)); - } - rs.close(); - pstmt.close(); - - //Check that default network offerings are present - pstmt = conn.prepareStatement("SELECT COUNT(*) FROM network_offerings"); - rs = pstmt.executeQuery(); - - if (!rs.next()) { - s_logger.error("ERROR: Unable to get the count of network offerings."); - } else if (rs.getInt(1) != 7) { - s_logger.error("ERROR: Didn't find 7 network offerings but found " + rs.getInt(1)); - } else { - s_logger.debug("Network offerings test passed"); - } - - rs.close(); - pstmt.close(); - - - //Zone network type check - pstmt = conn.prepareStatement("SELECT DISTINCT networktype FROM data_center"); - rs = pstmt.executeQuery(); - - if (!rs.next()) { - s_logger.error("No zone exists after upgrade"); - } else if (!rs.getString(1).equals("Basic")) { - s_logger.error("ERROR: Zone type is not Basic"); - } else if (rs.next()) { - s_logger.error("ERROR: Why do we have more than 1 zone with different types??"); - System.exit(2); - } else { - s_logger.debug("Test passed. 
Zone was updated properly with type Basic"); - } - rs.close(); - pstmt.close(); - - //Check that vnet/cidr were set to NULL for basic zone - pstmt = conn.prepareStatement("SELECT vnet, guest_network_cidr FROM data_center"); - rs = pstmt.executeQuery(); - - if (!rs.next()) { - s_logger.error("ERROR: vnet field is missing for the zone"); - } else if (rs.getString(1) != null || rs.getString(2) != null) { - s_logger.error("ERROR: vnet/guestCidr should be NULL for basic zone; instead it's " + rs.getString(1)); - } else { - s_logger.debug("Test passed. Vnet and cidr are set to NULL for the basic zone"); - } - - rs.close(); - pstmt.close(); - - //Verify that default Direct guest network got created, and it's Shared and Default - pstmt = conn.prepareStatement("SELECT traffic_type, guest_type, shared, is_default, id FROM networks WHERE name LIKE '%BasicZoneDirectNetwork%'"); - rs = pstmt.executeQuery(); - - if (!rs.next()) { - s_logger.error("Direct network is missing for the Basic zone"); - } else if (!rs.getString(1).equalsIgnoreCase("Guest") || !rs.getString(2).equalsIgnoreCase("Direct") || !rs.getBoolean(3) || !rs.getBoolean(4)) { - s_logger.error("Direct network for basic zone has incorrect setting"); - } else { - s_logger.debug("Test passed. Default Direct Basic zone network parameters were set correctly"); - } - - long defaultDirectNetworkId = rs.getInt(5); - rs.close(); - pstmt.close(); - - //Verify that all vlans in the zone belong to default Direct network - pstmt = conn.prepareStatement("SELECT network_id FROM vlan"); - rs = pstmt.executeQuery(); - - while (rs.next()) { - if (rs.getInt(1) != defaultDirectNetworkId) { - s_logger.error("ERROR: network_id is set incorrectly for public untagged vlans in Basic zone"); - System.exit(2); - } - } - - s_logger.debug("Test passed for vlan table in Basic zone"); - - rs.close(); - pstmt.close(); - - //Verify user_ip_address table - pstmt = conn.prepareStatement("SELECT source_network_id FROM user_ip_address"); - rs = pstmt.executeQuery(); - - while (rs.next()) { - if (rs.getInt(1) != defaultDirectNetworkId) { - s_logger.error("ERROR: network_id is set incorrectly for public Ip addresses (user_ip_address table) in Basic zone"); - System.exit(2); - } - } - - s_logger.debug("Test passed for user_ip_address table in Basic zone"); - - rs.close(); - pstmt.close(); - - //Verify domain_router table - pstmt = conn.prepareStatement("SELECT network_id FROM domain_router"); - rs = pstmt.executeQuery(); - - while (rs.next()) { - if (rs.getInt(1) != defaultDirectNetworkId) { - s_logger.error("ERROR: network_id is set incorrectly for domain routers (domain_router table) in Basic zone"); - System.exit(2); - } - } - - s_logger.debug("Test passed for domain_router table in Basic zone"); - - rs.close(); - pstmt.close(); - - s_logger.debug("Basic zone test is finished"); - - } finally { - try { - conn.close(); - } catch (SQLException e) { - } - } - } - -} diff --git a/server/test/com/cloud/upgrade/DbUpgrade22Test.java b/server/test/com/cloud/upgrade/DbUpgrade22Test.java deleted file mode 100644 index e0db82db0bc..00000000000 --- a/server/test/com/cloud/upgrade/DbUpgrade22Test.java +++ /dev/null @@ -1,29 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. 
The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.upgrade; - -import org.junit.Ignore; -import org.junit.runner.RunWith; -import org.junit.runners.Suite; - -@RunWith(Suite.class) -@Suite.SuiteClasses({ AdvanceZone217To224UpgradeTest.class, AdvanceZone223To224UpgradeTest.class, PortForwarding218To224UpgradeTest.class, InstanceGroup218To224UpgradeTest.class, - BasicZone218To224UpgradeTest.class, UsageEvents218To224UpgradeTest.class }) -@Ignore("maven-sure-fire discovered") -public class DbUpgrade22Test { - -} diff --git a/server/test/com/cloud/upgrade/HostCapacity218to22Test.java b/server/test/com/cloud/upgrade/HostCapacity218to22Test.java deleted file mode 100644 index 76ad12eeb19..00000000000 --- a/server/test/com/cloud/upgrade/HostCapacity218to22Test.java +++ /dev/null @@ -1,66 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.upgrade; - -import java.sql.SQLException; - -import javax.inject.Inject; - -import junit.framework.TestCase; - -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Before; - -import com.cloud.upgrade.dao.VersionDaoImpl; - -import com.cloud.utils.db.DbTestUtils; - -public class HostCapacity218to22Test extends TestCase { - private static final Logger s_logger = Logger.getLogger(HostCapacity218to22Test.class); - - @Inject VersionDaoImpl dao; - @Inject DatabaseUpgradeChecker checker; - - @Override - @Before - public void setUp() throws Exception { - DbTestUtils.executeScript("cleanup.sql", false, true); - } - - @Override - @After - public void tearDown() throws Exception { - } - - public void test218to22Upgrade() throws SQLException { - s_logger.debug("Finding sample data from 2.1.8"); - DbTestUtils.executeScript("fake.sql", false, true); - - String version = dao.getCurrentVersion(); - - if (!version.equals("2.1.8")) { - s_logger.error("Version returned is not 2.1.8 but " + version); - } else { - s_logger.debug("Instance group test version is " + version); - } - - checker.upgrade("2.1.8", "2.2.4"); - - // manually check into database for now to verify - } -} diff --git a/server/test/com/cloud/upgrade/InstanceGroup218To224UpgradeTest.java b/server/test/com/cloud/upgrade/InstanceGroup218To224UpgradeTest.java deleted file mode 100644 index 41f334dab6a..00000000000 --- a/server/test/com/cloud/upgrade/InstanceGroup218To224UpgradeTest.java +++ /dev/null @@ -1,216 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.upgrade; - - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; - -import javax.inject.Inject; - -import junit.framework.TestCase; - -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Before; - -import com.cloud.upgrade.dao.VersionDaoImpl; - -import com.cloud.utils.db.DbTestUtils; -import com.cloud.utils.db.Transaction; - -public class InstanceGroup218To224UpgradeTest extends TestCase { - private static final Logger s_logger = Logger.getLogger(InstanceGroup218To224UpgradeTest.class); - - @Inject VersionDaoImpl dao; - @Inject DatabaseUpgradeChecker checker; - - @Override - @Before - public void setUp() throws Exception { - DbTestUtils.executeScript("cleanup.sql", false, true); - } - - @Override - @After - public void tearDown() throws Exception { - } - - public void test217to22Upgrade() throws SQLException { - s_logger.debug("Finding sample data from 2.1.8"); - DbTestUtils.executeScript("fake.sql", false, true); - - PreparedStatement pstmt; - ResultSet rs; - - String version = dao.getCurrentVersion(); - - if (!version.equals("2.1.8")) { - s_logger.error("Version returned is not 2.1.8 but " + version); - } else { - s_logger.debug("Instance group test version is " + version); - } - - Long groupNumberVmInstance = 0L; - ArrayList<Object[]> groups = new ArrayList<Object[]>(); - Connection conn = Transaction.getStandaloneConnection(); - ArrayList<Object[]> groupVmMaps = new ArrayList<Object[]>(); - - try { - //Check that correct number of instance groups were created - pstmt = conn.prepareStatement("SELECT DISTINCT v.group, u.account_id from vm_instance v, user_vm u where v.group is not null and v.id=u.id"); - s_logger.debug("Query is" + pstmt); - rs = pstmt.executeQuery(); - - while (rs.next()) { - groupNumberVmInstance++; - } - - rs.close(); - pstmt.close(); - //For each instance group from vm_instance table check that 1) entry was created in the instance_group table 2) vm to group map exists in instance_group_vm_map table - //Check 1) - pstmt = conn.prepareStatement("SELECT DISTINCT v.group, u.account_id from vm_instance v, user_vm u where v.group is not null and v.id=u.id"); - s_logger.debug("Query is" + pstmt); - rs = pstmt.executeQuery(); - while (rs.next()) { - Object[] group = new Object[10]; - group[0] = rs.getString(1); // group name - group[1] = rs.getLong(2); // accountId - groups.add(group); - } - rs.close(); - pstmt.close(); - - - } finally { - conn.close(); - } - - checker.upgrade("2.1.8", "2.2.4"); - - conn = Transaction.getStandaloneConnection(); - try { - - s_logger.debug("Starting tesing upgrade from 2.1.8 to 2.2.4 for Instance groups..."); - - //Version check - pstmt = conn.prepareStatement("SELECT version FROM version"); - rs = pstmt.executeQuery(); - - if (!rs.next()) { - s_logger.error("ERROR: No version selected"); - } else if (!rs.getString(1).equals("2.2.4")) { - s_logger.error("ERROR: VERSION stored is not 2.2.4: " + rs.getString(1)); - } - rs.close(); - pstmt.close(); - - pstmt = conn.prepareStatement("SELECT COUNT(*) FROM instance_group"); - rs = pstmt.executeQuery(); - - Long groupNumber = 0L; - if (rs.next()) { - groupNumber = rs.getLong(1); - } - - rs.close(); - pstmt.close(); - - if (groupNumber != groupNumberVmInstance) { - s_logger.error("ERROR: instance groups were updated incorrectly. Have " + groupNumberVmInstance + " groups in vm_instance table, and " + groupNumber + " where created in instance_group table.
Stopping the test"); - System.exit(2); - } - - for (Object[] group : groups) { - String groupName = (String)group[0]; - Long accountId = (Long)group[1]; - if (!checkInstanceGroup(conn, groupName, accountId)) { - s_logger.error("ERROR: Unable to find group with name " + groupName + " for account id=" + accountId + ", stopping the test"); - System.exit(2); - } - } - - rs.close(); - pstmt.close(); - - //Check 2) -// pstmt = conn.prepareStatement("SELECT v.id from vm_instance v, instance_group g WHERE g.account_id=v.account_id and v.group=?"); -// s_logger.debug("Query is" + pstmt); -// rs = pstmt.executeQuery(); -// -// while (rs.next()) { -// Object[] groupMaps = new Object[10]; -// groupMaps[0] = rs.getLong(1); // vmId -// groupMaps[1] = rs.getLong(2); // groupId -// groupVmMaps.add(groupMaps); -// } -// rs.close(); -// pstmt.close(); -// -// for (Object[] groupMap : groupVmMaps) { -// Long groupId = (Long)groupMap[0]; -// Long instanceId = (Long)groupMap[1]; -// if (!checkInstanceGroupVmMap(conn, groupId, instanceId)) { -// s_logger.error("ERROR: unable to find instanceGroupVMMap for vm id=" + instanceId + " and group id=" + groupId + ", stopping the test"); -// System.exit(2); -// } -// } -// -// rs.close(); -// pstmt.close(); - - s_logger.debug("Instance group upgrade test is passed"); - - } finally { - conn.close(); - } - } - - protected boolean checkInstanceGroup(Connection conn, String groupName, long accountId) throws SQLException{ - - PreparedStatement pstmt = conn.prepareStatement("SELECT * FROM instance_group WHERE name = ? and account_id = ?"); - pstmt.setString(1, groupName); - pstmt.setLong(2, accountId); - ResultSet rs = pstmt.executeQuery(); - - if (!rs.next()) { - return false; - } else { - return true; - } - } - - protected boolean checkInstanceGroupVmMap(Connection conn, long groupId, long vmId) throws SQLException{ - - PreparedStatement pstmt = conn.prepareStatement("SELECT * FROM instance_group_vm_map WHERE group_id = ? and instance_id = ?"); - pstmt.setLong(1, groupId); - pstmt.setLong(2, vmId); - ResultSet rs = pstmt.executeQuery(); - - if (!rs.next()) { - return false; - } else { - return true; - } - } - -} - diff --git a/server/test/com/cloud/upgrade/PortForwarding218To224UpgradeTest.java b/server/test/com/cloud/upgrade/PortForwarding218To224UpgradeTest.java deleted file mode 100644 index a9cb51fe00c..00000000000 --- a/server/test/com/cloud/upgrade/PortForwarding218To224UpgradeTest.java +++ /dev/null @@ -1,132 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.upgrade; - - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; - -import javax.inject.Inject; - -import junit.framework.TestCase; - -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Before; - -import com.cloud.upgrade.dao.VersionDaoImpl; - -import com.cloud.utils.db.DbTestUtils; -import com.cloud.utils.db.Transaction; - -public class PortForwarding218To224UpgradeTest extends TestCase { - private static final Logger s_logger = Logger.getLogger(PortForwarding218To224UpgradeTest.class); - - @Inject VersionDaoImpl dao; - @Inject DatabaseUpgradeChecker checker; - - @Override - @Before - public void setUp() throws Exception { - DbTestUtils.executeScript("cleanup.sql", false, true); - } - - @Override - @After - public void tearDown() throws Exception { - } - - public void test217to22Upgrade() throws SQLException { - s_logger.debug("Finding sample data from 2.1.8"); - DbTestUtils.executeScript("fake.sql", false, true); - - Connection conn; - PreparedStatement pstmt; - ResultSet rs; - - String version = dao.getCurrentVersion(); - - if (!version.equals("2.1.8")) { - s_logger.error("Version returned is not 2.1.8 but " + version); - } else { - s_logger.debug("Port forwarding test version is " + version); - } - - - Long count21 = 0L; - conn = Transaction.getStandaloneConnection(); - try { - //Check that correct number of port forwarding rules were created - pstmt = conn.prepareStatement("SELECT COUNT(*) FROM ip_forwarding WHERE forwarding=1"); - rs = pstmt.executeQuery(); - - while (rs.next()) { - count21 = rs.getLong(1); - } - - rs.close(); - pstmt.close(); - } finally { - conn.close(); - } - - checker.upgrade("2.1.8", "2.2.4"); - - conn = Transaction.getStandaloneConnection(); - try { - s_logger.debug("Starting tesing upgrade from 2.1.8 to 2.2.4 for Port forwarding rules..."); - - //Version check - pstmt = conn.prepareStatement("SELECT version FROM version"); - rs = pstmt.executeQuery(); - - if (!rs.next()) { - s_logger.error("ERROR: No version selected"); - } else if (!rs.getString(1).equals("2.2.4")) { - s_logger.error("ERROR: VERSION stored is not 2.2.4: " + rs.getString(1)); - } - rs.close(); - pstmt.close(); - - - Long count22 = 0L; - pstmt = conn.prepareStatement("SELECT COUNT(*) FROM port_forwarding_rules"); - rs = pstmt.executeQuery(); - - if (rs.next()) { - count22 = rs.getLong(1); - } - - rs.close(); - pstmt.close(); - - if (count21.longValue() != count22.longValue()) { - s_logger.error("ERROR: port forwarding rules were updated incorrectly. Have " + count21 + " rulrs in ip_forwarding table branch 21, and " + count22 + " in port_forwarding_rules table branch 22. Stopping the test"); - System.exit(2); - } - - s_logger.debug("Port forwarding rules test is passed"); - - } finally { - conn.close(); - } - } - -} - diff --git a/server/test/com/cloud/upgrade/Sanity220To224UpgradeTest.java b/server/test/com/cloud/upgrade/Sanity220To224UpgradeTest.java deleted file mode 100644 index d33192fbf9c..00000000000 --- a/server/test/com/cloud/upgrade/Sanity220To224UpgradeTest.java +++ /dev/null @@ -1,95 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.upgrade; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; - -import javax.inject.Inject; - -import junit.framework.TestCase; - -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Before; - -import com.cloud.upgrade.dao.VersionDaoImpl; - -import com.cloud.utils.db.DbTestUtils; -import com.cloud.utils.db.Transaction; - -public class Sanity220To224UpgradeTest extends TestCase { - private static final Logger s_logger = Logger.getLogger(Sanity220To224UpgradeTest.class); - - @Inject VersionDaoImpl dao; - @Inject DatabaseUpgradeChecker checker; - - @Override - @Before - public void setUp() throws Exception { - DbTestUtils.executeScript("cleanup.sql", false, true); - } - - @Override - @After - public void tearDown() throws Exception { - } - - public void test217to22Upgrade() throws SQLException { - s_logger.debug("Finding sample data from 2.2.1"); - DbTestUtils.executeScript("fake.sql", false, true); - - Connection conn; - PreparedStatement pstmt; - ResultSet rs; - - String version = dao.getCurrentVersion(); - - if (!version.equals("2.2.1")) { - s_logger.error("Version returned is not 2.2.1 but " + version); - } else { - s_logger.debug("Sanity 2.2.1 to 2.2.4 test version is " + version); - } - - checker.upgrade("2.2.1", "2.2.4"); - - conn = Transaction.getStandaloneConnection(); - try { - s_logger.debug("Starting tesing upgrade from 2.2.1 to 2.2.4..."); - - // Version check - pstmt = conn.prepareStatement("SELECT version FROM version"); - rs = pstmt.executeQuery(); - - if (!rs.next()) { - s_logger.error("ERROR: No version selected"); - } else if (!rs.getString(1).equals("2.2.4")) { - s_logger.error("ERROR: VERSION stored is not 2.2.4: " + rs.getString(1)); - } - rs.close(); - pstmt.close(); - - s_logger.debug("Sanity 2.2.1 to 2.2.4 DB upgrade test passed"); - - } finally { - conn.close(); - } - } - -} diff --git a/server/test/com/cloud/upgrade/Sanity222To224UpgradeTest.java b/server/test/com/cloud/upgrade/Sanity222To224UpgradeTest.java deleted file mode 100644 index 108eca919a6..00000000000 --- a/server/test/com/cloud/upgrade/Sanity222To224UpgradeTest.java +++ /dev/null @@ -1,96 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.upgrade; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; - -import javax.inject.Inject; - -import junit.framework.TestCase; - -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Before; - -import com.cloud.upgrade.dao.VersionDaoImpl; - -import com.cloud.utils.db.DbTestUtils; -import com.cloud.utils.db.Transaction; - -public class Sanity222To224UpgradeTest extends TestCase { - private static final Logger s_logger = Logger.getLogger(Sanity222To224UpgradeTest.class); - - @Inject VersionDaoImpl dao; - @Inject DatabaseUpgradeChecker checker; - - @Override - @Before - public void setUp() throws Exception { - DbTestUtils.executeScript("cleanup.sql", false, true); - } - - @Override - @After - public void tearDown() throws Exception { - } - - public void test222to224Upgrade() throws SQLException { - s_logger.debug("Finding sample data from 2.2.2"); - DbTestUtils.executeScript("fake.sql", false, true); - - Connection conn; - PreparedStatement pstmt; - ResultSet rs; - - - String version = dao.getCurrentVersion(); - - if (!version.equals("2.2.2")) { - s_logger.error("Version returned is not 2.2.2 but " + version); - } else { - s_logger.debug("Sanity 2.2.2 to 2.2.4 test version is " + version); - } - - checker.upgrade("2.2.2", "2.2.4"); - - conn = Transaction.getStandaloneConnection(); - try { - s_logger.debug("Starting tesing upgrade from 2.2.2 to 2.2.4..."); - - // Version check - pstmt = conn.prepareStatement("SELECT version FROM version"); - rs = pstmt.executeQuery(); - - if (!rs.next()) { - s_logger.error("ERROR: No version selected"); - } else if (!rs.getString(1).equals("2.2.4")) { - s_logger.error("ERROR: VERSION stored is not 2.2.4: " + rs.getString(1)); - } - rs.close(); - pstmt.close(); - - s_logger.debug("Sanity 2.2.2 to 2.2.4 DB upgrade test passed"); - - } finally { - conn.close(); - } - } - -} diff --git a/server/test/com/cloud/upgrade/Sanity223To225UpgradeTest.java b/server/test/com/cloud/upgrade/Sanity223To225UpgradeTest.java deleted file mode 100644 index fd0b219af7e..00000000000 --- a/server/test/com/cloud/upgrade/Sanity223To225UpgradeTest.java +++ /dev/null @@ -1,94 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.upgrade; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; - -import javax.inject.Inject; - -import junit.framework.TestCase; - -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Before; - -import com.cloud.upgrade.dao.VersionDaoImpl; - -import com.cloud.utils.db.Transaction; - -public class Sanity223To225UpgradeTest extends TestCase { - private static final Logger s_logger = Logger.getLogger(Sanity223To225UpgradeTest.class); - - @Inject VersionDaoImpl dao; - @Inject DatabaseUpgradeChecker checker; - - @Override - @Before - public void setUp() throws Exception { - //DbTestUtils.executeScript("PreviousDatabaseSchema/clean-db.sql", false, true); - } - - @Override - @After - public void tearDown() throws Exception { - } - - public void test224to225Upgrade() throws SQLException { - s_logger.debug("Finding sample data from 2.2.3"); - //DbTestUtils.executeScript("/home/alena/Downloads/mac/160511preprod.bak", false, true); - - Connection conn; - PreparedStatement pstmt; - ResultSet rs; - - String version = dao.getCurrentVersion(); - - if (!version.equals("2.2.3")) { - s_logger.error("Version returned is not 2.2.3 but " + version); - } else { - s_logger.debug("Sanity 2.2.3 to 2.2.5 test version is " + version); - } - - checker.upgrade("2.2.3", "2.2.5"); - - conn = Transaction.getStandaloneConnection(); - try { - s_logger.debug("Starting tesing upgrade from 2.2.3 to 2.2.5..."); - - // Version check - pstmt = conn.prepareStatement("SELECT version FROM version"); - rs = pstmt.executeQuery(); - - if (!rs.next()) { - s_logger.error("ERROR: No version selected"); - } else if (!rs.getString(1).equals("2.2.5")) { - s_logger.error("ERROR: VERSION stored is not 2.2.5: " + rs.getString(1)); - } - rs.close(); - pstmt.close(); - - s_logger.debug("Sanity 2.2.3 to 2.2.5 DB upgrade test passed"); - - } finally { - conn.close(); - } - } - -} diff --git a/server/test/com/cloud/upgrade/Sanity224To225UpgradeTest.java b/server/test/com/cloud/upgrade/Sanity224To225UpgradeTest.java deleted file mode 100644 index 775a62ee501..00000000000 --- a/server/test/com/cloud/upgrade/Sanity224To225UpgradeTest.java +++ /dev/null @@ -1,95 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.upgrade; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; - -import javax.inject.Inject; - -import junit.framework.TestCase; - -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Before; - -import com.cloud.upgrade.dao.VersionDaoImpl; - -import com.cloud.utils.db.DbTestUtils; -import com.cloud.utils.db.Transaction; - -public class Sanity224To225UpgradeTest extends TestCase { - private static final Logger s_logger = Logger.getLogger(Sanity224To225UpgradeTest.class); - - @Inject VersionDaoImpl dao; - @Inject DatabaseUpgradeChecker checker; - - @Override - @Before - public void setUp() throws Exception { - DbTestUtils.executeScript("cleanup.sql", false, true); - } - - @Override - @After - public void tearDown() throws Exception { - } - - public void test224to225Upgrade() throws SQLException { - s_logger.debug("Finding sample data from 2.2.4"); - DbTestUtils.executeScript("fake.sql", false, true); - - Connection conn; - PreparedStatement pstmt; - ResultSet rs; - - String version = dao.getCurrentVersion(); - - if (!version.equals("2.2.4")) { - s_logger.error("Version returned is not 2.2.4 but " + version); - } else { - s_logger.debug("Sanity 2.2.4 to 2.2.5 test version is " + version); - } - - checker.upgrade("2.2.4", "2.2.5"); - - conn = Transaction.getStandaloneConnection(); - try { - s_logger.debug("Starting tesing upgrade from 2.2.4 to 2.2.5..."); - - // Version check - pstmt = conn.prepareStatement("SELECT version FROM version"); - rs = pstmt.executeQuery(); - - if (!rs.next()) { - s_logger.error("ERROR: No version selected"); - } else if (!rs.getString(1).equals("2.2.5")) { - s_logger.error("ERROR: VERSION stored is not 2.2.5: " + rs.getString(1)); - } - rs.close(); - pstmt.close(); - - s_logger.debug("Sanity 2.2.4 to 2.2.5 DB upgrade test passed"); - - } finally { - conn.close(); - } - } - -} diff --git a/server/test/com/cloud/upgrade/Template2214To30UpgradeTest.java b/server/test/com/cloud/upgrade/Template2214To30UpgradeTest.java deleted file mode 100644 index 06835b56774..00000000000 --- a/server/test/com/cloud/upgrade/Template2214To30UpgradeTest.java +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.upgrade; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; - -import javax.inject.Inject; - -import junit.framework.TestCase; - -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Before; - - -import com.cloud.utils.db.DbTestUtils; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.exception.CloudRuntimeException; - -public class Template2214To30UpgradeTest extends TestCase { - private static final Logger s_logger = Logger - .getLogger(Template2214To30UpgradeTest.class); - @Inject DatabaseUpgradeChecker checker; - - @Override - @Before - public void setUp() throws Exception { - DbTestUtils.executeScript("cleanup.sql", false, - true); - } - - @Override - @After - public void tearDown() throws Exception { - } - - public void test2214to30Upgrade() throws SQLException { - s_logger.debug("Finding sample data from 2.2.14"); - DbTestUtils.executeScript( - "fake.sql", false, - true); - - - checker.upgrade("2.2.14", "3.0.0"); - - Connection conn = Transaction.getStandaloneConnection(); - - try { - checkSystemVm(conn); - } finally { - try { - conn.close(); - } catch (SQLException e) { - } - } - } - - protected void checkSystemVm(Connection conn) throws SQLException { - PreparedStatement pstmt; - - pstmt = conn - .prepareStatement("SELECT version FROM `cloud`.`version` ORDER BY id DESC LIMIT 1"); - ResultSet rs = pstmt.executeQuery(); - assert rs.next() : "No version selected"; - assert rs.getString(1).equals("3.0.0") : "VERSION stored is not 3.0.0: " - + rs.getString(1); - rs.close(); - pstmt.close(); - - pstmt = conn.prepareStatement("select id from vm_template where name='systemvm-xenserver-3.0.0' and removed is null"); - rs = pstmt.executeQuery(); - long templateId1 = rs.getLong(1); - rs.close(); - pstmt.close(); - - pstmt = conn.prepareStatement("select distinct(vm_template_id) from vm_instance where type <> 'USER' and hypervisor_type = 'XenServer'"); - rs = pstmt.executeQuery(); - long templateId = rs.getLong(1); - rs.close(); - pstmt.close(); - - assert (templateId == templateId1) : "XenServer System Vms not using 3.0.0 template"; - rs.close(); - pstmt.close(); - - pstmt = conn.prepareStatement("select id from vm_template where name='systemvm-kvm-3.0.0' and removed is null"); - rs = pstmt.executeQuery(); - long templateId3 = rs.getLong(1); - rs.close(); - pstmt.close(); - - pstmt = conn.prepareStatement("select distinct(vm_template_id) from vm_instance where type <> 'USER' and hypervisor_type = 'KVM'"); - rs = pstmt.executeQuery(); - long templateId4 = rs.getLong(1); - rs.close(); - pstmt.close(); - - assert (templateId3 == templateId4) : "KVM System Vms not using 3.0.0 template"; - rs.close(); - pstmt.close(); - - } - -} diff --git a/server/test/com/cloud/upgrade/Test2214To30DBUpgrade.java b/server/test/com/cloud/upgrade/Test2214To30DBUpgrade.java deleted file mode 100644 index ff448033764..00000000000 --- a/server/test/com/cloud/upgrade/Test2214To30DBUpgrade.java +++ /dev/null @@ -1,199 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.upgrade; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; - -import javax.inject.Inject; - -import junit.framework.TestCase; - -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Before; - - -import com.cloud.utils.db.DbTestUtils; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.exception.CloudRuntimeException; - - -public class Test2214To30DBUpgrade extends TestCase { - private static final Logger s_logger = Logger - .getLogger(Test2214To30DBUpgrade.class); - - @Inject DatabaseUpgradeChecker checker; - - @Override - @Before - public void setUp() throws Exception { - DbTestUtils.executeScript("cleanup.sql", false, - true); - } - - @Override - @After - public void tearDown() throws Exception { - } - - public void test2214to30Upgrade() throws SQLException { - s_logger.debug("Finding sample data from 2.2.14"); - DbTestUtils.executeScript( - "fake.sql", false, - true); - - checker.upgrade("2.2.14", "3.0.0"); - - Connection conn = Transaction.getStandaloneConnection(); - - try { - checkPhysicalNetworks(conn); - checkNetworkOfferings(conn); - checkNetworks(conn); - } finally { - try { - conn.close(); - } catch (SQLException e) { - } - } - } - - protected void checkPhysicalNetworks(Connection conn) throws SQLException { - PreparedStatement pstmt; - - pstmt = conn - .prepareStatement("SELECT version FROM `cloud`.`version` ORDER BY id DESC LIMIT 1"); - ResultSet rs = pstmt.executeQuery(); - assert rs.next() : "No version selected"; - assert rs.getString(1).equals("3.0.0") : "VERSION stored is not 3.0.0: " - + rs.getString(1); - rs.close(); - pstmt.close(); - - pstmt = conn.prepareStatement("SELECT COUNT(*) FROM `cloud`.`physical_network`"); - rs = pstmt.executeQuery(); - assert rs.next() : "No physical networks setup."; - rs.close(); - pstmt.close(); - - } - - protected void checkNetworkOfferings(Connection conn) throws SQLException { - // 1) verify that all fields are present - List<String> fields = new ArrayList<String>(); - fields.add("id"); - fields.add("name"); - fields.add("unique_name"); - fields.add("display_text"); - fields.add("nw_rate"); - fields.add("mc_rate"); - fields.add("traffic_type"); - fields.add("specify_vlan"); - fields.add("system_only"); - fields.add("service_offering_id"); - fields.add("tags"); - fields.add("default"); - fields.add("availability"); - fields.add("state"); - fields.add("removed"); - fields.add("created"); - fields.add("guest_type"); - fields.add("dedicated_lb_service"); - fields.add("shared_source_nat_service"); - fields.add("specify_ip_ranges"); - fields.add("sort_key"); - fields.add("uuid"); - fields.add("redundant_router_service"); - fields.add("conserve_mode"); - fields.add("elastic_ip_service"); - fields.add("elastic_lb_service"); - - PreparedStatement pstmt; - for (String field : fields) { - pstmt = conn - .prepareStatement("SHOW COLUMNS FROM `cloud`.`network_offerings` LIKE ?"); - pstmt.setString(1, field); - ResultSet rs = pstmt.executeQuery(); - if (!rs.next()) { -
throw new CloudRuntimeException("Field " + field - + " is missing in upgraded network_offerings table"); - } - rs.close(); - pstmt.close(); - - } - - // 2) compare default network offerings - } - - protected void checkNetworks(Connection conn) throws SQLException { - - // 1) verify that all fields are present - List<String> fields = new ArrayList<String>(); - fields.add("id"); - fields.add("name"); - - fields.add("mode"); - fields.add("broadcast_domain_type"); - fields.add("traffic_type"); - fields.add("display_text"); - fields.add("broadcast_uri"); - fields.add("gateway"); - fields.add("cidr"); - fields.add("network_offering_id"); - fields.add("physical_network_id"); - fields.add("data_center_id"); - fields.add("related"); - fields.add("guru_name"); - fields.add("state"); - fields.add("dns1"); - fields.add("domain_id"); - fields.add("account_id"); - fields.add("set_fields"); - fields.add("guru_data"); - fields.add("dns2"); - fields.add("network_domain"); - fields.add("created"); - fields.add("removed"); - fields.add("reservation_id"); - fields.add("uuid"); - fields.add("guest_type"); - fields.add("restart_required"); - fields.add("specify_ip_ranges"); - fields.add("acl_type"); - - PreparedStatement pstmt; - for (String field : fields) { - pstmt = conn.prepareStatement("SHOW COLUMNS FROM `cloud`.`networks` LIKE ?"); - pstmt.setString(1, field); - ResultSet rs = pstmt.executeQuery(); - if (!rs.next()) { - throw new CloudRuntimeException("Field " + field - + " is missing in upgraded networks table"); - } - rs.close(); - pstmt.close(); - - } - - } -} diff --git a/server/test/com/cloud/upgrade/Usage217To224UpgradeTest.java b/server/test/com/cloud/upgrade/Usage217To224UpgradeTest.java deleted file mode 100644 index 741af5a03f0..00000000000 --- a/server/test/com/cloud/upgrade/Usage217To224UpgradeTest.java +++ /dev/null @@ -1,94 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License.
-package com.cloud.upgrade; - - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; - -import javax.inject.Inject; - -import junit.framework.TestCase; - -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Before; - -import com.cloud.upgrade.dao.VersionDaoImpl; - -import com.cloud.utils.db.DbTestUtils; -import com.cloud.utils.db.Transaction; - -public class Usage217To224UpgradeTest extends TestCase { - private static final Logger s_logger = Logger.getLogger(Usage217To224UpgradeTest.class); - - @Inject VersionDaoImpl dao; - @Inject PremiumDatabaseUpgradeChecker checker; - - @Override - @Before - public void setUp() throws Exception { - DbTestUtils.executeScript("cleanup.sql", false, true); - DbTestUtils.executeUsageScript("cleanup.sql", false, true); - } - - @Override - @After - public void tearDown() throws Exception { - } - - public void test21to22Upgrade() throws SQLException { - s_logger.debug("Finding sample data from 2.1.7"); - DbTestUtils.executeScript("fake.sql", false, true); - DbTestUtils.executeUsageScript("fake.sql", false, true); - - Connection conn; - PreparedStatement pstmt; - - - String version = dao.getCurrentVersion(); - assert version.equals("2.1.7") : "Version returned is not 2.1.7 but " + version; - - checker.upgrade("2.1.7", "2.2.4"); - - conn = Transaction.getStandaloneConnection(); - try { - pstmt = conn.prepareStatement("SELECT version FROM version ORDER BY id DESC LIMIT 1"); - ResultSet rs = pstmt.executeQuery(); - assert rs.next() : "No version selected"; - assert rs.getString(1).equals("2.2.4") : "VERSION stored is not 2.2.4: " + rs.getString(1); - rs.close(); - pstmt.close(); - - pstmt = conn.prepareStatement("SELECT COUNT(*) FROM usage_event"); - rs = pstmt.executeQuery(); - assert rs.next() : "Unable to get the count of usage events"; - assert (rs.getInt(1) == 182) : "Didn't find 182 usage events but found " + rs.getInt(1); - rs.close(); - pstmt.close(); - - } finally { - try { - conn.close(); - } catch (SQLException e) { - } - } - } - -} diff --git a/server/test/com/cloud/upgrade/UsageEvents218To224UpgradeTest.java b/server/test/com/cloud/upgrade/UsageEvents218To224UpgradeTest.java deleted file mode 100644 index cde114b5e63..00000000000 --- a/server/test/com/cloud/upgrade/UsageEvents218To224UpgradeTest.java +++ /dev/null @@ -1,91 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.upgrade; - - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; - -import javax.inject.Inject; - -import junit.framework.TestCase; - -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Before; - -import com.cloud.upgrade.dao.VersionDaoImpl; - -import com.cloud.utils.db.DbTestUtils; -import com.cloud.utils.db.Transaction; - -public class UsageEvents218To224UpgradeTest extends TestCase { - private static final Logger s_logger = Logger.getLogger(UsageEvents218To224UpgradeTest.class); - - @Inject VersionDaoImpl dao; - @Inject DatabaseUpgradeChecker checker; - - @Override - @Before - public void setUp() throws Exception { - DbTestUtils.executeScript("cleanup.sql", false, true); - } - - @Override - @After - public void tearDown() throws Exception { - } - - public void test21to22Upgrade() throws SQLException { - s_logger.debug("Finding sample data from 2.1.8"); - DbTestUtils.executeScript("fake.sql", false, true); - - Connection conn; - PreparedStatement pstmt; - - String version = dao.getCurrentVersion(); - assert version.equals("2.1.8") : "Version returned is not 2.1.8 but " + version; - - checker.upgrade("2.1.8", "2.2.4"); - - conn = Transaction.getStandaloneConnection(); - try { - pstmt = conn.prepareStatement("SELECT version FROM version ORDER BY id DESC LIMIT 1"); - ResultSet rs = pstmt.executeQuery(); - assert rs.next() : "No version selected"; - assert rs.getString(1).equals("2.2.4") : "VERSION stored is not 2.2.4: " + rs.getString(1); - rs.close(); - pstmt.close(); - - pstmt = conn.prepareStatement("SELECT COUNT(*) FROM usage_event"); - rs = pstmt.executeQuery(); - assert rs.next() : "Unable to get the count of usage events"; - assert (rs.getInt(1) == 37) : "Didn't find 37 usage events but found " + rs.getInt(1); - rs.close(); - pstmt.close(); - - } finally { - try { - conn.close(); - } catch (SQLException e) { - } - } - } - -} diff --git a/services/secondary-storage/pom.xml b/services/secondary-storage/pom.xml index 124fa5e085c..7d2089b1cd4 100644 --- a/services/secondary-storage/pom.xml +++ b/services/secondary-storage/pom.xml @@ -65,6 +65,14 @@ <sourceDirectory>src</sourceDirectory> <testSourceDirectory>test</testSourceDirectory> <plugins> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-surefire-plugin</artifactId> + <version>2.14</version> + <configuration> + <skipTests>true</skipTests> + </configuration> + </plugin> <plugin> <groupId>org.codehaus.mojo</groupId> <artifactId>exec-maven-plugin</artifactId> diff --git a/services/secondary-storage/src/org/apache/cloudstack/storage/resource/LocalNfsSecondaryStorageResource.java b/services/secondary-storage/src/org/apache/cloudstack/storage/resource/LocalNfsSecondaryStorageResource.java index 1a6dcf8eee8..fd6e2967fe7 100644 --- a/services/secondary-storage/src/org/apache/cloudstack/storage/resource/LocalNfsSecondaryStorageResource.java +++ b/services/secondary-storage/src/org/apache/cloudstack/storage/resource/LocalNfsSecondaryStorageResource.java @@ -65,13 +65,10 @@ public class LocalNfsSecondaryStorageResource extends NfsSecondaryStorageResourc ((DownloadManagerImpl) _dlMgr).setThreadPool(Executors.newFixedThreadPool(10)); _storage = new JavaStorageLayer(); this._inSystemVM = false; - // get mount parent folder configured in global setting, if set, this will overwrite _parent in NfsSecondaryStorageResource to work - // around permission issue for default /mnt folder - ConfigurationDaoImpl configDao = new ConfigurationDaoImpl(); - String mountParent = configDao.getValue(Config.MountParent.key()); - if (mountParent != null) { - _parent = mountParent + File.separator + "secStorage"; - } + } + + public void setParentPath(String path) { + this._parent = path; + } @Override
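Reviewer note on the two secondary-storage hunks above: surefire is pinned at 2.14 with tests skipped, presumably because the new LocalNfsSecondaryStorageResourceTest further down expects live NFS and Swift endpoints, and LocalNfsSecondaryStorageResource no longer resolves the Config.MountParent global setting in its own constructor; the parent mount directory is now pushed in through the new setParentPath() setter. Below is a minimal caller-side sketch of the new wiring, reusing the mountParent + File.separator + "secStorage" construction from the removed constructor code; the LocalResourceWiringSketch class and its build() helper are illustrative assumptions, not part of this patch:

import java.io.File;
import org.apache.cloudstack.storage.resource.LocalNfsSecondaryStorageResource;

public class LocalResourceWiringSketch {
    // Hypothetical factory: only setParentPath(String) and the no-arg
    // constructor come from this patch; everything else is illustrative.
    public static LocalNfsSecondaryStorageResource build(String mountParent) {
        LocalNfsSecondaryStorageResource resource = new LocalNfsSecondaryStorageResource();
        if (mountParent != null) {
            // Same value the removed constructor code derived from the
            // Config.MountParent global setting, used to work around
            // permission issues with the default /mnt parent folder.
            resource.setParentPath(mountParent + File.separator + "secStorage");
        }
        return resource;
    }
}

Moving the DAO lookup out of the resource keeps it free of management-server dependencies and unit-testable (the new test simply calls resource.setParentPath("/mnt")); the trade-off is that every construction site must now remember to supply the path.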
diff --git a/services/secondary-storage/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java
index 88e37841430..054edd9fe72 100755
--- a/services/secondary-storage/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java
+++ b/services/secondary-storage/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java
@@ -536,7 +536,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
         }
     }
 
-    protected File downloadFromUrlToNfs(String url, NfsTO nfs, String path) {
+    protected File downloadFromUrlToNfs(String url, NfsTO nfs, String path, String name) {
         HttpClient client = new DefaultHttpClient();
         HttpGet get = new HttpGet(url);
         try {
@@ -548,10 +548,19 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
             }
 
             String nfsMountPath = getRootDir(nfs.getUrl());
+
             String filePath = nfsMountPath + File.separator + path;
-            FileOutputStream outputStream = new FileOutputStream(filePath);
+            File directory = new File(filePath);
+            if (!directory.exists()) {
+                _storage.mkdirs(filePath);
+            }
+            File destFile = new File(filePath + File.separator + name);
+            if (!destFile.exists()) {
+                destFile.createNewFile();
+            }
+            FileOutputStream outputStream = new FileOutputStream(destFile);
             entity.writeTo(outputStream);
-            return new File(filePath);
+            return new File(destFile.getAbsolutePath());
         } catch (IOException e) {
             s_logger.debug("Faild to get url:"+ url + ", due to " + e.toString());
             throw new CloudRuntimeException(e);
@@ -565,9 +574,11 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
             return new DownloadAnswer("cache store can't be null", VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR);
         }
 
+        File file = null;
         try {
             NfsTO nfsCacheStore = (NfsTO)cacheStore;
-            File file = downloadFromUrlToNfs(cmd.getUrl(), nfsCacheStore, path);
+            String fileName = UUID.randomUUID().toString() + "." + cmd.getFormat().getFileExtension();
+            file = downloadFromUrlToNfs(cmd.getUrl(), nfsCacheStore, path, fileName);
             String swiftPath = SwiftUtil.putObject(swiftTO, file, "T-" + cmd.getId());
             String md5sum = null;
             try {
@@ -576,14 +587,17 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
                 s_logger.debug("Failed to get md5sum: " + file.getAbsoluteFile());
             }
 
-            file.delete();
-
-            return new DownloadAnswer(null, 100, null, VMTemplateStorageResourceAssoc.Status.DOWNLOADED,
+            DownloadAnswer answer = new DownloadAnswer(null, 100, null, VMTemplateStorageResourceAssoc.Status.DOWNLOADED,
                     swiftPath, swiftPath, file.length(), file.length(), md5sum
                     );
+            return answer;
         } catch (Exception e) {
             s_logger.debug("Failed to register template into swift", e);
             return new DownloadAnswer(e.toString(), VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR);
+        } finally {
+            if (file != null) {
+                file.delete();
+            }
         }
     }
 
diff --git a/services/secondary-storage/test/org/apache/cloudstack/storage/resource/LocalNfsSecondaryStorageResourceTest.java b/services/secondary-storage/test/org/apache/cloudstack/storage/resource/LocalNfsSecondaryStorageResourceTest.java
new file mode 100644
index 00000000000..7723321060d
--- /dev/null
+++ b/services/secondary-storage/test/org/apache/cloudstack/storage/resource/LocalNfsSecondaryStorageResourceTest.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.resource;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.storage.DownloadAnswer;
+import com.cloud.agent.api.to.DataObjectType;
+import com.cloud.agent.api.to.NfsTO;
+import com.cloud.agent.api.to.SwiftTO;
+import com.cloud.storage.DataStoreRole;
+import com.cloud.storage.Storage;
+import com.cloud.utils.SwiftUtil;
+import junit.framework.Assert;
+import junit.framework.TestCase;
+import org.apache.cloudstack.storage.command.CopyCmdAnswer;
+import org.apache.cloudstack.storage.command.CopyCommand;
+import org.apache.cloudstack.storage.command.DownloadCommand;
+import org.apache.cloudstack.storage.to.TemplateObjectTO;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+
+import javax.naming.ConfigurationException;
+import java.util.HashMap;
+
+public class LocalNfsSecondaryStorageResourceTest extends TestCase {
+    LocalNfsSecondaryStorageResource resource;
+
+    @Before
+    @Override
+    public void setUp() throws ConfigurationException {
+        resource = new LocalNfsSecondaryStorageResource();
+        resource.setParentPath("/mnt");
+        System.setProperty("paths.script", "/Users/edison/develop/asf-master/script");
+        //resource.configure("test", new HashMap());
+    }
+
+    @Test
+    public void testExecuteRequest() throws Exception {
+        TemplateObjectTO template = Mockito.mock(TemplateObjectTO.class);
+        NfsTO cacheStore = Mockito.mock(NfsTO.class);
+        Mockito.when(cacheStore.getUrl()).thenReturn("nfs://nfs2.lab.vmops.com/export/home/edison/");
+        SwiftTO swift = Mockito.mock(SwiftTO.class);
+        Mockito.when(swift.getEndPoint()).thenReturn("https://objects.dreamhost.com/auth");
+        Mockito.when(swift.getAccount()).thenReturn("cloudstack");
+        Mockito.when(swift.getUserName()).thenReturn("images");
+        //Mockito.when(swift.getKey()).thenReturn("something");
+
+        Mockito.when(template.getDataStore()).thenReturn(swift);
+        Mockito.when(template.getPath()).thenReturn("template/1/1/");
+        Mockito.when(template.isRequiresHvm()).thenReturn(true);
+        Mockito.when(template.getId()).thenReturn(1L);
+        Mockito.when(template.getFormat()).thenReturn(Storage.ImageFormat.VHD);
+        Mockito.when(template.getOrigUrl()).thenReturn("http://nfs1.lab.vmops.com/templates/ttylinux_pv.vhd");
+        Mockito.when(template.getObjectType()).thenReturn(DataObjectType.TEMPLATE);
+
+        DownloadCommand cmd = new DownloadCommand(template, 100000L);
+        cmd.setCacheStore(cacheStore);
+        DownloadAnswer answer = (DownloadAnswer)resource.executeRequest(cmd);
+        Assert.assertTrue(answer.getResult());
+
+        Mockito.when(template.getPath()).thenReturn(answer.getInstallPath());
+        Mockito.when(template.getDataStore()).thenReturn(swift);
+        //download swift:
+        Mockito.when(cacheStore.getRole()).thenReturn(DataStoreRole.ImageCache);
+        TemplateObjectTO destTemplate = Mockito.mock(TemplateObjectTO.class);
+        Mockito.when(destTemplate.getPath()).thenReturn("template/1/2");
+        Mockito.when(destTemplate.getDataStore()).thenReturn(cacheStore);
+        Mockito.when(destTemplate.getObjectType()).thenReturn(DataObjectType.TEMPLATE);
+        CopyCommand cpyCmd = new CopyCommand(template, destTemplate, 10000, true);
+        CopyCmdAnswer copyCmdAnswer = (CopyCmdAnswer)resource.executeRequest(cpyCmd);
+        Assert.assertTrue(copyCmdAnswer.getResult());
+
+    }
+}
diff --git a/tools/marvin/marvin/deployDataCenter.py b/tools/marvin/marvin/deployDataCenter.py
index 20908ca59b6..100f4425d44 100644
--- a/tools/marvin/marvin/deployDataCenter.py
+++ b/tools/marvin/marvin/deployDataCenter.py
@@ -159,7 +159,7 @@ specify a valid config file" % cfgFile)
             secondarycmd.provider = secondary.provider
             secondarycmd.details = []
 
-            if secondarycmd.provider == 'S3':
+            if secondarycmd.provider == 'S3' or secondarycmd.provider == "Swift":
                 for key, value in vars(secondary.details).iteritems():
                     secondarycmd.details.append({
                         'key': key,
diff --git a/utils/src/com/cloud/utils/SwiftUtil.java b/utils/src/com/cloud/utils/SwiftUtil.java
index b95249b2c6c..c01de86e948 100644
--- a/utils/src/com/cloud/utils/SwiftUtil.java
+++ b/utils/src/com/cloud/utils/SwiftUtil.java
@@ -50,6 +50,7 @@ public class SwiftUtil {
         String srcDirectory = srcFile.getParent();
         Script command = new Script("/bin/bash", logger);
         long size = srcFile.length();
+        command.add("-c");
         if (size <= SWIFT_MAX_SIZE) {
             command.add("cd " + srcDirectory + ";/usr/bin/python " + swiftCli + " -A "
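
Note on the SwiftUtil hunk above: the Script usage in this patch suggests each add() appends one argv element to the command line. Without "-c", bash treats its first non-option argument as the path of a script file to open, so the whole "cd <dir>;/usr/bin/python swift ..." string would fail with "No such file or directory" instead of running inline. A self-contained sketch of the same argv shape, using ProcessBuilder as a stand-in for com.cloud.utils.script.Script (the command string is illustrative only):

    import java.io.IOException;

    public class BashDashC {
        public static void main(String[] args) throws IOException, InterruptedException {
            // Wrong: new ProcessBuilder("/bin/bash", "cd /tmp && pwd")
            //   -> bash tries to open a file literally named "cd /tmp && pwd".
            // Right: pass "-c" so the next argument is executed as an inline command,
            //   which is what the command.add("-c") fix above achieves.
            Process p = new ProcessBuilder("/bin/bash", "-c", "cd /tmp && pwd")
                    .inheritIO()
                    .start();
            System.exit(p.waitFor());
        }
    }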