Merge branch '4.20' into fenchCheck

dahn 2025-12-08 16:29:38 +01:00 committed by GitHub
commit a6c20f47b8
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4240 changed files with 144417 additions and 48709 deletions

View File

@ -51,14 +51,15 @@ github:
collaborators:
- acs-robot
- kiranchavala
- rajujith
- alexandremattioli
- vishesh92
- GaOrtiga
- SadiJr
- winterhazel
- rp-
- gpordeus
- hsato03
- bernardodemarco
- abh1sar
- FelipeM525
protected_branches: ~

View File

@ -22,8 +22,11 @@
# E224 Tab after operator
# E227 Missing whitespace around bitwise or shift operator
# E242 Tab after ','
# E271 Multiple spaces after keyword
# E272 Multiple spaces before keyword
# E273 Tab after keyword
# E274 Tab before keyword
# E713 Test for membership should be 'not in'
# E742 Do not define classes named 'I', 'O', or 'l'
# E743 Do not define functions named 'I', 'O', or 'l'
# E901 SyntaxError or IndentationError
@ -37,4 +40,4 @@
exclude =
.git,
venv
select = E112,E113,E133,E223,E224,E227,E242,E273,E274,E742,E743,E901,E902,W291,W292,W293,W391
select = E112,E113,E133,E223,E224,E227,E242,E271,E272,E273,E274,E713,E742,E743,E901,E902,W291,W292,W293,W391

.github/linters/.markdown-lint.yml
View File

@ -0,0 +1,100 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# MD001/heading-increment Heading levels should only increment by one level at a time
MD001: false
# MD003/heading-style Heading style
MD003: false
# MD004/ul-style Unordered list style
MD004: false
# MD007/ul-indent Unordered list indentation
MD007: false
# MD009/no-trailing-spaces Trailing spaces
MD009: false
# MD010/no-hard-tabs Hard tabs
MD010: false
# MD012/no-multiple-blanks Multiple consecutive blank lines
MD012: false
# MD013/line-length Line length
MD013: false
# MD014/commands-show-output Dollar signs used before commands without showing output
MD014: false
# MD018/no-missing-space-atx No space after hash on atx style heading
MD018: false
# MD019/no-multiple-space-atx Multiple spaces after hash on atx style heading
MD019: false
# MD022/blanks-around-headings Headings should be surrounded by blank lines
MD022: false
# MD023/heading-start-left Headings must start at the beginning of the line
MD023: false
# MD024/no-duplicate-heading Multiple headings with the same content
MD024: false
# MD025/single-title/single-h1 Multiple top-level headings in the same document
MD025: false
# MD026/no-trailing-punctuation Trailing punctuation in heading
MD026: false
# MD028/no-blanks-blockquote Blank line inside blockquote
MD028: false
# MD029/ol-prefix Ordered list item prefix
MD029: false
# MD031/blanks-around-fences Fenced code blocks should be surrounded by blank lines
MD031: false
# MD032/blanks-around-lists Lists should be surrounded by blank lines
MD032: false
# MD033/no-inline-html Inline HTML
MD033: false
# MD034/no-bare-urls Bare URL used
MD034: false
# MD036/no-emphasis-as-heading Emphasis used instead of a heading
MD036: false
# MD037/no-space-in-emphasis Spaces inside emphasis markers
MD037: false
# MD040/fenced-code-language Fenced code blocks should have a language specified
MD040: false
# MD041/first-line-heading/first-line-h1 First line in a file should be a top-level heading
MD041: false
# MD046/code-block-style Code block style
MD046: false
# MD052/reference-links-images Reference links and images should use a label that is defined
MD052: false

.github/linters/codespell.txt
View File

@ -0,0 +1,526 @@
accouns
acheived
acount
actuall
acuiring
acumulate
addreess
addtion
adminstrator
afer
afrer
afterall
againt
ags
aktive
algoritm
allo
alloacate
allocted
alocation
alogrithm
alpha-numeric
alue
ammended
ammount
ans
anull
apche
aplication
apllication
applicatio
apporpriate
appropritate
aqcuire
aqcuired
aquire
aquiring
assiciate
assigne
assoication
assosiate
asssert
astroid
asynchroniously
asyncronous
atleast
atomation
attache
attch
attches
authenciation
authenitcation
authenitication
availiability
avialable
bais
beacause
beacuse
becase
becasue
becaues
behviour
birdge
bject
boardcast
bootstraper
bu
cant
capabilites
capablity
capcity
carrefully
cavaet
chaing
checkd
childs
choosen
chould
clenup
cliente
clinet
cluser
cna
collison
comman
commited
comparision
comparisions
complient
concious
conectix
confg
configruation
configuable
conneciton
connexion
constrait
constraits
containg
contex
continuesly
contro
controler
controles
controll
convienient
convinience
coputer
correcponding
correspoding
correspoonds
cosole
coudl
couldnt
craete
craeted
crate
crated
createa
createing
credentail
cros
crresponding
curren
currentl
datas
decalared
declatory
decocdes
decypher
defalut
defaut
defered
definiton
deleteable
dependancy
dependant
dependend
deployement
deply
deplying
dervied
descktop
descrption
deserialzed
desination
detination
detroy
detroying
dettach
dettached
dettaching
diabling
diasbled
dictonary
didnt
differnet
differnt
direcotry
directroy
disale
disbale
discrepency
disover
dissapper
dissassociated
divice
doesn'
doesnot
doesnt
dont'
doubleclick
dows
eanbled
earch
ect
elemnt
eles
elments
emmited
enble
encryted
enebled
enmpty
entires
enviornment
environmnet
equivalant
erro
erronous
everthing
everytime
excetion
excption
excute
execept
execption
execut
executeable
exeeded
exisitng
exisits
existin
existsing
exitting
expcted
expection
explaination
explicitely
faield
faild
failes
falied
fasion
feild
filenname
fillled
findout
fisrt
fo
folowing
fowarding
frist
fro
frontent
fuctionality
genarate
generallly
gernerate
get's
gloabal
gorry
gracefull
gradiant
handeling
hanling
happend
hasing
hasnt
hda
hostanme
hould
hsould
hte
identifers
identifyer
identifyers
igoring
immediatley
implememented
implementor
implementors
implemnt
implict
implmeneted
implmentation
incase
includeing
incosistency
indecates
indien
infor
informations
informaton
infrastrcuture
ingore
inital
initalize
initator
initilization
inspite
instace
instal
instnace
intefaces
intepret
intereface
interfer
interpretted
intialize
intializes
intializing
invlaid
invokation
isnt
ist
klunky
lable
leve
lief
limite
linke
listner
lokal
lokales
maintainence
maintenace
maintenence
mamagement
mambers
manaully
manuel
maxium
mehtod
mergable
mesage
messge
metatdata
milisecond
minumum
mis
modifers
mor
mot
mulitply
multipl
multple
mutliple
nast
nd
neccessary
necesary
netowrk
nin
nodel
nome
noone
nowe
numbe
numer
occured
occurence
occuring
offfering
ofthe
omited
onother
opeation
optin
orginal
otherwse
outter
overriden
overwritting
paramater
paramemeter
paramenter
paramete
parametrs
pararmeter
parms
parralels
particualr
passowrd
perfromed
permissble
physcial
plugable
pluging
polcies
policys
poluting
possiblity
potenial
prefered
preffered
pressenter
previleges
primay
priviledged
procuct
programatically
progres
properites
propertie
propertys
propogate
provison
psudo
pyhsical
readabilty
readd
reccuring
recevied
recieved
recursivelly
redunant
refference
releease
relese
remaning
remore
remvoing
renabling
repeatly
reponse
reqest
reqiured
requieres
requried
reserv
reserverd
reseted
reseting
resorce
responser
resposne
resturns
retreive
retreiving
retrive
retrived
retriving
retrun
retuned
returing
re-use
rever
rocessor
runing
runnign
sate
scalled
scipt
scirpt
scrip
seconadry
seconday
seesion
sepcified
sepcify
seprated
ser
servies
seting
settig
sevices
shoul
shoule
sie
signle
simplier
singature
skiping
snaphsot
snpashot
specied
specifed
specifiy
splitted
spped
standy
statics
stickyness
stil
stip
storeage
strat
streched
strutural
succesfull
successfull
suceessful
suces
sucessfully
suiteable
suppots
suppport
syncronous
syste
tage
te
tempdate
testng
tha
thats
ther
therefor
theres
theses
thi
thorugh
throught
ths
tipically
transction
tring
trough
tyoe
ue
ues
unavailibility
uncommited
uncompressible
uneccessarily
unexepected
unexpect
unknow
unkonw
unkown
unneccessary
unparseable
unrecoginized
unsupport
unxpected
updat
uptodate
usera
usign
usin
utlization
vaidate
valiate
valule
valus
varibles
verfy
verfying
verifing
virutal
visable
wakup
wil
wit
wll
wth

View File

@ -41,9 +41,9 @@ jobs:
cache: maven
- name: Set up Python
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: '3.8'
python-version: '3.10'
architecture: 'x64'
- name: Install Build Dependencies

View File

@ -29,7 +29,7 @@ permissions:
jobs:
build:
if: github.repository == 'apache/cloudstack'
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
strategy:
fail-fast: false
@ -87,7 +87,9 @@ jobs:
smoke/test_migration
smoke/test_multipleips_per_nic
smoke/test_nested_virtualization
smoke/test_set_sourcenat",
smoke/test_set_sourcenat
smoke/test_webhook_lifecycle
smoke/test_purge_expunged_vms",
"smoke/test_network
smoke/test_network_acl
smoke/test_network_ipv6
@ -133,6 +135,7 @@ jobs:
smoke/test_usage
smoke/test_usage_events
smoke/test_vm_deployment_planner
smoke/test_vm_strict_host_tags
smoke/test_vm_schedule
smoke/test_vm_life_cycle
smoke/test_vm_lifecycle_unmanage_import
@ -181,7 +184,8 @@ jobs:
"component/test_project_usage
component/test_protocol_number_security_group
component/test_public_ip
component/test_resource_limits",
component/test_resource_limits
component/test_resource_limit_tags",
"component/test_regions_accounts
component/test_routers
component/test_snapshots
@ -228,7 +232,25 @@ jobs:
- name: Install Build Dependencies
run: |
sudo apt-get update
sudo apt-get install -y git uuid-runtime genisoimage netcat ipmitool build-essential libgcrypt20 libgpg-error-dev libgpg-error0 libopenipmi0 ipmitool libpython3-dev libssl-dev libffi-dev python3-openssl python3-dev python3-setuptools
sudo apt-get install -y git uuid-runtime genisoimage netcat-openbsd ipmitool build-essential libgcrypt20 libgpg-error-dev libgpg-error0 libopenipmi0 ipmitool libpython3-dev libssl-dev libffi-dev python3-openssl python3-dev python3-setuptools
- name: Setup IPMI Tool for CloudStack
run: |
# Create cloudstack-common directory if it doesn't exist
sudo mkdir -p /usr/share/cloudstack-common
# Copy ipmitool to cloudstack-common directory if it doesn't exist
if [ ! -f /usr/share/cloudstack-common/ipmitool ]; then
sudo cp /usr/bin/ipmitool /usr/share/cloudstack-common/ipmitool
sudo chmod 755 /usr/share/cloudstack-common/ipmitool
fi
# Create ipmitool-C3 wrapper script
sudo tee /usr/bin/ipmitool > /dev/null << 'EOF'
#!/bin/bash
/usr/share/cloudstack-common/ipmitool -C3 $@
EOF
sudo chmod 755 /usr/bin/ipmitool
- name: Install Python dependencies
run: |
@ -267,7 +289,7 @@ jobs:
- name: Setup Simulator Prerequisites
run: |
sudo python3 -m pip install --upgrade netaddr mysql-connector-python
python3 -m pip install --user --upgrade tools/marvin/dist/Marvin-*.tar.gz
python3 -m pip install --user --upgrade tools/marvin/dist/[mM]arvin-*.tar.gz
mvn -q -Pdeveloper -pl developer -Ddeploydb
mvn -q -Pdeveloper -pl developer -Ddeploydb-simulator
@ -280,7 +302,7 @@ jobs:
- name: Start CloudStack Management Server with Simulator
run: |
export MAVEN_OPTS="-Xmx4096m -XX:MaxPermSize=800m -Djava.security.egd=file:/dev/urandom -javaagent:jacoco/lib/jacocoagent.jar=address=*,port=36320,output=tcpserver"
export MAVEN_OPTS="-Xmx4096m -XX:MaxMetaspaceSize=800m -Djava.security.egd=file:/dev/urandom -javaagent:jacoco/lib/jacocoagent.jar=address=*,port=36320,output=tcpserver --add-opens=java.base/java.lang=ALL-UNNAMED --add-exports=java.base/sun.security.x509=ALL-UNNAMED --add-opens=java.base/jdk.internal.reflect=ALL-UNNAMED"
echo -e "\nStarting simulator"
set +e
mvn -Dsimulator -Dorg.eclipse.jetty.annotations.maxWait=120 -pl :cloud-client-ui jetty:run 2>&1 > /tmp/jetty-log || true &

View File

@ -36,11 +36,11 @@ jobs:
with:
fetch-depth: 0
- name: Set up JDK11
- name: Set up JDK 17
uses: actions/setup-java@v4
with:
distribution: 'temurin'
java-version: '11'
java-version: '17'
cache: 'maven'
- name: Build CloudStack with Quality Checks

View File

@ -39,7 +39,7 @@ jobs:
pip install pre-commit
- name: Set PY
run: echo "PY=$(python -VV | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV
- uses: actions/cache@v3
- uses: actions/cache@v4
with:
path: ~/.cache/pre-commit
key: pre-commit|${{ env.PY }}|${{ hashFiles('.pre-commit-config.yaml') }}

View File

@ -36,25 +36,25 @@ jobs:
with:
fetch-depth: 0
- name: Set up JDK11
- name: Set up JDK17
uses: actions/setup-java@v4
with:
distribution: 'temurin'
java-version: '11'
java-version: '17'
cache: 'maven'
- name: Cache SonarCloud packages
uses: actions/cache@v3
uses: actions/cache@v4
with:
path: ~/.sonar/cache
key: ${{ runner.os }}-sonar
restore-keys: ${{ runner.os }}-sonar
- name: Cache local Maven repository
uses: actions/cache@v3
uses: actions/cache@v4
with:
path: ~/.m2/repository
key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
key: ${{ runner.os }}-m2-${{ hashFiles('pom.xml', '*/pom.xml', '*/*/pom.xml', '*/*/*/pom.xml') }}
restore-keys: |
${{ runner.os }}-m2

View File

@ -31,10 +31,10 @@ jobs:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- name: Set up JDK 11
- name: Set up JDK 17
uses: actions/setup-java@v4
with:
java-version: '11'
java-version: '17'
distribution: 'adopt'
architecture: x64
cache: maven

View File

@ -38,25 +38,25 @@ jobs:
ref: "refs/pull/${{ github.event.number }}/merge"
fetch-depth: 0
- name: Set up JDK11
- name: Set up JDK17
uses: actions/setup-java@v4
with:
distribution: 'temurin'
java-version: '11'
java-version: '17'
cache: 'maven'
- name: Cache SonarCloud packages
uses: actions/cache@v3
uses: actions/cache@v4
with:
path: ~/.sonar/cache
key: ${{ runner.os }}-sonar
restore-keys: ${{ runner.os }}-sonar
- name: Cache local Maven repository
uses: actions/cache@v3
uses: actions/cache@v4
with:
path: ~/.m2/repository
key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
key: ${{ runner.os }}-m2-${{ hashFiles('pom.xml', '*/pom.xml', '*/*/pom.xml', '*/*/*/pom.xml') }}
restore-keys: |
${{ runner.os }}-m2

View File

@ -36,7 +36,7 @@ jobs:
- name: Set up Node
uses: actions/setup-node@v3
with:
node-version: 14
node-version: 16
- name: Env details
run: |

.gitignore
View File

@ -15,92 +15,92 @@
# specific language governing permissions and limitations
# under the License.
build/build.number
.lock-wscript
.waf-*
waf-*
target/
override/
.metadata
dist/
*~
*_flymake.js
*.bak
cloud-*.tar.bz2
*.log
*.pyc
*.patch
*.css.map
*.egginfo/
*.egg-info/
*.prefs
build.number
*.log.*
cloud.log.*.*
unittest
deps/cloud.userlibraries
.DS_Store
.idea
*.iml
git-remote-https.exe.stackdump
*.swp
tools/devcloud/devcloudbox/.vagrant
tools/cli/cloudmonkey/marvin/
tools/cli/cloudmonkey/precache.py
tools/marvin/marvin/cloudstackAPI/
tools/marvin/build/
tools/cli/build/
tools/appliance/systemvmtemplate/packer_cache/
*.jar
*.war
*.mar
*.iso
*.jar
*.log
*.log.*
*.mar
*.orig
*.patch
*.prefs
*.pyc
*.qcow2
*.raw
*.swp
*.tar.gz
*.tgz
*.vscode
*.css.map
*.war
.DS_Store
.checkstyle
.classpath
.idea
.lock-wscript
.metadata
.pmd
.pmdruleset.xml
.project
.pydevproject
.reviewboardrc
.settings.xml
.settings/
.vscode
.waf-*
Gemfile.lock
build/build.number
build.number
build-indep-stamp
cloud.log.*.*
cloud-*.tar.bz2
configure-stamp
db.properties.override
debian/*.debhelper
debian/*.substvars
debian/cloudstack-*/*
debian/files
debian/tmp
deps/cloud.userlibraries
dist/
docs/publish
docs/runbook/publish
docs/runbook/tmp
docs/tmp
engine/storage/integration-test/test-output
git-remote-https.exe.stackdump
node_modules
override/
plugins/hypervisors/kvm/.pydevproject
plugins/network-elements/juniper-contrail/logs/
replace.properties.override
replace.properties.tmp
scripts/.pydevproject
scripts/vm/hypervisor/xenserver/vhd-util
systemvm/.pydevproject
target/
target-eclipse
test/.pydevprojec
tools/apidoc/log/
tools/appliance/box/
tools/appliance/systemvmtemplate/packer_cache/
tools/cli/build/
tools/cli/cloudmonkey/marvin/
tools/cli/cloudmonkey/precache.py
tools/devcloud/devcloudbox/.vagrant
tools/marvin/build/
tools/marvin/marvin/cloudstackAPI/
tools/marvin/marvin/cloudstackAPI/*
unittest
venv
waf-*
# this ignores _all files starting with '.'. Don't do that!
#.*
target-eclipse
!.gitignore
.classpath
.settings.xml
.settings/
db.properties.override
replace.properties.override
tools/marvin/marvin/cloudstackAPI/*
docs/tmp
docs/publish
docs/runbook/tmp
docs/runbook/publish
.project
Gemfile.lock
debian/tmp
debian/files
debian/cloudstack-*/*
debian/*.substvars
debian/*.debhelper
replace.properties.tmp
build-indep-stamp
configure-stamp
*_flymake.js
engine/storage/integration-test/test-output
tools/apidoc/log/
plugins/network-elements/juniper-contrail/logs/
scripts/vm/hypervisor/xenserver/vhd-util
*.orig
tools/appliance/box/
.reviewboardrc
.checkstyle
.pmd
.pmdruleset.xml
.pydevproject
systemvm/.pydevproject
test/.pydevprojec
plugins/hypervisors/kvm/.pydevproject
scripts/.pydevproject
*.qcow2
*.raw
venv
node_modules
.vscode

View File

@ -26,7 +26,7 @@ repos:
- id: identity
- id: check-hooks-apply
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
rev: v4.6.0
hooks:
#- id: check-added-large-files
- id: check-case-conflict
@ -36,6 +36,8 @@ repos:
- id: check-vcs-permalinks
#- id: check-yaml
- id: destroyed-symlinks
- id: detect-aws-credentials
args: [--allow-missing-credentials]
- id: detect-private-key
exclude: >
(?x)
@ -53,11 +55,23 @@ repos:
- id: end-of-file-fixer
exclude: \.vhd$
#- id: fix-byte-order-marker
- id: forbid-submodules
- id: mixed-line-ending
exclude: \.(cs|xml)$
# - id: trailing-whitespace
- id: trailing-whitespace
files: \.(header|in|java|md|properties|py|rb|sh|sql|txt|vue|xml|yaml|yml)$
args: [--markdown-linebreak-ext=md]
exclude: ^services/console-proxy/rdpconsole/src/test/doc/freerdp-debug-log\.txt$
- repo: https://github.com/codespell-project/codespell
rev: v2.2.6
hooks:
- id: codespell
name: run codespell
description: Check spelling with codespell
args: [--ignore-words=.github/linters/codespell.txt]
exclude: ^ui/package\.json$|^ui/package-lock\.json$|^ui/public/js/less\.min\.js$|^ui/public/locales/.*[^n].*\.json$
- repo: https://github.com/pycqa/flake8
rev: 6.1.0
rev: 7.0.0
hooks:
- id: flake8
args: [--config, .github/linters/.flake8]
@ -72,3 +86,12 @@ repos:
^scripts/vm/hypervisor/xenserver/vmopspremium$|
^setup/bindir/cloud-setup-encryption\.in$|
^venv/.*$
- repo: https://github.com/igorshubovych/markdownlint-cli
rev: v0.40.0
hooks:
- id: markdownlint
name: run markdownlint
description: check Markdown files with markdownlint
args: [--config=.github/linters/.markdown-lint.yml]
types: [markdown]
files: \.(md|mdown|markdown)$
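With these hooks registered, the new checks can be exercised locally in the standard pre-commit way, for example pre-commit run codespell --all-files or pre-commit run markdownlint --all-files (hook ids as declared above), or pre-commit run --all-files to run the whole set.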

View File

@ -1 +1 @@
3.6
3.10

View File

@ -430,11 +430,11 @@ Bug ID | Description
[CLOUDSTACK-6099](https://issues.apache.org/jira/browse/CLOUDSTACK-6099) | live migration is failing for vm deployed using dynamic compute offerings with NPE
[CLOUDSTACK-7528](https://issues.apache.org/jira/browse/CLOUDSTACK-7528) | More verbose logging when sending alert fails
[CLOUDSTACK-6624](https://issues.apache.org/jira/browse/CLOUDSTACK-6624) | set specifyIpRanges to true if specifyVlan is set to true
[CLOUDSTACK-7404](https://issues.apache.org/jira/browse/CLOUDSTACK-7404) | Failed to start an instance when originating template has been deleted
[CLOUDSTACK-7404](https://issues.apache.org/jira/browse/CLOUDSTACK-7404) | Failed to start an instance when originating template has been deleted
[CLOUDSTACK-6531](https://issues.apache.org/jira/browse/CLOUDSTACK-6531) | Stopping the router in case of command failures
[CLOUDSTACK-6115](https://issues.apache.org/jira/browse/CLOUDSTACK-6115) | TravisCI configuration
[CLOUDSTACK-7405](https://issues.apache.org/jira/browse/CLOUDSTACK-7405) | allowing VR meta-data to be accessed without trailing slash
[CLOUDSTACK-7260](https://issues.apache.org/jira/browse/CLOUDSTACK-7260) | Management server not responding after some time for Vmware due to Oom
[CLOUDSTACK-7260](https://issues.apache.org/jira/browse/CLOUDSTACK-7260) | Management server not responding after some time for Vmware due to Oom
[CLOUDSTACK-7038](https://issues.apache.org/jira/browse/CLOUDSTACK-7038) | Add mysql client dependency for mgmt server pkg for debian
[CLOUDSTACK-6892](https://issues.apache.org/jira/browse/CLOUDSTACK-6892) | Create separate package for the mysql HA component
[CLOUDSTACK-7038](https://issues.apache.org/jira/browse/CLOUDSTACK-7038) | Add mysql client dependency for mgmt server/rpms
@ -449,12 +449,12 @@ Bug ID | Description
[CLOUDSTACK-7006](https://issues.apache.org/jira/browse/CLOUDSTACK-7006) | Restore template ID in ROOT volume usages
[CLOUDSTACK-6747](https://issues.apache.org/jira/browse/CLOUDSTACK-6747) | test to allow all cidrs on other end of vpc
[CLOUDSTACK-6272](https://issues.apache.org/jira/browse/CLOUDSTACK-6272) | Fix recover/restore VM actions
[CLOUDSTACK-6927](https://issues.apache.org/jira/browse/CLOUDSTACK-6927) | store virsh list in list instead of querying libvirt
[CLOUDSTACK-6927](https://issues.apache.org/jira/browse/CLOUDSTACK-6927) | store virsh list in list instead of querying libvirt
[CLOUDSTACK-6317](https://issues.apache.org/jira/browse/CLOUDSTACK-6317) | [VMware] Tagged VLAN support broken for Management/Control/Storage traffic
[CLOUDSTACK-5891](https://issues.apache.org/jira/browse/CLOUDSTACK-5891) | [VMware] If a template has been registered and "cpu.corespersocket=X" ,
[CLOUDSTACK-6478](https://issues.apache.org/jira/browse/CLOUDSTACK-6478) | Failed to download Template when having 3 SSVM's in one
[CLOUDSTACK-6464](https://issues.apache.org/jira/browse/CLOUDSTACK-6464) | if guest network type is vlan://untagged, and traffic label is used
[CLOUDSTACK-6816](https://issues.apache.org/jira/browse/CLOUDSTACK-6816) | bugfix: cloudstack-setup-management make /root directory's permission 0777
[CLOUDSTACK-6816](https://issues.apache.org/jira/browse/CLOUDSTACK-6816) | bugfix: cloudstack-setup-management make /root directory's permission 0777
[CLOUDSTACK-6204](https://issues.apache.org/jira/browse/CLOUDSTACK-6204) | applying missed patch
[CLOUDSTACK-6472](https://issues.apache.org/jira/browse/CLOUDSTACK-6472) | (4.3 specific) listUsageRecords: Pull information from removed items as well
[CLOUDSTACK-5976](https://issues.apache.org/jira/browse/CLOUDSTACK-5976) | Typo in "ssh_keypairs" table's foreign key constraints on the Upgraded Setup
@ -657,11 +657,11 @@ Version 4.1.0
-------------
This is the second major release of CloudStack from within the Apache Software Foundation, and the
first major release as a Top-Level Project (TLP).
first major release as a Top-Level Project (TLP).
Build Tool Changes:
* The project now uses Maven 3 exclusively to build.
* The project now uses Maven 3 exclusively to build.
New Features:
* CLOUDSTACK-101: OVS support in KVM
@ -976,14 +976,14 @@ Issues fixed in this release:
* CLOUDSTACK-1845: KVM - storage migration often fails
* CLOUDSTACK-1846: KVM - storage pools can silently fail to be unregistered, leading to failure to register later
* CLOUDSTACK-2003: Deleting domain while deleted account is cleaning up leaves VMs expunging forever due to 'Failed to update resource count'
* CLOUDSTACK-2090: Upgrade from version 4.0.1 to version 4.0.2 triggers the 4.0.0 to 4.0.1.
* CLOUDSTACK-2090: Upgrade from version 4.0.1 to version 4.0.2 triggers the 4.0.0 to 4.0.1.
* CLOUDSTACK-2091: Error in API documentation for 4.0.x.
Version 4.0.1-incubating
------------------------
This is a bugfix release for Apache CloudStack 4.0.0-incubating, with no new features.
This is a bugfix release for Apache CloudStack 4.0.0-incubating, with no new features.
Security Fixes:

View File

@ -3,7 +3,7 @@ Contributing to Apache CloudStack (ACS)
Summary
-------
This document covers how to contribute to the ACS project. ACS uses GitHub PRs to manage code contributions.
This document covers how to contribute to the ACS project. ACS uses GitHub PRs to manage code contributions.
These instructions assume you have a GitHub.com account, so if you don't have one you will have to create one. Your proposed code changes will be published to your own fork of the ACS project and you will submit a Pull Request for your changes to be added.
_Lets get started!!!_
@ -11,17 +11,17 @@ _Lets get started!!!_
Bug fixes
---------
It's very important that we can easily track bug fix commits, so their hashes should remain the same in all branches.
Therefore, a pull request (PR) that fixes a bug, should be sent against a release branch.
This can be either the "current release" or the "previous release", depending on which ones are maintained.
It's very important that we can easily track bug fix commits, so their hashes should remain the same in all branches.
Therefore, a pull request (PR) that fixes a bug, should be sent against a release branch.
This can be either the "current release" or the "previous release", depending on which ones are maintained.
Since the goal is a stable main, bug fixes should be "merged forward" to the next branch in order: "previous release" -> "current release" -> main (in other words: old to new)
Developing new features
-----------------------
Development should be done in a feature branch, branched off of main.
Send a PR(steps below) to get it into main (2x LGTM applies).
PR will only be merged when main is open, will be held otherwise until main is open again.
Development should be done in a feature branch, branched off of main.
Send a PR(steps below) to get it into main (2x LGTM applies).
PR will only be merged when main is open, will be held otherwise until main is open again.
No back porting / cherry-picking features to existing branches!
PendingReleaseNotes file
@ -33,7 +33,7 @@ When adding information to the PendingReleaseNotes file make sure that you write
Updating the PendingReleaseNotes file is preferably a part of the original Pull Request, but that is up to the developers' discretion.
Fork the code
Fork the code
-------------
In your browser, navigate to: [https://github.com/apache/cloudstack](https://github.com/apache/cloudstack)
@ -136,4 +136,4 @@ $ git push origin :feature_x
Release Principles
------------------
Detailed information about ACS release principles is available at https://cwiki.apache.org/confluence/display/CLOUDSTACK/Release+principles+for+Apache+CloudStack+4.6+and+up
Detailed information about ACS release principles is available at https://cwiki.apache.org/confluence/display/CLOUDSTACK/Release+principles+for+Apache+CloudStack+4.6+and+up

View File

@ -15,7 +15,7 @@ was tested against a CentOS 7 x86_64 setup.
Install tools and dependencies used for development:
# yum -y install git java-11-openjdk java-11-openjdk-devel \
# yum -y install git java-17-openjdk java-17-openjdk-devel \
mysql mysql-server mkisofs git gcc python MySQL-python openssh-clients wget
Set up Maven (3.6.0):
@ -78,7 +78,7 @@ Clear old database (if any) and deploy the database schema:
Export the following variable if you need to run and debug the management server:
$ export MAVEN_OPTS="-Xmx1024m -XX:MaxPermSize=500m -Xdebug -Xrunjdwp:transport=dt_socket,address=8787,server=y,suspend=n"
$ export MAVEN_OPTS="-Xmx1024m -XX:MaxMetaspaceSize=500m -Xdebug -Xrunjdwp:transport=dt_socket,address=8787,server=y,suspend=n"
Start the management server:

View File

@ -142,7 +142,7 @@ This distribution includes cryptographic software. The country in which you curr
reside may have restrictions on the import, possession, use, and/or re-export to another
country, of encryption software. BEFORE using any encryption software, please check your
country's laws, regulations and policies concerning the import, possession, or use, and
re-export of encryption software, to see if this is permitted. See [The Wassenaar Arrangement](http://www.wassenaar.org/)
re-export of encryption software, to see if this is permitted. See [The Wassenaar Arrangement](http://www.wassenaar.org/)
for more information.
The U.S. Government Department of Commerce, Bureau of Industry and Security (BIS), has

View File

@ -6,9 +6,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@ -20,6 +20,19 @@ import os
import logging
import sys
import socket
# ---- This snippet of code adds the sources path and the waf configured PYTHONDIR to the Python path ----
# ---- We do this so cloud_utils can be looked up in the following order:
# ---- 1) Sources directory
# ---- 2) waf configured PYTHONDIR
# ---- 3) System Python path
for pythonpath in (
"@PYTHONDIR@",
os.path.join(os.path.dirname(__file__),os.path.pardir,os.path.pardir,"python","lib"),
):
if os.path.isdir(pythonpath): sys.path.insert(0,pythonpath)
# ---- End snippet of code ----
from cloudutils.cloudException import CloudRuntimeException, CloudInternalException
from cloudutils.utilities import initLoging, bash
from cloudutils.configFileOps import configFileOps

View File

@ -6,9 +6,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY

View File

@ -20,6 +20,19 @@ import sys
import os
import subprocess
from threading import Timer
# ---- This snippet of code adds the sources path and the waf configured PYTHONDIR to the Python path ----
# ---- We do this so cloud_utils can be looked up in the following order:
# ---- 1) Sources directory
# ---- 2) waf configured PYTHONDIR
# ---- 3) System Python path
for pythonpath in (
"@PYTHONDIR@",
os.path.join(os.path.dirname(__file__),os.path.pardir,os.path.pardir,"python","lib"),
):
if os.path.isdir(pythonpath): sys.path.insert(0,pythonpath)
# ---- End snippet of code ----
from xml.dom.minidom import parse
from cloudutils.configFileOps import configFileOps
from cloudutils.networkConfig import networkConfig

View File

@ -286,6 +286,7 @@ hypervisor.type=kvm
# The model of Watchdog timer to present to the Guest.
# For all models refer to the libvirt documentation.
# PLEASE NOTE: to disable the watchdogs definitions, use value: none
#vm.watchdog.model=i6300esb
# Action to take when the Guest/Instance is no longer notifying the Watchdog timer.
@ -430,3 +431,13 @@ iscsi.session.cleanup.enabled=false
# If set to "true", the agent will register for libvirt domain events, allowing for immediate updates on crashed or
# unexpectedly stopped. Experimental, requires agent restart.
# libvirt.events.enabled=false
# Implicit host tags managed by agent.properties
# host.tags=
# Timeout(in seconds) for SSL handshake when agent connects to server. When no value is set then default value of 30s
# will be used
#ssl.handshake.timeout=
# Wait(in seconds) during agent reconnections. When no value is set then default value of 5s will be used
#backoff.seconds=
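For illustration only (the values below are hypothetical, not shipped defaults), enabling the three settings documented above would look like this in agent.properties:

host.tags=kvm,nvme
ssl.handshake.timeout=60
backoff.seconds=10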

View File

@ -15,11 +15,13 @@
# specific language governing permissions and limitations
# under the License.
/var/log/cloudstack/agent/security_group.log /var/log/cloudstack/agent/resizevolume.log /var/log/cloudstack/agent/rolling-maintenance.log {
/var/log/cloudstack/agent/security_group.log /var/log/cloudstack/agent/resizevolume.log /var/log/cloudstack/agent/rolling-maintenance.log /var/log/cloudstack/agent/agent.out /var/log/cloudstack/agent/agent.err {
copytruncate
daily
rotate 5
compress
missingok
size 10M
dateext
dateformat -%Y-%m-%d
}

View File

@ -5,9 +5,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY

View File

@ -17,91 +17,60 @@ KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
<Configuration monitorInterval="60">
<Appenders>
<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/" debug="false">
<!-- ================================= -->
<!-- Preserve messages in a local file -->
<!-- ================================= -->
<!-- ================================= -->
<!-- Preserve messages in a local file -->
<!-- ================================= -->
<!-- A time/date based rolling appender -->
<RollingFile name="FILE" append="true" fileName="@AGENTLOG@" filePattern="@AGENTLOG@.%d{yyyy-MM-dd}.gz">
<ThresholdFilter level="INFO" onMatch="ACCEPT" onMismatch="DENY"/>
<Policies>
<TimeBasedTriggeringPolicy/>
</Policies>
<PatternLayout pattern="%d{DEFAULT} %-5p [%c{3}] (%t:%x) (logid:%X{logcontextid}) %m%ex%n"/>
</RollingFile>
<!-- A time/date based rolling appender -->
<appender name="FILE" class="org.apache.log4j.rolling.RollingFileAppender">
<param name="Append" value="true"/>
<param name="Threshold" value="INFO"/>
<rollingPolicy class="org.apache.log4j.rolling.TimeBasedRollingPolicy">
<param name="FileNamePattern" value="@AGENTLOG@.%d{yyyy-MM-dd}.gz"/>
<param name="ActiveFileName" value="@AGENTLOG@"/>
</rollingPolicy>
<layout class="org.apache.log4j.EnhancedPatternLayout">
<param name="ConversionPattern" value="%d{ISO8601} %-5p [%c{3}] (%t:%x) (logid:%X{logcontextid}) %m%n"/>
</layout>
</appender>
<!-- ============================== -->
<!-- Append messages to the console -->
<!-- ============================== -->
<!-- ============================== -->
<!-- Append messages to the console -->
<!-- ============================== -->
<appender name="CONSOLE" class="org.apache.log4j.ConsoleAppender">
<param name="Target" value="System.out"/>
<param name="Threshold" value="INFO"/>
<Console name="CONSOLE" target="SYSTEM_OUT">
<ThresholdFilter level="OFF" onMatch="ACCEPT" onMismatch="DENY"/>
<PatternLayout pattern="%-5p [%c{3}] (%t:%x) (logid:%X{logcontextid}) %m%ex%n"/>
</Console>
</Appenders>
<layout class="org.apache.log4j.PatternLayout">
<param name="ConversionPattern" value="%-5p [%c{3}] (%t:%x) (logid:%X{logcontextid}) %m%n"/>
</layout>
</appender>
<Loggers>
<!-- ================ -->
<!-- Limit categories -->
<!-- ================ -->
<!-- ================ -->
<!-- Limit categories -->
<!-- ================ -->
<category name="com.cloud">
<priority value="INFO"/>
</category>
<category name="com.cloud.agent.metrics">
<priority value="INFO"/>
</category>
<category name="com.cloud.agent.resource.computing.ComputingResource$StorageMonitorTask">
<priority value="INFO"/>
</category>
<Logger name="com.cloud" level="INFO"/>
<!-- Limit the org.apache category to INFO as its DEBUG is verbose -->
<category name="org.apache">
<priority value="INFO"/>
</category>
<Logger name="org.apache" level="INFO"/>
<category name="org">
<priority value="INFO"/>
</category>
<category name="net">
<priority value="INFO"/>
</category>
<Logger name="org" level="INFO"/>
<!-- Limit the com.amazonaws category to INFO as its DEBUG is verbose -->
<category name="com.amazonaws">
<priority value="INFO"/>
</category>
<Logger name="net" level="INFO"/>
<!-- Limit the httpclient.wire category to INFO as its DEBUG is verbose -->
<category name="httpclient.wire">
<priority value="INFO"/>
</category>
<Logger name="com.amazonaws" level="INFO"/>
<category name="org.apache.http.wire">
<priority value="INFO"/>
</category>
<Logger name="httpclient.wire" level="INFO"/>
<!-- ======================= -->
<!-- Setup the Root category -->
<!-- ======================= -->
<Logger name="org.apache.http.wire" level="INFO"/>
<root>
<level value="INFO"/>
<appender-ref ref="CONSOLE"/>
<appender-ref ref="FILE"/>
</root>
<!-- ======================= -->
<!-- Setup the Root category -->
<!-- ======================= -->
</log4j:configuration>
<Root level="INFO">
<AppenderRef ref="CONSOLE"/>
<AppenderRef ref="FILE"/>
</Root>
</Loggers>
</Configuration>

View File

@ -24,7 +24,7 @@
<parent>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloudstack</artifactId>
<version>4.19.4.0-SNAPSHOT</version>
<version>4.20.3.0-SNAPSHOT</version>
</parent>
<dependencies>
<dependency>

File diff suppressed because it is too large

View File

@ -16,28 +16,6 @@
// under the License.
package com.cloud.agent;
import com.cloud.agent.Agent.ExitStatus;
import com.cloud.agent.dao.StorageComponent;
import com.cloud.agent.dao.impl.PropertiesStorage;
import com.cloud.agent.properties.AgentProperties;
import com.cloud.agent.properties.AgentPropertiesFileHandler;
import com.cloud.resource.ServerResource;
import com.cloud.utils.LogUtils;
import com.cloud.utils.ProcessUtil;
import com.cloud.utils.PropertiesUtil;
import com.cloud.utils.backoff.BackoffAlgorithm;
import com.cloud.utils.backoff.impl.ConstantTimeBackoff;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.commons.daemon.Daemon;
import org.apache.commons.daemon.DaemonContext;
import org.apache.commons.daemon.DaemonInitException;
import org.apache.commons.lang.math.NumberUtils;
import org.apache.commons.lang3.BooleanUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import org.apache.log4j.xml.DOMConfigurator;
import javax.naming.ConfigurationException;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
@ -52,8 +30,33 @@ import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import javax.naming.ConfigurationException;
import org.apache.commons.daemon.Daemon;
import org.apache.commons.daemon.DaemonContext;
import org.apache.commons.daemon.DaemonInitException;
import org.apache.commons.lang.math.NumberUtils;
import org.apache.commons.lang3.BooleanUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.config.Configurator;
import com.cloud.agent.Agent.ExitStatus;
import com.cloud.agent.dao.StorageComponent;
import com.cloud.agent.dao.impl.PropertiesStorage;
import com.cloud.agent.properties.AgentProperties;
import com.cloud.agent.properties.AgentPropertiesFileHandler;
import com.cloud.resource.ServerResource;
import com.cloud.utils.LogUtils;
import com.cloud.utils.ProcessUtil;
import com.cloud.utils.PropertiesUtil;
import com.cloud.utils.backoff.BackoffAlgorithm;
import com.cloud.utils.backoff.impl.ConstantTimeBackoff;
import com.cloud.utils.exception.CloudRuntimeException;
public class AgentShell implements IAgentShell, Daemon {
private static final Logger s_logger = Logger.getLogger(AgentShell.class.getName());
protected static Logger LOGGER = LogManager.getLogger(AgentShell.class);
private final Properties _properties = new Properties();
private final Map<String, Object> _cmdLineProperties = new HashMap<String, Object>();
@ -221,7 +224,7 @@ public class AgentShell implements IAgentShell, Daemon {
throw new ConfigurationException("Unable to find agent.properties.");
}
s_logger.info("agent.properties found at " + file.getAbsolutePath());
LOGGER.info("agent.properties found at {}", file.getAbsolutePath());
try {
PropertiesUtil.loadFromFile(_properties, file);
@ -349,7 +352,7 @@ public class AgentShell implements IAgentShell, Daemon {
@Override
public void init(DaemonContext dc) throws DaemonInitException {
s_logger.debug("Initializing AgentShell from JSVC");
LOGGER.debug("Initializing AgentShell from JSVC");
try {
init(dc.getArguments());
} catch (ConfigurationException ex) {
@ -369,11 +372,11 @@ public class AgentShell implements IAgentShell, Daemon {
}
if (null != file) {
DOMConfigurator.configureAndWatch(file.getAbsolutePath());
Configurator.initialize(null, file.getAbsolutePath());
s_logger.info("Agent started");
LOGGER.info("Agent started");
} else {
s_logger.error("Could not start the Agent because the absolute path of the \"log4j-cloud.xml\" file cannot be determined.");
LOGGER.error("Could not start the Agent because the absolute path of the \"log4j-cloud.xml\" file cannot be determined.");
}
final Class<?> c = this.getClass();
@ -381,19 +384,19 @@ public class AgentShell implements IAgentShell, Daemon {
if (_version == null) {
throw new CloudRuntimeException("Unable to find the implementation version of this agent");
}
s_logger.info("Implementation Version is " + _version);
LOGGER.info("Implementation Version is {}", _version);
loadProperties();
parseCommand(args);
if (s_logger.isDebugEnabled()) {
if (LOGGER.isDebugEnabled()) {
List<String> properties = Collections.list((Enumeration<String>)_properties.propertyNames());
for (String property : properties) {
s_logger.debug("Found property: " + property);
LOGGER.debug("Found property: {}", property);
}
}
s_logger.info("Defaulting to using properties file for storage");
LOGGER.info("Defaulting to using properties file for storage");
_storage = new PropertiesStorage();
_storage.configure("Storage", new HashMap<String, Object>());
@ -403,14 +406,16 @@ public class AgentShell implements IAgentShell, Daemon {
_properties.put(cmdLineProp.getKey(), cmdLineProp.getValue());
}
s_logger.info("Defaulting to the constant time backoff algorithm");
LOGGER.info("Defaulting to the constant time backoff algorithm");
_backoff = new ConstantTimeBackoff();
_backoff.configure("ConstantTimeBackoff", new HashMap<String, Object>());
Map<String, Object> map = new HashMap<>();
map.put("seconds", _properties.getProperty("backoff.seconds"));
_backoff.configure("ConstantTimeBackoff", map);
}
private void launchAgent() throws ConfigurationException {
String resourceClassNames = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.RESOURCE);
s_logger.trace("resource=" + resourceClassNames);
LOGGER.trace("resource={}", resourceClassNames);
if (resourceClassNames != null) {
launchAgentFromClassInfo(resourceClassNames);
return;
@ -440,10 +445,10 @@ public class AgentShell implements IAgentShell, Daemon {
private void launchAgentFromTypeInfo() throws ConfigurationException {
String typeInfo = getProperty(null, "type");
if (typeInfo == null) {
s_logger.error("Unable to retrieve the type");
LOGGER.error("Unable to retrieve the type");
throw new ConfigurationException("Unable to retrieve the type of this agent.");
}
s_logger.trace("Launching agent based on type=" + typeInfo);
LOGGER.trace("Launching agent based on type={}", typeInfo);
}
public void launchNewAgent(ServerResource resource) throws ConfigurationException {
@ -454,6 +459,11 @@ public class AgentShell implements IAgentShell, Daemon {
agent.start();
}
@Override
public Integer getSslHandshakeTimeout() {
return AgentPropertiesFileHandler.getPropertyValue(AgentProperties.SSL_HANDSHAKE_TIMEOUT);
}
public synchronized int getNextAgentId() {
return _nextAgentId++;
}
@ -477,17 +487,17 @@ public class AgentShell implements IAgentShell, Daemon {
}
if (ipv6disabled) {
s_logger.info("Preferring IPv4 address family for agent connection");
LOGGER.info("Preferring IPv4 address family for agent connection");
System.setProperty("java.net.preferIPv4Stack", "true");
if (ipv6prefer) {
s_logger.info("ipv6prefer is set to true, but ipv6disabled is false. Not preferring IPv6 for agent connection");
LOGGER.info("ipv6prefer is set to true, but ipv6disabled is false. Not preferring IPv6 for agent connection");
}
} else {
if (ipv6prefer) {
s_logger.info("Preferring IPv6 address family for agent connection");
LOGGER.info("Preferring IPv6 address family for agent connection");
System.setProperty("java.net.preferIPv6Addresses", "true");
} else {
s_logger.info("Using default Java settings for IPv6 preference for agent connection");
LOGGER.info("Using default Java settings for IPv6 preference for agent connection");
}
}
@ -505,7 +515,7 @@ public class AgentShell implements IAgentShell, Daemon {
String pidDir = getProperty(null, "piddir");
final String run = "agent." + instance + "pid";
s_logger.debug("Checking to see if " + run + " exists.");
LOGGER.debug("Checking to see if {} exists.", run);
ProcessUtil.pidCheck(pidDir, run);
launchAgent();
@ -514,11 +524,11 @@ public class AgentShell implements IAgentShell, Daemon {
while (!_exit)
Thread.sleep(1000);
} catch (InterruptedException e) {
s_logger.debug("[ignored] AgentShell was interrupted.");
LOGGER.debug("[ignored] AgentShell was interrupted.");
}
} catch (final Exception e) {
s_logger.error("Unable to start agent: ", e);
LOGGER.error("Unable to start agent: ", e);
System.exit(ExitStatus.Error.value());
}
}
@ -535,7 +545,7 @@ public class AgentShell implements IAgentShell, Daemon {
public static void main(String[] args) {
try {
s_logger.debug("Initializing AgentShell from main");
LOGGER.debug("Initializing AgentShell from main");
AgentShell shell = new AgentShell();
shell.init(args);
shell.start();

View File

@ -70,4 +70,6 @@ public interface IAgentShell {
String getConnectedHost();
void launchNewAgent(ServerResource resource) throws ConfigurationException;
Integer getSslHandshakeTimeout();
}

View File

@ -24,7 +24,8 @@ import java.util.Map;
import java.util.Properties;
import org.apache.commons.io.IOUtils;
import org.apache.log4j.Logger;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import com.cloud.agent.dao.StorageComponent;
import com.cloud.utils.PropertiesUtil;
@ -36,7 +37,7 @@ import com.cloud.utils.PropertiesUtil;
* path to the properties _file | String | db/db.properties || * }
**/
public class PropertiesStorage implements StorageComponent {
private static final Logger s_logger = Logger.getLogger(PropertiesStorage.class);
protected Logger logger = LogManager.getLogger(getClass());
Properties _properties = new Properties();
File _file;
String _name;
@ -49,7 +50,7 @@ public class PropertiesStorage implements StorageComponent {
@Override
public synchronized void persist(String key, String value) {
if (!loadFromFile(_file)) {
s_logger.error("Failed to load changes and then write to them");
logger.error("Failed to load changes and then write to them");
}
_properties.setProperty(key, value);
FileOutputStream output = null;
@ -59,7 +60,7 @@ public class PropertiesStorage implements StorageComponent {
output.flush();
output.close();
} catch (IOException e) {
s_logger.error("Uh-oh: ", e);
logger.error("Uh-oh: ", e);
} finally {
IOUtils.closeQuietly(output);
}
@ -70,10 +71,10 @@ public class PropertiesStorage implements StorageComponent {
PropertiesUtil.loadFromFile(_properties, file);
_file = file;
} catch (FileNotFoundException e) {
s_logger.error("How did we get here? ", e);
logger.error("How did we get here? ", e);
return false;
} catch (IOException e) {
s_logger.error("IOException: ", e);
logger.error("IOException: ", e);
return false;
}
return true;
@ -92,14 +93,12 @@ public class PropertiesStorage implements StorageComponent {
file = new File(path);
try {
if (!file.createNewFile()) {
s_logger.error(String.format("Unable to create _file: %s", file.getAbsolutePath()));
logger.error("Unable to create _file: {}", file.getAbsolutePath());
return false;
}
} catch (IOException e) {
s_logger.error(String.format("Unable to create file: %s", file.getAbsolutePath()));
if (s_logger.isDebugEnabled()) {
s_logger.debug(String.format("IOException while trying to create file: %s", file.getAbsolutePath()), e);
}
logger.error("Unable to create file: {}", file.getAbsolutePath());
logger.debug("IOException while trying to create file: {}", file.getAbsolutePath(), e);
return false;
}
}

View File

@ -25,12 +25,13 @@ import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.log4j.Logger;
import com.cloud.utils.concurrency.NamedThreadFactory;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
public class DhcpProtocolParserServer extends Thread {
private static final Logger s_logger = Logger.getLogger(DhcpProtocolParserServer.class);;
protected Logger logger = LogManager.getLogger(DhcpProtocolParserServer.class);;
protected ExecutorService _executor;
private int dhcpServerPort = 67;
private int bufferSize = 300;
@ -54,7 +55,7 @@ public class DhcpProtocolParserServer extends Thread {
dhcpSocket.receive(dgp);
}
} catch (IOException e) {
s_logger.debug(e.getMessage());
logger.debug(e.getMessage());
}
}
}

View File

@ -22,14 +22,15 @@ import java.util.Map;
import java.util.Random;
import java.util.Set;
import org.apache.log4j.Logger;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachine.State;
public class MockVmMgr implements VmMgr {
private static final Logger s_logger = Logger.getLogger(MockVmMgr.class);
protected Logger logger = LogManager.getLogger(getClass());
private static final int DEFAULT_DOM0_MEM_MB = 128;
private static final Random randSeed = new Random();
@ -56,14 +57,14 @@ public class MockVmMgr implements VmMgr {
public String startVM(String vmName, String vnetId, String gateway, String dns, String privateIP, String privateMac, String privateMask, String publicIP,
String publicMac, String publicMask, int cpuCount, int cpuUtilization, long ramSize, String localPath, String vncPassword) {
if (s_logger.isInfoEnabled()) {
if (logger.isInfoEnabled()) {
StringBuffer sb = new StringBuffer();
sb.append("Start VM. name: " + vmName + ", vnet: " + vnetId + ", dns: " + dns);
sb.append(", privateIP: " + privateIP + ", privateMac: " + privateMac + ", privateMask: " + privateMask);
sb.append(", publicIP: " + publicIP + ", publicMac: " + publicMac + ", publicMask: " + publicMask);
sb.append(", cpu count: " + cpuCount + ", cpuUtilization: " + cpuUtilization + ", ram : " + ramSize);
sb.append(", localPath: " + localPath);
s_logger.info(sb.toString());
logger.info(sb.toString());
}
synchronized (this) {
@ -86,8 +87,7 @@ public class MockVmMgr implements VmMgr {
@Override
public String stopVM(String vmName, boolean force) {
if (s_logger.isInfoEnabled())
s_logger.info("Stop VM. name: " + vmName);
logger.info("Stop VM. name: {}", vmName);
synchronized (this) {
MockVm vm = vms.get(vmName);
@ -102,8 +102,7 @@ public class MockVmMgr implements VmMgr {
@Override
public String rebootVM(String vmName) {
if (s_logger.isInfoEnabled())
s_logger.info("Reboot VM. name: " + vmName);
logger.info("Reboot VM. name: {}", vmName);
synchronized (this) {
MockVm vm = vms.get(vmName);
@ -115,8 +114,7 @@ public class MockVmMgr implements VmMgr {
@Override
public boolean migrate(String vmName, String params) {
if (s_logger.isInfoEnabled())
s_logger.info("Migrate VM. name: " + vmName);
logger.info("Migrate VM. name: {}", vmName);
synchronized (this) {
MockVm vm = vms.get(vmName);
@ -258,13 +256,13 @@ public class MockVmMgr implements VmMgr {
vm = vms.get(vmName);
if (vm == null) {
if (ramSize > getHostFreeMemory()) {
s_logger.debug("host is out of memory");
logger.debug("host is out of memory");
throw new CloudRuntimeException("Host is out of Memory");
}
int vncPort = allocVncPort();
if (vncPort < 0) {
s_logger.debug("Unable to allocate VNC port");
logger.debug("Unable to allocate VNC port");
throw new CloudRuntimeException("Unable to allocate vnc port");
}

View File

@ -516,6 +516,7 @@ public class AgentProperties{
/**
* The model of Watchdog timer to present to the Guest.<br>
* For all models refer to the libvirt documentation.<br>
* PLEASE NOTE: to disable the watchdogs definitions, use value: none
* Data type: String.<br>
* Default value: <code>i6300esb</code>
*/
@ -803,12 +804,26 @@ public class AgentProperties{
*/
public static final Property<String> KEYSTORE_PASSPHRASE = new Property<>(KeyStoreUtils.KS_PASSPHRASE_PROPERTY, null, String.class);
/**
* Implicit host tags
* Data type: String.<br>
* Default value: <code>null</code>
*/
public static final Property<String> HOST_TAGS = new Property<>("host.tags", null, String.class);
/**
* Timeout for SSL handshake in seconds
* Data type: Integer.<br>
* Default value: <code>null</code>
*/
public static final Property<Integer> SSL_HANDSHAKE_TIMEOUT = new Property<>("ssl.handshake.timeout", null, Integer.class);
public static class Property <T>{
private String name;
private T defaultValue;
private Class<T> typeClass;
Property(String name, T value) {
public Property(String name, T value) {
init(name, value);
}
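For orientation, a minimal sketch of reading the two properties added above through the existing AgentPropertiesFileHandler; the wrapper class name is hypothetical, and the calls mirror the one AgentShell.getSslHandshakeTimeout() makes:

import com.cloud.agent.properties.AgentProperties;
import com.cloud.agent.properties.AgentPropertiesFileHandler;

public class AgentPropertiesReadSketch {
    public static void main(String[] args) {
        // "host.tags": implicit host tags; null when not set in agent.properties.
        String hostTags = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.HOST_TAGS);
        // "ssl.handshake.timeout": SSL handshake timeout in seconds; null when not set,
        // in which case callers fall back to a default (30s per the agent.properties comment).
        Integer sslHandshakeTimeout = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.SSL_HANDSHAKE_TIMEOUT);
        System.out.println("host.tags=" + hostTags + ", ssl.handshake.timeout=" + sslHandshakeTimeout);
    }
}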

View File

@ -22,7 +22,8 @@ import org.apache.commons.beanutils.ConvertUtils;
import org.apache.commons.beanutils.converters.IntegerConverter;
import org.apache.commons.beanutils.converters.LongConverter;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
/**
* This class provides a facility to read the agent's properties file and get
@ -31,7 +32,7 @@ import org.apache.log4j.Logger;
*/
public class AgentPropertiesFileHandler {
private static final Logger logger = Logger.getLogger(AgentPropertiesFileHandler.class);
protected static Logger LOGGER = LogManager.getLogger(AgentPropertiesFileHandler.class);
/**
* This method reads the property in the agent.properties file.
@ -47,7 +48,7 @@ public class AgentPropertiesFileHandler {
File agentPropertiesFile = PropertiesUtil.findConfigFile(KeyStoreUtils.AGENT_PROPSFILE);
if (agentPropertiesFile == null) {
logger.debug(String.format("File [%s] was not found, we will use default defined values. Property [%s]: [%s].", KeyStoreUtils.AGENT_PROPSFILE, name, defaultValue));
LOGGER.debug("File [{}] was not found, we will use default defined values. Property [{}]: [{}].", KeyStoreUtils.AGENT_PROPSFILE, name, defaultValue);
return defaultValue;
}
@ -55,7 +56,7 @@ public class AgentPropertiesFileHandler {
try {
String configValue = PropertiesUtil.loadFromFile(agentPropertiesFile).getProperty(name);
if (StringUtils.isBlank(configValue)) {
logger.debug(String.format("Property [%s] has empty or null value. Using default value [%s].", name, defaultValue));
LOGGER.debug("Property [{}] has empty or null value. Using default value [{}].", name, defaultValue);
return defaultValue;
}
@ -67,11 +68,11 @@ public class AgentPropertiesFileHandler {
ConvertUtils.register(new LongConverter(defaultValue), Long.class);
}
logger.debug(String.format("Property [%s] was altered. Now using the value [%s].", name, configValue));
LOGGER.debug("Property [{}] was altered. Now using the value [{}].", name, configValue);
return (T)ConvertUtils.convert(configValue, property.getTypeClass());
} catch (IOException ex) {
logger.debug(String.format("Failed to get property [%s]. Using default value [%s].", name, defaultValue), ex);
LOGGER.debug("Failed to get property [{}]. Using default value [{}].", name, defaultValue, ex);
}
return defaultValue;

View File

@ -34,7 +34,6 @@ import javax.naming.ConfigurationException;
import com.cloud.agent.api.proxy.AllowConsoleAccessCommand;
import org.apache.cloudstack.managed.context.ManagedContextRunnable;
import org.apache.log4j.Logger;
import com.cloud.agent.Agent.ExitStatus;
import com.cloud.agent.api.AgentControlAnswer;
@ -81,7 +80,6 @@ import com.google.gson.Gson;
*
*/
public class ConsoleProxyResource extends ServerResourceBase implements ServerResource {
static final Logger s_logger = Logger.getLogger(ConsoleProxyResource.class);
private final Properties properties = new Properties();
private Thread consoleProxyMain = null;
@ -101,7 +99,7 @@ public class ConsoleProxyResource extends ServerResourceBase implements ServerRe
} else if (cmd instanceof WatchConsoleProxyLoadCommand) {
return execute((WatchConsoleProxyLoadCommand)cmd);
} else if (cmd instanceof ReadyCommand) {
s_logger.info("Receive ReadyCommand, response with ReadyAnswer");
logger.info("Receive ReadyCommand, response with ReadyAnswer");
return new ReadyAnswer((ReadyCommand)cmd);
} else if (cmd instanceof CheckHealthCommand) {
return new CheckHealthAnswer((CheckHealthCommand)cmd, true);
@ -123,13 +121,13 @@ public class ConsoleProxyResource extends ServerResourceBase implements ServerRe
return new Answer(cmd);
} catch (SecurityException | NoSuchMethodException | ClassNotFoundException | InvocationTargetException | IllegalAccessException e) {
String errorMsg = "Unable to add allowed session due to: " + e.getMessage();
s_logger.error(errorMsg, e);
logger.error(errorMsg, e);
return new Answer(cmd, false, errorMsg);
}
}
private Answer execute(StartConsoleProxyAgentHttpHandlerCommand cmd) {
s_logger.info("Invoke launchConsoleProxy() in responding to StartConsoleProxyAgentHttpHandlerCommand");
logger.info("Invoke launchConsoleProxy() in responding to StartConsoleProxyAgentHttpHandlerCommand");
launchConsoleProxy(cmd.getKeystoreBits(), cmd.getKeystorePassword(), cmd.getEncryptorPassword(), cmd.isSourceIpCheckEnabled());
return new Answer(cmd);
}
@ -140,7 +138,7 @@ public class ConsoleProxyResource extends ServerResourceBase implements ServerRe
{
out.write("0");
} catch (IOException e) {
s_logger.warn("Unable to disable rp_filter");
logger.warn("Unable to disable rp_filter");
}
}
@ -177,12 +175,12 @@ public class ConsoleProxyResource extends ServerResourceBase implements ServerRe
try {
is.close();
} catch (final IOException e) {
s_logger.warn("Exception when closing , console proxy address : " + proxyManagementIp);
logger.warn("Exception when closing , console proxy address : {}", proxyManagementIp);
success = false;
}
}
} catch (final IOException e) {
s_logger.warn("Unable to open console proxy command port url, console proxy address : " + proxyManagementIp);
logger.warn("Unable to open console proxy command port url, console proxy address : {}", proxyManagementIp);
success = false;
}
@ -227,14 +225,14 @@ public class ConsoleProxyResource extends ServerResourceBase implements ServerRe
if (eth1Ip != null) {
params.put("private.network.device", "eth1");
} else {
s_logger.info("eth1ip parameter has not been configured, assuming that we are not inside a system vm");
logger.info("eth1ip parameter has not been configured, assuming that we are not inside a system vm");
}
String eth2ip = (String)params.get("eth2ip");
if (eth2ip != null) {
params.put("public.network.device", "eth2");
} else {
s_logger.info("eth2ip parameter is not found, assuming that we are not inside a system vm");
logger.info("eth2ip parameter is not found, assuming that we are not inside a system vm");
}
super.configure(name, params);
@ -262,7 +260,7 @@ public class ConsoleProxyResource extends ServerResourceBase implements ServerRe
}
String internalDns1 = (String) params.get("internaldns1");
if (internalDns1 == null) {
s_logger.warn("No DNS entry found during configuration of ConsoleProxy");
logger.warn("No DNS entry found during configuration of ConsoleProxy");
} else {
addRouteToInternalIpOrCidr(localGateway, eth1Ip, eth1Mask, internalDns1);
}
@ -280,20 +278,19 @@ public class ConsoleProxyResource extends ServerResourceBase implements ServerRe
disableRpFilter();
}
if (s_logger.isInfoEnabled())
s_logger.info("Receive proxyVmId in ConsoleProxyResource configuration as " + proxyVmId);
logger.info("Receive proxyVmId in ConsoleProxyResource configuration as {}", proxyVmId);
return true;
}
private void addRouteToInternalIpOrCidr(String localgw, String eth1ip, String eth1mask, String destIpOrCidr) {
s_logger.debug("addRouteToInternalIp: localgw=" + localgw + ", eth1ip=" + eth1ip + ", eth1mask=" + eth1mask + ",destIp=" + destIpOrCidr);
logger.debug("addRouteToInternalIp: localgw={}, eth1ip={}, eth1mask={}, destIp={}", localgw, eth1ip, eth1mask, destIpOrCidr);
if (destIpOrCidr == null) {
s_logger.debug("addRouteToInternalIp: destIp is null");
logger.debug("addRouteToInternalIp: destIp is null");
return;
}
if (!NetUtils.isValidIp4(destIpOrCidr) && !NetUtils.isValidIp4Cidr(destIpOrCidr)) {
s_logger.warn(" destIp is not a valid ip address or cidr destIp=" + destIpOrCidr);
logger.warn(" destIp is not a valid ip address or cidr destIp={}", destIpOrCidr);
return;
}
boolean inSameSubnet = false;
@ -301,27 +298,27 @@ public class ConsoleProxyResource extends ServerResourceBase implements ServerRe
if (eth1ip != null && eth1mask != null) {
inSameSubnet = NetUtils.sameSubnet(eth1ip, destIpOrCidr, eth1mask);
} else {
s_logger.warn("addRouteToInternalIp: unable to determine same subnet: eth1ip=" + eth1ip + ", dest ip=" + destIpOrCidr + ", eth1mask=" + eth1mask);
logger.warn("addRouteToInternalIp: unable to determine same subnet: eth1ip={}, dest ip={}, eth1mask={}", eth1ip, destIpOrCidr, eth1mask);
}
} else {
inSameSubnet = NetUtils.isNetworkAWithinNetworkB(destIpOrCidr, NetUtils.ipAndNetMaskToCidr(eth1ip, eth1mask));
}
if (inSameSubnet) {
s_logger.debug("addRouteToInternalIp: dest ip " + destIpOrCidr + " is in the same subnet as eth1 ip " + eth1ip);
logger.debug("addRouteToInternalIp: dest ip {} is in the same subnet as eth1 ip {}", destIpOrCidr, eth1ip);
return;
}
Script command = new Script("/bin/bash", s_logger);
Script command = new Script("/bin/bash", logger);
command.add("-c");
command.add("ip route delete " + destIpOrCidr);
command.execute();
command = new Script("/bin/bash", s_logger);
command = new Script("/bin/bash", logger);
command.add("-c");
command.add("ip route add " + destIpOrCidr + " via " + localgw);
String result = command.execute();
if (result != null) {
s_logger.warn("Error in configuring route to internal ip err=" + result);
logger.warn("Error in configuring route to internal ip err={}", result);
} else {
s_logger.debug("addRouteToInternalIp: added route to internal ip=" + destIpOrCidr + " via " + localgw);
logger.debug("addRouteToInternalIp: added route to internal ip={} via {}", destIpOrCidr, localgw);
}
}
@ -332,36 +329,36 @@ public class ConsoleProxyResource extends ServerResourceBase implements ServerRe
private void launchConsoleProxy(final byte[] ksBits, final String ksPassword, final String encryptorPassword, final Boolean isSourceIpCheckEnabled) {
final Object resource = this;
s_logger.info("Building class loader for com.cloud.consoleproxy.ConsoleProxy");
logger.info("Building class loader for com.cloud.consoleproxy.ConsoleProxy");
if (consoleProxyMain == null) {
s_logger.info("Running com.cloud.consoleproxy.ConsoleProxy with encryptor password=" + encryptorPassword);
logger.info("Running com.cloud.consoleproxy.ConsoleProxy with encryptor password={}", encryptorPassword);
consoleProxyMain = new Thread(new ManagedContextRunnable() {
@Override
protected void runInContext() {
try {
Class<?> consoleProxyClazz = Class.forName("com.cloud.consoleproxy.ConsoleProxy");
try {
s_logger.info("Invoke startWithContext()");
logger.info("Invoke startWithContext()");
Method method = consoleProxyClazz.getMethod("startWithContext", Properties.class, Object.class, byte[].class, String.class, String.class, Boolean.class);
method.invoke(null, properties, resource, ksBits, ksPassword, encryptorPassword, isSourceIpCheckEnabled);
} catch (SecurityException e) {
s_logger.error("Unable to launch console proxy due to SecurityException", e);
logger.error("Unable to launch console proxy due to SecurityException", e);
System.exit(ExitStatus.Error.value());
} catch (NoSuchMethodException e) {
s_logger.error("Unable to launch console proxy due to NoSuchMethodException", e);
logger.error("Unable to launch console proxy due to NoSuchMethodException", e);
System.exit(ExitStatus.Error.value());
} catch (IllegalArgumentException e) {
s_logger.error("Unable to launch console proxy due to IllegalArgumentException", e);
logger.error("Unable to launch console proxy due to IllegalArgumentException", e);
System.exit(ExitStatus.Error.value());
} catch (IllegalAccessException e) {
s_logger.error("Unable to launch console proxy due to IllegalAccessException", e);
logger.error("Unable to launch console proxy due to IllegalAccessException", e);
System.exit(ExitStatus.Error.value());
} catch (InvocationTargetException e) {
s_logger.error("Unable to launch console proxy due to InvocationTargetException " + e.getTargetException().toString(), e);
logger.error("Unable to launch console proxy due to InvocationTargetException {}", e.getTargetException().toString(), e);
System.exit(ExitStatus.Error.value());
}
} catch (final ClassNotFoundException e) {
s_logger.error("Unable to launch console proxy due to ClassNotFoundException");
logger.error("Unable to launch console proxy due to ClassNotFoundException");
System.exit(ExitStatus.Error.value());
}
}
@ -369,7 +366,7 @@ public class ConsoleProxyResource extends ServerResourceBase implements ServerRe
consoleProxyMain.setDaemon(true);
consoleProxyMain.start();
} else {
s_logger.info("com.cloud.consoleproxy.ConsoleProxy is already running");
logger.info("com.cloud.consoleproxy.ConsoleProxy is already running");
try {
Class<?> consoleProxyClazz = Class.forName("com.cloud.consoleproxy.ConsoleProxy");
@ -378,22 +375,22 @@ public class ConsoleProxyResource extends ServerResourceBase implements ServerRe
methodSetup = consoleProxyClazz.getMethod("setIsSourceIpCheckEnabled", Boolean.class);
methodSetup.invoke(null, isSourceIpCheckEnabled);
} catch (SecurityException e) {
s_logger.error("Unable to launch console proxy due to SecurityException", e);
logger.error("Unable to launch console proxy due to SecurityException", e);
System.exit(ExitStatus.Error.value());
} catch (NoSuchMethodException e) {
s_logger.error("Unable to launch console proxy due to NoSuchMethodException", e);
logger.error("Unable to launch console proxy due to NoSuchMethodException", e);
System.exit(ExitStatus.Error.value());
} catch (IllegalArgumentException e) {
s_logger.error("Unable to launch console proxy due to IllegalArgumentException", e);
logger.error("Unable to launch console proxy due to IllegalArgumentException", e);
System.exit(ExitStatus.Error.value());
} catch (IllegalAccessException e) {
s_logger.error("Unable to launch console proxy due to IllegalAccessException", e);
logger.error("Unable to launch console proxy due to IllegalAccessException", e);
System.exit(ExitStatus.Error.value());
} catch (InvocationTargetException e) {
s_logger.error("Unable to launch console proxy due to InvocationTargetException " + e.getTargetException().toString(), e);
logger.error("Unable to launch console proxy due to InvocationTargetException " + e.getTargetException().toString(), e);
System.exit(ExitStatus.Error.value());
} catch (final ClassNotFoundException e) {
s_logger.error("Unable to launch console proxy due to ClassNotFoundException", e);
logger.error("Unable to launch console proxy due to ClassNotFoundException", e);
System.exit(ExitStatus.Error.value());
}
}
@ -420,10 +417,10 @@ public class ConsoleProxyResource extends ServerResourceBase implements ServerRe
result.setTunnelUrl(authAnswer.getTunnelUrl());
result.setTunnelSession(authAnswer.getTunnelSession());
} else {
s_logger.error("Authentication failed for vm: " + vmId + " with sid: " + sid);
logger.error("Authentication failed for vm: {} with sid: {}", vmId, sid);
}
} catch (AgentControlChannelException e) {
s_logger.error("Unable to send out console access authentication request due to " + e.getMessage(), e);
logger.error("Unable to send out console access authentication request due to {}", e.getMessage(), e);
}
return new Gson().toJson(result);
@ -433,18 +430,15 @@ public class ConsoleProxyResource extends ServerResourceBase implements ServerRe
ConsoleProxyLoadReportCommand cmd = new ConsoleProxyLoadReportCommand(proxyVmId, gsonLoadInfo);
try {
getAgentControl().postRequest(cmd);
if (s_logger.isDebugEnabled())
s_logger.debug("Report proxy load info, proxy : " + proxyVmId + ", load: " + gsonLoadInfo);
logger.debug("Report proxy load info, proxy : {}, load: {}", proxyVmId, gsonLoadInfo);
} catch (AgentControlChannelException e) {
s_logger.error("Unable to send out load info due to " + e.getMessage(), e);
logger.error("Unable to send out load info due to {}", e.getMessage(), e);
}
}
public void ensureRoute(String address) {
if (localGateway != null) {
if (s_logger.isDebugEnabled())
s_logger.debug("Ensure route for " + address + " via " + localGateway);
logger.debug("Ensure route for {} via {}", address, localGateway);
// this method won't be called in high frequency, serialize access
// to script execution
@ -452,7 +446,7 @@ public class ConsoleProxyResource extends ServerResourceBase implements ServerRe
try {
addRouteToInternalIpOrCidr(localGateway, eth1Ip, eth1Mask, address);
} catch (Throwable e) {
s_logger.warn("Unexpected exception while adding internal route to " + address, e);
logger.warn("Unexpected exception while adding internal route to {}", address, e);
}
}
}
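The file above drops its per-class static s_logger in favor of a logger field that is evidently inherited from the resource base class. A hedged sketch of that pattern; the class names below are stand-ins, not the real CloudStack hierarchy:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

abstract class ResourceBaseSketch {
    // Keyed to getClass(), so log lines are attributed to the concrete subclass.
    protected Logger logger = LogManager.getLogger(getClass());
}

class ConsoleProxyResourceSketch extends ResourceBaseSketch {
    void onReady() {
        logger.info("Receive ReadyCommand, response with ReadyAnswer");
    }
}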

View File

@ -350,4 +350,23 @@ public class AgentShellTest {
Mockito.verify(agentShellSpy).setHosts(expected);
}
@Test
public void updateAndGetConnectedHost() {
String expected = "test";
AgentShell shell = new AgentShell();
shell.setHosts("test");
shell.getNextHost();
shell.updateConnectedHost();
Assert.assertEquals(expected, shell.getConnectedHost());
}
@Test
public void testGetSslHandshakeTimeout() {
Integer expected = 1;
agentPropertiesFileHandlerMocked.when(() -> AgentPropertiesFileHandler.getPropertyValue(Mockito.eq(AgentProperties.SSL_HANDSHAKE_TIMEOUT))).thenReturn(expected);
Assert.assertEquals(expected, agentShellSpy.getSslHandshakeTimeout());
}
}
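The new testGetSslHandshakeTimeout relies on a MockedStatic handle (agentPropertiesFileHandlerMocked), presumably opened in test setup that this hunk does not show. A generic sketch of that Mockito static-mocking pattern, using a throwaway nested class so it stays self-contained; it assumes a Mockito version that supports static mocking:

import org.mockito.MockedStatic;
import org.mockito.Mockito;

class StaticMockSketch {
    // Hypothetical stand-in for AgentPropertiesFileHandler.
    static class Config {
        static Integer sslHandshakeTimeout() {
            return 30;
        }
    }

    static Integer readWithMock() {
        try (MockedStatic<Config> mocked = Mockito.mockStatic(Config.class)) {
            mocked.when(() -> Config.sslHandshakeTimeout()).thenReturn(1);
            return Config.sslHandshakeTimeout(); // returns 1 while the mock is open
        }
    }
}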

View File

@ -0,0 +1,257 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.agent;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.net.InetSocketAddress;
import javax.naming.ConfigurationException;
import org.apache.logging.log4j.Logger;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.junit.MockitoJUnitRunner;
import org.springframework.test.util.ReflectionTestUtils;
import com.cloud.resource.ServerResource;
import com.cloud.utils.backoff.impl.ConstantTimeBackoff;
import com.cloud.utils.nio.Link;
import com.cloud.utils.nio.NioConnection;
@RunWith(MockitoJUnitRunner.class)
public class AgentTest {
Agent agent;
private AgentShell shell;
private ServerResource serverResource;
private Logger logger;
@Before
public void setUp() throws ConfigurationException {
shell = mock(AgentShell.class);
serverResource = mock(ServerResource.class);
doReturn(true).when(serverResource).configure(any(), any());
doReturn(1).when(shell).getWorkers();
doReturn(1).when(shell).getPingRetries();
agent = new Agent(shell, 1, serverResource);
logger = mock(Logger.class);
ReflectionTestUtils.setField(agent, "logger", logger);
}
@Test
public void testGetLinkLogNullLinkReturnsEmptyString() {
Link link = null;
String result = agent.getLinkLog(link);
assertEquals("", result);
}
@Test
public void testGetLinkLogLinkWithTraceEnabledReturnsLinkLogWithHashCode() {
Link link = mock(Link.class);
InetSocketAddress socketAddress = new InetSocketAddress("192.168.1.100", 1111);
when(link.getSocketAddress()).thenReturn(socketAddress);
when(logger.isTraceEnabled()).thenReturn(true);
String result = agent.getLinkLog(link);
System.out.println(result);
assertTrue(result.startsWith(System.identityHashCode(link) + "-"));
assertTrue(result.contains("192.168.1.100"));
}
@Test
public void testGetAgentNameWhenServerResourceIsNull() {
ReflectionTestUtils.setField(agent, "serverResource", null);
assertEquals("Agent", agent.getAgentName());
}
@Test
public void testGetAgentNameWhenAppendAgentNameIsTrue() {
when(serverResource.isAppendAgentNameToLogs()).thenReturn(true);
when(serverResource.getName()).thenReturn("TestAgent");
String agentName = agent.getAgentName();
assertEquals("TestAgent", agentName);
}
@Test
public void testGetAgentNameWhenAppendAgentNameIsFalse() {
when(serverResource.isAppendAgentNameToLogs()).thenReturn(false);
String agentName = agent.getAgentName();
assertEquals("Agent", agentName);
}
@Test
public void testAgentInitialization() {
Runtime.getRuntime().removeShutdownHook(agent.shutdownThread);
when(shell.getPingRetries()).thenReturn(3);
when(shell.getWorkers()).thenReturn(5);
agent.setupShutdownHookAndInitExecutors();
assertNotNull(agent.selfTaskExecutor);
assertNotNull(agent.outRequestHandler);
assertNotNull(agent.requestHandler);
}
@Test
public void testAgentShutdownHookAdded() {
Runtime.getRuntime().removeShutdownHook(agent.shutdownThread);
agent.setupShutdownHookAndInitExecutors();
verify(logger).trace("Adding shutdown hook");
}
@Test
public void testGetResourceGuidValidGuidAndResourceName() {
when(shell.getGuid()).thenReturn("12345");
String result = agent.getResourceGuid();
assertTrue(result.startsWith("12345-" + ServerResource.class.getSimpleName()));
}
@Test
public void testGetZoneReturnsValidZone() {
when(shell.getZone()).thenReturn("ZoneA");
String result = agent.getZone();
assertEquals("ZoneA", result);
}
@Test
public void testGetPodReturnsValidPod() {
when(shell.getPod()).thenReturn("PodA");
String result = agent.getPod();
assertEquals("PodA", result);
}
@Test
public void testSetLinkAssignsLink() {
Link mockLink = mock(Link.class);
agent.setLink(mockLink);
assertEquals(mockLink, agent.link);
}
@Test
public void testGetResourceReturnsServerResource() {
ServerResource mockResource = mock(ServerResource.class);
ReflectionTestUtils.setField(agent, "serverResource", mockResource);
ServerResource result = agent.getResource();
assertSame(mockResource, result);
}
@Test
public void testGetResourceName() {
String result = agent.getResourceName();
assertTrue(result.startsWith(ServerResource.class.getSimpleName()));
}
@Test
public void testUpdateLastPingResponseTimeUpdatesCurrentTime() {
long beforeUpdate = System.currentTimeMillis();
agent.updateLastPingResponseTime();
long updatedTime = agent.lastPingResponseTime.get();
assertTrue(updatedTime >= beforeUpdate);
assertTrue(updatedTime <= System.currentTimeMillis());
}
@Test
public void testGetNextSequenceIncrementsSequence() {
long initialSequence = agent.getNextSequence();
long nextSequence = agent.getNextSequence();
assertEquals(initialSequence + 1, nextSequence);
long thirdSequence = agent.getNextSequence();
assertEquals(nextSequence + 1, thirdSequence);
}
@Test
public void testRegisterControlListenerAddsListener() {
IAgentControlListener listener = mock(IAgentControlListener.class);
agent.registerControlListener(listener);
assertTrue(agent.controlListeners.contains(listener));
}
@Test
public void testUnregisterControlListenerRemovesListener() {
IAgentControlListener listener = mock(IAgentControlListener.class);
agent.registerControlListener(listener);
assertTrue(agent.controlListeners.contains(listener));
agent.unregisterControlListener(listener);
assertFalse(agent.controlListeners.contains(listener));
}
@Test
public void testCloseAndTerminateLinkLinkIsNullDoesNothing() {
agent.closeAndTerminateLink(null);
}
@Test
public void testCloseAndTerminateLinkValidLinkCallsCloseAndTerminate() {
Link mockLink = mock(Link.class);
agent.closeAndTerminateLink(mockLink);
verify(mockLink).close();
verify(mockLink).terminated();
}
@Test
public void testStopAndCleanupConnectionConnectionIsNullDoesNothing() {
agent.connection = null;
agent.stopAndCleanupConnection(false);
}
@Test
public void testStopAndCleanupConnectionValidConnectionNoWaitStopsAndCleansUp() throws IOException {
NioConnection mockConnection = mock(NioConnection.class);
agent.connection = mockConnection;
agent.stopAndCleanupConnection(false);
verify(mockConnection).stop();
verify(mockConnection).cleanUp();
}
@Test
public void testStopAndCleanupConnectionCleanupThrowsIOExceptionLogsWarning() throws IOException {
NioConnection mockConnection = mock(NioConnection.class);
agent.connection = mockConnection;
doThrow(new IOException("Cleanup failed")).when(mockConnection).cleanUp();
agent.stopAndCleanupConnection(false);
verify(mockConnection).stop();
verify(logger).warn(eq("Fail to clean up old connection. {}"), any(IOException.class));
}
@Test
public void testStopAndCleanupConnectionValidConnectionWaitForStopWaitsForStartupToStop() throws IOException {
NioConnection mockConnection = mock(NioConnection.class);
ConstantTimeBackoff mockBackoff = mock(ConstantTimeBackoff.class);
mockBackoff.setTimeToWait(0);
agent.connection = mockConnection;
when(shell.getBackoffAlgorithm()).thenReturn(mockBackoff);
when(mockConnection.isStartup()).thenReturn(true, true, false);
agent.stopAndCleanupConnection(true);
verify(mockConnection).stop();
verify(mockConnection).cleanUp();
verify(mockBackoff, times(3)).waitBeforeRetry();
}
}

View File

@ -24,7 +24,7 @@
<parent>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloudstack</artifactId>
<version>4.19.4.0-SNAPSHOT</version>
<version>4.20.3.0-SNAPSHOT</version>
</parent>
<dependencies>
<dependency>

View File

@ -20,6 +20,8 @@ import java.util.HashMap;
import java.util.Map;
import com.cloud.agent.api.LogLevel.Log4jLevel;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
/**
* implemented by classes that extends the Command class. Command specifies
@ -27,6 +29,8 @@ import com.cloud.agent.api.LogLevel.Log4jLevel;
*/
public abstract class Command {
protected transient Logger logger = LogManager.getLogger(getClass());
public static enum OnError {
Continue, Stop
}

View File

@ -23,8 +23,8 @@ import static java.lang.annotation.RetentionPolicy.RUNTIME;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.Logger;
/**
*/
@ -41,7 +41,7 @@ public @interface LogLevel {
}
public boolean enabled(Logger logger) {
return _level != Level.OFF && logger.isEnabledFor(_level);
return _level != Level.OFF && logger.isEnabled(_level);
}
}
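The switch from isEnabledFor to isEnabled tracks the log4j 2 Logger API, which takes a Level directly. A minimal sketch of the equivalent check outside the annotation:

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

class LevelCheckSketch {
    static boolean wouldLog(Level level) {
        Logger logger = LogManager.getLogger(LevelCheckSketch.class);
        // Mirrors the annotation's guard: OFF never logs, otherwise ask the logger.
        return level != Level.OFF && logger.isEnabled(level);
    }
}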

View File

@ -39,7 +39,8 @@ import org.apache.cloudstack.utils.security.ParserUtils;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.math.NumberUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
@ -63,7 +64,7 @@ import com.cloud.utils.compression.CompressionUtil;
import com.cloud.utils.exception.CloudRuntimeException;
public class OVFHelper {
private static final Logger s_logger = Logger.getLogger(OVFHelper.class);
protected Logger logger = LogManager.getLogger(getClass());
private final OVFParser ovfParser;
@ -118,7 +119,7 @@ public class OVFHelper {
boolean password = StringUtils.isNotBlank(passStr) && passStr.equalsIgnoreCase("true");
String label = ovfParser.getChildNodeValue(node, "Label");
String description = ovfParser.getChildNodeValue(node, "Description");
s_logger.debug("Creating OVF property index " + index + (category == null ? "" : " for category " + category)
logger.debug("Creating OVF property index " + index + (category == null ? "" : " for category " + category)
+ " with key = " + key);
return new OVFPropertyTO(key, type, value, qualifiers, userConfigurable,
label, description, password, index, category);
@ -151,7 +152,7 @@ public class OVFHelper {
if (child.getNodeName().equalsIgnoreCase("Category") ||
child.getNodeName().endsWith(":Category")) {
lastCategoryFound = child.getTextContent();
s_logger.info("Category found " + lastCategoryFound);
logger.info("Category found " + lastCategoryFound);
} else if (child.getNodeName().equalsIgnoreCase("Property") ||
child.getNodeName().endsWith(":Property")) {
OVFPropertyTO prop = createOVFPropertyFromNode(child, propertyIndex, lastCategoryFound);
@ -249,13 +250,13 @@ public class OVFHelper {
int diskNumber = 0;
for (OVFVirtualHardwareItemTO diskItem : diskHardwareItems) {
if (StringUtils.isBlank(diskItem.getHostResource())) {
s_logger.error("Missing disk information for hardware item " + diskItem.getElementName() + " " + diskItem.getInstanceId());
logger.error("Missing disk information for hardware item " + diskItem.getElementName() + " " + diskItem.getInstanceId());
continue;
}
String diskId = extractDiskIdFromDiskHostResource(diskItem.getHostResource());
OVFDisk diskDefinition = getDiskDefinitionFromDiskId(diskId, disks);
if (diskDefinition == null) {
s_logger.error("Missing disk definition for disk ID " + diskId);
logger.error("Missing disk definition for disk ID " + diskId);
}
OVFFile fileDefinition = getFileDefinitionFromDiskDefinition(diskDefinition._fileRef, files);
DatadiskTO datadiskTO = generateDiskTO(fileDefinition, diskDefinition, ovfParentPath, diskNumber, diskItem);
@ -277,7 +278,7 @@ public class OVFHelper {
if (StringUtils.isNotBlank(path)) {
File f = new File(path);
if (!f.exists() || f.isDirectory()) {
s_logger.error("One of the attached disk or iso does not exists " + path);
logger.error("One of the attached disk or iso does not exists " + path);
throw new InternalErrorException("One of the attached disk or iso as stated on OVF does not exists " + path);
}
}
@ -333,8 +334,8 @@ public class OVFHelper {
od._controller = getControllerType(items, od._diskId);
vd.add(od);
}
if (s_logger.isTraceEnabled()) {
s_logger.trace(String.format("found %d disk definitions",vd.size()));
if (logger.isTraceEnabled()) {
logger.trace(String.format("found %d disk definitions",vd.size()));
}
return vd;
}
@ -365,8 +366,8 @@ public class OVFHelper {
vf.add(of);
}
}
if (s_logger.isTraceEnabled()) {
s_logger.trace(String.format("found %d file definitions in %s",vf.size(), ovfFile.getPath()));
if (logger.isTraceEnabled()) {
logger.trace(String.format("found %d file definitions in %s",vf.size(), ovfFile.getPath()));
}
return vf;
}
@ -461,7 +462,7 @@ public class OVFHelper {
Element disk = (Element)disks.item(i);
String fileRef = ovfParser.getNodeAttribute(disk, "fileRef");
if (keepfile == null) {
s_logger.info("FATAL: OVA format error");
logger.info("FATAL: OVA format error");
} else if (keepfile.equals(fileRef)) {
keepdisk = ovfParser.getNodeAttribute(disk, "diskId");
} else {
@ -505,7 +506,7 @@ public class OVFHelper {
outfile.write(writer.toString());
outfile.close();
} catch (IOException | TransformerException e) {
s_logger.info("Unexpected exception caught while rewriting OVF:" + e.getMessage(), e);
logger.info("Unexpected exception caught while rewriting OVF:" + e.getMessage(), e);
throw new CloudRuntimeException(e);
}
}
@ -521,8 +522,8 @@ public class OVFHelper {
public List<OVFNetworkTO> getNetPrerequisitesFromDocument(Document doc) throws InternalErrorException {
if (doc == null) {
if (s_logger.isTraceEnabled()) {
s_logger.trace("no document to parse; returning no prerequisite networks");
if (logger.isTraceEnabled()) {
logger.trace("no document to parse; returning no prerequisite networks");
}
return Collections.emptyList();
}
@ -539,8 +540,8 @@ public class OVFHelper {
private void matchNicsToNets(Map<String, OVFNetworkTO> nets, Node systemElement) {
final DocumentTraversal traversal = (DocumentTraversal) systemElement;
final NodeIterator iterator = traversal.createNodeIterator(systemElement, NodeFilter.SHOW_ELEMENT, null, true);
if (s_logger.isTraceEnabled()) {
s_logger.trace(String.format("starting out with %d network-prerequisites, parsing hardware",nets.size()));
if (logger.isTraceEnabled()) {
logger.trace(String.format("starting out with %d network-prerequisites, parsing hardware",nets.size()));
}
int nicCount = 0;
for (Node n = iterator.nextNode(); n != null; n = iterator.nextNode()) {
@ -549,8 +550,8 @@ public class OVFHelper {
nicCount++;
String name = e.getTextContent(); // should be in our nets
if(nets.get(name) == null) {
if(s_logger.isInfoEnabled()) {
s_logger.info(String.format("found a nic definition without a network definition byname %s, adding it to the list.", name));
if(logger.isInfoEnabled()) {
logger.info(String.format("found a nic definition without a network definition byname %s, adding it to the list.", name));
}
nets.put(name, new OVFNetworkTO());
}
@ -560,8 +561,8 @@ public class OVFHelper {
}
}
}
if (s_logger.isTraceEnabled()) {
s_logger.trace(String.format("ending up with %d network-prerequisites, parsed %d nics", nets.size(), nicCount));
if (logger.isTraceEnabled()) {
logger.trace(String.format("ending up with %d network-prerequisites, parsed %d nics", nets.size(), nicCount));
}
}
@ -584,7 +585,7 @@ public class OVFHelper {
int addressOnParent = Integer.parseInt(addressOnParentStr);
nic.setAddressOnParent(addressOnParent);
} catch (NumberFormatException e) {
s_logger.warn("Encountered element of type \"AddressOnParent\", that could not be parse to an integer number: " + addressOnParentStr);
logger.warn("Encountered element of type \"AddressOnParent\", that could not be parse to an integer number: " + addressOnParentStr);
}
boolean automaticAllocation = StringUtils.isNotBlank(automaticAllocationStr) && Boolean.parseBoolean(automaticAllocationStr);
@ -596,7 +597,7 @@ public class OVFHelper {
int instanceId = Integer.parseInt(instanceIdStr);
nic.setInstanceID(instanceId);
} catch (NumberFormatException e) {
s_logger.warn("Encountered element of type \"InstanceID\", that could not be parse to an integer number: " + instanceIdStr);
logger.warn("Encountered element of type \"InstanceID\", that could not be parse to an integer number: " + instanceIdStr);
}
nic.setResourceSubType(resourceSubType);
@ -608,7 +609,7 @@ public class OVFHelper {
NodeList systemElements = ovfParser.getElementsFromOVFDocument(doc, "VirtualSystem");
if (systemElements.getLength() != 1) {
String msg = "found " + systemElements.getLength() + " system definitions in OVA, can only handle exactly one.";
s_logger.warn(msg);
logger.warn(msg);
throw new InternalErrorException(msg);
}
}
@ -629,8 +630,8 @@ public class OVFHelper {
nets.put(networkName,network);
}
if (s_logger.isTraceEnabled()) {
s_logger.trace(String.format("found %d networks in template", nets.size()));
if (logger.isTraceEnabled()) {
logger.trace(String.format("found %d networks in template", nets.size()));
}
return nets;
}
@ -770,7 +771,7 @@ public class OVFHelper {
try {
return Long.parseLong(value);
} catch (NumberFormatException e) {
s_logger.debug("Could not parse the value: " + value + ", ignoring it");
logger.debug("Could not parse the value: " + value + ", ignoring it");
}
}
return null;
@ -781,7 +782,7 @@ public class OVFHelper {
try {
return Integer.parseInt(value);
} catch (NumberFormatException e) {
s_logger.debug("Could not parse the value: " + value + ", ignoring it");
logger.debug("Could not parse the value: " + value + ", ignoring it");
}
}
return null;
@ -819,7 +820,7 @@ public class OVFHelper {
try {
compressedLicense = compressOVFEula(eulaLicense);
} catch (IOException e) {
s_logger.error("Could not compress the license for info " + eulaInfo);
logger.error("Could not compress the license for info " + eulaInfo);
continue;
}
OVFEulaSectionTO eula = new OVFEulaSectionTO(eulaInfo, compressedLicense, eulaIndex);

View File

@ -27,7 +27,8 @@ import javax.xml.parsers.ParserConfigurationException;
import org.apache.cloudstack.utils.security.ParserUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
@ -36,7 +37,7 @@ import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
public class OVFParser {
private static final Logger s_logger = Logger.getLogger(OVFParser.class);
protected Logger logger = LogManager.getLogger(getClass());
private static final String DEFAULT_OVF_SCHEMA = "http://schemas.dmtf.org/ovf/envelope/1";
private static final String VMW_SCHEMA = "http://www.vmware.com/schema/ovf";
@ -53,7 +54,7 @@ public class OVFParser {
documentBuilderFactory.setNamespaceAware(true);
documentBuilder = documentBuilderFactory.newDocumentBuilder();
} catch (ParserConfigurationException e) {
s_logger.error("Cannot start the OVF parser: " + e.getMessage(), e);
logger.error("Cannot start the OVF parser: " + e.getMessage(), e);
}
}
@ -69,7 +70,7 @@ public class OVFParser {
try {
return documentBuilder.parse(new File(ovfFilePath));
} catch (SAXException | IOException e) {
s_logger.error("Error parsing " + ovfFilePath + " " + e.getMessage(), e);
logger.error("Error parsing " + ovfFilePath + " " + e.getMessage(), e);
return null;
}
}

View File

@ -0,0 +1,50 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.agent.api.to;
import org.apache.cloudstack.storage.object.Bucket;
public final class BucketTO {
private String name;
private String accessKey;
private String secretKey;
public BucketTO(Bucket bucket) {
this.name = bucket.getName();
this.accessKey = bucket.getAccessKey();
this.secretKey = bucket.getSecretKey();
}
public BucketTO(String name) {
this.name = name;
}
public String getName() {
return this.name;
}
public String getAccessKey() {
return this.accessKey;
}
public String getSecretKey() {
return this.secretKey;
}
}
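A short, hypothetical usage of the new transfer object; the bucket name is invented, and the name-only constructor leaves both keys null:

import com.cloud.agent.api.to.BucketTO;

class BucketToUsageSketch {
    static String describe() {
        BucketTO bucket = new BucketTO("backups");
        return bucket.getName() + ", has access key: " + (bucket.getAccessKey() != null);
    }
}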

View File

@ -155,9 +155,7 @@ public class FirewallRuleTO implements InternalIdentity {
rule.getIcmpType(),
rule.getIcmpCode());
this.trafficType = trafficType;
if (FirewallRule.Purpose.Ipv6Firewall.equals(purpose)) {
this.destCidrList = rule.getDestinationCidrList();
}
this.destCidrList = rule.getDestinationCidrList();
}
public FirewallRuleTO(FirewallRule rule, String srcVlanTag, String srcIp, FirewallRule.Purpose purpose, FirewallRule.TrafficType trafficType,

View File

@ -374,13 +374,15 @@ public class LoadBalancerTO {
public static class CounterTO implements Serializable {
private static final long serialVersionUID = 2L;
private final Long id;
private final String uuid;
private final String name;
private final Counter.Source source;
private final String value;
private final String provider;
public CounterTO(Long id, String name, Counter.Source source, String value, String provider) {
public CounterTO(Long id, String uuid, String name, Counter.Source source, String value, String provider) {
this.id = id;
this.uuid = uuid;
this.name = name;
this.source = source;
this.value = value;
@ -391,6 +393,10 @@ public class LoadBalancerTO {
return id;
}
public String getUuid() {
return uuid;
}
public String getName() {
return name;
}
@ -411,12 +417,14 @@ public class LoadBalancerTO {
public static class ConditionTO implements Serializable {
private static final long serialVersionUID = 2L;
private final Long id;
private final String uuid;
private final long threshold;
private final Condition.Operator relationalOperator;
private final CounterTO counter;
public ConditionTO(Long id, long threshold, Condition.Operator relationalOperator, CounterTO counter) {
public ConditionTO(Long id, String uuid, long threshold, Condition.Operator relationalOperator, CounterTO counter) {
this.id = id;
this.uuid = uuid;
this.threshold = threshold;
this.relationalOperator = relationalOperator;
this.counter = counter;
@ -426,6 +434,10 @@ public class LoadBalancerTO {
return id;
}
public String getUuid() {
return uuid;
}
public long getThreshold() {
return threshold;
}
@ -442,6 +454,7 @@ public class LoadBalancerTO {
public static class AutoScalePolicyTO implements Serializable {
private static final long serialVersionUID = 2L;
private final long id;
private final String uuid;
private final int duration;
private final int quietTime;
private final Date lastQuietTime;
@ -449,8 +462,9 @@ public class LoadBalancerTO {
boolean revoked;
private final List<ConditionTO> conditions;
public AutoScalePolicyTO(long id, int duration, int quietTime, Date lastQuietTime, AutoScalePolicy.Action action, List<ConditionTO> conditions, boolean revoked) {
public AutoScalePolicyTO(long id, String uuid, int duration, int quietTime, Date lastQuietTime, AutoScalePolicy.Action action, List<ConditionTO> conditions, boolean revoked) {
this.id = id;
this.uuid = uuid;
this.duration = duration;
this.quietTime = quietTime;
this.lastQuietTime = lastQuietTime;
@ -463,6 +477,10 @@ public class LoadBalancerTO {
return id;
}
public String getUuid() {
return uuid;
}
public int getDuration() {
return duration;
}
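CounterTO, ConditionTO and AutoScalePolicyTO now carry the entity uuid next to the database id, so each constructor gained a parameter. A hedged construction sketch: the ids, uuids, and names are invented, and the Counter.Source and Condition.Operator arguments are left null only to keep the example compact.

import com.cloud.agent.api.to.LoadBalancerTO;

class AutoScaleToSketch {
    static LoadBalancerTO.ConditionTO buildCondition() {
        LoadBalancerTO.CounterTO counter =
                new LoadBalancerTO.CounterTO(10L, "counter-uuid", "cpu-used", null, "80", "provider-x");
        // Threshold of 80 against the counter above; operator elided for brevity.
        return new LoadBalancerTO.ConditionTO(20L, "condition-uuid", 80L, null, counter);
    }
}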

View File

@ -17,6 +17,7 @@
package com.cloud.agent.api.to;
import com.cloud.storage.DataStoreRole;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
public class NfsTO implements DataStoreTO {
@ -41,6 +42,13 @@ public class NfsTO implements DataStoreTO {
}
@Override
public String toString() {
return String.format("NfsTO %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "uuid", "_url", "_role", "nfsVersion"));
}
@Override
public String getUrl() {
return _url;

View File

@ -32,6 +32,9 @@ public class NicTO extends NetworkTO {
Map<NetworkOffering.Detail, String> details;
boolean dpdkEnabled;
Integer mtu;
Long networkId;
String networkSegmentName;
public NicTO() {
super();
@ -127,4 +130,20 @@ public class NicTO extends NetworkTO {
public void setMtu(Integer mtu) {
this.mtu = mtu;
}
public Long getNetworkId() {
return networkId;
}
public void setNetworkId(Long networkId) {
this.networkId = networkId;
}
public String getNetworkSegmentName() {
return networkSegmentName;
}
public void setNetworkSegmentName(String networkSegmentName) {
this.networkSegmentName = networkSegmentName;
}
}

View File

@ -22,6 +22,7 @@ import com.cloud.agent.api.LogLevel;
import com.cloud.agent.api.LogLevel.Log4jLevel;
import com.cloud.storage.DataStoreRole;
import com.cloud.utils.storage.S3.ClientOptions;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
public final class S3TO implements ClientOptions, DataStoreTO {
@ -68,6 +69,13 @@ public final class S3TO implements ClientOptions, DataStoreTO {
}
@Override
public String toString() {
return String.format("S3TO %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "bucketName"));
}
public Long getId() {
return this.id;
}

View File

@ -19,6 +19,7 @@ package com.cloud.agent.api.to;
import com.cloud.agent.api.LogLevel;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StoragePool;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
public class StorageFilerTO {
long id;
@ -73,6 +74,6 @@ public class StorageFilerTO {
@Override
public String toString() {
return new StringBuilder("Pool[").append(id).append("|").append(host).append(":").append(port).append("|").append(path).append("]").toString();
return String.format("Pool %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "host", "port", "path"));
}
}

View File

@ -18,6 +18,7 @@ package com.cloud.agent.api.to;
import com.cloud.storage.DataStoreRole;
import com.cloud.utils.SwiftUtil;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
public class SwiftTO implements DataStoreTO, SwiftUtil.SwiftClientCfg {
Long id;
@ -41,6 +42,13 @@ public class SwiftTO implements DataStoreTO, SwiftUtil.SwiftClientCfg {
this.storagePolicy = storagePolicy;
}
@Override
public String toString() {
return String.format("SwiftTO %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "account", "userName"));
}
public Long getId() {
return id;
}

View File

@ -82,7 +82,10 @@ public class VirtualMachineTO {
Map<String, String> guestOsDetails = new HashMap<String, String>();
Map<String, String> extraConfig = new HashMap<>();
Map<Long, String> networkIdToNetworkNameMap = new HashMap<>();
DeployAsIsInfoTO deployAsIsInfo;
String metadataManufacturer;
String metadataProductName;
public VirtualMachineTO(long id, String instanceName, VirtualMachine.Type type, int cpus, Integer speed, long minRam, long maxRam, BootloaderType bootloader,
String os, boolean enableHA, boolean limitCpuUse, String vncPassword) {
@ -392,6 +395,14 @@ public class VirtualMachineTO {
return extraConfig;
}
public Map<Long, String> getNetworkIdToNetworkNameMap() {
return networkIdToNetworkNameMap;
}
public void setNetworkIdToNetworkNameMap(Map<Long, String> networkIdToNetworkNameMap) {
this.networkIdToNetworkNameMap = networkIdToNetworkNameMap;
}
public String getBootType() {
return bootType;
}
@ -420,6 +431,22 @@ public class VirtualMachineTO {
this.deployAsIsInfo = deployAsIsInfo;
}
public String getMetadataManufacturer() {
return metadataManufacturer;
}
public void setMetadataManufacturer(String metadataManufacturer) {
this.metadataManufacturer = metadataManufacturer;
}
public String getMetadataProductName() {
return metadataProductName;
}
public void setMetadataProductName(String metadataProductName) {
this.metadataProductName = metadataProductName;
}
@Override
public String toString() {
return String.format("VM {id: \"%s\", name: \"%s\", uuid: \"%s\", type: \"%s\"}", id, name, uuid, type);

View File

@ -0,0 +1,38 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.bgp;
import org.apache.cloudstack.acl.InfrastructureEntity;
import org.apache.cloudstack.api.Identity;
import org.apache.cloudstack.api.InternalIdentity;
import java.util.Date;
public interface ASNumber extends InfrastructureEntity, InternalIdentity, Identity {
Long getAccountId();
Long getDomainId();
long getAsNumber();
long getAsNumberRangeId();
long getDataCenterId();
Date getAllocatedTime();
boolean isAllocated();
Long getNetworkId();
Long getVpcId();
Date getCreated();
Date getRemoved();
}

View File

@ -0,0 +1,31 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.bgp;
import org.apache.cloudstack.acl.InfrastructureEntity;
import org.apache.cloudstack.api.Identity;
import org.apache.cloudstack.api.InternalIdentity;
import java.util.Date;
public interface ASNumberRange extends InfrastructureEntity, InternalIdentity, Identity {
long getStartASNumber();
long getEndASNumber();
long getDataCenterId();
Date getCreated();
}

View File

@ -0,0 +1,44 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.bgp;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.network.Network;
import com.cloud.network.vpc.Vpc;
import com.cloud.utils.Pair;
import org.apache.cloudstack.api.command.user.bgp.ListASNumbersCmd;
import org.apache.cloudstack.network.BgpPeer;
import java.util.List;
public interface BGPService {
ASNumberRange createASNumberRange(long zoneId, long startASNumber, long endASNumber);
List<ASNumberRange> listASNumberRanges(Long zoneId);
Pair<List<ASNumber>, Integer> listASNumbers(ListASNumbersCmd cmd);
boolean allocateASNumber(long zoneId, Long asNumber, Long networkId, Long vpcId);
Pair<Boolean, String> releaseASNumber(long zoneId, long asNumber, boolean isReleaseNetworkDestroy);
boolean deleteASRange(long id);
boolean applyBgpPeers(Network network, boolean continueOnError) throws ResourceUnavailableException;
boolean applyBgpPeers(Vpc vpc, boolean continueOnError) throws ResourceUnavailableException;
List<? extends BgpPeer> getBgpPeersForNetwork(Network network);
List<? extends BgpPeer> getBgpPeersForVpc(Vpc vpc);
}
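A hypothetical caller-side sketch for the new BGPService contract: reserve an AS range for a zone, then pin one AS number to a network. All identifiers below are made up, and the service instance is expected to be injected elsewhere.

import com.cloud.bgp.ASNumberRange;
import com.cloud.bgp.BGPService;

class BgpUsageSketch {
    static void reserveAndAllocate(BGPService bgpService) {
        // Reserve 65000-65100 for zone 1, then allocate 65001 to network 42 (no VPC).
        ASNumberRange range = bgpService.createASNumberRange(1L, 65000L, 65100L);
        boolean allocated = bgpService.allocateASNumber(1L, 65001L, 42L, null);
    }
}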

View File

@ -16,6 +16,8 @@
// under the License.
package com.cloud.capacity;
import java.util.List;
import org.apache.cloudstack.api.Identity;
import org.apache.cloudstack.api.InternalIdentity;
@ -35,6 +37,11 @@ public interface Capacity extends InternalIdentity, Identity {
public static final short CAPACITY_TYPE_CPU_CORE = 90;
public static final List<Short> STORAGE_CAPACITY_TYPES = List.of(CAPACITY_TYPE_STORAGE,
CAPACITY_TYPE_STORAGE_ALLOCATED,
CAPACITY_TYPE_SECONDARY_STORAGE,
CAPACITY_TYPE_LOCAL_STORAGE);
public Long getHostOrPoolId();
public Long getDataCenterId();
@ -54,4 +61,6 @@ public interface Capacity extends InternalIdentity, Identity {
public Float getUsedPercentage();
public Long getAllocatedCapacity();
public String getTag();
}
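The new STORAGE_CAPACITY_TYPES constant lets callers replace chained short comparisons with a single membership check. A small sketch; note the argument should stay a short so it boxes to Short and matches the List<Short> contents:

import com.cloud.capacity.Capacity;

class CapacityTypeSketch {
    static boolean isStorageCapacity(short capacityType) {
        // The short autoboxes to Short, matching the element type of the constant list.
        return Capacity.STORAGE_CAPACITY_TYPES.contains(capacityType);
    }
}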

View File

@ -85,5 +85,6 @@ public interface Resource {
long getOwnerId();
ResourceOwnerType getResourceOwnerType();
String getTag();
}

View File

@ -0,0 +1,70 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.cpu;
import org.apache.commons.lang3.StringUtils;
public class CPU {
public enum CPUArch {
x86("i686", 32),
amd64("x86_64", 64),
arm64("aarch64", 64);
private final String type;
private final int bits;
CPUArch(String type, int bits) {
this.type = type;
this.bits = bits;
}
public static CPUArch getDefault() {
return amd64;
}
public String getType() {
return type;
}
public int getBits() {
return bits;
}
public static CPUArch fromType(String type) {
if (StringUtils.isBlank(type)) {
return getDefault();
}
for (CPUArch arch : values()) {
if (arch.type.equals(type)) {
return arch;
}
}
throw new IllegalArgumentException("Unsupported arch type: " + type);
}
public static String getTypesAsCSV() {
StringBuilder sb = new StringBuilder();
for (CPUArch arch : values()) {
sb.append(arch.getType()).append(",");
}
if (sb.length() > 0) {
sb.setLength(sb.length() - 1);
}
return sb.toString();
}
}
}
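A quick usage sketch for the new CPU.CPUArch helper, based only on what the class above defines; "x86_64" is just an illustrative input:

import com.cloud.cpu.CPU;

class CpuArchSketch {
    public static void main(String[] args) {
        CPU.CPUArch arch = CPU.CPUArch.fromType("x86_64"); // resolves to amd64
        System.out.println(arch.getType() + " runs " + arch.getBits() + "-bit");
        System.out.println("Known types: " + CPU.CPUArch.getTypesAsCSV()); // i686,x86_64,aarch64
    }
}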

View File

@ -21,6 +21,10 @@ import org.apache.cloudstack.api.Identity;
import org.apache.cloudstack.api.InternalIdentity;
public interface DedicatedResources extends InfrastructureEntity, InternalIdentity, Identity {
enum Type {
Zone, Pod, Cluster, Host
}
@Override
long getId();

View File

@ -57,6 +57,17 @@ public interface DeploymentClusterPlanner extends DeploymentPlanner {
false,
ConfigKey.Scope.Global);
static final ConfigKey<String> VmAllocationAlgorithm = new ConfigKey<>(
String.class,
"vm.allocation.algorithm",
"Advanced",
"random",
"Order in which hosts within a cluster will be considered for VM/volume allocation. The value can be 'random', 'firstfit', 'userdispersing', 'userconcentratedpod_random', 'userconcentratedpod_firstfit', or 'firstfitleastconsumed'.",
true,
ConfigKey.Scope.Global, null, null, null, null, null,
ConfigKey.Kind.Select,
"random,firstfit,userdispersing,userconcentratedpod_random,userconcentratedpod_firstfit,firstfitleastconsumed");
/**
* This is called to determine list of possible clusters where a virtual
* machine can be deployed.

View File

@ -21,8 +21,12 @@ import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import com.cloud.dc.DataCenter;
import com.cloud.dc.Pod;
import com.cloud.exception.CloudException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.InsufficientServerCapacityException;
import com.cloud.exception.ResourceUnavailableException;
@ -75,7 +79,7 @@ public interface DeploymentPlanner extends Adapter {
public static class ExcludeList implements Serializable {
private static final long serialVersionUID = -482175549460148301L;
protected static Logger LOGGER = LogManager.getLogger(ExcludeList.class);
private Set<Long> _dcIds;
private Set<Long> _podIds;
private Set<Long> _clusterIds;
@ -104,13 +108,26 @@ public interface DeploymentPlanner extends Adapter {
}
}
private void logAvoid(Class<?> scope, CloudException e) {
Long id = null;
if (e instanceof InsufficientCapacityException) {
id = ((InsufficientCapacityException) e).getId();
} else if (e instanceof ResourceUnavailableException) {
id = ((ResourceUnavailableException) e).getResourceId();
} else {
LOGGER.debug("Failed to log avoided component due to unexpected exception type [{}].", e.getMessage());
return;
}
LOGGER.debug("Adding {} [{}] to the avoid set due to [{}].", scope.getSimpleName(), id, e.getMessage());
}
public boolean add(InsufficientCapacityException e) {
Class<?> scope = e.getScope();
if (scope == null) {
return false;
}
logAvoid(scope, e);
if (Host.class.isAssignableFrom(scope)) {
addHost(e.getId());
} else if (Pod.class.isAssignableFrom(scope)) {
@ -128,13 +145,14 @@ public interface DeploymentPlanner extends Adapter {
return true;
}
public boolean add(ResourceUnavailableException e) {
Class<?> scope = e.getScope();
if (scope == null) {
return false;
}
logAvoid(scope, e);
if (Host.class.isAssignableFrom(scope)) {
addHost(e.getResourceId());
} else if (Pod.class.isAssignableFrom(scope)) {

View File

@ -28,8 +28,12 @@ import org.apache.cloudstack.api.response.HostResponse;
import org.apache.cloudstack.api.response.PodResponse;
import org.apache.cloudstack.api.response.ZoneResponse;
import org.apache.cloudstack.config.Configuration;
import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnet;
import org.apache.cloudstack.ha.HAConfig;
import org.apache.cloudstack.network.BgpPeer;
import org.apache.cloudstack.network.Ipv4GuestSubnetNetworkMap;
import org.apache.cloudstack.quota.QuotaTariff;
import org.apache.cloudstack.storage.sharedfs.SharedFS;
import org.apache.cloudstack.storage.object.Bucket;
import org.apache.cloudstack.storage.object.ObjectStore;
import org.apache.cloudstack.usage.Usage;
@ -242,6 +246,8 @@ public class EventTypes {
public static final String EVENT_ROLE_UPDATE = "ROLE.UPDATE";
public static final String EVENT_ROLE_DELETE = "ROLE.DELETE";
public static final String EVENT_ROLE_IMPORT = "ROLE.IMPORT";
public static final String EVENT_ROLE_ENABLE = "ROLE.ENABLE";
public static final String EVENT_ROLE_DISABLE = "ROLE.DISABLE";
public static final String EVENT_ROLE_PERMISSION_CREATE = "ROLE.PERMISSION.CREATE";
public static final String EVENT_ROLE_PERMISSION_UPDATE = "ROLE.PERMISSION.UPDATE";
public static final String EVENT_ROLE_PERMISSION_DELETE = "ROLE.PERMISSION.DELETE";
@ -286,6 +292,7 @@ public class EventTypes {
//register for user API and secret keys
public static final String EVENT_REGISTER_FOR_SECRET_API_KEY = "REGISTER.USER.KEY";
public static final String API_KEY_ACCESS_UPDATE = "API.KEY.ACCESS.UPDATE";
// Template Events
public static final String EVENT_TEMPLATE_CREATE = "TEMPLATE.CREATE";
@ -333,6 +340,7 @@ public class EventTypes {
public static final String EVENT_SNAPSHOT_OFF_PRIMARY = "SNAPSHOT.OFF_PRIMARY";
public static final String EVENT_SNAPSHOT_DELETE = "SNAPSHOT.DELETE";
public static final String EVENT_SNAPSHOT_REVERT = "SNAPSHOT.REVERT";
public static final String EVENT_SNAPSHOT_EXTRACT = "SNAPSHOT.EXTRACT";
public static final String EVENT_SNAPSHOT_POLICY_CREATE = "SNAPSHOTPOLICY.CREATE";
public static final String EVENT_SNAPSHOT_POLICY_UPDATE = "SNAPSHOTPOLICY.UPDATE";
public static final String EVENT_SNAPSHOT_POLICY_DELETE = "SNAPSHOTPOLICY.DELETE";
@ -390,6 +398,11 @@ public class EventTypes {
public static final String EVENT_VLAN_IP_RANGE_RELEASE = "VLAN.IP.RANGE.RELEASE";
public static final String EVENT_VLAN_IP_RANGE_UPDATE = "VLAN.IP.RANGE.UPDATE";
// AS Number
public static final String EVENT_AS_RANGE_CREATE = "AS.RANGE.CREATE";
public static final String EVENT_AS_RANGE_DELETE = "AS.RANGE.DELETE";
public static final String EVENT_AS_NUMBER_RELEASE = "AS.NUMBER.RELEASE";
public static final String EVENT_MANAGEMENT_IP_RANGE_CREATE = "MANAGEMENT.IP.RANGE.CREATE";
public static final String EVENT_MANAGEMENT_IP_RANGE_DELETE = "MANAGEMENT.IP.RANGE.DELETE";
public static final String EVENT_MANAGEMENT_IP_RANGE_UPDATE = "MANAGEMENT.IP.RANGE.UPDATE";
@ -448,6 +461,7 @@ public class EventTypes {
public static final String EVENT_MAINTENANCE_PREPARE_PRIMARY_STORAGE = "MAINT.PREPARE.PS";
// Primary storage pool
public static final String EVENT_UPDATE_PRIMARY_STORAGE = "UPDATE.PS";
public static final String EVENT_ENABLE_PRIMARY_STORAGE = "ENABLE.PS";
public static final String EVENT_DISABLE_PRIMARY_STORAGE = "DISABLE.PS";
public static final String EVENT_SYNC_STORAGE_POOL = "SYNC.STORAGE.POOL";
@ -722,6 +736,8 @@ public class EventTypes {
// SystemVM
public static final String EVENT_LIVE_PATCH_SYSTEMVM = "LIVE.PATCH.SYSTEM.VM";
//Purge resources
public static final String EVENT_PURGE_EXPUNGED_RESOURCES = "PURGE.EXPUNGED.RESOURCES";
// OBJECT STORE
public static final String EVENT_OBJECT_STORE_CREATE = "OBJECT.STORE.CREATE";
@ -738,6 +754,37 @@ public class EventTypes {
public static final String EVENT_QUOTA_TARIFF_DELETE = "QUOTA.TARIFF.DELETE";
public static final String EVENT_QUOTA_TARIFF_UPDATE = "QUOTA.TARIFF.UPDATE";
// Routing
public static final String EVENT_ZONE_IP4_SUBNET_CREATE = "ZONE.IP4.SUBNET.CREATE";
public static final String EVENT_ZONE_IP4_SUBNET_UPDATE = "ZONE.IP4.SUBNET.UPDATE";
public static final String EVENT_ZONE_IP4_SUBNET_DELETE = "ZONE.IP4.SUBNET.DELETE";
public static final String EVENT_ZONE_IP4_SUBNET_DEDICATE = "ZONE.IP4.SUBNET.DEDICATE";
public static final String EVENT_ZONE_IP4_SUBNET_RELEASE = "ZONE.IP4.SUBNET.RELEASE";
public static final String EVENT_IP4_GUEST_SUBNET_CREATE = "IP4.GUEST.SUBNET.CREATE";
public static final String EVENT_IP4_GUEST_SUBNET_DELETE = "IP4.GUEST.SUBNET.DELETE";
public static final String EVENT_ROUTING_IPV4_FIREWALL_RULE_CREATE = "ROUTING.IPV4.FIREWALL.RULE.CREATE";
public static final String EVENT_ROUTING_IPV4_FIREWALL_RULE_UPDATE = "ROUTING.IPV4.FIREWALL.RULE.UPDATE";
public static final String EVENT_ROUTING_IPV4_FIREWALL_RULE_DELETE = "ROUTING.IPV4.FIREWALL.RULE.DELETE";
public static final String EVENT_BGP_PEER_CREATE = "BGP.PEER.CREATE";
public static final String EVENT_BGP_PEER_UPDATE = "BGP.PEER.UPDATE";
public static final String EVENT_BGP_PEER_DELETE = "BGP.PEER.DELETE";
public static final String EVENT_BGP_PEER_DEDICATE = "BGP.PEER.DEDICATE";
public static final String EVENT_BGP_PEER_RELEASE = "BGP.PEER.RELEASE";
public static final String EVENT_NETWORK_BGP_PEER_UPDATE = "NETWORK.BGP.PEER.UPDATE";
public static final String EVENT_VPC_BGP_PEER_UPDATE = "VPC.BGP.PEER.UPDATE";
// SharedFS
public static final String EVENT_SHAREDFS_CREATE = "SHAREDFS.CREATE";
public static final String EVENT_SHAREDFS_START = "SHAREDFS.START";
public static final String EVENT_SHAREDFS_UPDATE = "SHAREDFS.UPDATE";
public static final String EVENT_SHAREDFS_CHANGE_SERVICE_OFFERING = "SHAREDFS.CHANGE.SERVICE.OFFERING";
public static final String EVENT_SHAREDFS_CHANGE_DISK_OFFERING = "SHAREDFS.CHANGE.DISK.OFFERING";
public static final String EVENT_SHAREDFS_STOP = "SHAREDFS.STOP";
public static final String EVENT_SHAREDFS_RESTART = "SHAREDFS.RESTART";
public static final String EVENT_SHAREDFS_DESTROY = "SHAREDFS.DESTROY";
public static final String EVENT_SHAREDFS_EXPUNGE = "SHAREDFS.EXPUNGE";
public static final String EVENT_SHAREDFS_RECOVER = "SHAREDFS.RECOVER";
static {
    // TODO: need a way to force authors adding event types to declare the entity details as well, without breaking
@ -839,6 +886,8 @@ public class EventTypes {
entityEventDetails.put(EVENT_ROLE_UPDATE, Role.class);
entityEventDetails.put(EVENT_ROLE_DELETE, Role.class);
entityEventDetails.put(EVENT_ROLE_IMPORT, Role.class);
entityEventDetails.put(EVENT_ROLE_ENABLE, Role.class);
entityEventDetails.put(EVENT_ROLE_DISABLE, Role.class);
entityEventDetails.put(EVENT_ROLE_PERMISSION_CREATE, RolePermission.class);
entityEventDetails.put(EVENT_ROLE_PERMISSION_UPDATE, RolePermission.class);
entityEventDetails.put(EVENT_ROLE_PERMISSION_DELETE, RolePermission.class);
@ -895,6 +944,7 @@ public class EventTypes {
// Snapshots
entityEventDetails.put(EVENT_SNAPSHOT_CREATE, Snapshot.class);
entityEventDetails.put(EVENT_SNAPSHOT_DELETE, Snapshot.class);
entityEventDetails.put(EVENT_SNAPSHOT_EXTRACT, Snapshot.class);
entityEventDetails.put(EVENT_SNAPSHOT_ON_PRIMARY, Snapshot.class);
entityEventDetails.put(EVENT_SNAPSHOT_OFF_PRIMARY, Snapshot.class);
entityEventDetails.put(EVENT_SNAPSHOT_POLICY_CREATE, SnapshotPolicy.class);
@ -999,6 +1049,7 @@ public class EventTypes {
entityEventDetails.put(EVENT_MAINTENANCE_PREPARE_PRIMARY_STORAGE, Host.class);
// Primary storage pool
entityEventDetails.put(EVENT_UPDATE_PRIMARY_STORAGE, StoragePool.class);
entityEventDetails.put(EVENT_ENABLE_PRIMARY_STORAGE, StoragePool.class);
entityEventDetails.put(EVENT_DISABLE_PRIMARY_STORAGE, StoragePool.class);
entityEventDetails.put(EVENT_CHANGE_STORAGE_POOL_SCOPE, StoragePool.class);
@ -1193,6 +1244,35 @@ public class EventTypes {
entityEventDetails.put(EVENT_QUOTA_TARIFF_CREATE, QuotaTariff.class);
entityEventDetails.put(EVENT_QUOTA_TARIFF_DELETE, QuotaTariff.class);
entityEventDetails.put(EVENT_QUOTA_TARIFF_UPDATE, QuotaTariff.class);
// Routing
entityEventDetails.put(EVENT_ZONE_IP4_SUBNET_CREATE, DataCenterIpv4GuestSubnet.class);
entityEventDetails.put(EVENT_ZONE_IP4_SUBNET_UPDATE, DataCenterIpv4GuestSubnet.class);
entityEventDetails.put(EVENT_ZONE_IP4_SUBNET_DELETE, DataCenterIpv4GuestSubnet.class);
entityEventDetails.put(EVENT_ZONE_IP4_SUBNET_DEDICATE, DataCenterIpv4GuestSubnet.class);
entityEventDetails.put(EVENT_ZONE_IP4_SUBNET_RELEASE, DataCenterIpv4GuestSubnet.class);
entityEventDetails.put(EVENT_IP4_GUEST_SUBNET_CREATE, Ipv4GuestSubnetNetworkMap.class);
entityEventDetails.put(EVENT_IP4_GUEST_SUBNET_DELETE, Ipv4GuestSubnetNetworkMap.class);
entityEventDetails.put(EVENT_ROUTING_IPV4_FIREWALL_RULE_CREATE, FirewallRule.class);
entityEventDetails.put(EVENT_ROUTING_IPV4_FIREWALL_RULE_UPDATE, FirewallRule.class);
entityEventDetails.put(EVENT_ROUTING_IPV4_FIREWALL_RULE_DELETE, FirewallRule.class);
entityEventDetails.put(EVENT_BGP_PEER_CREATE, BgpPeer.class);
entityEventDetails.put(EVENT_BGP_PEER_UPDATE, BgpPeer.class);
entityEventDetails.put(EVENT_BGP_PEER_DELETE, BgpPeer.class);
entityEventDetails.put(EVENT_BGP_PEER_DEDICATE, BgpPeer.class);
entityEventDetails.put(EVENT_BGP_PEER_RELEASE, BgpPeer.class);
// SharedFS
entityEventDetails.put(EVENT_SHAREDFS_CREATE, SharedFS.class);
entityEventDetails.put(EVENT_SHAREDFS_START, SharedFS.class);
entityEventDetails.put(EVENT_SHAREDFS_STOP, SharedFS.class);
entityEventDetails.put(EVENT_SHAREDFS_UPDATE, SharedFS.class);
entityEventDetails.put(EVENT_SHAREDFS_CHANGE_SERVICE_OFFERING, SharedFS.class);
entityEventDetails.put(EVENT_SHAREDFS_CHANGE_DISK_OFFERING, SharedFS.class);
entityEventDetails.put(EVENT_SHAREDFS_RESTART, SharedFS.class);
entityEventDetails.put(EVENT_SHAREDFS_DESTROY, SharedFS.class);
entityEventDetails.put(EVENT_SHAREDFS_EXPUNGE, SharedFS.class);
entityEventDetails.put(EVENT_SHAREDFS_RECOVER, SharedFS.class);
}
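The static block above pairs each event-type constant with the entity class it affects; the TODO notes that nothing currently forces an author to add both. A minimal, self-contained sketch of that declare-and-register pattern follows (the SAMPLE event name and Widget entity are placeholders, not real CloudStack types):

import java.util.HashMap;
import java.util.Map;

// Minimal sketch of the declare-and-register pattern used by EventTypes; all names here are placeholders.
public class SampleEventTypes {
    public static final String EVENT_SAMPLE_WIDGET_CREATE = "SAMPLE.WIDGET.CREATE";

    private static final Map<String, Class<?>> entityEventDetails = new HashMap<>();

    static class Widget { /* placeholder entity class */ }

    static {
        // The constant and its entity mapping are added together, which is what the TODO above
        // would like to see enforced for EventTypes itself.
        entityEventDetails.put(EVENT_SAMPLE_WIDGET_CREATE, Widget.class);
    }
}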
public static boolean isNetworkEvent(String eventType) {

View File

@ -16,6 +16,7 @@
// under the License.
package com.cloud.host;
import com.cloud.cpu.CPU;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.resource.ResourceState;
import com.cloud.utils.fsm.StateObject;
@ -52,9 +53,12 @@ public interface Host extends StateObject<Status>, Identity, Partition, HAResour
return strs;
}
}
public static final String HOST_UEFI_ENABLE = "host.uefi.enable";
public static final String HOST_VOLUME_ENCRYPTION = "host.volume.encryption";
public static final String HOST_INSTANCE_CONVERSION = "host.instance.conversion";
String HOST_UEFI_ENABLE = "host.uefi.enable";
String HOST_VOLUME_ENCRYPTION = "host.volume.encryption";
String HOST_INSTANCE_CONVERSION = "host.instance.conversion";
String HOST_OVFTOOL_VERSION = "host.ovftool.version";
String HOST_VIRTV2V_VERSION = "host.virtv2v.version";
/**
* @return name of the machine.
@ -208,4 +212,6 @@ public interface Host extends StateObject<Status>, Identity, Partition, HAResour
boolean isDisabled();
ResourceState getResourceState();
CPU.CPUArch getArch();
}

View File

@ -17,55 +17,67 @@
package com.cloud.hypervisor;
import com.cloud.storage.Storage.ImageFormat;
import org.apache.commons.lang3.StringUtils;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.EnumSet;
import java.util.stream.Collectors;
import static com.cloud.hypervisor.Hypervisor.HypervisorType.Functionality.DirectDownloadTemplate;
import static com.cloud.hypervisor.Hypervisor.HypervisorType.Functionality.RootDiskSizeOverride;
import static com.cloud.hypervisor.Hypervisor.HypervisorType.Functionality.VmStorageMigration;
import static com.cloud.hypervisor.Hypervisor.HypervisorType.Functionality.VmStorageMigrationWithSnapshots;
public class Hypervisor {
public static class HypervisorType {
public enum Functionality {
DirectDownloadTemplate,
RootDiskSizeOverride,
VmStorageMigration,
VmStorageMigrationWithSnapshots
}
static Map<String, HypervisorType> hypervisorTypeMap;
static Map<HypervisorType, ImageFormat> supportedImageFormatMap;
private static final Map<String, HypervisorType> hypervisorTypeMap = new LinkedHashMap<>();
public static final HypervisorType None = new HypervisorType("None"); //for storage hosts
public static final HypervisorType XenServer = new HypervisorType("XenServer", ImageFormat.VHD, EnumSet.of(RootDiskSizeOverride, VmStorageMigration));
public static final HypervisorType KVM = new HypervisorType("KVM", ImageFormat.QCOW2, EnumSet.of(DirectDownloadTemplate, RootDiskSizeOverride, VmStorageMigration));
public static final HypervisorType VMware = new HypervisorType("VMware", ImageFormat.OVA, EnumSet.of(RootDiskSizeOverride, VmStorageMigration, VmStorageMigrationWithSnapshots));
public static final HypervisorType Hyperv = new HypervisorType("Hyperv");
public static final HypervisorType VirtualBox = new HypervisorType("VirtualBox");
public static final HypervisorType Parralels = new HypervisorType("Parralels");
public static final HypervisorType BareMetal = new HypervisorType("BareMetal");
public static final HypervisorType Simulator = new HypervisorType("Simulator", null, EnumSet.of(RootDiskSizeOverride, VmStorageMigration));
public static final HypervisorType Ovm = new HypervisorType("Ovm", ImageFormat.RAW);
public static final HypervisorType Ovm3 = new HypervisorType("Ovm3", ImageFormat.RAW);
public static final HypervisorType LXC = new HypervisorType("LXC");
public static final HypervisorType Custom = new HypervisorType("Custom", null, EnumSet.of(RootDiskSizeOverride));
public static final HypervisorType Any = new HypervisorType("Any"); /*If you don't care about the hypervisor type*/
private final String name;
private final ImageFormat imageFormat;
private final Set<Functionality> supportedFunctionalities;
public enum HypervisorType {
None, //for storage hosts
XenServer,
KVM,
VMware,
Hyperv,
VirtualBox,
Parralels,
BareMetal,
Simulator,
Ovm,
Ovm3,
LXC,
Custom,
public HypervisorType(String name) {
this(name, null, EnumSet.noneOf(Functionality.class));
}
Any; /*If you don't care about the hypervisor type*/
public HypervisorType(String name, ImageFormat imageFormat) {
this(name, imageFormat, EnumSet.noneOf(Functionality.class));
}
static {
hypervisorTypeMap = new HashMap<>();
hypervisorTypeMap.put("xenserver", HypervisorType.XenServer);
hypervisorTypeMap.put("kvm", HypervisorType.KVM);
hypervisorTypeMap.put("vmware", HypervisorType.VMware);
hypervisorTypeMap.put("hyperv", HypervisorType.Hyperv);
hypervisorTypeMap.put("virtualbox", HypervisorType.VirtualBox);
hypervisorTypeMap.put("parallels", HypervisorType.Parralels);
hypervisorTypeMap.put("baremetal", HypervisorType.BareMetal);
hypervisorTypeMap.put("simulator", HypervisorType.Simulator);
hypervisorTypeMap.put("ovm", HypervisorType.Ovm);
hypervisorTypeMap.put("lxc", HypervisorType.LXC);
hypervisorTypeMap.put("any", HypervisorType.Any);
hypervisorTypeMap.put("ovm3", HypervisorType.Ovm3);
hypervisorTypeMap.put("custom", HypervisorType.Custom);
supportedImageFormatMap = new HashMap<>();
supportedImageFormatMap.put(HypervisorType.XenServer, ImageFormat.VHD);
supportedImageFormatMap.put(HypervisorType.KVM, ImageFormat.QCOW2);
supportedImageFormatMap.put(HypervisorType.VMware, ImageFormat.OVA);
supportedImageFormatMap.put(HypervisorType.Ovm, ImageFormat.RAW);
supportedImageFormatMap.put(HypervisorType.Ovm3, ImageFormat.RAW);
public HypervisorType(String name, ImageFormat imageFormat, Set<Functionality> supportedFunctionalities) {
this.name = name;
this.imageFormat = imageFormat;
this.supportedFunctionalities = supportedFunctionalities;
if (name.equals("Parralels")){ // typo in the original code
hypervisorTypeMap.put("parallels", this);
} else {
hypervisorTypeMap.putIfAbsent(name.toLowerCase(Locale.ROOT), this);
}
}
public static HypervisorType getType(String hypervisor) {
@ -75,24 +87,77 @@ public class Hypervisor {
hypervisorTypeMap.getOrDefault(hypervisor.toLowerCase(Locale.ROOT), HypervisorType.None));
}
public static HypervisorType[] values() {
return hypervisorTypeMap.values().toArray(HypervisorType[]::new).clone();
}
public static HypervisorType valueOf(String name) {
if (StringUtils.isBlank(name)) {
return null;
}
HypervisorType hypervisorType = hypervisorTypeMap.get(name.toLowerCase(Locale.ROOT));
if (hypervisorType == null) {
throw new IllegalArgumentException("HypervisorType '" + name + "' not found");
}
return hypervisorType;
}
public static List<HypervisorType> getListOfHypervisorsSupportingFunctionality(Functionality functionality) {
return hypervisorTypeMap.values().stream()
.filter(hypervisor -> hypervisor.supportedFunctionalities.contains(functionality))
.collect(Collectors.toList());
}
/**
         * Returns the display name of the hypervisor type. For the custom hypervisor, the value of the
         * 'hypervisor.custom.display.name' setting is returned; otherwise, the hypervisor name is returned.
*/
public String getHypervisorDisplayName() {
return !Hypervisor.HypervisorType.Custom.equals(this) ?
this.toString() :
HypervisorGuru.HypervisorCustomDisplayName.value();
return HypervisorType.Custom.equals(this) ? HypervisorGuru.HypervisorCustomDisplayName.value() : name;
}
/**
* This method really needs to be part of the properties of the hypervisor type itself.
*
* @param hyperType
* @return
*/
public static ImageFormat getSupportedImageFormat(HypervisorType hyperType) {
return supportedImageFormatMap.getOrDefault(hyperType, null);
public ImageFormat getSupportedImageFormat() {
return imageFormat;
}
public String name() {
return name;
}
/**
         * This capability check is now part of the properties of the hypervisor type itself.
         *
         * @return true if the hypervisor plugin supports the specified functionality
*/
public boolean isFunctionalitySupported(Functionality functionality) {
return supportedFunctionalities.contains(functionality);
}
@Override
public int hashCode() {
return Objects.hash(name);
}
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o == null || getClass() != o.getClass()) {
return false;
}
HypervisorType that = (HypervisorType) o;
return Objects.equals(name, that.name);
}
@Override
public String toString() {
return name;
}
}
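With HypervisorType converted from an enum to a registry-backed class, callers resolve types by name and query capabilities per type, and a plugin-defined type registers itself simply by being constructed (see the constructor above). A rough usage sketch; the "Acme" type is hypothetical and only illustrates the registration path:

import java.util.EnumSet;

import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.hypervisor.Hypervisor.HypervisorType.Functionality;
import com.cloud.storage.Storage.ImageFormat;

public class HypervisorTypeUsageSketch {
    // Hypothetical plugin-defined type: constructing it registers "acme" in the shared name map,
    // just like the built-in XenServer/KVM/VMware constants above.
    public static final HypervisorType Acme =
            new HypervisorType("Acme", ImageFormat.QCOW2, EnumSet.of(Functionality.RootDiskSizeOverride));

    public static void main(String[] args) {
        // Case-insensitive lookup backed by the registry map; unknown names fall back to None.
        HypervisorType kvm = HypervisorType.getType("kvm");

        // Capability and format queries replace hard-coded per-hypervisor lists.
        boolean canOverride = kvm.isFunctionalitySupported(Functionality.RootDiskSizeOverride); // true for KVM
        ImageFormat format = kvm.getSupportedImageFormat();                                      // QCOW2 for KVM

        System.out.println(kvm + ": rootDiskSizeOverride=" + canOverride + ", format=" + format);
    }
}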

View File

@ -25,6 +25,7 @@ import com.cloud.utils.component.Adapter;
public interface KubernetesServiceHelper extends Adapter {
ControlledEntity findByUuid(String uuid);
ControlledEntity findByVmId(long vmId);
void checkVmCanBeDestroyed(UserVm userVm);
void cleanupForAccount(Account account);
}

View File

@ -97,4 +97,6 @@ public interface IpAddress extends ControlledEntity, Identity, InternalIdentity,
void setRuleState(State ruleState);
boolean isForSystemVms();
}

View File

@ -58,7 +58,7 @@ public interface Ipv6Service extends PluggableService, Configurable {
Pair<Integer, Integer> getUsedTotalIpv6SubnetForZone(long zoneId);
Pair<String, String> preAllocateIpv6SubnetForNetwork(long zoneId) throws ResourceAllocationException;
Pair<String, String> preAllocateIpv6SubnetForNetwork(DataCenter zone) throws ResourceAllocationException;
void assignIpv6SubnetToNetwork(String subnet, long networkId);

View File

@ -103,7 +103,7 @@ public interface Network extends ControlledEntity, StateObject<Network.State>, I
public static final Service Vpn = new Service("Vpn", Capability.SupportedVpnProtocols, Capability.VpnTypes);
public static final Service Dhcp = new Service("Dhcp", Capability.ExtraDhcpOptions);
public static final Service Dns = new Service("Dns", Capability.AllowDnsSuffixModification);
public static final Service Gateway = new Service("Gateway");
public static final Service Gateway = new Service("Gateway", Capability.RedundantRouter);
public static final Service Firewall = new Service("Firewall", Capability.SupportedProtocols, Capability.MultipleIps, Capability.TrafficStatistics,
Capability.SupportedTrafficDirection, Capability.SupportedEgressProtocols);
public static final Service Lb = new Service("Lb", Capability.SupportedLBAlgorithms, Capability.SupportedLBIsolation, Capability.SupportedProtocols,
@ -205,6 +205,8 @@ public interface Network extends ControlledEntity, StateObject<Network.State>, I
//Add Tungsten Fabric provider
public static final Provider Tungsten = new Provider("Tungsten", false);
public static final Provider Nsx = new Provider("Nsx", false);
private final String name;
private final boolean isExternal;
@ -410,12 +412,16 @@ public interface Network extends ControlledEntity, StateObject<Network.State>, I
String getGateway();
void setGateway(String gateway);
// "cidr" is the Cloudstack managed address space, all CloudStack managed vms get IP address from "cidr",
// In general "cidr" also serves as the network CIDR
// But in case IP reservation is configured for a Guest network, "networkcidr" is the Effective network CIDR for that network,
// "cidr" will still continue to be the effective address space for CloudStack managed vms in that Guest network
String getCidr();
void setCidr(String cidr);
// "networkcidr" is the network CIDR of the guest network which uses IP reservation.
// It is the summation of "cidr" and the reservedIPrange(the address space used for non CloudStack purposes).
// For networks not configured with IP reservation, "networkcidr" is always null
@ -427,6 +433,8 @@ public interface Network extends ControlledEntity, StateObject<Network.State>, I
long getDataCenterId();
long getAccountId();
long getNetworkOfferingId();
@Override
@ -499,4 +507,6 @@ public interface Network extends ControlledEntity, StateObject<Network.State>, I
Integer getPublicMtu();
Integer getPrivateMtu();
Integer getNetworkCidrSize();
}

View File

@ -173,6 +173,8 @@ public interface NetworkModel {
boolean isProviderSupportServiceInNetwork(long networkId, Service service, Provider provider);
boolean isAnyServiceSupportedInNetwork(long networkId, Provider provider, Service... services);
boolean isProviderEnabledInPhysicalNetwork(long physicalNetowrkId, String providerName);
String getNetworkTag(HypervisorType hType, Network network);
@ -356,4 +358,8 @@ public interface NetworkModel {
void verifyIp6DnsPair(final String ip6Dns1, final String ip6Dns2);
boolean isSecurityGroupSupportedForZone(Long zoneId);
boolean checkSecurityGroupSupportForNetwork(Account account, DataCenter zone, List<Long> networkIds,
List<Long> securityGroupsIds);
}

View File

@ -22,10 +22,9 @@ import java.util.Date;
import com.cloud.network.Networks.BroadcastDomainType;
import com.cloud.network.Networks.Mode;
import com.cloud.network.Networks.TrafficType;
import org.apache.log4j.Logger;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
public class NetworkProfile implements Network {
static final Logger s_logger = Logger.getLogger(NetworkProfile.class);
private final long id;
private final String uuid;
private final long dataCenterId;
@ -43,8 +42,8 @@ public class NetworkProfile implements Network {
private final Mode mode;
private final BroadcastDomainType broadcastDomainType;
private TrafficType trafficType;
private final String gateway;
private final String cidr;
private String gateway;
private String cidr;
private final String networkCidr;
private final String ip6Gateway;
private final String ip6Cidr;
@ -64,6 +63,7 @@ public class NetworkProfile implements Network {
private final String guruName;
private boolean strechedL2Subnet;
private String externalId;
private Integer networkCidrSize;
public NetworkProfile(Network network) {
id = network.getId();
@ -100,6 +100,7 @@ public class NetworkProfile implements Network {
isRedundant = network.isRedundant();
isRollingRestart = network.isRollingRestart();
externalId = network.getExternalId();
networkCidrSize = network.getNetworkCidrSize();
}
@Override
@ -212,11 +213,21 @@ public class NetworkProfile implements Network {
return gateway;
}
@Override
public void setGateway(String gateway) {
this.gateway = gateway;
}
@Override
public String getCidr() {
return cidr;
}
@Override
public void setCidr(String cidr) {
this.cidr = cidr;
}
@Override
public String getNetworkCidr() {
return networkCidr;
@ -369,4 +380,16 @@ public class NetworkProfile implements Network {
return null;
}
@Override
public Integer getNetworkCidrSize() {
return networkCidrSize;
}
@Override
public String toString() {
return String.format("NetworkProfile %s",
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
this, "id", "uuid", "name", "networkOfferingId"));
}
}

View File

@ -19,6 +19,7 @@ package com.cloud.network;
import java.util.List;
import java.util.Map;
import com.cloud.dc.DataCenter;
import org.apache.cloudstack.acl.ControlledEntity;
import org.apache.cloudstack.api.command.admin.address.ReleasePodIpCmdByAdmin;
import org.apache.cloudstack.api.command.admin.network.DedicateGuestVlanRangeCmd;
@ -56,6 +57,7 @@ import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.Nic;
import com.cloud.vm.NicSecondaryIp;
import org.apache.cloudstack.network.element.InternalLoadBalancerElementService;
/**
* The NetworkService interface is the "public" api to entities that make requests to the orchestration engine
@ -88,6 +90,8 @@ public interface NetworkService {
IpAddress reserveIpAddress(Account account, Boolean displayIp, Long ipAddressId) throws ResourceAllocationException;
IpAddress reserveIpAddressWithVlanDetail(Account account, DataCenter zone, Boolean displayIp, String vlanDetailKey) throws ResourceAllocationException;
boolean releaseReservedIpAddress(long ipAddressId) throws InsufficientAddressCapacityException;
boolean releaseIpAddress(long ipAddressId) throws InsufficientAddressCapacityException;
@ -259,4 +263,9 @@ public interface NetworkService {
PublicIpQuarantine updatePublicIpAddressInQuarantine(UpdateQuarantinedIpCmd cmd);
void removePublicIpAddressFromQuarantine(RemoveQuarantinedIpCmd cmd);
InternalLoadBalancerElementService getInternalLoadBalancerElementByType(VirtualRouterProvider.Type type);
InternalLoadBalancerElementService getInternalLoadBalancerElementByNetworkServiceProviderId(long networkProviderId);
InternalLoadBalancerElementService getInternalLoadBalancerElementById(long providerId);
List<InternalLoadBalancerElementService> getInternalLoadBalancerElements();
}

View File

@ -78,7 +78,7 @@ public class Networks {
}
@Override
public String getValueFrom(URI uri) {
return uri.getAuthority();
return uri == null ? null : uri.getAuthority();
}
},
Vswitch("vs", String.class), LinkLocal(null, null), Vnet("vnet", Long.class), Storage("storage", Integer.class), Lswitch("lswitch", String.class) {
@ -96,7 +96,7 @@ public class Networks {
*/
@Override
public String getValueFrom(URI uri) {
return uri.getSchemeSpecificPart();
return uri == null ? null : uri.getSchemeSpecificPart();
}
},
Mido("mido", String.class), Pvlan("pvlan", String.class),
@ -128,7 +128,8 @@ public class Networks {
},
UnDecided(null, null),
OpenDaylight("opendaylight", String.class),
TUNGSTEN("tf", String.class);
TUNGSTEN("tf", String.class),
NSX("nsx", String.class);
private final String scheme;
private final Class<?> type;
@ -175,7 +176,7 @@ public class Networks {
* @return the scheme as BroadcastDomainType
*/
public static BroadcastDomainType getSchemeValue(URI uri) {
return toEnumValue(uri.getScheme());
return toEnumValue(uri == null ? null : uri.getScheme());
}
/**
@ -189,7 +190,7 @@ public class Networks {
if (com.cloud.dc.Vlan.UNTAGGED.equalsIgnoreCase(str)) {
return Native;
}
return getSchemeValue(new URI(str));
return getSchemeValue(str == null ? null : new URI(str));
}
/**
@ -218,7 +219,7 @@ public class Networks {
* @return the host part as String
*/
public String getValueFrom(URI uri) {
return uri.getHost();
return uri == null ? null : uri.getHost();
}
/**
@ -241,7 +242,7 @@ public class Networks {
* @throws URISyntaxException the string is not even an uri
*/
public static String getValue(String uriString) throws URISyntaxException {
return getValue(new URI(uriString));
return getValue(uriString == null ? null : new URI(uriString));
}
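The null checks added above make the URI helpers tolerate missing input instead of throwing a NullPointerException deep inside the accessors. A small sketch of the usual parsing path for a broadcast URI; the expected results assume the standard vlan scheme definition:

import java.net.URI;
import java.net.URISyntaxException;

import com.cloud.network.Networks.BroadcastDomainType;

public class BroadcastUriSketch {
    public static void main(String[] args) throws URISyntaxException {
        // Typical parse of a broadcast URI into its scheme and value parts.
        URI uri = new URI("vlan://123");
        BroadcastDomainType scheme = BroadcastDomainType.getSchemeValue(uri); // Vlan
        String value = BroadcastDomainType.getValue("vlan://123");            // "123" (the authority part)

        // With the guards above, a null URI/string should propagate a null result
        // to the caller rather than triggering an NPE inside the enum accessors.
        String none = BroadcastDomainType.getValue((String) null);

        System.out.println(scheme + " -> " + value + ", null input -> " + none);
    }
}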
/**

View File

@ -21,7 +21,7 @@ import org.apache.cloudstack.api.InternalIdentity;
public interface VirtualRouterProvider extends InternalIdentity, Identity {
public enum Type {
VirtualRouter, ElasticLoadBalancerVm, VPCVirtualRouter, InternalLbVm, NetScalerVm
VirtualRouter, ElasticLoadBalancerVm, VPCVirtualRouter, InternalLbVm, NetScalerVm, Nsx
}
public Type getType();

View File

@ -0,0 +1,31 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.network.element;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.network.Network;
import com.cloud.network.vpc.Vpc;
import org.apache.cloudstack.network.BgpPeer;
import java.util.List;
public interface BgpServiceProvider extends NetworkElement {
boolean applyBgpPeers(Vpc vpc, Network network, List<? extends BgpPeer> bgpPeers) throws ResourceUnavailableException;
}

View File

@ -48,4 +48,7 @@ public interface LoadBalancingServiceProvider extends NetworkElement, IpDeployin
List<LoadBalancerTO> updateHealthChecks(Network network, List<LoadBalancingRule> lbrules);
boolean handlesOnlyRulesInTransitionState();
default void expungeLbVmRefs(List<Long> vmIds, Long batchSize) {
}
}

View File

@ -21,6 +21,7 @@ import java.util.List;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.network.Network;
import com.cloud.network.vpc.NetworkACLItem;
import com.cloud.network.vpc.Vpc;
public interface NetworkACLServiceProvider extends NetworkElement {
@ -32,4 +33,6 @@ public interface NetworkACLServiceProvider extends NetworkElement {
*/
boolean applyNetworkACLs(Network config, List<? extends NetworkACLItem> rules) throws ResourceUnavailableException;
boolean reorderAclRules(Vpc vpc, List<? extends Network> networks, List<? extends NetworkACLItem> networkACLItems);
}

View File

@ -22,6 +22,7 @@ import com.cloud.deploy.DeployDestination;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.network.IpAddress;
import com.cloud.network.vpc.NetworkACLItem;
import com.cloud.network.vpc.PrivateGateway;
import com.cloud.network.vpc.StaticRouteProfile;
@ -52,4 +53,6 @@ public interface VpcProvider extends NetworkElement {
boolean applyStaticRoutes(Vpc vpc, List<StaticRouteProfile> routes) throws ResourceUnavailableException;
boolean applyACLItemsToPrivateGw(PrivateGateway gateway, List<? extends NetworkACLItem> rules) throws ResourceUnavailableException;
boolean updateVpcSourceNatIp(Vpc vpc, IpAddress address);
}

View File

@ -79,20 +79,24 @@ public interface NetworkGuru extends Adapter {
* be used to make determination can be isolation methods, services
* provided on the guest network and the service provider that's on the
* guest network.
*
* <p>
* If a network is already fully substantiated with the necessary resources
* during this design phase, then the state should be set to Setup. If
* the resources are not allocated at this point, the state should be set
* to Allocated.
*
* @param offering network offering that contains the package of services
* the end user intends to use on that network.
* @param plan where is this network being deployed.
* @param offering network offering that contains the package of services
* the end user intends to use on that network.
* @param plan where is this network being deployed.
* @param userSpecified user specified parameters for this network.
* @param owner owner of this network.
* @param name
* @param vpcId
* @param owner owner of this network.
* @return Network
*/
Network design(NetworkOffering offering, DeploymentPlan plan, Network userSpecified, Account owner);
Network design(NetworkOffering offering, DeploymentPlan plan, Network userSpecified, String name, Long vpcId, Account owner);
void setup(Network network, long networkId);
/**
* For guest networks that are in Allocated state after the design stage,

View File

@ -63,6 +63,10 @@ public class LoadBalancingRule {
return lb.getId();
}
public LoadBalancer getLb() {
return lb;
}
public String getName() {
return lb.getName();
}

View File

@ -0,0 +1,34 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.network.nsx;
import org.apache.cloudstack.api.Identity;
import org.apache.cloudstack.api.InternalIdentity;
public interface NsxProvider extends InternalIdentity, Identity {
String getHostname();
String getPort();
String getProviderName();
String getUsername();
long getZoneId();
String getTier0Gateway();
String getEdgeCluster();
String getTransportZone();
}

View File

@ -0,0 +1,36 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.network.nsx;
import com.cloud.network.IpAddress;
import com.cloud.network.vpc.Vpc;
import org.apache.cloudstack.framework.config.ConfigKey;
public interface NsxService {
ConfigKey<Integer> NSX_API_FAILURE_RETRIES = new ConfigKey<>("Advanced", Integer.class,
"nsx.api.failure.retries", "30",
"Number of retries for NSX API operations in case of failures",
true, ConfigKey.Scope.Zone);
ConfigKey<Integer> NSX_API_FAILURE_INTERVAL = new ConfigKey<>("Advanced", Integer.class,
"nsx.api.failure.interval", "60",
"Waiting time (in seconds) before retrying an NSX API operation in case of failure",
true, ConfigKey.Scope.Zone);
boolean createVpcNetwork(Long zoneId, long accountId, long domainId, Long vpcId, String vpcName, boolean sourceNatEnabled);
boolean updateVpcSourceNatIp(Vpc vpc, IpAddress address);
}
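Together these two zone-scoped settings define a simple retry policy for NSX API calls. A hedged sketch of how a caller might consume them, assuming the usual ConfigKey#valueIn accessor; performNsxApiCall is a placeholder, not a real client method:

import com.cloud.network.nsx.NsxService;

public class NsxRetrySketch {
    // Placeholder for the real NSX API invocation; returns true on success.
    private boolean performNsxApiCall() {
        return false;
    }

    public boolean callWithRetries(long zoneId) throws InterruptedException {
        // Resolve the zone-scoped values for the zone the operation targets.
        int retries = NsxService.NSX_API_FAILURE_RETRIES.valueIn(zoneId);
        int intervalSeconds = NsxService.NSX_API_FAILURE_INTERVAL.valueIn(zoneId);

        for (int attempt = 0; attempt <= retries; attempt++) {
            if (performNsxApiCall()) {
                return true;
            }
            if (attempt < retries) {
                Thread.sleep(intervalSeconds * 1000L);
            }
        }
        return false;
    }
}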

View File

@ -18,6 +18,7 @@ package com.cloud.network.vpc;
import java.util.Date;
import com.cloud.offering.NetworkOffering;
import org.apache.cloudstack.api.Identity;
import org.apache.cloudstack.api.InternalIdentity;
@ -29,6 +30,8 @@ public interface VpcOffering extends InternalIdentity, Identity {
public static final String defaultVPCOfferingName = "Default VPC offering";
public static final String defaultVPCNSOfferingName = "Default VPC offering with Netscaler";
public static final String redundantVPCOfferingName = "Redundant VPC offering";
public static final String DEFAULT_VPC_NAT_NSX_OFFERING_NAME = "VPC offering with NSX - NAT Mode";
public static final String DEFAULT_VPC_ROUTE_NSX_OFFERING_NAME = "VPC offering with NSX - Route Mode";
/**
*
@ -53,6 +56,10 @@ public interface VpcOffering extends InternalIdentity, Identity {
*/
boolean isDefault();
boolean isForNsx();
NetworkOffering.NetworkMode getNetworkMode();
/**
* @return service offering id used by VPC virtual router
*/
@ -73,4 +80,8 @@ public interface VpcOffering extends InternalIdentity, Identity {
Date getRemoved();
Date getCreated();
NetworkOffering.RoutingMode getRoutingMode();
Boolean isSpecifyAsNumber();
}

View File

@ -24,6 +24,7 @@ import org.apache.cloudstack.api.command.admin.vpc.CreateVPCOfferingCmd;
import org.apache.cloudstack.api.command.admin.vpc.UpdateVPCOfferingCmd;
import org.apache.cloudstack.api.command.user.vpc.ListVPCOfferingsCmd;
import com.cloud.offering.NetworkOffering;
import com.cloud.utils.Pair;
import com.cloud.utils.net.NetUtils;
@ -36,7 +37,10 @@ public interface VpcProvisioningService {
VpcOffering createVpcOffering(String name, String displayText, List<String> supportedServices,
Map<String, List<String>> serviceProviders,
Map serviceCapabilitystList, NetUtils.InternetProtocol internetProtocol,
Long serviceOfferingId, List<Long> domainIds, List<Long> zoneIds, VpcOffering.State state);
Long serviceOfferingId, Boolean forNsx, NetworkOffering.NetworkMode networkMode,
List<Long> domainIds, List<Long> zoneIds, VpcOffering.State state,
NetworkOffering.RoutingMode routingMode, boolean specifyAsNumber);
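The extended createVpcOffering signature threads the NSX and routed-mode options through in a single call. A hedged call-site sketch; the argument values, the offering name, and the injected service reference are illustrative only:

import java.util.List;
import java.util.Map;

import com.cloud.network.vpc.VpcOffering;
import com.cloud.network.vpc.VpcProvisioningService;
import com.cloud.offering.NetworkOffering;

public class RoutedVpcOfferingSketch {
    private VpcProvisioningService vpcProvisioningService; // assumed to be injected

    public VpcOffering createRoutedOffering(List<String> services, Map<String, List<String>> providers) {
        // A routed (non-NATted) VPC offering with dynamic routing that requires an AS number,
        // offered in all domains and zones (null lists) and enabled immediately.
        return vpcProvisioningService.createVpcOffering(
                "routed-vpc-offering",               // name
                "Routed VPC offering (sketch)",      // displayText
                services,                            // supportedServices
                providers,                           // serviceProviders
                null,                                // serviceCapabilityList
                null,                                // internetProtocol
                null,                                // serviceOfferingId (null: use the default)
                false,                               // forNsx
                NetworkOffering.NetworkMode.ROUTED,  // networkMode
                null,                                // domainIds
                null,                                // zoneIds
                VpcOffering.State.Enabled,           // state
                NetworkOffering.RoutingMode.Dynamic, // routingMode
                true);                               // specifyAsNumber
    }
}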
Pair<List<? extends VpcOffering>,Integer> listVpcOfferings(ListVPCOfferingsCmd cmd);

View File

@ -56,7 +56,8 @@ public interface VpcService {
* @throws ResourceAllocationException TODO
*/
Vpc createVpc(long zoneId, long vpcOffId, long vpcOwnerId, String vpcName, String displayText, String cidr, String networkDomain,
String ip4Dns1, String ip4Dns2, String ip6Dns1, String ip6Dns2, Boolean displayVpc, Integer publicMtu)
String ip4Dns1, String ip4Dns2, String ip6Dns1, String ip6Dns2, Boolean displayVpc, Integer publicMtu, Integer cidrSize,
Long asNumber, List<Long> bgpPeerIds)
throws ResourceAllocationException;
/**

View File

@ -39,7 +39,7 @@ public interface RemoteAccessVpnService {
VpnUser addVpnUser(long vpnOwnerId, String userName, String password);
boolean removeVpnUser(long vpnOwnerId, String userName, Account caller);
boolean removeVpnUser(Account vpnOwner, String userName, Account caller);
List<? extends VpnUser> listVpnUsers(long vpnOwnerId, String userName);

View File

@ -43,6 +43,15 @@ public interface NetworkOffering extends InfrastructureEntity, InternalIdentity,
InternalLbProvider, PublicLbProvider, servicepackageuuid, servicepackagedescription, PromiscuousMode, MacAddressChanges, ForgedTransmits, MacLearning, RelatedNetworkOffering, domainid, zoneid, pvlanType, internetProtocol
}
public enum NetworkMode {
NATTED,
ROUTED
}
enum RoutingMode {
Static, Dynamic
}
public final static String SystemPublicNetwork = "System-Public-Network";
public final static String SystemControlNetwork = "System-Control-Network";
public final static String SystemManagementNetwork = "System-Management-Network";
@ -52,6 +61,11 @@ public interface NetworkOffering extends InfrastructureEntity, InternalIdentity,
public final static String DefaultSharedNetworkOfferingWithSGService = "DefaultSharedNetworkOfferingWithSGService";
public static final String DEFAULT_TUNGSTEN_SHARED_NETWORK_OFFERING_WITH_SGSERVICE = "DefaultTungstenSharedNetworkOfferingWithSGService";
public static final String DEFAULT_NAT_NSX_OFFERING_FOR_VPC = "DefaultNATNSXNetworkOfferingForVpc";
public static final String DEFAULT_NAT_NSX_OFFERING_FOR_VPC_WITH_ILB = "DefaultNATNSXNetworkOfferingForVpcWithInternalLB";
public static final String DEFAULT_ROUTED_NSX_OFFERING_FOR_VPC = "DefaultRoutedNSXNetworkOfferingForVpc";
public static final String DEFAULT_NAT_NSX_OFFERING = "DefaultNATNSXNetworkOffering";
public static final String DEFAULT_ROUTED_NSX_OFFERING = "DefaultRoutedNSXNetworkOffering";
public final static String QuickCloudNoServices = "QuickCloudNoServices";
public final static String DefaultIsolatedNetworkOfferingWithSourceNatService = "DefaultIsolatedNetworkOfferingWithSourceNatService";
public final static String OvsIsolatedNetworkOfferingWithSourceNatService = "OvsIsolatedNetworkOfferingWithSourceNatService";
@ -90,6 +104,10 @@ public interface NetworkOffering extends InfrastructureEntity, InternalIdentity,
boolean isForTungsten();
boolean isForNsx();
NetworkMode getNetworkMode();
TrafficType getTrafficType();
boolean isSpecifyVlan();
@ -151,4 +169,8 @@ public interface NetworkOffering extends InfrastructureEntity, InternalIdentity,
String getServicePackage();
Date getCreated();
RoutingMode getRoutingMode();
Boolean isSpecifyAsNumber();
}

View File

@ -33,6 +33,9 @@ public interface ServiceOffering extends InfrastructureEntity, InternalIdentity,
static final String internalLbVmDefaultOffUniqueName = "Cloud.Com-InternalLBVm";
    // leaving cloud.com references as these are identifiers and not real-world addresses (check against DB)
static final String PURGE_DB_ENTITIES_KEY = "purge.db.entities";
enum State {
Inactive, Active,
}

View File

@ -16,6 +16,7 @@
// under the License.
package com.cloud.org;
import com.cloud.cpu.CPU;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.org.Managed.ManagedState;
import org.apache.cloudstack.kernel.Partition;
@ -38,4 +39,6 @@ public interface Cluster extends Grouping, Partition {
AllocationState getAllocationState();
ManagedState getManagedState();
CPU.CPUArch getArch();
}

View File

@ -19,6 +19,7 @@ package com.cloud.region.ha;
import java.util.List;
import com.cloud.user.Account;
import org.apache.cloudstack.api.command.user.region.ha.gslb.AssignToGlobalLoadBalancerRuleCmd;
import org.apache.cloudstack.api.command.user.region.ha.gslb.CreateGlobalLoadBalancerRuleCmd;
import org.apache.cloudstack.api.command.user.region.ha.gslb.DeleteGlobalLoadBalancerRuleCmd;
@ -39,7 +40,7 @@ public interface GlobalLoadBalancingRulesService {
GlobalLoadBalancerRule updateGlobalLoadBalancerRule(UpdateGlobalLoadBalancerRuleCmd updateGslbCmd);
boolean revokeAllGslbRulesForAccount(com.cloud.user.Account caller, long accountId) throws com.cloud.exception.ResourceUnavailableException;
boolean revokeAllGslbRulesForAccount(com.cloud.user.Account caller, Account account) throws com.cloud.exception.ResourceUnavailableException;
/*
* methods for managing sites participating in global load balancing

View File

@ -32,6 +32,8 @@ public interface ManagementServerHostStats {
String getManagementServerHostUuid();
long getManagementServerRunId();
long getSessions();
double getCpuUtilization();

View File

@ -20,7 +20,6 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import com.cloud.user.UserData;
import org.apache.cloudstack.api.command.admin.cluster.ListClustersCmd;
import org.apache.cloudstack.api.command.admin.config.ListCfgGroupsByCmd;
import org.apache.cloudstack.api.command.admin.config.ListCfgsByCmd;
@ -66,6 +65,7 @@ import org.apache.cloudstack.api.command.user.vm.GetVMPasswordCmd;
import org.apache.cloudstack.api.command.user.vmgroup.UpdateVMGroupCmd;
import org.apache.cloudstack.config.Configuration;
import org.apache.cloudstack.config.ConfigurationGroup;
import org.apache.cloudstack.framework.config.ConfigKey;
import com.cloud.alert.Alert;
import com.cloud.capacity.Capacity;
@ -85,6 +85,7 @@ import com.cloud.storage.GuestOSHypervisor;
import com.cloud.storage.GuestOsCategory;
import com.cloud.storage.StoragePool;
import com.cloud.user.SSHKeyPair;
import com.cloud.user.UserData;
import com.cloud.utils.Pair;
import com.cloud.utils.Ternary;
import com.cloud.vm.InstanceGroup;
@ -98,6 +99,14 @@ import com.cloud.vm.VirtualMachine.Type;
public interface ManagementService {
static final String Name = "management-server";
ConfigKey<Boolean> JsInterpretationEnabled = new ConfigKey<>("Hidden"
, Boolean.class
, "js.interpretation.enabled"
, "false"
, "Enable/Disable all JavaScript interpretation related functionalities to create or update Javascript rules."
, false
, ConfigKey.Scope.Global);
/**
* returns the a map of the names/values in the configuration table
*
@ -481,4 +490,6 @@ public interface ManagementService {
Pair<Boolean, String> patchSystemVM(PatchSystemVMCmd cmd);
void checkJsInterpretationAllowedIfNeededForParameterValue(String paramName, boolean paramValue);
}
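JsInterpretationEnabled is a hidden, global kill switch for everything that interprets JavaScript rules, and checkJsInterpretationAllowedIfNeededForParameterValue is the hook callers go through. A minimal sketch of the kind of guard an implementation could apply; the surrounding class and the exception choice are assumptions:

import com.cloud.server.ManagementService;

public class JsInterpretationGuard {

    // Rejects an API parameter that would enable JS interpretation while the global switch is off.
    public void checkJsInterpretationAllowed(String paramName, boolean paramValue) {
        if (paramValue && !ManagementService.JsInterpretationEnabled.value()) {
            throw new IllegalStateException(String.format(
                    "Parameter [%s] requires JavaScript interpretation, but [%s] is disabled globally.",
                    paramName, ManagementService.JsInterpretationEnabled.key()));
        }
    }
}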

View File

@ -18,6 +18,7 @@ package com.cloud.server;
public interface ResourceManagerUtil {
long getResourceId(String resourceId, ResourceTag.ResourceObjectType resourceType);
long getResourceId(String resourceId, ResourceTag.ResourceObjectType resourceType, boolean checkAccess);
String getUuid(String resourceId, ResourceTag.ResourceObjectType resourceType);
ResourceTag.ResourceObjectType getResourceType(String resourceTypeStr);
void checkResourceAccessible(Long accountId, Long domainId, String exceptionMessage);

View File

@ -20,6 +20,7 @@ package com.cloud.storage;
import com.cloud.utils.exception.CloudRuntimeException;
public enum DataStoreRole {
Primary("primary"), Image("image"), ImageCache("imagecache"), Backup("backup"), Object("object");

View File

@ -150,6 +150,17 @@ public class Storage {
Storage
}
/**
     * StoragePoolTypes carry some details about the format and capabilities of a storage pool. While not necessarily in
     * 1:1 correspondence with PrimaryDataStoreDriver (and, for the KVM agent, KVMStoragePool and StorageAdaptor)
     * implementations, the type is often used to decide which storage plugin or storage command to call, so new storage
     * plugins may need to add a StoragePoolType. This can be done by adding it below, or by creating a new public static
     * final instance of StoragePoolType in the plugin itself, which registers it with the map.
*
* Note that if the StoragePoolType is for KVM and defined in plugin code rather than below, care must be taken to
* ensure this is available on the agent side as well. This is best done by defining the StoragePoolType in a common
* package available on both management server and agent plugin jars.
*/
public static enum StoragePoolType {
Filesystem(false, true, EncryptionSupport.Hypervisor), // local directory
NetworkFilesystem(true, true, EncryptionSupport.Hypervisor), // NFS
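The note above says a storage plugin can add its own StoragePoolType by creating a public static final instance that registers itself with the map. The constructor it refers to is not shown in this hunk, so the sketch below assumes a signature mirroring the (shared, over-provisioning) attributes of the built-in values and is illustrative only:

import com.cloud.storage.Storage.StoragePoolType;

// Hypothetical plugin-side registration, following the note above. The constructor arguments
// are assumptions; the real signature may differ.
public class AcmeStoragePlugin {

    // Defined in a package shared by the management server and the KVM agent jars,
    // so both sides resolve the same pool type.
    public static final StoragePoolType AcmePool =
            new StoragePoolType("AcmePool", true /* shared */, false /* over-provisioning */);
}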

View File

@ -95,6 +95,10 @@ public interface StorageService {
StoragePool updateStoragePool(UpdateStoragePoolCmd cmd) throws IllegalArgumentException;
StoragePool enablePrimaryStoragePool(Long id);
StoragePool disablePrimaryStoragePool(Long id);
StoragePool getStoragePool(long id);
boolean deleteImageStore(DeleteImageStoreCmd cmd);

View File

@ -26,4 +26,7 @@ public interface StorageStats {
* @return bytes capacity of the storage server
*/
public long getCapacityBytes();
Long getCapacityIops();
Long getUsedIops();
}

View File

@ -40,7 +40,7 @@ public interface Upload extends InternalIdentity, Identity {
}
public static enum Type {
VOLUME, TEMPLATE, ISO
VOLUME, SNAPSHOT, TEMPLATE, ISO
}
public static enum Mode {

View File

@ -17,6 +17,7 @@
package com.cloud.storage;
import java.util.Date;
import java.util.List;
import org.apache.cloudstack.api.InternalIdentity;
@ -25,6 +26,8 @@ public interface VMTemplateStorageResourceAssoc extends InternalIdentity {
UNKNOWN, DOWNLOAD_ERROR, NOT_DOWNLOADED, DOWNLOAD_IN_PROGRESS, DOWNLOADED, ABANDONED, UPLOADED, NOT_UPLOADED, UPLOAD_ERROR, UPLOAD_IN_PROGRESS, CREATING, CREATED, BYPASSED
}
List<Status> PENDING_DOWNLOAD_STATES = List.of(Status.NOT_DOWNLOADED, Status.DOWNLOAD_IN_PROGRESS);
String getInstallPath();
long getTemplateId();

View File

@ -30,6 +30,8 @@ import com.cloud.utils.fsm.StateObject;
public interface Volume extends ControlledEntity, Identity, InternalIdentity, BasedOn, StateObject<Volume.State>, Displayable {
static final long DISK_OFFERING_SUITABILITY_CHECK_VOLUME_ID = -1;
// Managed storage volume parameters (specified in the compute/disk offering for PowerFlex)
String BANDWIDTH_LIMIT_IN_MBPS = "bandwidthLimitInMbps";
String IOPS_LIMIT = "iopsLimit";
@ -269,11 +271,13 @@ public interface Volume extends ControlledEntity, Identity, InternalIdentity, Ba
void setExternalUuid(String externalUuid);
public Long getPassphraseId();
Long getPassphraseId();
public void setPassphraseId(Long id);
void setPassphraseId(Long id);
public String getEncryptFormat();
String getEncryptFormat();
public void setEncryptFormat(String encryptFormat);
void setEncryptFormat(String encryptFormat);
boolean isDeleteProtection();
}

Some files were not shown because too many files have changed in this diff.